diff --git a/.npmrc b/.npmrc new file mode 100644 index 00000000..8938bf04 --- /dev/null +++ b/.npmrc @@ -0,0 +1,2 @@ +//registry.npmjs.org/:_authToken=${NPM_TOKEN} + diff --git a/.yarnrc.yml b/.yarnrc.yml index 69bbd873..d5ccf9d2 100644 --- a/.yarnrc.yml +++ b/.yarnrc.yml @@ -1,3 +1,5 @@ +nodeLinker: node-modules + npmScopes: cloudgraph: npmAlwaysAuth: true diff --git a/README.md b/README.md index 3cb36c9e..f1f08c9f 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,10 @@ cg scan aws gcp azure | Benchmark | | --------------------------- -| [CIS Amazon Web Services Foundations 1.2.0](https://www.npmjs.com/package/@cloudgraph/policy-pack-aws-cis-1.2.0) | -| [CIS Google Cloud Platform Foundations 1.2.0](https://www.npmjs.com/package/@cloudgraph/policy-pack-gcp-cis-1.2.0) | -| [CIS Microsoft Azure Foundations 1.3.1](https://www.npmjs.com/package/@cloudgraph/policy-pack-azure-cis-1.3.1) | -| [PCI Data Security Standard version 3.2.1](https://www.npmjs.com/package/@cloudgraph/policy-pack-aws-pci-dss-3.2.1) | +| [CIS Amazon Web Services Foundations 1.2.0](https://www.npmjs.com/package/@cloudgraph/policy-pack-aws-cis-1.2.0) | +| [CIS Amazon Web Services Foundations 1.3.0](https://www.npmjs.com/package/@cloudgraph/policy-pack-aws-cis-1.3.0) | +| [CIS Amazon Web Services Foundations 1.4.0](https://www.npmjs.com/package/@cloudgraph/policy-pack-aws-cis-1.4.0) | +| [CIS Google Cloud Platform Foundations 1.2.0](https://www.npmjs.com/package/@cloudgraph/policy-pack-gcp-cis-1.2.0) | +| [CIS Microsoft Azure Foundations 1.3.1](https://www.npmjs.com/package/@cloudgraph/policy-pack-azure-cis-1.3.1) | +| [AWS PCI Data Security Standard version 3.2.1](https://www.npmjs.com/package/@cloudgraph/policy-pack-aws-pci-dss-3.2.1) | +| [NIST 800-53 Rev. 
4 for Amazon Web Services](https://www.npmjs.com/package/@cloudgraph/policy-pack-aws-nist-800-53-rev4) | diff --git a/package.json b/package.json index 89e60658..640c88cb 100644 --- a/package.json +++ b/package.json @@ -18,6 +18,7 @@ "@semantic-release/github": "^8.0.1", "@semantic-release/npm": "^9.0.1", "@semrel-extra/npm": "^1.2.0", + "npm": "^8.8.0", "semantic-release": "^19.0.2" }, "resolutions": { @@ -31,7 +32,7 @@ "singleQuote": true }, "scripts": { - "release": "NPM_CONFIG_IGNORE_SCRIPTS='true' NODE_JQ_SKIP_INSTALL_BINARY='true' multi-semantic-release", + "release": "NODE_AUTH_TOKEN=$NPM_TOKEN NPM_CONFIG_IGNORE_SCRIPTS='true' NODE_JQ_SKIP_INSTALL_BINARY='true' multi-semantic-release --ignore-scripts", "clean": "yarn workspaces foreach -p run clean", "lint": "yarn workspaces foreach run lint", "lint:fix": "yarn workspaces foreach run lint:fix", diff --git a/src/aws/cis-1.3.0/.releaserc.yml b/src/aws/cis-1.3.0/.releaserc.yml index 70788a7a..cba7db8c 100644 --- a/src/aws/cis-1.3.0/.releaserc.yml +++ b/src/aws/cis-1.3.0/.releaserc.yml @@ -1,10 +1,13 @@ --- branches: - - name: main - - name: beta - prerelease: true - name: alpha + channel: alpha prerelease: true + - name: beta + channel: beta + prerelease: true + - name: main + plugins: - "@semantic-release/commit-analyzer" - "@semantic-release/release-notes-generator" @@ -12,24 +15,23 @@ plugins: - changelogFile: CHANGELOG.md - - "@semantic-release/git" - assets: - - CHANGELOG.md - - package.json - - - "@semantic-release/npm" - - npmPublish: false - - "@semantic-release/gitlab" + - CHANGELOG.md + - package.json + - - "@semrel-extra/npm" + - npmPublish: true + - "@semantic-release/github" verifyConditions: - "@semantic-release/changelog" - - "@semantic-release/gitlab" + - "@semantic-release/github" + - "@semrel-extra/npm" prepare: - "@semantic-release/changelog" - - "@semantic-release/npm" + - "@semrel-extra/npm" - - "@semantic-release/git" - - message: "chore(publish): ${nextRelease.version} 
\n\n${nextRelease.notes}" + - message: "chore(release): ${nextRelease.version} \n\n${nextRelease.notes}" publish: - - "@semantic-release/gitlab" -release: - noCi: true + - "@semantic-release/github" + - "@semrel-extra/npm" success: false fail: false -repositoryUrl: https://gitlab.com/auto-cloud/cloudgraph/policy-packs.git tagFormat: "${version}" diff --git a/src/aws/cis-1.3.0/README.md b/src/aws/cis-1.3.0/README.md index fcaf6656..a8a89c25 100644 --- a/src/aws/cis-1.3.0/README.md +++ b/src/aws/cis-1.3.0/README.md @@ -60,12 +60,39 @@ Policy Pack based on the [AWS Foundations 1.3.0](https://docs.aws.amazon.com/aud | AWS CIS 1.1 | Maintain current contact details | | AWS CIS 1.2 | Ensure security contact information is registered | | AWS CIS 1.3 | Ensure security questions are registered in the AWS account | +| AWS CIS 1.4 | Ensure no 'root' user account access key exists | +| AWS CIS 1.5 | Ensure MFA is enabled for the 'root user' account | +| AWS CIS 1.6 | Ensure hardware MFA is enabled for the 'root' user account | +| AWS CIS 1.7 | Eliminate use of the root user for administrative and daily tasks | +| AWS CIS 1.8 | Ensure IAM password policy requires minimum length of 14 or greater | +| AWS CIS 1.9 | Ensure IAM password policy prevents password reuse | +| AWS CIS 1.10 | Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password | | AWS CIS 1.11 | Do not setup access keys during initial user setup for all IAM users that have a console password | +| AWS CIS 1.12 | Ensure credentials unused for 90 days or greater are disabled | +| AWS CIS 1.13 | Ensure there is only one active access key available for any single IAM user | +| AWS CIS 1.14 | Ensure access keys are rotated every 90 days or less | +| AWS CIS 1.15 | Ensure IAM Users Receive Permissions Only Through Groups | +| AWS CIS 1.16 | Ensure IAM policies that allow full "*:*" administrative privileges are not attached | +| AWS CIS 1.17 | Ensure a support role has been created 
to manage incidents with AWS Support | | AWS CIS 1.18 | Ensure IAM instance roles are used for AWS resource access from instances | +| AWS CIS 1.19 | Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed | +| AWS CIS 1.20 | Ensure that S3 Buckets are configured with 'Block public access (bucket settings)' | +| AWS CIS 1.21 | Ensure that IAM Access analyzer is enabled | | AWS CIS 1.22 | Ensure IAM users are managed centrally via identity federation or AWS Organizations for multi-account environments | | AWS CIS 2.1.1 | Ensure all S3 buckets employ encryption-at-rest | | AWS CIS 2.1.2 | Ensure S3 Bucket Policy allows HTTPS requests | | AWS CIS 2.2.1 | Ensure EBS volume encryption is enabled | +| AWS CIS 3.1 | Ensure CloudTrail is enabled in all regions | +| AWS CIS 3.2 | Ensure CloudTrail log file validation is enabled | +| AWS CIS 3.3 | Ensure the S3 bucket used to store CloudTrail logs is not publicly accessible | +| AWS CIS 3.4 | Ensure CloudTrail trails are integrated with CloudWatch Logs | +| AWS CIS 3.5 | Ensure AWS Config is enabled in all regions | +| AWS CIS 3.6 | Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket | +| AWS CIS 3.7 | Ensure CloudTrail logs are encrypted at rest using KMS CMKs | +| AWS CIS 3.8 | Ensure rotation for customer created CMKs is enabled | +| AWS CIS 3.9 | Ensure VPC flow logging is enabled in all VPCs | +| AWS CIS 3.10 | Ensure that Object-level logging for write events is enabled for S3 bucket | +| AWS CIS 3.11 | Ensure that Object-level logging for read events is enabled for S3 bucket | | AWS CIS 4.1 | Ensure a log metric filter and alarm exist for unauthorized API calls | | AWS CIS 4.2 | Ensure a log metric filter and alarm exist for Management Console sign-in without MFA | | AWS CIS 4.3 | Ensure a log metric filter and alarm exist for usage of 'root' account | @@ -81,4 +108,7 @@ Policy Pack based on the [AWS Foundations 1.3.0](https://docs.aws.amazon.com/aud | AWS CIS 4.13 | Ensure a 
log metric filter and alarm exist for route table changes | | AWS CIS 4.14 | Ensure a log metric filter and alarm exist for VPC changes | | AWS CIS 4.15 | Ensure a log metric filter and alarm exists for AWS Organizations changes | +| AWS CIS 5.1 | Ensure no Network ACLs allow ingress from 0.0.0.0/0 to remote server administration ports | +| AWS CIS 5.2 | Ensure no security groups allow ingress from 0.0.0.0/0 to remote server administration ports | +| AWS CIS 5.3 | Ensure the default security group of every VPC restricts all traffic | | AWS CIS 5.4 | Ensure routing tables for VPC peering are "least access" | diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.10.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.10.ts new file mode 100644 index 00000000..dca712e1 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.10.ts @@ -0,0 +1,94 @@ +// AWS CIS 1.2.0 Rule equivalent 1.2 +export default { + id: 'aws-cis-1.3.0-1.10', + title: 'AWS CIS 1.10 Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password', + + description: 'Multi-Factor Authentication (MFA) adds an extra layer of authentication assurance beyond traditional credentials. With MFA enabled, when a user signs in to the AWS Console, they will be prompted for their user name and password as well as for an authentication code from their physical or virtual MFA token. It is recommended that MFA be enabled for all accounts that have a console password.', + + audit: `Perform the following to determine if a MFA device is enabled for all IAM users having a console password: + + **From Console:** + + 1. Open the IAM console at https://console.aws.amazon.com/iam/. + 2. In the left pane, select *Users* + 3. If the *MFA* or *Password age* columns are not visible in the table, click the gear icon at the upper right corner of the table and ensure a checkmark is next to both, then click *Close*. + 4. 
Ensure that for each user where the *Password age* column shows a password age, the MFA column shows *Virtual*, *U2F Security Key*, or *Hardware*. + + **From Command Line:** + + 1. Run the following command (OSX/Linux/UNIX) to generate a list of all IAM users along with their password and MFA status: + + aws iam generate-credential-report + + aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,8 + + 2. The output of this command will produce a table similar to the following: + + user,password_enabled,mfa_active + elise,false,false + brandon,true,true + rakesh,false,false + helene,false,false + paras,true,true + anitha,false,false + + 3. For any column having *password_enabled* set to *true*, ensure *mfa_active* is also set to *true*.`, + + rationale: 'Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that displays a time-sensitive key and have knowledge of a credential.', + + remediation: `Perform the following to enable MFA: + + **From Console:** + + 1. Sign in to the AWS Management Console and open the IAM console at 'https://console.aws.amazon.com/iam/' + 2. In the left pane, select *Users*. + 3. In the *User Name* list, choose the name of the intended MFA user. + 4. Choose the *Security Credentials* tab, and then choose *Manage MFA Device*. + 5. In the *Manage MFA Device wizard*, choose *Virtual MFA* device, and then choose *Continue*. + + IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. + + 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications at https://aws.amazon.com/iam/details/mfa/#Virtual_MFA_Applications). 
If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). + 7. Determine whether the MFA app supports QR codes, and then do one of the following: + + - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code. + - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. + + When you are finished, the virtual MFA device starts generating one-time passwords. + + 8. In the *Manage MFA Device wizard*, in the *MFA Code 1 box*, type the *one-time password* that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second *one-time password* into the *MFA Code 2 box*. + 9. Click *Assign MFA*.`, + + references: [ + 'https://tools.ietf.org/html/rfc6238', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#enable-mfa-for-privileged-users', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html', + 'CCE-78901-6', + 'https://blogs.aws.amazon.com/security/post/Tx2SJJYE082KBUK/How-to-Delegate-Management-of-Multi-Factor-Authentication-to-AWS-IAM-Users', + ], + gql: `{ + queryawsIamUser { + id + arn + accountId + __typename + passwordEnabled + mfaActive + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'high', + conditions: { + or: [ + { + path: '@.passwordEnabled', + equal: false, + }, + { + path: '@.mfaActive', + equal: true, + } + ] + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.12.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.12.ts new file mode 100644 index 00000000..72e1f6ba --- /dev/null +++ 
b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.12.ts @@ -0,0 +1,128 @@ +// AWS CIS 1.2.0 Rule equivalent 1.3 +export default { + id: 'aws-cis-1.3.0-1.12', + title: 'AWS CIS 1.12 Ensure credentials unused for 90 days or greater are disabled', + + description: 'AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused in 90 or greater days be deactivated or removed.', + + audit: `Perform the following to determine if unused credentials exist: + + **From Console:** + + 1. Login to the AWS Management Console + 2. Click *Services* + 3. Click *IAM* + 4. Click on *Users* + 5. Click the *Settings* (gear) icon. + 6. Select *Console last sign-in*, *Access key last used*, and *Access Key Id* + 7. Click on *Close* + 8. Check and ensure that *Console last sign-in* is less than 90 days ago. + + **Note** - *Never* means the user has never logged in. + + 9. Check and ensure that *Access key age* is less than 90 days and that *Access key last used* does not say *None* + + If the user hasn't signed into the Console in the last 90 days or Access keys are over 90 days old refer to the remediation. + + **From Command Line:** + **Download Credential Report:** + + 1. Run the following commands: + + aws iam generate-credential-report + + aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,4,5,6,9,10,11,14,15,16 + + **Ensure unused credentials do not exist:** + + 2. For each user having *password_enabled* set to *TRUE*, ensure *password_last_used_date* is less than *90* days ago. + + - When *password_enabled* is set to *TRUE* and password_last_used is set to No_Information , ensure *password_last_changed* is less than *90* days ago. + + 3. For each user having an *access_key_1_active* or *access_key_2_active* to *TRUE* , ensure the corresponding *access_key_n_last_used_date* is less than *90* days ago. 
+ + - When a user having an *access_key_x_active* (where x is 1 or 2) to *TRUE* and corresponding access_key_x_last_used_date is set to *N/A*', *ensure* access_key_x_last_rotated is less than 90 days ago.`, + + rationale: 'Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.', + + remediation: `**From Console:** + Perform the following to manage Unused Password (IAM user console access) + + 1. Login to the AWS Management Console: + 2. Click *Services* + 3. Click *IAM* + 4. Click on *Users* + 5. Click on *Security Credentials* + 6. Select user whose *Console last sign-in* is greater than 90 days + 7. Click *Security credentials* + 8. In section *Sign-in credentials*, *Console password* click *Manage* + 9. Under Console Access select *Disable* + 10. Click *Apply* + + Perform the following to deactivate Access Keys: + + 1. Login to the AWS Management Console: + 2. Click *Services* + 3. Click *IAM* + 4. Click on *Users* + 5. Click on *Security Credentials* + 6. Select any access keys that are over 90 days old and that have been used and + - Click on *Make Inactive* + 7. 
Select any access keys that are over 90 days old and that have not been used and + - Click the X to *Delete*`, + + references: [ + 'CCE-78900-8', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#remove-credentials', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_admin-change-user.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html', + ], + gql: `{ + queryawsIamUser { + id + arn + accountId + __typename + passwordLastUsed + accessKeyData { + lastUsedDate + } + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'medium', + conditions: { + or: [ + { + and: [ + { + path: '@.accessKeyData', + isEmpty: true + }, + { + not: { + path: '@.passwordLastUsed', + notIn: [null, 'N/A', ''] + } + } + ] + }, + { + and: [ + { + value: { daysAgo: {}, path: '@.passwordLastUsed' }, + lessThanInclusive: 90, + }, + { + path: '@.accessKeyData', + array_any: { + value: { daysAgo: {}, path: '[*].lastUsedDate' }, + lessThanInclusive: 90, + }, + }, + ], + }, + ], + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.13.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.13.ts new file mode 100644 index 00000000..61eb33bb --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.13.ts @@ -0,0 +1,92 @@ +export default { + id: 'aws-cis-1.3.0-1.13', + title: 'AWS CIS 1.13 Ensure there is only one active access key available for any single IAM user', + + description: 'Access keys are long-term credentials for an IAM user or the AWS account \'root\' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API (directly or using the AWS SDK)', + + audit: `**From Console:** + + 1. Sign in to the AWS Management Console and navigate to IAM dashboard at https://console.aws.amazon.com/iam/. + 2. In the left navigation panel, choose Users. + 3. 
Click on the IAM user name that you want to examine. + 4. On the IAM user configuration page, select Security Credentials tab. + 5. Under Access Keys section, in the Status column, check the current status for each access key associated with the IAM user. If the selected IAM user has more than one access key activated then the users access configuration does not adhere to security best practices and the risk of accidental exposures increases. + + - Repeat steps no. 3 – 5 for each IAM user in your AWS account. + + **From Command Line:** + + 1. Run list-users command to list all IAM users within your account: + + aws iam list-users --query "Users[*].UserName + + 2. Run list-access-keys command using the IAM user name list to return the current status of each access key associated with the selected IAM user: + + aws iam list-access-keys --user-name + + The command output should expose the metadata ("Username", "AccessKeyId", "Status", "CreateDate") for each access key on that user account. + + 3. Check the Status property value for each key returned to determine each keys current state. If the Status property value for more than one IAM access key is set to Active, the user access configuration does not adhere to this recommendation, refer to the remediation below. + + - Repeat steps no. 2 and 3 for each IAM user in your AWS account." + + The command output should return an array that contains all your IAM user names.`, + + rationale: 'Access keys are long-term credentials for an IAM user or the AWS account \'root\' user. You can use access keys to sign programmatic requests to the AWS CLI or AWS API. One of the best ways to protect your account is to not allow users to have multiple access keys.', + + remediation: `**From Console:** + + 1. Sign in to the AWS Management Console and navigate to IAM dashboard at https://console.aws.amazon.com/iam/. + 2. In the left navigation panel, choose Users. + 3. Click on the IAM user name that you want to examine. + 4. 
On the IAM user configuration page, select Security Credentials tab. + 5. In Access Keys section, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. + 6. In the same Access Keys section, identify your non-operational access keys (other than the chosen one) and deactivate it by clicking the Make Inactive link. + 7. If you receive the Change Key Status confirmation box, click Deactivate to switch off the selected key. + 8. Repeat steps no. 3 – 7 for each IAM user in your AWS account. + + **From Command Line:** + + 1. Using the IAM user and access key information provided in the Audit CLI, choose one access key that is less than 90 days old. This should be the only active key used by this IAM user to access AWS resources programmatically. Test your application(s) to make sure that the chosen access key is working. + 2. Run the update-access-key command below using the IAM user name and the non-operational access key IDs to deactivate the unnecessary key(s). Refer to the Audit section to identify the unnecessary access key ID for the selected IAM user + + **Note** - the command does not return any output: + + aws iam update-access-key --access-key-id --status Inactive --user-name + + 3. To confirm that the selected access key pair has been successfully deactivated run the list-access-keys audit command again for that IAM User: + + aws iam list-access-keys --user-name + + - The command output should expose the metadata for each access key associated with the IAM user. If the non-operational key pair(s) Status is set to Inactive, the key has been successfully deactivated and the IAM user access configuration adheres now to this recommendation. + + 4. Repeat steps no. 
1 – 3 for each IAM user in your AWS account.`, + + references: [ + 'https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html', + ], + gql: `{ + queryawsIamUser { + id + arn + accountId + __typename + accessKeyData { + status + } + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'medium', + conditions: { + jq: '.accessKeyData | map(select(.status == "Active")) | { "oneOrLess" : (length <= 1) }', + path: '@', + and: [ + { + path: '@.oneOrLess', + equal: true, + }, + ], + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.14.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.14.ts new file mode 100644 index 00000000..c8549537 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.14.ts @@ -0,0 +1,116 @@ +// AWS CIS 1.2.0 Rule equivalent 1.4 +export default { + id: 'aws-cis-1.3.0-1.14', + title: 'AWS CIS 1.14 Ensure access keys are rotated every 90 days or less', + + description: 'Access keys consist of an access key ID and secret access key, which are used to sign programmatic requests that you make to AWS. AWS users need their own access keys to make programmatic calls to AWS from the AWS Command Line Interface (AWS CLI), Tools for Windows PowerShell, the AWS SDKs, or direct HTTP calls using the APIs for individual AWS services. It is recommended that all access keys be regularly rotated.', + + audit: `Perform the following to determine if access keys are rotated as prescribed: + + **From Console:** + + 1. Go to Management Console (https://console.aws.amazon.com/iam) + 2. Click on Users + 3. Click setting icon + 4. Select “Console last sign-in” + 5. Click Close + 6. Ensure that “Access key age” is less than 90 days ago. + + **Note:** "None" in the "Access key age" means the user has not used the access key. 
+ + **From Command Line:** + + aws iam generate-credential-report + + aws iam get-credential-report --query 'Content' --output text | base64 -d + + The access_key_1_last_rotated field in this file notes The date and time, in ISO 8601 date-time format, when the user's access key was created or last changed. If the user does not have an active access key, the value in this field is N/A (not applicable).`, + + rationale: `Rotating access keys will reduce the window of opportunity for an access key that is associated with a compromised or terminated account to be used. + + Access keys should be rotated to ensure that data cannot be accessed with an old key which might have been lost, cracked, or stolen.`, + + remediation: `Perform the following to rotate access keys: + + **From Console:** + + 1. Go to Management Console (https://console.aws.amazon.com/iam) + 2. Click on Users + 3. Click on Security Credentials + 4. As an Administrator + - Click on Make Inactive for keys that have not been rotated in 90 Days + 5. As an IAM User + - Click on Make Inactive or Delete for keys which have not been rotated or used in 90 Days + 6. Click on Create Access Key + 7. Update programmatic call with new Access Key credentials + + **From Command Line:** + + 1. While the first access key is still active, create a second access key, which is active by default. Run the following command: + + aws iam create-access-key + + At this point, the user has two active access keys. + + 2. Update all applications and tools to use the new access key. + 3. Determine whether the first access key is still in use by using this command: + + aws iam get-access-key-last-used + + 4. One approach is to wait several days and then check the old access key for any use before proceeding. + + Even if step Step 3 indicates no use of the old key, it is recommended that you do not immediately delete the first access key. 
Instead, change the state of the first access key to Inactive using this command: + + aws iam update-access-key + + 5. Use only the new access key to confirm that your applications are working. Any applications and tools that still use the original access key will stop working at this point because they no longer have access to AWS resources. If you find such an application or tool, you can switch its state back to Active to reenable the first access key. Then return to step Step 2 and update this application to use the new key. + + 6. After you wait some period of time to ensure that all applications and tools have been updated, you can delete the first access key with this command: + + aws iam delete-access-key`, + + references: [ + 'CCE-78902-4', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#rotate-credentials', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html', + 'https://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html', + ], + gql: `{ + queryawsIamUser { + id + arn + accountId + __typename + accessKeyData { + status + lastRotated + } + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'medium', + conditions: { + or: [ + { + path: '@.accessKeyData', + isEmpty: true + }, + { + path: '@.accessKeyData', + array_any: { + and: [ + { + value: { daysAgo: {}, path: '[*].lastRotated' }, + lessThanInclusive: 90, + }, + { + path: '[*].status', + equal: 'Active', + }, + ], + }, + }, + ], + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.15.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.15.ts new file mode 100644 index 00000000..2c2cff75 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.15.ts @@ -0,0 +1,83 @@ +// AWS CIS 1.2.0 Rule equivalent 1.16 +export default { + id: 'aws-cis-1.3.0-1.15', + title: 'AWS CIS 1.15 Ensure IAM Users Receive Permissions Only 
Through Groups', + + description: `IAM users are granted access to services, functions, and data through IAM policies. There are three ways to define policies for a user: 1) Edit the user policy directly, aka an inline, or user, policy; 2) attach a policy directly to a user; 3) add the user to an IAM group that has an attached policy. + + Only the third implementation is recommended.`, + + audit: `Perform the following to determine if an inline policy is set or a policy is directly attached to users: + + 1. Run the following to get a list of IAM users: + + aws iam list-users --query 'Users[*].UserName' --output text + + 2. For each user returned, run the following command to determine if any policies are attached to them: + + aws iam list-attached-user-policies --user-name + aws iam list-user-policies --user-name + + 3. If any policies are returned, the user has an inline policy or direct policy attachment.`, + + rationale: 'Assigning IAM policy only through groups unifies permissions management to a single, flexible layer consistent with organizational functional roles. By unifying permissions management, the likelihood of excessive permissions is reduced.', + + remediation: `Perform the following to create an IAM group and assign a policy to it: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam/. + 2. In the navigation pane, click Groups and then click Create New Group . + 3. In the Group Name box, type the name of the group and then click Next Step . + 4. In the list of policies, select the check box for each policy that you want to apply to all members of the group. Then click Next Step . + 5. Click Create Group + + Perform the following to add a user to a given group: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam/. + 2. In the navigation pane, click Groups + 3. Select the group to add a user to + 4. Click Add Users To Group + 5. 
Select the users to be added to the group + 6. Click Add Users + + Perform the following to remove a direct association between a user and policy: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam/. + 2. In the left navigation pane, click on Users + 3. For each user: + - Select the user + - Click on the Permissions tab + - Expand Permissions policies + - Click X for each policy; then click Detach or Remove (depending on policy type)`, + + references: [ + 'http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html', + 'http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html', + 'CCE-78912-3', + ], + gql: `{ + queryawsIamUser { + id + arn + accountId + __typename + iamAttachedPolicies { + name + }, + inlinePolicies + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.iamAttachedPolicies', + isEmpty: true, + }, + { + path: '@.inlinePolicies', + isEmpty: true, + }, + ], + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.16.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.16.ts new file mode 100644 index 00000000..49aa3f0b --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.16.ts @@ -0,0 +1,102 @@ +// AWS CIS 1.2.0 Rule equivalent 1.22 +export default { + id: 'aws-cis-1.3.0-1.16', + title: 'AWS CIS 1.16 Ensure IAM policies that allow full "*:*" administrative privileges are not attached', + + description: 'IAM policies are the means by which privileges are granted to users, groups, or roles. It is recommended and considered a standard security advice to grant least privilege -that is, granting only the permissions required to perform a task. 
Determine what users need to do and then craft policies for them that let the users perform only those tasks, instead of allowing full administrative privileges.', + + audit: `Perform the following to determine what policies are created: + + **From Command Line:** + + 1. Run the following to get a list of IAM policies: + + aws iam list-policies --only-attached --output text + + 2. For each policy returned, run the following command to determine if any policies is allowing full administrative privileges on the account: + + aws iam get-policy-version --policy-arn --version-id + + 3. In output ensure policy should not have any Statement block with "Effect": "Allow" and Action set to "*" and Resource set to "*"`, + + rationale: `It's more secure to start with a minimum set of permissions and grant additional permissions as necessary, rather than starting with permissions that are too lenient and then trying to tighten them later. + + Providing full administrative privileges instead of restricting to the minimum set of permissions that the user is required to do exposes the resources to potentially unwanted actions. + + IAM policies that have a statement with "Effect": "Allow" with "Action": "*" over "Resource": "*" should be removed.`, + + remediation: `**From Console:** + Perform the following to detach the policy that has full administrative privileges: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam/. + 2. In the navigation pane, click Policies and then search for the policy name found in the audit step. + 3. Select the policy that needs to be deleted. + 4. In the policy action menu, select first Detach + 5. Select all Users, Groups, Roles that have this policy attached + 6. Click Detach Policy + 7. In the policy action menu, select Detach + + **From Command Line:** + Perform the following to detach the policy that has full administrative privileges as found in the audit step: + + 1. 
Lists all IAM users, groups, and roles that the specified managed policy is attached to. + + aws iam list-entities-for-policy --policy-arn + + 2. Detach the policy from all IAM Users: + + aws iam detach-user-policy --user-name --policy-arn + + 3. Detach the policy from all IAM Groups: + + aws iam detach-group-policy --group-name --policy-arn + + 4. Detach the policy from all IAM Roles: + + aws iam detach-role-policy --role-name --policy-arn `, + + references: [ + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html', + 'CCE-78912-3', + 'https://docs.aws.amazon.com/cli/latest/reference/iam/index.html#cli-aws-iam', + ], + gql: `{ + queryawsIamPolicy { + id + arn + accountId + __typename + policyContent { + statement { + effect + action + resource + } + } + } + }`, + resource: 'queryawsIamPolicy[*]', + severity: 'high', + conditions: { + not: { + path: '@.policyContent.statement', + array_any: { + and: [ + { + path: '[*].effect', + equal: 'Allow', + }, + { + path: '[*].action', + contains: '*', + }, + { + path: '[*].resource', + contains: '*', + }, + ], + }, + }, + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.17.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.17.ts new file mode 100644 index 00000000..06f672b9 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.17.ts @@ -0,0 +1,105 @@ +// AWS CIS 1.2.0 Rule equivalent 1.20 +export default { + id: 'aws-cis-1.3.0-1.17', + title: 'AWS CIS 1.17 Ensure a support role has been created to manage incidents with AWS Support', + + description: 'AWS provides a support center that can be used for incident notification and response, as well as technical support and customer services. Create an IAM Role to allow authorized users to manage incidents with AWS Support.', + + audit: `**From Command Line:** + + 1. 
List IAM policies, filter for the 'AWSSupportAccess' managed policy, and note the "Arn" element value: + + aws iam list-policies --query "Policies[?PolicyName == 'AWSSupportAccess']" + + 2. Check if the 'AWSSupportAccess' policy is attached to any role: + + aws iam list-entities-for-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess + + 3. In Output, Ensure PolicyRoles does not return empty. 'Example: PolicyRoles: [ ]' + + If it returns empty refer to the remediation below.`, + + rationale: 'By implementing least privilege for access control, an IAM Role will require an appropriate IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.', + + remediation: `**From Command Line:** + + 1. Create an IAM role for managing incidents with AWS: + + - Create a trust relationship policy document that allows to manage AWS incidents, and save it locally as /tmp/TrustPolicy.json: + + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "" + }, + "Action": "sts:AssumeRole" + } + ] + } + + 2. Create the IAM role using the above trust policy: + + aws iam create-role --role-name --assume-role-policy-document file:///tmp/TrustPolicy.json + + 3. 
Attach 'AWSSupportAccess' managed policy to the created IAM role: + + aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AWSSupportAccess --role-name `, + + references: [ + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html', + 'https://aws.amazon.com/premiumsupport/pricing/', + 'https://docs.aws.amazon.com/cli/latest/reference/iam/list-policies.html', + 'https://docs.aws.amazon.com/cli/latest/reference/iam/attach-role-policy.html', + 'https://docs.aws.amazon.com/cli/latest/reference/iam/list-entities-for-policy.html', + ], + gql: `{ + queryawsAccount { + id + __typename + iamPolicies { + name + iamUsers { + arn + } + iamGroups { + arn + } + iamRoles { + arn + } + } + } + }`, + resource: 'queryawsAccount[*]', + severity: 'medium', + conditions: { + path: '@.iamPolicies', + array_any: { + and: [ + { + path: '[*].name', + equal: 'AWSSupportAccess', + }, + { + or: [ + { + path: '[*].iamUsers', + isEmpty: false, + }, + { + path: '[*].iamGroups', + isEmpty: false, + }, + { + path: '[*].iamRoles', + isEmpty: false, + }, + ], + }, + ], + }, + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.19.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.19.ts new file mode 100644 index 00000000..e76a171d --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.19.ts @@ -0,0 +1,69 @@ +// AWS CIS 1.4.0 Rule equivalent 1.19 +export default { + id: 'aws-cis-1.3.0-1.19', + title: 'AWS CIS 1.19 Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed', + + description: 'To enable HTTPS connections to your website or application in AWS, you need an SSL/TLS server certificate. You can use ACM or IAM to store and deploy server certificates. Use IAM as a certificate manager only when you must support HTTPS connections in a region that is not supported by ACM. IAM securely encrypts your private keys and stores the encrypted version in IAM SSL certificate storage. 
IAM supports deploying server certificates in all regions, but you must obtain your certificate from an external provider for use with AWS. You cannot upload an ACM certificate to IAM. Additionally, you cannot manage your certificates from the IAM Console.', + + audit: `**From Console:** + Getting the certificates expiration information via AWS Management Console is not currently supported. + To request information about the SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI). + + **From Command Line:** + Run list-server-certificates command to list all the IAM-stored server certificates: + + aws iam list-server-certificates + + The command output should return an array that contains all the SSL/TLS certificates currently stored in IAM and their metadata (name, ID, expiration date, etc): + + { + "ServerCertificateMetadataList": [ + { + "ServerCertificateId": "EHDGFRW7EJFYTE88D", + "ServerCertificateName": "MyServerCertificate", + "Expiration": "2018-07-10T23:59:59Z", + "Path": "/", + "Arn": "arn:aws:iam::012345678910:server-certificate/MySSLCertificate", + "UploadDate": "2018-06-10T11:56:08Z" + } + ] + } + + Verify the ServerCertificateName and Expiration parameter value (expiration date) for each SSL/TLS certificate returned by the list-server-certificates command and determine if there are any expired server certificates currently stored in AWS IAM. If so, use the AWS API to remove them. + If this command returns: + + { "ServerCertificateMetadataList": [] } + + This means that there are no expired certificates, It DOES NOT mean that no certificates exist.`, + + rationale: 'Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can damage the credibility of the application/website behind the ELB. 
As a best practice, it is recommended to delete expired certificates.', + + remediation: `**From Console:** + Removing expired certificates via AWS Management Console is not currently supported. To delete SSL/TLS certificates stored in IAM via the AWS API use the Command Line Interface (CLI). + **From Command Line:** + To delete Expired Certificate run following command by replacing with the name of the certificate to delete: + + aws iam delete-server-certificate --server-certificate-name + + When the preceding command is successful, it does not return any output.`, + + references: [ + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html', + 'https://docs.aws.amazon.com/cli/latest/reference/iam/delete-server-certificate.html', + ], + gql: `{ + queryawsIamServerCertificate { + id + arn + accountId + __typename + expiration + } + }`, + resource: 'queryawsIamServerCertificate[*]', + severity: 'high', + conditions: { + value: { daysDiff: {}, path: '@.expiration' }, + greaterThanInclusive: 1, + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.20.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.20.ts new file mode 100644 index 00000000..e9365bad --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.20.ts @@ -0,0 +1,140 @@ +// AWS CIS 1.4.0 Rule equivalent 2.1.5 +export default { + id: 'aws-cis-1.3.0-1.20', + title: 'AWS CIS 1.20 Ensure that S3 Buckets are configured with \'Block public access (bucket settings)\'', + + description: 'Amazon S3 provides Block public access (bucket settings) and Block public access (account settings) to help you manage public access to Amazon S3 resources. By default, S3 buckets and objects are created with public access disabled. However, an IAM principal with sufficient S3 permissions can enable public access at the bucket and/or object level. 
While enabled, Block public access (bucket settings) prevents an individual bucket, and its contained objects, from becoming publicly accessible. Similarly, Block public access (account settings) prevents all buckets, and contained objects, from becoming publicly accessible across the entire account.', + + audit: `**If utilizing Block Public Access (bucket settings)** + **From Console:** + + 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ + 2. Select the Check box next to the Bucket. + 3. Click on 'Edit public access settings'. + 4. Ensure that block public access settings are set appropriately for this bucket + 5. Repeat for all the buckets in your AWS account. + + **From Command Line:** + + 1. List all of the S3 Buckets + + aws s3 ls + + 2. Find the public access setting on that bucket + + aws s3api get-public-access-block --bucket + + Output if Block Public access is enabled: + + { + "PublicAccessBlockConfiguration": { + "BlockPublicAcls": true, + "IgnorePublicAcls": true, + "BlockPublicPolicy": true, + "RestrictPublicBuckets": true + } + } + + If the output reads false for the separate configuration settings then proceed to the remediation. + + **If utilizing Block Public Access (account settings)** + **From Console:** + + 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ + 2. Choose Block public access (account settings) + 3. Ensure that block public access settings are set appropriately for your AWS account. 
+ + **From Command Line:** + To check Public access settings for this account status, run the following command, + + aws s3control get-public-access-block --account-id --region + + Output if Block Public access is enabled: + + { + "PublicAccessBlockConfiguration": { + "IgnorePublicAcls": true, + "BlockPublicPolicy": true, + "BlockPublicAcls": true, + "RestrictPublicBuckets": true + } + } + + If the output reads *false* for the separate configuration settings then proceed to the remediation.`, + + rationale: `Amazon S3 Block public access (bucket settings) prevents the accidental or malicious public exposure of data contained within the respective bucket(s). + + Amazon S3 Block public access (account settings) prevents the accidental or malicious public exposure of data contained within all buckets of the respective AWS account. + + Whether blocking public access to all or some buckets is an organizational decision that should be based on data sensitivity, least privilege, and use case.`, + + remediation: `**If utilizing Block Public Access (bucket settings)** + **From Console:** + + 1. Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ + 2. Select the Check box next to the Bucket. + 3. Click on 'Edit public access settings'. + 4. Click 'Block all public access' + 5. Repeat for all the buckets in your AWS account that contain sensitive data. + + **From Command Line:** + + 1. List all of the S3 Buckets + + aws s3 ls + + 2. Set the Block Public Access to true on that bucket + + aws s3api put-public-access-block --bucket --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" + + **If utilizing Block Public Access (account settings)** + **From Console:** + If the output reads *true* for the separate configuration settings then it is set on the account. + + 1. 
Login to AWS Management Console and open the Amazon S3 console using https://console.aws.amazon.com/s3/ + 2. Choose Block Public Access (account settings) + 3. Choose Edit to change the block public access settings for all the buckets in your AWS account + 4. Choose the settings you want to change, and then choose Save. For details about each setting, pause on the i icons. + 5. When you're asked for confirmation, enter confirm. Then Click Confirm to save your changes. + + **From Command Line:** + To set Block Public access settings for this account, run the following command: + + aws s3control put-public-access-block --public-access-block-configuration BlockPublicAcls=true, IgnorePublicAcls=true, BlockPublicPolicy=true, RestrictPublicBuckets=true --account-id `, + + references: ['https://docs.aws.amazon.com/AmazonS3/latest/user-guide/block-public-access-account.html'], + gql: `{ + queryawsS3 { + id + arn + accountId + __typename + blockPublicAcls + ignorePublicAcls + blockPublicPolicy + restrictPublicBuckets + } + }`, + resource: 'queryawsS3[*]', + severity: 'high', + conditions: { + and: [ + { + path: '@.blockPublicAcls', + equal: 'Yes', + }, + { + path: '@.ignorePublicAcls', + equal: 'Yes', + }, + { + path: '@.blockPublicPolicy', + equal: 'Yes', + }, + { + path: '@.restrictPublicBuckets', + equal: 'Yes', + }, + ], + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.21.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.21.ts new file mode 100644 index 00000000..40459f5c --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.21.ts @@ -0,0 +1,81 @@ +// AWS CIS 1.4.0 Rule equivalent 1.20 +export default { + id: 'aws-cis-1.3.0-1.21', + title: 'AWS CIS 1.21 Ensure that IAM Access analyzer is enabled', + + description: `Enable IAM Access analyzer for IAM policies about all resources. + + IAM Access Analyzer is a technology introduced at AWS reinvent 2019. 
After the Analyzer is enabled in IAM, scan results are displayed on the console showing the accessible resources. Scans show resources that other accounts and federated users can access, such as KMS keys and IAM roles. So the results allow you to determine if an unintended user is allowed, making it easier for administrators to monitor least privileges access.`, + + audit: `**From Console:** + + 1. Open the IAM console at https://console.aws.amazon.com/iam/ + 2. Choose Access analyzer + 3. Ensure that the STATUS is set to Active + + **From Command Line:** + + 1. Run the following command: + + aws accessanalyzer get-analyzer --analyzer-name | grep status + + 2. Ensure that the "status" is set to "ACTIVE"`, + + rationale: 'AWS IAM Access Analyzer helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. This lets you identify unintended access to your resources and data. Access Analyzer identifies resources that are shared with external principals by using logic-based reasoning to analyze the resource-based policies in your AWS environment. IAM Access Analyzer continuously monitors all policies for S3 bucket, IAM roles, KMS(Key Management Service) keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues.', + + remediation: `**From Console:** + Perform the following to enable IAM Access analyzer for IAM policies: + + 1. Open the IAM console at https://console.aws.amazon.com/iam/. + 2. Choose *Access analyzer*. + 3. Choose *Create analyzer*. + 4. On the *Create analyzer* page, confirm that the Region displayed is the *Region* where you want to enable Access Analyzer. + 5. Enter a name for the analyzer. + 6. Optional. Add any tags that you want to apply to the analyzer. + 7. Choose Create Analyzer. 
+ + **From Command Line:** + Run the following command: + + aws accessanalyzer create-analyzer --analyzer-name --type + + **Note:** The IAM Access Analyzer is successfully configured only when the account you use has the necessary permissions.`, + + references: [ + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-getting-started.html', + 'https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/get-analyzer.html', + 'https://docs.aws.amazon.com/cli/latest/reference/accessanalyzer/create-analyzer.html', + ], + gql: `{ + queryawsAccount { + id + arn + accountId + __typename + regions + iamAccessAnalyzers { + region + status + } + } + }`, + resource: 'queryawsAccount[*]', + severity: 'high', + conditions: { + and: [ + { + path: '@.iamAccessAnalyzers', + isEmpty: false, + }, + { + path: '@', + jq: '[.regions[] as $scanned | { scannedRegion: $scanned, analyzers: [.iamAccessAnalyzers[] | select(.region == $scanned )] }]', + array_all: { + path: '[*].analyzers[0].status', + equal: 'ACTIVE', + }, + }, + ], + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.4.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.4.ts new file mode 100644 index 00000000..4dd389a1 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.4.ts @@ -0,0 +1,59 @@ +// AWS CIS 1.2.0 Rule equivalent 1.12 +export default { + id: 'aws-cis-1.3.0-1.4', + title: 'AWS CIS 1.4 Ensure no root account access key exists', + description: `The root account is the most privileged user in an AWS account. AWS Access Keys provide + programmatic access to a given AWS account. It is recommended that all access keys + associated with the root account be removed.`, + audit: `Perform the following to determine if the root account has access keys: + Via the AWS Console + + 1. Login to the AWS Management Console + 2. Click *Services* + 3. Click *IAM* + 4. 
Click on *Credential Report* + 5. This will download an *.xls* file that contains credential usage for all IAM users within an AWS Account - open this file + 6. For the ** user, ensure the *access_key_1_active* and *access_key_2_active* fields are set to *FALSE*. + + Via CLI + + 1. Run the following commands: + + aws iam generate-credential-report + aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,9,14 | grep -B1 '' + + 2. For the ** user, ensure the *access_key_1_active* and *access_key_2_active* fields are set to *FALSE*.`, + rationale: 'Removing access keys associated with the root account limits vectors by which the account can be compromised. Additionally, removing the root access keys encourages the creation and use of role-based accounts that are least privileged.', + remediation: `Perform the following to delete or disable active root access keys being + Via the AWS Console + + 1. Sign in to the AWS Management Console as Root and open the IAM console at https://console.aws.amazon.com/iam/. + 2. Click on ** at the top right and select *Security Credentials* from the drop-down list + 3. On the pop-out screen Click on *Continue to Security Credentials* + 4. Click on *Access Keys* *(Access Key ID and Secret Access Key)* + 5. Under the *Status* column if there are any Keys that are Active + 1. Click on *Make Inactive* - (Temporarily disable Key - may be needed again) + 2. 
Click *Delete* - (Deleted keys cannot be recovered)`, + references: [ + 'http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html', + 'http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html', + 'http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetAccountSummary.html', + 'CCE-78910-7', + 'CIS CSC v6.0 #5.1', + ], + gql: `{ + queryawsIamUser(filter: { name: { eq: "root" } }) { + id + arn + accountId + __typename + accessKeysActive + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'high', + conditions: { + path: '@.accessKeysActive', + equal: false, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.5.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.5.ts new file mode 100644 index 00000000..280a4f26 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.5.ts @@ -0,0 +1,66 @@ +// AWS CIS 1.2.0 Rule equivalent 1.13 +export default { + id: 'aws-cis-1.3.0-1.5', + title: 'AWS CIS 1.5 Ensure MFA is enabled for the "root user" account', + description: `The root account is the most privileged user in an AWS account. MFA adds an extra layer of + protection on top of a user name and password. With MFA enabled, when a user signs in to + an AWS website, they will be prompted for their user name and password as well as for an + authentication code from their AWS MFA device. + + **Note:** When virtual MFA is used for root accounts, it is recommended that the device used + is NOT a personal device, but rather a dedicated mobile device (tablet or phone) that is + managed to be kept charged and secured independent of any individual personal devices. + ("non-personal virtual MFA") This lessens the risks of losing access to the MFA due to + device loss, device trade-in or if the individual owning the device is no longer employed at + the company.`, + audit: `Perform the following to determine if the root account has MFA setup: + + 1. 
Run the following command: + + aws iam get-account-summary | grep "AccountMFAEnabled" + + 2. Ensure the AccountMFAEnabled property is set to 1`, + rationale: 'Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.', + remediation: `Perform the following to establish MFA for the root account: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam/. + + + Note: to manage MFA devices for the root AWS account, you must use your root account credentials to sign in to AWS. You cannot manage MFA devices for the root account using other credentials. + + 2. Choose *Dashboard*, and under *Security Status*, expand *Activate MFA* on your root account. + 3. Choose *Activate MFA* + 4. In the wizard, choose *A virtual MFA* device and then choose *Next Step*. + 5. IAM generates and displays configuration information for the virtual MFA device, including a QR code graphic. The graphic is a representation of the 'secret configuration key' that is available for manual entry on devices that do not support QR codes. + 6. Open your virtual MFA application. (For a list of apps that you can use for hosting virtual MFA devices, see Virtual MFA Applications.) If the virtual MFA application supports multiple accounts (multiple virtual MFA devices), choose the option to create a new account (a new virtual MFA device). + 7. Determine whether the MFA app supports QR codes, and then do one of the following: + + + - Use the app to scan the QR code. For example, you might choose the camera icon or choose an option similar to Scan code, and then use the device's camera to scan the code. + - In the Manage MFA Device wizard, choose Show secret key for manual configuration, and then type the secret configuration key into your MFA application. 
+ + When you are finished, the virtual MFA device starts generating one-time passwords. + + 1. In the Manage MFA Device wizard, in the Authentication Code 1 box, type the one-time password that currently appears in the virtual MFA device. Wait up to 30 seconds for the device to generate a new one-time password. Then type the second one-time password into the Authentication Code 2 box. Choose Active Virtual MFA.`, + references: [ + 'CCE-78911-5', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-root', + ], + gql: `{ + queryawsIamUser(filter: { name: { eq: "root" } }) { + id + arn + accountId + __typename + name + mfaActive + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'high', + conditions: { + path: '@.mfaActive', + equal: true, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.6.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.6.ts new file mode 100644 index 00000000..fabf135a --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.6.ts @@ -0,0 +1,82 @@ +// AWS CIS 1.2.0 Rule equivalent 1.14 +export default { + id: 'aws-cis-1.3.0-1.6', + title: 'AWS CIS 1.6 Ensure hardware MFA is enabled for the "root user" account', + description: `The root account is the most privileged user in an AWS account. MFA adds an extra layer of + protection on top of a user name and password. With MFA enabled, when a user signs in to + an AWS website, they will be prompted for their user name and password as well as for an + authentication code from their AWS MFA device. For Level 2, it is recommended that the + root account be protected with a hardware MFA.`, + audit: `Perform the following to determine if the root account has a hardware MFA setup: + + 1. 
Run the following command to determine if the root account has MFA setup: + + aws iam get-account-summary | grep "AccountMFAEnabled" + + The *AccountMFAEnabled* property is set to 1 will ensure that the root account has MFA (Virtual or Hardware) Enabled. + If *AccountMFAEnabled* property is set to 0 the account is not compliant with this recommendation. + +
+ + 2. If *AccountMFAEnabled* property is set to 1, determine root account has Hardware MFA enabled. Run the following command to list all virtual MFA devices: + + aws iam list-virtual-mfa-devices + + If the output contains one MFA with the following Serial Number, it means the MFA is virtual, not hardware and the account is not compliant with this recommendation: *"SerialNumber": "arn:aws:iam::__:mfa/root-account-mfa-device"*`, + rationale: `A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer the attack surface introduced by the mobile smartphone on which a virtual MFA resides. + + **Note**: Using hardware MFA for many, many AWS accounts may create a logistical device management issue. If this is the case, consider implementing this Level 2 recommendation selectively to the highest security AWS accounts and the Level 1 recommendation applied to the remaining accounts. + + Link to order AWS compatible hardware MFA device: [http://onlinenoram.gemalto.com/](http://onlinenoram.gemalto.com/)`, + remediation: `Perform the following to establish a hardware MFA for the root account: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam/. + + Note: to manage MFA devices for the root AWS account, you must use your root account credentials to sign in to AWS. You cannot manage MFA devices for the root account using other credentials. + + 2. Choose *Dashboard* , and under *Security Status* , expand *Activate MFA* on your root account. + 3. Choose *Activate MFA* + 4. In the wizard, choose *A hardware MFA* device and then choose *Next Step*. + 5. In the *Serial Number* box, enter the serial number that is found on the back of the MFA device. + 6. In the *Authentication Code 1* box, enter the six-digit number displayed by the MFA device. You might need to press the button on the front of the device to display the number. + 7. 
Wait 30 seconds while the device refreshes the code, and then enter the next six-digit number into the *Authentication Code 2* box. You might need to press the button on the front of the device again to display the second number. + 8. Choose *Next Step*. The MFA device is now associated with the AWS account. The next time you use your AWS account credentials to sign in, you must type a code from the hardware MFA device.`, + references: [ + 'CCE-78911-5', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_virtual.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html#enable-hw-mfa-for-root', + ], + gql: `{ + queryawsIamUser(filter: { name: { eq: "root" } }) { + id + arn + accountId + __typename + name + mfaActive + virtualMfaDevices { + serialNumber + } + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'high', + conditions: { + and: [ + { + path: '@.mfaActive', + equal: true, + }, + { + jq: '[select("arn:aws:iam::" + .accountId + ":mfa/root-account-mfa-device" == .virtualMfaDevices[].serialNumber)] | { "match" : (length > 0) }', + path: '@', + and: [ + { + path: '@.match', + notEqual: true, + }, + ], + }, + ], + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.7.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.7.ts new file mode 100644 index 00000000..3c7eb1f9 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.7.ts @@ -0,0 +1,101 @@ +export default { + id: 'aws-cis-1.3.0-1.7', + title: 'AWS CIS 1.7 Eliminate use of the root user for administrative and daily tasks', + + description: 'With the creation of an AWS account, a \'root user\' is created that cannot be disabled or deleted. That user has unrestricted access to and control over all resources in the AWS account. It is highly recommended that the use of this account be avoided for everyday tasks.', + + audit: `**From Console:** + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/iam/ + 2. 
In the left pane, click *Credential Report* + 3. Click on *Download Report* + 4. Open or Save the file locally + 5. Locate the ** under the user column + 6. Review *password_last_used*, *access_key_1_last_used_date*, *access_key_2_last_used_date* to determine when the 'root user' was last used. + + **From Command Line:** + Run the following CLI commands to provide a credential report for determining the last time the 'root user' was used: + + aws iam generate-credential-report + + aws iam get-credential-report --query 'Content' --output text | base64 -d | cut -d, -f1,5,11,16 | grep -B1 '' + + Review *password_last_used*, *access_key_1_last_used_date*, *access_key_2_last_used_date* to determine when the root user was last used. + + **Note:** There are a few conditions under which the use of the 'root' user account is required. Please see the reference links for all of the tasks that require use of the 'root' user.`, + + rationale: 'The \'root user\' has unrestricted access to and control over all account resources. Use of it is inconsistent with the principles of least privilege and separation of duties, and can lead to unnecessary harm due to error or account compromise.', + + remediation: `Remediation: + + If you find that the 'root' user account is being used for daily activity to include administrative tasks that do not require the 'root' user: + + 1. Change the 'root' user password. + 2. Deactivate or delete any access keys associated with the 'root' user. 
+ + **Remember, anyone who has 'root' user credentials for your AWS account has unrestricted access to and control of all the resources in your account, including billing information.`, + + references: [ + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html', + 'https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html', + ], + gql: `{ + queryawsIamUser(filter: { name: { eq: "root" } }) { + id + arn + accountId + __typename + passwordLastUsed + passwordEnabled + accessKeysActive + accessKeyData { + lastUsedDate + status + } + } + }`, + resource: 'queryawsIamUser[*]', + severity: 'high', + conditions: { + not: { + or: [ + { + and: [ + { + path: '@.passwordEnabled', + equal: true, + }, + { + value: { daysAgo: {}, path: '@.passwordLastUsed' }, + lessThanInclusive: 90, + }, + ], + }, + { + and: [ + { + path: '@.accessKeysActive', + equal: true, + }, + { + path: '@.accessKeyData', + array_any: { + and: [ + { + path: '[*].status', + equal: 'Active', + }, + { + value: { daysAgo: {}, path: '[*].lastUsedDate' }, + lessThanInclusive: 90, + }, + ], + }, + }, + ], + } + ] + }, + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.8.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.8.ts new file mode 100644 index 00000000..43ae8ac2 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.8.ts @@ -0,0 +1,62 @@ +// AWS CIS 1.2.0 Rule equivalent 1.9 +export default { + id: 'aws-cis-1.3.0-1.8', + title: 'AWS CIS 1.8 Ensure IAM password policy requires minimum length of 14 or greater', + + description: `Password policies are, in part, used to enforce password complexity requirements. IAM + password policies can be used to ensure password are at least a given length. 
It is + recommended that the password policy require a minimum password length 14.`, + + audit: `Perform the following to ensure the password policy is configured as prescribed: + + **From Console:** + + 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) + 2. Go to IAM Service on the AWS Console + 3. Click on Account Settings on the Left Pane + 4. Ensure "Minimum password length" is set to 14 or greater. + + **From Command Line:** + + aws iam get-account-password-policy + + Ensure the output of the above command includes "MinimumPasswordLength": 14 (or higher)`, + + rationale: 'Setting a password complexity policy increases account resiliency against brute force login attempts.', + + remediation: `Perform the following to set the password policy as prescribed: + + **From Console:** + + 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) + 2. Go to IAM Service on the AWS Console + 3. Click on Account Settings on the Left Pane + 4. Set "Minimum password length" to 14 or greater. + 5. 
Click "Apply password policy" + + **From Command Line:** + + aws iam update-account-password-policy --minimum-password-length 14 + + Note: All commands starting with "aws iam update-account-password-policy" can be combined into a single command.`, + + references: [ + 'CCE-78907-3', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy', + ], + gql: `{ + queryawsIamPasswordPolicy { + id + accountId + __typename + minimumPasswordLength + } + }`, + resource: 'queryawsIamPasswordPolicy[*]', + severity: 'medium', + conditions: { + path: '@.minimumPasswordLength', + greaterThanInclusive: 14, + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.9.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.9.ts new file mode 100644 index 00000000..34c02dc8 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-1.9.ts @@ -0,0 +1,61 @@ +// AWS CIS 1.2.0 Rule equivalent 1.10 +export default { + id: 'aws-cis-1.3.0-1.9', + title: 'AWS CIS 1.9 Ensure IAM password policy prevents password reuse', + description: `IAM password policies can prevent the reuse of a given password by the same user. It is + recommended that the password policy prevent the reuse of passwords.`, + + audit: `Perform the following to ensure the password policy is configured as prescribed: + + **From Console:** + + 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) + 2. Go to IAM Service on the AWS Console + 3. Click on Account Settings on the Left Pane + 4. Ensure "Prevent password reuse" is checked + 5. 
Ensure "Number of passwords to remember" is set to 24 + + **From Command Line:** + + aws iam get-account-password-policy + + Ensure the output of the above command includes "PasswordReusePrevention": 24`, + + rationale: 'Preventing password reuse increases account resiliency against brute force login attempts.', + + remediation: `Perform the following to set the password policy as prescribed: + + **From Console:** + + 1. Login to AWS Console (with appropriate permissions to View Identity Access Management Account Settings) + 2. Go to IAM Service on the AWS Console + 3. Click on Account Settings on the Left Pane + 4. Check "Prevent password reuse" + 5. Set "Number of passwords to remember" is set to 24 + + **From Command Line:** + + aws iam update-account-password-policy --password-reuse-prevention 24 + + Note: All commands starting with "aws iam update-account-password-policy" can be combined into a single command.`, + + references: [ + 'CCE-78908-1', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#configure-strong-password-policy', + ], + gql: `{ + queryawsIamPasswordPolicy { + id + accountId + __typename + passwordReusePrevention + } + }`, + resource: 'queryawsIamPasswordPolicy[*]', + severity: 'high', + conditions: { + path: '@.passwordReusePrevention', + greaterThanInclusive: 24, + }, +} \ No newline at end of file diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.1.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.1.ts new file mode 100644 index 00000000..6e407726 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.1.ts @@ -0,0 +1,118 @@ +// AWS CIS 1.2.0 Rule equivalent 2.1 +export default { + id: 'aws-cis-1.3.0-3.1', + title: 'AWS CIS 3.1 Ensure CloudTrail is enabled in all regions', + description: `AWS CloudTrail is a web service that records AWS API calls for your account and delivers + log files to you. 
The recorded information includes the identity of the API caller, the time of + the API call, the source IP address of the API caller, the request parameters, and the + response elements returned by the AWS service. CloudTrail provides a history of AWS API + calls for an account, including API calls made via the Management Console, SDKs, command + line tools, and higher-level AWS services (such as CloudFormation).`, + audit: `Perform the following to determine if CloudTrail is enabled for all regions: + Via the management Console + + 1. Sign in to the AWS Management Console and open the CloudTrail console at https://console.aws.amazon.com/cloudtrail + 2. Click on *Trails* on the left navigation pane + + - You will be presented with a list of trails across all regions + + 3. Ensure at least one Trail has *All* specified in the *Region* column + 4. Click on a trail via the link in the *Name* column + 5. Ensure *Logging* is set to *ON* + 6. Ensure *Apply trail to all regions* is set to *Yes* + 7. In section *Management Events* ensure *Read/Write Events* set to *ALL* + + Via CLI + + aws cloudtrail describe-trails + + Ensure *IsMultiRegionTrail* is set to *true* + + aws cloudtrail get-trail-status --name + + Ensure *IsLogging* is set to *true* + + aws cloudtrail get-event-selectors --trail-name + + Ensure there is at least one Event Selector for a Trail with *IncludeManagementEvents* set to *true* and *ReadWriteType* set to *All*`, + rationale: `The AWS API call history produced by CloudTrail enables security analysis, resource change tracking, and compliance auditing. 
Additionally, + + - ensuring that a multi-regions trail exists will ensure that unexpected activity occurring in otherwise unused regions is detected + - ensuring that a multi-regions trail exists will ensure that Global Service Logging is enabled for a trail by default to capture recording of events generated on AWS global services + - for a multi-regions trail, ensuring that management events configured for all type of Read/Writes ensures recording of management operations that are performed on all resources in an AWS account`, + remediation: `Perform the following to enable global (Multi-region) CloudTrail logging: + Via the management Console + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/cloudtrail + 2. Click on *Trails* on the left navigation pane + 3. Click *Get Started Now*, if presented + + + - Click *Add new trail* + - Enter a trail name in the *Trail* name box + - Set the *Apply trail to all regions* option to Yes + - Specify an S3 bucket name in the *S3 bucket* box + - Click *Create* + + 4. If 1 or more trails already exist, select the target trail to enable for global logging + 5. Click the edit icon (pencil) next to *Apply trail to all regions* , Click *Yes* and Click *Save*. + 6. Click the edit icon (pencil) next to *Management Events* click All for setting Read/Write Events and Click *Save*. 
+ + Via CLI + + aws cloudtrail create-trail --name --bucket-name --is-multi-region-trail + aws cloudtrail update-trail --name --is-multi-region-trail + + Note: Creating CloudTrail via CLI without providing any overriding options configures *Management Events* to set *All* type of *Read/Writes* by default.`, + references: [ + 'CCE-78913-1', + 'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-concepts.html#cloudtrail-concepts-management-events', + 'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-management-and-data-events-with-cloudtrail.html?icmpid=docs_cloudtrail_console#logging-management-events', + 'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-supported-services.html#cloud-trail-supported-services-data-events', + ], + gql: `{ + queryawsAccount { + id + __typename + cloudtrail { + isMultiRegionTrail + status { + isLogging + } + eventSelectors { + readWriteType + includeManagementEvents + } + } + } + }`, + resource: 'queryawsAccount[*]', + severity: 'medium', + conditions: { + path: '@.cloudtrail', + array_any: { + and: [ + { + path: '[*].isMultiRegionTrail', + equal: 'Yes', + }, + { + path: '[*].status.isLogging', + equal: true, + }, + { + path: '[*].eventSelectors', + array_any: { + and: [ + { path: '[*].readWriteType', equal: 'All' }, + { + path: '[*].includeManagementEvents', + equal: true, + }, + ], + }, + }, + ], + }, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.10.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.10.ts new file mode 100644 index 00000000..4005cc2f --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.10.ts @@ -0,0 +1,93 @@ +// NIST 800-53 rev4 Rule equivalent 6.13 +export default { + id: 'aws-cis-1.3.0-3.10', + title: 'AWS CIS 3.10 Ensure that Object-level logging for write events is enabled for S3 bucket', + + description: 'S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. 
By default, CloudTrail trails don\'t log data events and so it is recommended to enable Object-level logging for S3 buckets.', + + audit: `**From Console:** + + 1. Login to the AWS Management Console and navigate to S3 dashboard at https://console.aws.amazon.com/s3/ + 2. In the left navigation panel, click *buckets* and then click on the S3 Bucket Name that you want to examine. + 3. Click *Properties* tab to see in detail bucket configuration. + 4. If the current status for *Object-level* logging is set to Disabled, then object-level logging of write events for the selected s3 bucket is not set. + 5. Repeat steps 2 to 4 to verify object level logging status of other S3 buckets. + + **From Command Line:** + + 1. Run *list-trails* command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: + + aws cloudtrail list-trails --region --query Trails[*].Name + + 2. The command output will be a list of the requested trail names. + 3. Run *get-event-selectors* command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3bucket resources: + + aws cloudtrail get-event-selectors --region --trail-name --query EventSelectors[*].DataResources[] + + 4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. + 5. If the *get-event-selectors* command returns an empty array '[]', the Data events are not included into the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. + 6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events. + 7. 
Change the AWS region by updating the *--region* command parameter and perform the audit process for other regions.`, + + rationale: 'Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity within your S3 Buckets using Amazon CloudWatch Events.', + + remediation: `**From Console:** + + 1. Login to the AWS Management Console and navigate to S3 dashboard at https://console.aws.amazon.com/s3/ + 2. In the left navigation panel, click *buckets* and then click on the S3 Bucket Name that you want to examine. + 3. Click *Properties* tab to see in detail bucket configuration. + 4. Click on the *Object-level* logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link https://console.aws.amazon.com/cloudtrail/ + 5. Once the Cloudtrail is selected, check the *Write* event checkbox, so that *object-level* logging for Write events is enabled. + 6. Repeat steps 2 to 5 to enable object-level logging of write events for other S3 buckets. + + **From Command Line:** + + 1. To enable *object-level* data events logging for S3 buckets within your AWS account, run *put-event-selectors* command using the name of the trail that you want to reconfigure as identifier: + + aws cloudtrail put-event-selectors --region --trail-name --event-selectors '[{ "ReadWriteType": "WriteOnly", "IncludeManagementEvents":true, "DataResources": [{ "Type": "AWS::S3::Object", "Values": ["arn:aws:s3:::/"] }] }]' + + 2. The command output will be *object-level* event trail configuration. + 3. If you want to enable it for all buckets at once then change Values parameter to *["arn:aws:s3"]* in command given above. + 4. 
Repeat step 1 for each s3 bucket to update *object-level* logging of write events. + 5. Change the AWS region by updating the *--region* command parameter and perform the process for other regions.`, + + references: ['https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html'], + gql: `{ + queryawsAccount { + id + __typename + cloudtrail { + eventSelectors { + readWriteType + includeManagementEvents + dataResources { + type + } + } + } + } + }`, + resource: 'queryawsAccount[*]', + severity: 'high', + conditions: { + path: '@.cloudtrail', + array_any: { + path: '[*].eventSelectors', + array_any: { + and: [ + { + path: '[*].includeManagementEvents', + equal: true, + }, + { + path: '[*].readWriteType', + in: ['WriteOnly', 'All'], + }, + { + path: '[*].dataResources', + isEmpty: false, + }, + ], + }, + } + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.11.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.11.ts new file mode 100644 index 00000000..ac4771d6 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.11.ts @@ -0,0 +1,94 @@ +// NIST 800-53 rev4 Rule equivalent 6.12 +export default { + id: 'aws-cis-1.3.0-3.11', + title: 'AWS CIS 3.11 Ensure that Object-level logging for read events is enabled for S3 bucket', + + description: 'S3 object-level API operations such as GetObject, DeleteObject, and PutObject are called data events. By default, CloudTrail trails don\'t log data events and so it is recommended to enable Object-level logging for S3 buckets.', + + audit: `**From Console:** + + 1. Login to the AWS Management Console and navigate to S3 dashboard at https://console.aws.amazon.com/s3/ + 2. In the left navigation panel, click buckets and then click on the S3 Bucket Name that you want to examine. + 3. Click *Properties* tab to see in detail bucket configuration. + 4. If the current status for *Object-level* logging is set to *Disabled*, then object-level logging of read events for the selected s3 bucket is not set. + 5. 
If the current status for *Object-level* logging is set to *Enabled*, but the Read event check-box is unchecked, then object-level logging of read events for the selected s3 bucket is not set. + 6. Repeat steps 2 to 5 to verify *object-level* logging for *read* events of your other S3 buckets. + + **From Command Line:** + + 1. Run *describe-trails* command to list the names of all Amazon CloudTrail trails currently available in the selected AWS region: + + aws cloudtrail describe-trails --region --output table --query trailList[*].Name + + 2. The command output will be table of the requested trail names. + 3. Run *get-event-selectors* command using the name of the trail returned at the previous step and custom query filters to determine if Data events logging feature is enabled within the selected CloudTrail trail configuration for s3 bucket resources: + + aws cloudtrail get-event-selectors --region --trail-name --query EventSelectors[*].DataResources[] + + 4. The command output should be an array that contains the configuration of the AWS resource(S3 bucket) defined for the Data events selector. + 5. If the *get-event-selectors* command returns an empty array, the Data events are not included into the selected AWS Cloudtrail trail logging configuration, therefore the S3 object-level API operations performed within your AWS account are not recorded. + 6. Repeat steps 1 to 5 for auditing each s3 bucket to identify other trails that are missing the capability to log Data events. + 7. Change the AWS region by updating the *--region* command parameter and perform the audit process for other regions.`, + + rationale: 'Enabling object-level logging will help you meet data compliance requirements within your organization, perform comprehensive security analysis, monitor specific patterns of user behavior in your AWS account or take immediate actions on any object-level API activity using Amazon CloudWatch Events.', + + remediation: `**From Console:** + + 1. 
Login to the AWS Management Console and navigate to S3 dashboard at https://console.aws.amazon.com/s3/ + 2. In the left navigation panel, click buckets and then click on the S3 Bucket Name that you want to examine. + 3. Click *Properties* tab to see in detail bucket configuration. + 4. Click on the *Object-level* logging setting, enter the CloudTrail name for the recording activity. You can choose an existing Cloudtrail or create a new one by navigating to the Cloudtrail console link https://console.aws.amazon.com/cloudtrail/ + 5. Once the Cloudtrail is selected, check the Read event checkbox, so that *object-level* logging for *Read* events is enabled. + 6. Repeat steps 2 to 5 to enable *object-level* logging of read events for other S3 buckets. + + **From Command Line:** + + 1. To enable object-level data events logging for S3 buckets within your AWS account, run put-event-selectors command using the name of the trail that you want to reconfigure as identifier: + + aws cloudtrail put-event-selectors --region --trail-name --event-selectors '[{ "ReadWriteType": "ReadOnly", "IncludeManagementEvents":true, "DataResources": [{ "Type": "AWS::S3::Object", "Values": ["arn:aws:s3:::/"] }] }]' + + 2. The command output will be *object-level* event trail configuration. + 3. If you want to enable it for all buckets at once then change Values parameter to *["arn:aws:s3"]* in command given above. + 4. Repeat step 1 for each s3 bucket to update *object-level* logging of read events. + 5. 
Change the AWS region by updating the *--region* command parameter and perform the process for other regions.`, + + references: ['https://docs.aws.amazon.com/AmazonS3/latest/user-guide/enable-cloudtrail-events.html'], + gql: `{ + queryawsAccount { + id + __typename + cloudtrail { + eventSelectors { + readWriteType + includeManagementEvents + dataResources { + type + } + } + } + } + }`, + resource: 'queryawsAccount[*]', + severity: 'high', + conditions: { + path: '@.cloudtrail', + array_any: { + path: '[*].eventSelectors', + array_any: { + and: [ + { + path: '[*].includeManagementEvents', + equal: true, + }, + { + path: '[*].readWriteType', + in: ['ReadOnly', 'All'], + }, + { + path: '[*].dataResources', + isEmpty: false, + }, + ], + }, + } + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.2.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.2.ts new file mode 100644 index 00000000..99e7f709 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.2.ts @@ -0,0 +1,64 @@ +// AWS CIS 1.2.0 Rule equivalent 2.2 +export default { + id: 'aws-cis-1.3.0-3.2', + title: 'AWS CIS 3.2 Ensure CloudTrail log file validation is enabled', + description: `CloudTrail log file validation creates a digitally signed digest file containing a hash of each + log that CloudTrail writes to S3. These digest files can be used to determine whether a log + file was changed, deleted, or unchanged after CloudTrail delivered the log. It is + recommended that file validation be enabled on all CloudTrails.`, + audit: `Perform the following on each trail to determine if log file validation is enabled: + Via the management Console + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/cloudtrail + 2. Click on *Trails* on the left navigation pane + 3. 
For Every Trail: + + + - Click on a trail via the link in the Name column + - Under the *S3* section, ensure *Enable log file validation* is set to *Yes* + + Via CLI + + aws cloudtrail describe-trails + + Ensure *LogFileValidationEnabled* is set to *true* for each trail`, + rationale: 'Enabling log file validation will provide additional integrity checking of CloudTrail logs.', + remediation: `Perform the following to enable log file validation on a given trail: + Via the management Console + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/cloudtrail + 2. Click on *Trails* on the left navigation pane + 3. Click on target trail + 4. Within the *S3* section click on the edit icon (pencil) + 5. Click *Advanced* + 6. Click on the *Yes* radio button in section *Enable log file validation* + 7. Click *Save* + + Via CLI + + aws cloudtrail update-trail --name --enable-log-file-validation + + Note that periodic validation of logs using these digests can be performed by running the following command: + + aws cloudtrail validate-logs --trail-arn --start-time --end-time +`, + references: [ + 'http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-enabling.html', + 'CCE-78914-9', + ], + gql: `{ + queryawsCloudtrail { + id + arn + accountId + __typename + logFileValidationEnabled + } + }`, + resource: 'queryawsCloudtrail[*]', + severity: 'medium', + conditions: { + path: '@.logFileValidationEnabled', + equal: 'Yes', + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.3.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.3.ts new file mode 100644 index 00000000..1ae3e2ea --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.3.ts @@ -0,0 +1,125 @@ +// AWS CIS 1.2.0 Rule equivalent 2.3 +export default { + id: 'aws-cis-1.3.0-3.3', + title: + 'AWS CIS 3.3 Ensure the S3 bucket used to store CloudTrail logs is not publicly accessible', + description: + 'CloudTrail logs a record of every API 
call made in your AWS account. These logs file are stored in an S3 bucket. It is recommended that the bucket policy, or access control list (ACL), applied to the S3 bucket that CloudTrail logs to prevents public access to the CloudTrail logs.', + audit: `Perform the following to determine if any public access is granted to an S3 bucket via an ACL or S3 bucket policy: Via the Management Console + + 1. Go to the Amazon CloudTrail console at https://console.aws.amazon.com/cloudtrail/home + 2. In the API activity history pane on the left, click Trails + 3. In the Trails pane, note the bucket names in the S3 bucket column + 4. Go to Amazon S3 console at https://console.aws.amazon.com/s3/home + 5. For each bucket noted in step 3, right-click on the bucket and click Properties + 6. In the Properties pane, click the Permissions tab. + 7. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. + 8. Ensure no rows exists that have the Grantee set to Everyone or the Grantee set to Any Authenticated User. + 9. If the Edit bucket policy button is present, click it to review the bucket policy. + 10. Ensure the policy does not contain a Statement having an Effect set to Allow and a Principal set to "*" or {"AWS" : "*"} + + Via CLI: + + 1. Get the name of the S3 bucket that CloudTrail is logging to: + + aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' + + 2. Ensure the AllUsers principal is not granted privileges to that : + + aws s3api get-bucket-acl --bucket --query 'Grants[?Grantee.URI== http://acs.amazonaws.com/groups/global/AllUsers ]' + + 3. Ensure the AuthenticatedUsers principal is not granted privileges to that : + + aws s3api get-bucket-acl --bucket --query 'Grants[?Grantee.URI== http://acs.amazonaws.com/groups/global/Authenticated Users ]' + + 4. Get the S3 Bucket Policy + + aws s3api get-bucket-policy --bucket + + 5. 
Ensure the policy does not contain a Statement having an Effect set to Allow and a Principal set to "*" or {"AWS" : "*"} + + Note: Principal set to "*" or {"AWS" : "*"} allows anonymous access.`, + rationale: + 'Allowing public access to CloudTrail log content may aid an adversary in identifying weaknesses in the affected account`s use or configuration.', + remediation: `Perform the following to remove any public access that has been granted to the bucket via an ACL or S3 bucket policy: + + 1. Go to Amazon S3 console at https://console.aws.amazon.com/s3/home + 2. Right-click on the bucket and click Properties + 3. In the Properties pane, click the Permissions tab. + 4. The tab shows a list of grants, one row per grant, in the bucket ACL. Each row identifies the grantee and the permissions granted. + 5. Select the row that grants permission to Everyone or Any Authenticated User + 6. Uncheck all the permissions granted to Everyone or Any Authenticated User (click x to delete the row). + 7. Click Save to save the ACL. + 8. If the Edit bucket policy button is present, click it. + 9. 
Remove any Statement having an Effect set to Allow and a Principal set to "*" or {"AWS" : "*"}.`, + references: [ + 'CCE-78915-6', + 'https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html', + ], + gql: `{ + queryawsCloudtrail { + id + arn + accountId + __typename + s3 { + aclGrants { + granteeUri + } + policy { + statement { + effect + principal { + key + value + } + } + } + } + } + }`, + resource: 'queryawsCloudtrail[*]', + severity: 'medium', + conditions: { + not: { + path: '@.s3', + array_any: { + or: [ + { + path: '[*].aclGrants', + array_any: { + path: '[*].granteeUri', + match: /^.*(AllUsers|AuthenticatedUsers).*$/, + }, + }, + { + path: '[*].policy.statement', + array_any: { + and: [ + { + path: '[*].effect', + equal: 'Allow', + }, + { + path: '[*].principal', + array_any: { + and: [ + { + path: '[*].key', + in: ['', 'AWS'], + }, + { + path: '[*].value', + contains: '*', + }, + ], + }, + }, + ], + }, + }, + ], + }, + }, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.4.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.4.ts new file mode 100644 index 00000000..59f0c385 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.4.ts @@ -0,0 +1,92 @@ +// AWS CIS 1.2.0 Rule equivalent 2.4 +export default { + id: 'aws-cis-1.3.0-3.4', + title: + 'AWS CIS 3.4 Ensure CloudTrail trails are integrated with CloudWatch Logs', + description: `AWS CloudTrail is a web service that records AWS API calls made in a given AWS account. + The recorded information includes the identity of the API caller, the time of the API call, the + source IP address of the API caller, the request parameters, and the response elements + returned by the AWS service. CloudTrail uses Amazon S3 for log file storage and delivery, + so log files are stored durably. 
In addition to capturing CloudTrail logs within a specified S3 + bucket for long term analysis, realtime analysis can be performed by configuring CloudTrail + to send logs to CloudWatch Logs. For a trail that is enabled in all regions in an account, + CloudTrail sends log files from all those regions to a CloudWatch Logs log group. It is + recommended that CloudTrail logs be sent to CloudWatch Logs. + + Note: The intent of this recommendation is to ensure AWS account activity is being + captured, monitored, and appropriately alarmed on. CloudWatch Logs is a native way to + accomplish this using AWS services but does not preclude the use of an alternate solution.`, + audit: `Perform the following to ensure CloudTrail is configured as prescribed: + Via the AWS management Console + + 1. Sign in to the AWS Management Console and open the CloudTrail console at https://console.aws.amazon.com/cloudtrail/ + 2. Under *All Buckets* , click on the target bucket you wish to evaluate + 3. Click *Properties* on the top right of the console + 4. Click *Trails* in the left menu + 5. Ensure a *CloudWatch Logs* log group is configured and has a recent (~one day old) *Last log file delivered* timestamp. + + Via CLI + 1. Run the following command to get a listing of existing trails: + + aws cloudtrail describe-trails + + 2. Ensure *CloudWatchLogsLogGroupArn* is not empty and note the value of the Name property. + 3. Using the noted value of the *Name* property, run the following command: + + aws cloudtrail get-trail-status --name + + 4. 
Ensure the *LatestcloudwatchLogdDeliveryTime* property is set to a recent (~one day old) timestamp.`, + rationale: 'Sending CloudTrail logs to CloudWatch Logs will facilitate real-time and historic activity logging based on user, API, resource, and IP address, and provides the opportunity to establish alarms and notifications for anomalous or sensitivity account activity.', + remediation: `Perform the following to establish the prescribed state: + Via the AWS management Console + + 1. Sign in to the AWS Management Console and open the CloudTrail console at https://console.aws.amazon.com/cloudtrail/ + 2. Under All Buckets, click on the target bucket you wish to evaluate + 3. Click Properties on the top right of the console + 4. Click *Trails* in the left menu + 5. Click on each trail where no *CloudWatch Logs* are defined + 6. Go to the *CloudWatch Logs* section and click on *Configure* + 7. Define a new or select an existing log group + 8. Click on *Continue* + 9. Configure IAM Role which will deliver CloudTrail events to CloudWatch Logs + o Create/Select an *IAM Role* and *Policy Name* + o Click *Allow* to continue + + Via CLI + + aws cloudtrail update-trail --name --cloudwatch-logs-log-group- + arn --cloudwatch-logs-role-arn `, + references: [ + 'https://aws.amazon.com/cloudtrail/', + 'CCE-78916-4', + ], + gql: `{ + queryawsCloudtrail { + id + arn + accountId + __typename + cloudWatchLogsLogGroupArn + status { + latestCloudWatchLogsDeliveryTime + } + } + }`, + resource: 'queryawsCloudtrail[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.cloudWatchLogsLogGroupArn', + notEqual: null, + }, + { + value: { + daysAgo: {}, + path: '@.status.latestCloudWatchLogsDeliveryTime', + }, + lessThanInclusive: 1, + }, + ], + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.5.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.5.ts new file mode 100644 index 00000000..8d979bac --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.5.ts @@ -0,0 +1,121 @@ 
+// AWS CIS 1.2.0 Rule equivalent 2.5 +export default { + id: 'aws-cis-1.3.0-3.5', + title: 'AWS CIS 3.5 Ensure AWS Config is enabled in all regions', + description: + 'AWS Config is a web service that performs configuration management of supported AWS resources within your account and delivers log files to you. The recorded information includes the configuration item (AWS resource), relationships between configuration items (AWS resources), any configuration changes between resources. It is recommended to enable AWS Config be enabled in all regions.', + + audit: `Process to evaluate AWS Config configuration per region Via AWS Management Console: + + 1. Sign in to the AWS Management Console and open the AWS Config console at https://console.aws.amazon.com/config/. + 2. On the top right of the console select target Region. + 3. If presented with Setup AWS Config - follow remediation procedure: + 4. On the Resource inventory page, Click on edit (the gear icon). The Set Up AWS Config page appears. + 5. Ensure 1 or both check-boxes under "All Resources" is checked. + - Include global resources related to IAM resources - which needs to be enabled in 1 region only + 6. Ensure the correct S3 bucket has been defined. + 7. Ensure the correct SNS topic has been defined. + 8. Repeat steps 2 to 7 for each region. + + Via AWS Command Line Interface: + + 1. Run this command to show all AWS Config recorders and their properties: + + aws configservice describe-configuration-recorders + + 2. Evaluate the output to ensure that there's at least one recorder for which recordingGroup object includes "allSupported": true AND "includeGlobalResourceTypes": true + + Note: There is one more parameter "ResourceTypes" in recordingGroup object. 
We don't need to check the same as whenever we set "allSupported": true, AWS enforces resource types to be empty ("ResourceTypes":[]) Sample Output: + + { + "ConfigurationRecorders": [ + { + "recordingGroup": { + "allSupported": true, + "resourceTypes": [], + "includeGlobalResourceTypes": true + }, + "roleARN": "arn:aws:iam:::role/service-role/", + "name": "default" + } + ] + } + + 3. Run this command to show the status for all AWS Config recorders: + + aws configservice describe-configuration-recorder-status + + 4. In the output, find recorders with name key matching the recorders that met criteria in step 2. Ensure that at least one of them includes "recording": true and "lastStatus": "SUCCESS"`, + + rationale: + 'The AWS configuration item history captured by AWS Config enables security analysis, resource change tracking, and compliance auditing.', + + remediation: `To implement AWS Config configuration: + Via AWS Management Console: + + 1. Select the region you want to focus on in the top right of the console + 2. Click Services + 3. Click Config + 4. Define which resources you want to record in the selected region + 5. Choose to include global resources (IAM resources) + 6. Specify an S3 bucket in the same account or in another managed AWS account + 7. Create an SNS Topic from the same AWS account or another managed AWS account + + Via AWS Command Line Interface: + + 1. Ensure there is an appropriate S3 bucket, SNS topic, and IAM role per the [AWS Config Service prerequisites](https://docs.aws.amazon.com/config/latest/developerguide/gs-cli-prereq.html). + 2. Run this command to set up the configuration recorder + + aws configservice subscribe --s3-bucket my-config-bucket --sns-topic arn:aws:sns:us-east-1:012345678912:my-config-notice --iam-role arn:aws:iam::012345678912:role/myConfigRole + + 3. 
Run this command to start the configuration recorder: + + start-configuration-recorder --configuration-recorder-name `, + + references: [ + 'CCE-78917-2', + 'CIS CSC v6.0 #1.1, #1.3, #1.4, #5.2, #11.1 - #11.3, #14.6', + 'http://docs.aws.amazon.com/cli/latest/reference/configservice/describe-configuration-recorder-status.html', + ], + gql: `{ + queryawsAccount { + id + __typename + configurationRecorders { + recordingGroup { + allSupported + includeGlobalResourceTypes + } + status { + recording + lastStatus + } + } + } + }`, + resource: 'queryawsAccount[*]', + severity: 'medium', + conditions: { + path: '@.configurationRecorders', + array_any: { + and: [ + { + path: '[*].recordingGroup.allSupported', + equal: true, + }, + { + path: '[*].recordingGroup.includeGlobalResourceTypes', + equal: true, + }, + { + path: '[*].status.recording', + equal: true, + }, + { + path: '[*].status.lastStatus', + equal: 'SUCCESS', + }, + ], + }, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.6.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.6.ts new file mode 100644 index 00000000..7cf6ce08 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.6.ts @@ -0,0 +1,80 @@ +// AWS CIS 1.2.0 Rule equivalent 2.6 +export default { + id: 'aws-cis-1.3.0-3.6', + title: + 'AWS CIS 3.6 Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket', + description: `S3 Bucket Access Logging generates a log that contains access records for each request + made to your S3 bucket. An access log record contains details about the request, such as the + request type, the resources specified in the request worked, and the time and date the + request was processed. It is recommended that bucket access logging be enabled on the + CloudTrail S3 bucket.`, + audit: `Perform the following ensure the CloudTrail S3 bucket has access logging is enabled: + Via the management Console + + 1. Go to the Amazon CloudTrail console at https://console.aws.amazon.com/cloudtrail/home + 2. 
In the API activity history pane on the left, click Trails + 3. In the Trails pane, note the bucket names in the S3 bucket column + 4. Sign in to the AWS Management Console and open the S3 console at https://console.aws.amazon.com/s3. + 5. Under *All Buckets* click on a target S3 bucket + 6. Click on *Properties* in the top right of the console + 7. Under *Bucket: * click on *Logging* + 8. Ensure *Enabled* is checked. + + Via CLI + + 1. Get the name of the S3 bucket that CloudTrail is logging to: + + aws cloudtrail describe-trails --query 'trailList[*].S3BucketName' + + 2. Ensure Bucket Logging is enabled: + + aws s3api get-bucket-logging --bucket + + Ensure command does not return empty output. + Sample Output for a bucket with logging enabled: + + + { + "LoggingEnabled": { + "TargetPrefix": "", + "TargetBucket": "" + } + }`, + rationale: 'By enabling S3 bucket logging on target S3 buckets, it is possible to capture all events which may affect objects within target buckets. Configuring logs to be placed in a separate bucket allows access to log information which can be useful in security and incident response workflows.', + remediation: `Perform the following to enable S3 bucket logging: + Via the Management Console + + 1. Sign in to the AWS Management Console and open the S3 console at https://console.aws.amazon.com/s3 + 2. Under *All Buckets* click on the target S3 bucket + 3. Click on *Properties* in the top right of the console + 4. Under *Bucket: * click on *Logging* + 5. Configure bucket logging + 1. Click on *Enabled* checkbox + 2. Select Target Bucket from list + 3. Enter a Target Prefix + 6. 
Click *Save*`, + references: [ + 'CCE-78918-0', + 'https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html', + ], + gql: `{ + queryawsCloudtrail { + id + arn + accountId + __typename + s3 { + logging + } + } + }`, + resource: 'queryawsCloudtrail[*]', + severity: 'medium', + conditions: { + path: '@.s3', + array_any: { + path: '[*].logging', + equal: 'Enabled', + }, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.7.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.7.ts new file mode 100644 index 00000000..ac15073d --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.7.ts @@ -0,0 +1,72 @@ +// AWS CIS 1.2.0 Rule equivalent 2.7 +export default { + id: 'aws-cis-1.3.0-3.7', + title: + 'AWS CIS 3.7 Ensure CloudTrail logs are encrypted at rest using KMS CMKs', + description: `AWS CloudTrail is a web service that records AWS API calls for an account and makes those + logs available to users and resources in accordance with IAM policies. AWS Key + Management Service (KMS) is a managed service that helps create and control the + encryption keys used to encrypt account data, and uses Hardware Security Modules + (HSMs) to protect the security of encryption keys. CloudTrail logs can be configured to + leverage server side encryption (SSE) and KMS customer created master keys (CMK) to + further protect CloudTrail logs. It is recommended that CloudTrail be configured to use + SSE-KMS.`, + audit: `Perform the following to determine if CloudTrail is configured to use SSE-KMS: + Via the Management Console + + 1. Sign in to the AWS Management Console and open the CloudTrail console at https://console.aws.amazon.com/cloudtrail + 2. In the left navigation pane, choose *Trails*. + 3. Select a Trail + 4. Under the *S3* section, ensure *Encrypt log files* is set to *Yes* and a KMS key ID is specified in the *KSM Key Id* field. + + Via CLI + + 1. Run the following command: + + aws cloudtrail describe-trails + + 2. 
For each trail listed, SSE-KMS is enabled if the trail has a *KmsKeyId* property defined.`, + rationale: 'Configuring CloudTrail to use SSE-KMS provides additional confidentiality controls on log data as a given user must have S3 read permission on the corresponding log bucket and must be granted decrypt permission by the CMK policy.', + remediation: `Perform the following to configure CloudTrail to use SSE-KMS: + Via the Management Console + + 1. Sign in to the AWS Management Console and open the CloudTrail console at https://console.aws.amazon.com/cloudtrail + 2. In the left navigation pane, choose *Trails*. + 3. Click on a Trail + 4. Under the *S3* section click on the edit button (pencil icon) + 5. Click *Advanced* + 6. Select an existing CMK from the *KMS key Id* drop-down menu + + - Note: Ensure the CMK is located in the same region as the S3 bucket + - Note: You will need to apply a KMS Key policy on the selected CMK in order for CloudTrail as a service to encrypt and decrypt log files using the CMK provided. Steps are provided [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/create-kms-key-policy-for-cloudtrail.html) for editing the selected CMK Key policy. + + 7. Click *Save* + 8. You will see a notification message stating that you need to have decrypt + permissions on the specified KMS key to decrypt log files. + 9. 
Click *Yes* + + Via CLI + + aws cloudtrail update-trail --name --kms-id + aws kms put-key-policy --key-id --policy `, + references: [ + 'https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html', + 'https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html', + 'CCE-78919-8', + ], + gql: `{ + queryawsCloudtrail { + id + arn + accountId + __typename + kmsKeyId + } + }`, + resource: 'queryawsCloudtrail[*]', + severity: 'medium', + conditions: { + path: '@.kmsKeyId', + notEqual: null, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.8.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.8.ts new file mode 100644 index 00000000..246f4cbb --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.8.ts @@ -0,0 +1,90 @@ +// this rule is also in PCI kms check 1 and AWS CIS 1.2.0 2.8 +export default { + id: 'aws-cis-1.3.0-3.8', + title: + 'AWS CIS 3.8 Ensure rotation for customer created CMKs is enabled', + description: `AWS Key Management Service (KMS) allows customers to rotate the backing key which is + key material stored within the KMS which is tied to the key ID of the Customer Created + customer master key (CMK). It is the backing key that is used to perform cryptographic + operations such as encryption and decryption. Automated key rotation currently retains all + prior backing keys so that decryption of encrypted data can take place transparently. It is + recommended that CMK key rotation be enabled.`, + audit: `Via the Management Console: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam. + 2. In the left navigation pane, choose E*ncryption Keys*. + 3. Select a customer created master key (CMK) + 4. Under the *Key Policy* section, move down to *Key Rotation*. + 5. Ensure the *Rotate this key every year* checkbox is checked. + + Via CLI + + 1. 
Run the following command to get a list of all keys and their associated *KeyIds* + + aws kms list-keys + + 2. For each key, note the KeyId and run the following command + + aws kms get-key-rotation-status --key-id + + 3. Ensure *KeyRotationEnabled* is set to *true*`, + rationale: 'Rotating encryption keys helps reduce the potential impact of a compromised key as data encrypted with a new key cannot be accessed with a previous key that may have been exposed.', + remediation: `Via the Management Console: + + 1. Sign in to the AWS Management Console and open the IAM console at https://console.aws.amazon.com/iam. + 2. In the left navigation pane, choose *Encryption Keys*. + 3. Select a customer created master key (CMK) + 4. Under the *Key Policy* section, move down to *Key Rotation*. + 5. Check the *Rotate this key every year* checkbox. + + Via CLI + + 1. Run the following command to enable key rotation: + + aws kms enable-key-rotation --key-id `, + references: [ + 'https://aws.amazon.com/kms/pricing/', + 'https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-5/final', + 'CCE-78920-6', + ], + gql: `{ + queryawsKms { + id + arn + accountId + __typename + keyManager + keyRotationEnabled + } + }`, + resource: 'queryawsKms[*]', + severity: 'medium', + conditions: { + or: [ + { + and: [ + { + path: '@.keyManager', + equal: 'AWS', + }, + { + path: '@.keyRotationEnabled', + equal: true, + }, + ], + }, + { + and: [ + { + path: '@.keyManager', + equal: 'CUSTOMER', + }, + { + path: '@.keyRotationEnabled', + equal: true, + }, + ], + }, + ], + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.9.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.9.ts new file mode 100644 index 00000000..999bd437 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-3.9.ts @@ -0,0 +1,55 @@ +// AWS CIS 1.2.0 Rule equivalent 2.9 +export default { + id: 'aws-cis-1.3.0-3.9', + title: 'AWS CIS 3.9 Ensure VPC flow logging is enabled in all VPCs', + description: `VPC Flow Logs is a feature 
that enables you to capture information about the IP traffic + going to and from network interfaces in your VPC. After you've created a flow log, you can + view and retrieve its data in Amazon CloudWatch Logs. It is recommended that VPC Flow + Logs be enabled for packet "Rejects" for VPCs.`, + audit: `Perform the following to determine if VPC Flow logs is enabled: + Via the Management Console: + + 1. Sign into the management console + 2. Select *Services* then *VPC* + 3. In the left navigation pane, select *Your VPCs* + 4. Select a VPC + 5. In the right pane, select the *Flow Logs* tab. + 6. Ensure a Log Flow exists that has *Active* in the *Status* column.`, + rationale: 'VPC Flow Logs provide visibility into network traffic that traverses the VPC and can be used to detect anomalous traffic or insight during security workflows.', + remediation: `Perform the following to determine if VPC Flow logs is enabled: + Via the Management Console: + + 1. Sign into the management console + 2. Select *Services* then *VPC* + 3. In the left navigation pane, select *Your VPCs* + 4. Select a VPC + 5. In the right pane, select the *Flow Logs* tab. + 6. If no Flow Log exists, click *Create Flow Log* + 7. For Filter, select *Reject* + 8. Enter in a *Role* and *Destination Log Group* + 9. Click *Create Log Flow* + 10. Click on *CloudWatch Logs Group* + + **Note:** Setting the filter to "Reject" will dramatically reduce the logging data accumulation for this recommendation and provide sufficient information for the purposes of breach detection, research, and remediation. 
However, during periods of least privilege security group engineering, setting this filter to "All" can be very helpful in discovering existing traffic flows required for the proper operation of an already running environment.`, + references: [ + 'CCE-79202-8', + 'https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/flow-logs.html', + ], + gql: `{ + queryawsVpc { + id + arn + accountId + __typename + flowLog { + resourceId + } + } + }`, + resource: 'queryawsVpc[*]', + severity: 'medium', + conditions: { + path: '@.flowLog', + isEmpty: false, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.1.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.1.ts new file mode 100644 index 00000000..ff44a19f --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.1.ts @@ -0,0 +1,114 @@ +export default { + id: 'aws-cis-1.3.0-5.1', + title: 'AWS CIS 5.1 Ensure no Network ACLs allow ingress from 0.0.0.0/0 to remote server administration ports', + + description: 'The Network Access Control List (NACL) function provide stateless filtering of ingress and egress network traffic to AWS resources. It is recommended that no NACL allows unrestricted ingress access to remote server administration ports, such as SSH to port 22 and RDP to port 3389.', + + audit: `**From Console:** + Perform the following to determine if the account is configured as prescribed: + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home + 2. In the left pane, click *Network ACLs* + 3. 
For each network ACL, perform the following: + - Select the network ACL + - Click the *Inbound Rules* tab + - Ensure no rule exists that has a port range that includes port *22*, *3389*, or other remote server administration ports for your environment and has a *Source* of *0.0.0.0/0* and shows *ALLOW* + + **Note:** A Port value of *ALL* or a port range such as *0-1024* are inclusive of port *22*, *3389*, and other remote server administration ports`, + + rationale: 'Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.', + + remediation: `**From Console:** + Perform the following: + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home + 2. In the left pane, click *Network ACLs* + 3. For each network ACL to remediate, perform the following: + - Select the network ACL + - Click the *Inbound Rules* tab + - Click *Edit inbound rules* + - Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click *Delete* to remove the offending inbound rule + - Click *Save*`, + + references: [ + 'https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html', + 'https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Security.html#VPC_Security_Comparison', + ], + gql: `{ + queryawsNetworkAcl { + id + arn + accountId + __typename + inboundRules { + source + fromPort + toPort + allowOrDeny + } + } + }`, + resource: 'queryawsNetworkAcl[*]', + severity: 'high', + conditions: { + not: { + path: '@.inboundRules', + array_any: { + and: [ + { + path: '[*].source', + in: ['0.0.0.0/0', '::/0'], + }, + { + path: '[*].allowOrDeny', + equal: 'allow', + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + equal: null, + }, + { + path: '[*].toPort', + equal: null, + }, + ], + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 22, + }, + { + path: '[*].toPort', + greaterThanInclusive: 22, + }, + ], + }, 
+ { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 3389, + }, + { + path: '[*].toPort', + greaterThanInclusive: 3389, + }, + ], + }, + ] + }, + ], + }, + ], + }, + }, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.2.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.2.ts new file mode 100644 index 00000000..f09484bd --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.2.ts @@ -0,0 +1,105 @@ +export default { + id: 'aws-cis-1.3.0-5.2', + title: 'AWS CIS 5.2 Ensure no security groups allow ingress from 0.0.0.0/0 to remote server administration ports', + + description: 'Security groups provide stateful filtering of ingress and egress network traffic to AWS resources. It is recommended that no security group allows unrestricted ingress access to remote server administration ports, such as SSH to port 22 and RDP to port 3389.', + + audit: `Perform the following to determine if the account is configured as prescribed: + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home + 2. In the left pane, click *Security Groups* + 3. For each security group, perform the following: + 4. Select the security group + 5. Click the *Inbound Rules* tab + 6. Ensure no rule exists that has a port range that includes port *22*, *3389*, or other remote server administration ports for your environment and has a *Source* of *0.0.0.0/0* + + **Note:** A Port value of *ALL* or a port range such as *0-1024* are inclusive of port *22*, *3389*, and other remote server administration ports.`, + + rationale: 'Public access to remote server administration ports, such as 22 and 3389, increases resource attack surface and unnecessarily raises the risk of resource compromise.', + + remediation: `Perform the following to implement the prescribed state: + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home + 2. In the left pane, click *Security Groups* + 3. For each security group, perform the following: + 4. 
Select the security group + 5. Click the *Inbound Rules* tab + 6. Click the *Edit inbound rules* button + 7. Identify the rules to be edited or removed + 8. Either A) update the Source field to a range other than 0.0.0.0/0, or, B) Click *Delete* to remove the offending inbound rule + 9. Click *Save rules*`, + + references: ['https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-security-groups.html#deleting-security-group-rule'], + gql: `{ + queryawsSecurityGroup{ + id + arn + accountId + __typename + inboundRules{ + source + toPort + fromPort + } + } + }`, + resource: 'queryawsSecurityGroup[*]', + severity: 'high', + conditions: { + not: { + path: '@.inboundRules', + array_any: { + and: [ + { + path: '[*].source', + in: ['0.0.0.0/0', '::/0'], + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + equal: null, + }, + { + path: '[*].toPort', + equal: null, + }, + ], + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 22, + }, + { + path: '[*].toPort', + greaterThanInclusive: 22, + }, + ], + }, + { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 3389, + }, + { + path: '[*].toPort', + greaterThanInclusive: 3389, + }, + ], + }, + ] + }, + ], + }, + ], + }, + }, + }, +} diff --git a/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.3.ts b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.3.ts new file mode 100644 index 00000000..0b792377 --- /dev/null +++ b/src/aws/cis-1.3.0/rules/aws-cis-1.3.0-5.3.ts @@ -0,0 +1,111 @@ +// AWS CIS 1.2.0 Rule equivalent 4.3 +export default { + id: 'aws-cis-1.3.0-5.3', + title: + 'AWS CIS 5.3 Ensure the default security group of every VPC restricts all traffic', + description: `A VPC comes with a default security group whose initial settings deny all inbound traffic, + allow all outbound traffic, and allow all traffic between instances assigned to the security + group. If you don't specify a security group when you launch an instance, the instance is + automatically assigned to this default security group. 
Security groups provide stateful + filtering of ingress/egress network traffic to AWS resources. It is recommended that the + default security group restrict all traffic. + + The default VPC in every region should have its default security group updated to comply. + Any newly created VPCs will automatically contain a default security group that will need + remediation to comply with this recommendation. + + **NOTE:** When implementing this recommendation, VPC flow logging is invaluable in + determining the least privilege port access required by systems to work properly because it + can log all packet acceptances and rejections occurring under the current security groups. + This dramatically reduces the primary barrier to least privilege engineering - discovering + the minimum ports required by systems in the environment. Even if the VPC flow logging + recommendation in this benchmark is not adopted as a permanent security measure, it + should be used during any period of discovery and engineering for least privileged security + groups.`, + audit: `Perform the following to determine if the account is configured as prescribed: + Security Group State + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home + 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: + 3. In the left pane, click *Security Groups* + 4. For each default security group, perform the following: + 5. Select the *default* security group + 6. Click the *Inbound Rules* tab + 7. Ensure no rule exist + 8. Click the *Outbound Rules* tab + 9. Ensure no rules exist + + Security Group Members + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home + 2. Repeat the next steps for all default groups in all VPCs - including the default VPC in each AWS region: + 3. In the left pane, click *Security Groups* + 4. Copy the id of the default security group. + 5. 
Change to the EC2 Management Console at https://console.aws.amazon.com/ec2/v2/home + 6. In the filter column type 'Security Group ID : < security group id from #4 >`, + rationale: 'Configuring all VPC default security groups to restrict all traffic will encourage least privilege security group development and mindful placement of AWS resources into security groups which will, in turn, reduce the exposure of those resources.', + remediation: `Security Group Members + Perform the following to implement the prescribed state: + + 1. Identify AWS resources that exist within the default security group + 2. Create a set of least privilege security groups for those resources + 3. Place the resources in those security groups + 4. Remove the resources noted in #1 from the default security group + + Security Group State + + 1. Login to the AWS Management Console at https://console.aws.amazon.com/vpc/home + 2. Repeat the next steps for all VPCs - including the default VPC in each AWS region: + 3. In the left pane, click *Security Groups* + 4. For each default security group, perform the following: + 5. Select the *default* security group + 6. Click the *Inbound Rules* tab + 7. Remove any inbound rules + 8. Click the *Outbound Rules* tab + 9. Remove any inbound rules + + Recommended: + IAM groups allow you to edit the "name" field. After remediating default groups rules for all VPCs in all regions, edit this field to add text similar to "DO NOT USE. 
DO NOT ADD RULES"`, + references: [ + 'CCE-79201-0', + 'http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html', + 'CIS CSC v6.0 #9.2', + ], + gql: `{ + queryawsSecurityGroup(filter: { name: { eq: "default" } }) { + id + name + arn + accountId + __typename + inboundRules{ + source + } + outboundRules{ + destination + } + } + }`, + resource: 'queryawsSecurityGroup[*]', + severity: 'high', + conditions: { + not: { + or: [ + { + path: '@.inboundRules', + array_any: { + path: '[*].source', + in: ['0.0.0.0/0', '::/0'], + }, + }, + { + path: '@.outboundRules', + array_any: { + path: '[*].destination', + in: ['0.0.0.0/0', '::/0'], + }, + }, + ], + }, + }, +} diff --git a/src/aws/cis-1.3.0/rules/index.ts b/src/aws/cis-1.3.0/rules/index.ts index 4be51268..90edb948 100644 --- a/src/aws/cis-1.3.0/rules/index.ts +++ b/src/aws/cis-1.3.0/rules/index.ts @@ -1,12 +1,39 @@ import Aws_CIS_130_11 from './aws-cis-1.3.0-1.1' import Aws_CIS_130_12 from './aws-cis-1.3.0-1.2' import Aws_CIS_130_13 from './aws-cis-1.3.0-1.3' +import Aws_CIS_130_14 from './aws-cis-1.3.0-1.4' +import Aws_CIS_130_15 from './aws-cis-1.3.0-1.5' +import Aws_CIS_130_16 from './aws-cis-1.3.0-1.6' +import Aws_CIS_130_17 from './aws-cis-1.3.0-1.7' +import Aws_CIS_130_18 from './aws-cis-1.3.0-1.8' +import Aws_CIS_130_19 from './aws-cis-1.3.0-1.9' +import Aws_CIS_130_110 from './aws-cis-1.3.0-1.10' import Aws_CIS_130_111 from './aws-cis-1.3.0-1.11' +import Aws_CIS_130_112 from './aws-cis-1.3.0-1.12' +import Aws_CIS_130_113 from './aws-cis-1.3.0-1.13' +import Aws_CIS_130_114 from './aws-cis-1.3.0-1.14' +import Aws_CIS_130_115 from './aws-cis-1.3.0-1.15' +import Aws_CIS_130_116 from './aws-cis-1.3.0-1.16' +import Aws_CIS_130_117 from './aws-cis-1.3.0-1.17' import Aws_CIS_130_118 from './aws-cis-1.3.0-1.18' +import Aws_CIS_130_119 from './aws-cis-1.3.0-1.19' +import Aws_CIS_130_120 from './aws-cis-1.3.0-1.20' +import Aws_CIS_130_121 from './aws-cis-1.3.0-1.21' import Aws_CIS_130_122 from 
'./aws-cis-1.3.0-1.22' import Aws_CIS_130_211 from './aws-cis-1.3.0-2.1.1' import Aws_CIS_130_212 from './aws-cis-1.3.0-2.1.2' import Aws_CIS_130_221 from './aws-cis-1.3.0-2.2.1' +import Aws_CIS_130_31 from './aws-cis-1.3.0-3.1' +import Aws_CIS_130_32 from './aws-cis-1.3.0-3.2' +import Aws_CIS_130_33 from './aws-cis-1.3.0-3.3' +import Aws_CIS_130_34 from './aws-cis-1.3.0-3.4' +import Aws_CIS_130_35 from './aws-cis-1.3.0-3.5' +import Aws_CIS_130_36 from './aws-cis-1.3.0-3.6' +import Aws_CIS_130_37 from './aws-cis-1.3.0-3.7' +import Aws_CIS_130_38 from './aws-cis-1.3.0-3.8' +import Aws_CIS_130_39 from './aws-cis-1.3.0-3.9' +import Aws_CIS_130_310 from './aws-cis-1.3.0-3.10' +import Aws_CIS_130_311 from './aws-cis-1.3.0-3.11' import Aws_CIS_130_41 from './aws-cis-1.3.0-4.1' import Aws_CIS_130_42 from './aws-cis-1.3.0-4.2' import Aws_CIS_130_43 from './aws-cis-1.3.0-4.3' @@ -22,19 +49,48 @@ import Aws_CIS_130_412 from './aws-cis-1.3.0-4.12' import Aws_CIS_130_413 from './aws-cis-1.3.0-4.13' import Aws_CIS_130_414 from './aws-cis-1.3.0-4.14' import Aws_CIS_130_415 from './aws-cis-1.3.0-4.15' +import Aws_CIS_130_51 from './aws-cis-1.3.0-5.1' +import Aws_CIS_130_52 from './aws-cis-1.3.0-5.2' +import Aws_CIS_130_53 from './aws-cis-1.3.0-5.3' import Aws_CIS_130_54 from './aws-cis-1.3.0-5.4' - export default [ Aws_CIS_130_11, Aws_CIS_130_12, Aws_CIS_130_13, + Aws_CIS_130_14, + Aws_CIS_130_15, + Aws_CIS_130_16, + Aws_CIS_130_17, + Aws_CIS_130_18, + Aws_CIS_130_19, + Aws_CIS_130_110, Aws_CIS_130_111, + Aws_CIS_130_112, + Aws_CIS_130_113, + Aws_CIS_130_114, + Aws_CIS_130_115, + Aws_CIS_130_116, + Aws_CIS_130_117, Aws_CIS_130_118, + Aws_CIS_130_119, + Aws_CIS_130_120, + Aws_CIS_130_121, Aws_CIS_130_122, Aws_CIS_130_211, Aws_CIS_130_212, Aws_CIS_130_221, + Aws_CIS_130_31, + Aws_CIS_130_32, + Aws_CIS_130_33, + Aws_CIS_130_34, + Aws_CIS_130_35, + Aws_CIS_130_36, + Aws_CIS_130_37, + Aws_CIS_130_38, + Aws_CIS_130_39, + Aws_CIS_130_310, + Aws_CIS_130_311, Aws_CIS_130_41, 
Aws_CIS_130_42, Aws_CIS_130_43, @@ -50,5 +106,8 @@ export default [ Aws_CIS_130_413, Aws_CIS_130_414, Aws_CIS_130_415, + Aws_CIS_130_51, + Aws_CIS_130_52, + Aws_CIS_130_53, Aws_CIS_130_54, ] diff --git a/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-1.x.test.ts b/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-1.x.test.ts new file mode 100644 index 00000000..e90bc42e --- /dev/null +++ b/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-1.x.test.ts @@ -0,0 +1,1054 @@ +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' +import cuid from 'cuid' + +import Aws_CIS_130_14 from '../rules/aws-cis-1.3.0-1.4' +import Aws_CIS_130_15 from '../rules/aws-cis-1.3.0-1.5' +import Aws_CIS_130_16 from '../rules/aws-cis-1.3.0-1.6' +import Aws_CIS_130_17 from '../rules/aws-cis-1.3.0-1.7' +import Aws_CIS_130_18 from '../rules/aws-cis-1.3.0-1.8' +import Aws_CIS_130_19 from '../rules/aws-cis-1.3.0-1.9' +import Aws_CIS_130_110 from '../rules/aws-cis-1.3.0-1.10' +import Aws_CIS_130_112 from '../rules/aws-cis-1.3.0-1.12' +import Aws_CIS_130_113 from '../rules/aws-cis-1.3.0-1.13' +import Aws_CIS_130_114 from '../rules/aws-cis-1.3.0-1.14' +import Aws_CIS_130_115 from '../rules/aws-cis-1.3.0-1.15' +import Aws_CIS_130_116 from '../rules/aws-cis-1.3.0-1.16' +import Aws_CIS_130_117 from '../rules/aws-cis-1.3.0-1.17' +import Aws_CIS_130_119 from '../rules/aws-cis-1.3.0-1.19' +import Aws_CIS_130_120 from '../rules/aws-cis-1.3.0-1.20' +import Aws_CIS_130_121 from '../rules/aws-cis-1.3.0-1.21' + +export interface VirtualMfaDevice { + serialNumber: string +} + +export interface AccessKeyData { + lastUsedDate?: string + status?: string + lastRotated?: string +} + +export interface IamAttachedPolicy { + arn?: string + name?: string +} + +export interface Statement { + effect?: string + action?: string[] + resource?: string[] +} + +export interface AssumeRolePolicy { + statement: Statement[] +} + +export interface PolicyContent { + statement: Statement[] +} +export interface QueryawsIamPolicy { + id: string + 
policyContent: PolicyContent +} + +export interface QueryawsIamUser { + id: string + name?: string + accessKeysActive?: boolean + mfaActive?: boolean + accountId?:string + virtualMfaDevices?: VirtualMfaDevice[] + passwordEnabled?: boolean + passwordLastUsed?: string + accessKeyData?: AccessKeyData[] + iamAttachedPolicies?: IamAttachedPolicy[] + inlinePolicies?: string[] + +} + +export interface QueryawsIamPasswordPolicy { + id: string + minimumPasswordLength?: number + requireNumbers?: boolean + passwordReusePrevention?: number + requireLowercaseCharacters?: boolean + requireSymbols?: boolean + requireUppercaseCharacters?: boolean + expirePasswords?: boolean + maxPasswordAge?: number +} + +export interface iamRole { + arn: string +} + +export interface iamGroup { + arn: string +} + +export interface IamUser { + arn: string +} + +export interface IamPolicy { + name: string + iamUsers: IamUser[] + iamGroups: iamGroup[] + iamRoles: iamRole[] +} + +export interface IamAccessAnalyzer { + status: string + region: string +} + +export interface QueryawsIamServerCertificate { + id: string + expiration: string +} + +export interface QueryawsAccount { + id: string + regions?: string[] + iamPolicies?: IamPolicy[] + iamAccessAnalyzers?: IamAccessAnalyzer[] +} + +export interface QueryawsS3 { + id: string + versioning?: string + mfa?: string + blockPublicAcls?: string + ignorePublicAcls?: string + blockPublicPolicy?: string + restrictPublicBuckets?: string +} + +export interface CIS1xQueryResponse { + queryawsIamUser?: QueryawsIamUser[] + queryawsIamPasswordPolicy?: QueryawsIamPasswordPolicy[] + queryawsIamPolicy?: QueryawsIamPolicy[] + queryawsAccount?: QueryawsAccount[] + queryawsIamServerCertificate?: QueryawsIamServerCertificate[] + queryawsS3?: QueryawsS3[] +} + +describe('CIS Amazon Web Services Foundations: 1.3.0', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'aws', + entityName: 'CIS', + }) + }) + + 
describe('AWS CIS 1.4 Ensure no root user account access key exists', () => { + const getTestRuleFixture = ( + accessKeysActive: boolean + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + accessKeysActive, + }, + ] + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_14 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a root account does not have any access key active', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(false) + await testRule(data, Result.PASS) + }) + + test('Security Issue when a root account has at least one access key active', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(true) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.5 Ensure MFA is enabled for the root user account', () => { + const getTestRuleFixture = ( + mfaActive: boolean + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + name: 'root', + mfaActive, + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_15 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a root account has a mfa device active', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(true) + await testRule(data, Result.PASS) + }) + + test('Security Issue when a root account has not a mfa device active', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(false) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.6 Ensure hardware MFA is enabled for the root user account', () => { + const 
getTestRuleFixture = ( + mfaActive: boolean + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + name: 'root', + mfaActive, + accountId: '123456', + virtualMfaDevices: [ + { + serialNumber: 'arn:aws:iam::123456:mfa/some-account-mfa-device', + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_16 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a root account has a mfa hardware device active', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(true) + await testRule(data, Result.PASS) + }) + + test('Security Issue when a root account has a mfa hardware device deactivate', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(false) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.6 Eliminate use of the root user for administrative and daily tasks', () => { + const getTestRuleFixture = ( + mfaActive: boolean + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + name: 'root', + mfaActive, + accountId: '123456', + virtualMfaDevices: [ + { + serialNumber: 'arn:aws:iam::123456:mfa/some-account-mfa-device', + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_16 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a root account has a mfa hardware device active', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(true) + await testRule(data, Result.PASS) + }) + + test('Security Issue when a root account has a mfa hardware device deactivate', async () => { + const data: 
CIS1xQueryResponse = getTestRuleFixture(false) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.7 Eliminate use of the root user for administrative and daily tasks', () => { + const getTestRuleFixture = ( + passwordLastUsed: string, + status: string, + lastUsedDate: string + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + passwordEnabled: true, + passwordLastUsed, + accessKeysActive: true, + accessKeyData: [ + { + status, + lastUsedDate, + }, + { + status: 'Active', + lastUsedDate: '2022-01-01T17:20:19.000Z', + } + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_17 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when when a root account does not uses his password in the last 90 days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('2021-04-08T17:20:19.000Z', 'Active', '2021-10-08T17:20:19.000Z') + await testRule(data, Result.PASS) + }) + + test('No Security Issue when when a root account does not uses his password in the last 90 days and not have access Keys', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('2021-04-08T17:20:19.000Z', '', '') + const user = data.queryawsIamUser?.[0] as QueryawsIamUser + user.accessKeyData = [] + await testRule(data, Result.PASS) + }) + + test('Security Issue when a root account uses his password in the last 90 days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(new Date().toISOString(), 'Active', '2021-10-08T17:20:19.000Z') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when a root account uses his access Keys in the last 90 days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('2021-04-08T17:20:19.000Z', 'Active', new Date().toISOString()) + 
await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.8 Ensure IAM password policy requires minimum length of 14 or greater', () => { + const getTestRuleFixture = ( + minimumPasswordLength: number + ): CIS1xQueryResponse => { + return { + queryawsIamPasswordPolicy: [ + { + id: cuid(), + minimumPasswordLength, + }, + ] + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_18 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when password policy minimum length is greater than or equal to 14', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(14) + await testRule(data, Result.PASS) + }) + + test('Security Issue when password policy minimum length is less than 14', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(13) + await testRule(data, Result.FAIL) + }) + + }) + + describe('AWS CIS 1.9 Ensure IAM password policy prevents password reuse', () => { + const getTestRuleFixture = ( + passwordReusePrevention: number + ): CIS1xQueryResponse => { + return { + queryawsIamPasswordPolicy: [ + { + id: cuid(), + passwordReusePrevention, + }, + ] + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_19 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue if the number of previous passwords is more than 24', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(25) + await testRule(data, Result.PASS) + }) + + test('Security Issue if the number of previous passwords is less than 24', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(23) + await testRule(data, Result.FAIL) + 
}) + + }) + + describe('AWS CIS 1.10 Ensure multi-factor authentication (MFA) is enabled for all IAM users that have a console password', () => { + const getTestRuleFixture = ( + passwordEnabled: boolean, + mfaActive: boolean + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + passwordEnabled, + mfaActive, + }, + ] + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_110 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a user has no active password', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(false, true) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when a user has an active password with an mfa device register', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(true, true) + await testRule(data, Result.PASS) + }) + + test('Security Issue when a user has an active password without an mfa device register', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(true, false) + await testRule(data, Result.FAIL) + }) + + }) + + describe('AWS CIS 1.12 Ensure credentials unused for 90 days or greater are disabled', () => { + const getTestRuleFixture = ( + passwordLastUsed: string, + lastUsedDate: string + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + passwordLastUsed, + accessKeyData: [ + { + lastUsedDate, + } + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_112 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are an access key and password used for less than 90 
days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(new Date().toISOString(), new Date().toISOString()) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when no password last used AND no access key data', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('', '') + const queryawsIamUser = data.queryawsIamUser?.[0] as QueryawsIamUser + queryawsIamUser.accessKeyData = [] + await testRule(data, Result.PASS) + }) + + test('Security Issue when there are an access key unused for more than 90 days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(new Date().toISOString(), '2021-05-27T20:29:00.000Z') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when there are a passwoord unused for more than 90 days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('2021-05-27T20:29:00.000Z', new Date().toISOString()) + const queryawsIamUser = data.queryawsIamUser?.[0] as QueryawsIamUser + queryawsIamUser.accessKeyData = [] + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.13 Ensure there is only one active access key available for any single IAM user', () => { + const getTestRuleFixture = ( + status: string + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + accessKeyData: [ + { + status, + }, + { + status: 'Active', + } + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_113 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is only one active access key available for any single IAM user', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Inactive') + await testRule(data, Result.PASS) + }) + + test('Security Issue when there are more than one active access key 
available for any single IAM user', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Active') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.14 Ensure access keys are rotated every 90 days or less', () => { + const getTestRuleFixture = ( + status: string, + lastRotated: string + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + accessKeyData: [ + { + status, + lastRotated, + }, + { + status: 'Active', + lastRotated: '2021-08-27T15:00:44.000Z', + } + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_114 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when users have an active access key created for less than 90 days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Active', new Date().toISOString()) + await testRule(data, Result.PASS) + }) + + test('Security Issue when users have an active access key created for more than 90 days', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Active', '2021-09-23T15:56:01.000Z') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.15 Ensure IAM Users Receive Permissions Only Through Groups', () => { + const getTestRuleFixture = ( + iamAttachedPolicies: IamAttachedPolicy[], + inlinePolicies: string[] + ): CIS1xQueryResponse => { + return { + queryawsIamUser: [ + { + id: cuid(), + iamAttachedPolicies, + inlinePolicies, + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_115 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when users does not 
have attached policies directly', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture([],[]) + await testRule(data, Result.PASS) + }) + + test('Security Issue when users have attached policies directly', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture([{ arn: cuid() }], ['inline_test']) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.16 Ensure IAM policies that allow full "*:*" administrative privileges are not attached', () => { + const getTestRuleFixture = ( + effect: string, + action: string[], + resource: string[] + ): CIS1xQueryResponse => { + return { + queryawsIamPolicy: [ + { + id: cuid(), + policyContent: { + statement: [ + { + effect, + action, + resource + } + ] + } + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_116 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when IAM policies not allow full "*:*" administrative privileges', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Allow', [ + 'secretsmanager:DeleteSecret', + 'secretsmanager:GetSecretValue', + 'secretsmanager:UpdateSecret', + ], ['arn:aws:secretsmanager:*:*:secret:A4B*']) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when IAM policies that have a statement with "Effect": "Allow" with "Action": "*" over restricted "Resource"', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Allow', ['*'], ['arn:aws:secretsmanager:*:*:secret:A4B*']) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when IAM policies that have a statement with "Effect": "Allow" with restricted "Action" over "Resource": "*"', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Allow', [ + 'secretsmanager:DeleteSecret', + 
'secretsmanager:GetSecretValue', + 'secretsmanager:UpdateSecret', + ], ['*']) + await testRule(data, Result.PASS) + }) + + test('Security Issue when IAM policies allow full "*:*" administrative privileges', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('Allow', ['*'], ['*']) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.17 Ensure a support role has been created to manage incidents with AWS Support', () => { + const getTestRuleFixture = ( + name: string, + iamUsers: IamUser[], + iamGroups: iamGroup[], + iamRoles: iamRole[] + ): CIS1xQueryResponse => { + return { + queryawsAccount: [ + { + id: cuid(), + iamPolicies: [ + { + name, + iamUsers, + iamGroups, + iamRoles + } + ] + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_117 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when AWSSupportAccess is attached to IAM users', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('AWSSupportAccess', [{arn: 'arn:aws:iam::632941798677:user/test'}], [], []) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when AWSSupportAccess is attached to any IAM groups', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('AWSSupportAccess', [], [{arn: 'arn:aws:iam::632941798677:user/test'}], []) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when AWSSupportAccess is attached to any IAM roles', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('AWSSupportAccess', [], [], [{arn: 'arn:aws:iam::632941798677:user/test'}]) + await testRule(data, Result.PASS) + }) + + test('Security Issue when AWSSupportAccess is not attached to any IAM user, group or role', async () => { + const data: CIS1xQueryResponse = 
getTestRuleFixture('AWSSupportAccess', [], [], []) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when AWSSupportAccess does not exist', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('PolicyTest', [], [], [{arn: 'arn:aws:iam::632941798677:user/test'}]) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when there are no IAM policies', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('', [], [], []) + const account = data.queryawsAccount?.[0] as QueryawsAccount + account.iamPolicies = [] + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.19 Ensure that all the expired SSL/TLS certificates stored in AWS IAM are removed', () => { + const getTestRuleFixture = ( + expiration: string, + ): CIS1xQueryResponse => { + return { + queryawsIamServerCertificate: [ + { + id: cuid(), + expiration + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_119 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are no expired SSL/TLS certificates', async () => { + const day = 1000 * 60 * 60 * 24 + const data: CIS1xQueryResponse = getTestRuleFixture(new Date(Date.now() + 30 * day).toISOString()) + await testRule(data, Result.PASS) + }) + + test('Security Issue when there are expired SSL/TLS certificates', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture(new Date().toISOString()) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.20 Ensure that S3 Buckets are configured with Block public access (bucket settings)', () => { + const getTestRuleFixture = ( + blockPublicAcls: string, + ignorePublicAcls: string, + blockPublicPolicy: string, + restrictPublicBuckets: string + ): CIS1xQueryResponse => { + return { + queryawsS3: [ + { 
+ id: cuid(), + blockPublicAcls, + ignorePublicAcls, + blockPublicPolicy, + restrictPublicBuckets, + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_120 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when S3 Buckets are configured with Block public access', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture( + 'Yes', + 'Yes', + 'Yes', + 'Yes' + ) + await testRule(data, Result.PASS) + }) + + test('Security Issue when S3 Buckets are not configured with Block public access', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture( + 'No', + 'No', + 'No', + 'No' + ) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when S3 Buckets have a Block public access with blockPublicAcls set to No', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture( + 'No', + 'Yes', + 'Yes', + 'Yes' + ) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when S3 Buckets have a Block public access with ignorePublicAcls set to No', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture( + 'Yes', + 'No', + 'Yes', + 'Yes' + ) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when S3 Buckets have a Block public access with blockPublicPolicy set to No', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture( + 'Yes', + 'Yes', + 'No', + 'Yes' + ) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when S3 Buckets have a Block public access with restrictPublicBuckets set to No', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture( + 'Yes', + 'Yes', + 'Yes', + 'No' + ) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 1.21 Ensure that IAM Access analyzer is enabled', () => { + const getTestRuleFixture = ( + 
statusRegion1: string, + statusRegion2: string, + ): CIS1xQueryResponse => { + return { + queryawsAccount: [ + { + id: cuid(), + regions: ['us-east-1', 'us-east-2'], + iamAccessAnalyzers: [ + { + region: 'us-east-1', + status: statusRegion1, + }, + { + region: 'us-east-2', + status: statusRegion2, + } + ] + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_121 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when at least one analyzer is enabled for all regions', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('ACTIVE','ACTIVE') + await testRule(data, Result.PASS) + }) + + test('Security Issue when there are an analyzer disabled for some region', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('ACTIVE', 'INACTIVE') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when no analyzer enabled for any region', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('INACTIVE', 'INACTIVE') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when when no analyzer is configured', async () => { + const data: CIS1xQueryResponse = getTestRuleFixture('','') + const account = data.queryawsAccount?.[0] as QueryawsAccount + account.iamAccessAnalyzers = [] + await testRule(data, Result.FAIL) + }) + }) +}) \ No newline at end of file diff --git a/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-3.x.test.ts b/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-3.x.test.ts new file mode 100644 index 00000000..d9f55702 --- /dev/null +++ b/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-3.x.test.ts @@ -0,0 +1,741 @@ +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' +import cuid from 'cuid' + +import Aws_CIS_130_31 from '../rules/aws-cis-1.3.0-3.1' +import Aws_CIS_130_32 from 
'../rules/aws-cis-1.3.0-3.2' +import Aws_CIS_130_33 from '../rules/aws-cis-1.3.0-3.3' +import Aws_CIS_130_34 from '../rules/aws-cis-1.3.0-3.4' +import Aws_CIS_130_35 from '../rules/aws-cis-1.3.0-3.5' +import Aws_CIS_130_36 from '../rules/aws-cis-1.3.0-3.6' +import Aws_CIS_130_37 from '../rules/aws-cis-1.3.0-3.7' +import Aws_CIS_130_38 from '../rules/aws-cis-1.3.0-3.8' +import Aws_CIS_130_39 from '../rules/aws-cis-1.3.0-3.9' +import Aws_CIS_130_310 from '../rules/aws-cis-1.3.0-3.10' +import Aws_CIS_130_311 from '../rules/aws-cis-1.3.0-3.11' + +export interface DataResource { + type: string +} + +export interface EventSelector { + readWriteType?: string + includeManagementEvents?: boolean + dataResources?: DataResource[] +} + +export interface Status { + isLogging?: boolean + latestCloudWatchLogsDeliveryTime?: string | null + recording?: boolean + lastStatus?: string +} + +export interface Cloudtrail { + isMultiRegionTrail?: string + status?: Status + eventSelectors?: EventSelector[] +} + +export interface Principal { + key?: string + value?: string[] +} + +export interface Statement { + effect?: string + principal?: Principal[] +} + +export interface Policy { + statement?: Statement[] +} + +export interface AclGrant { + granteeUri: string | undefined +} + +export interface S3 { + policy?: Policy + logging?: string + aclGrants?: AclGrant[] +} + +export interface RecordingGroup { + allSupported?: boolean + includeGlobalResourceTypes?: boolean +} + +export interface ConfigurationRecorder { + status?: Status + recordingGroup?: RecordingGroup +} + +export interface FlowLog { + resourceId?: string +} + +export interface QueryawsAccount { + id: string + cloudtrail?: Cloudtrail[] + configurationRecorders?: ConfigurationRecorder[] +} + +export interface QueryawsCloudtrail { + id: string + logFileValidationEnabled?: string + cloudWatchLogsLogGroupArn?: string | null + s3?: S3[] + status?: Status + kmsKeyId?: string | null +} + +export interface QueryawsKms { + id: string + 
keyManager: string + keyRotationEnabled: boolean +} + +export interface QueryawsVpc { + id: string + flowLog: FlowLog[] +} + +export interface CIS3xQueryResponse { + queryawsAccount?: QueryawsAccount[] + queryawsCloudtrail?: QueryawsCloudtrail[] + queryawsKms?: QueryawsKms[] + queryawsVpc?: QueryawsVpc[] +} + +describe('CIS Amazon Web Services Foundations: 1.3.0', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'aws', + entityName: 'CIS', + }) + }) + + describe('AWS CIS 3.1 Ensure CloudTrail is enabled in all regions', () => { + const getTestRuleFixture = ( + isMultiRegionTrail: string, + isLogging: boolean, + readWriteType: string, + includeManagementEvents: boolean + ): CIS3xQueryResponse => { + return { + queryawsAccount: [ + { + id: cuid(), + cloudtrail: [ + { + isMultiRegionTrail, + status: { + isLogging, + }, + eventSelectors: [ + { + readWriteType, + includeManagementEvents, + }, + ], + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_31 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a trail has set IsMultiRegionTrail and isLogging as true with at least one Event Selector with IncludeManagementEvents set to true and ReadWriteType set to All', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture( + 'Yes', + true, + 'All', + true + ) + await testRule(data, Result.PASS) + }) + + test('Security Issue when a trail has set IsMultiRegionTrail is set to false', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture( + 'No', + true, + 'All', + true + ) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when a trail has set isLogging is set to false', async () => { + const data: CIS3xQueryResponse = 
getTestRuleFixture( + 'Yes', + false, + 'All', + true + ) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when a trail has set multi region as true with all read-write type and include management events false', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture( + 'Yes', + true, + 'All', + false + ) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when there not are any trail', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('', true, '', true) + const account = data.queryawsAccount?.[0] as QueryawsAccount + account.cloudtrail = [] + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.2 Ensure CloudTrail log file validation is enabled', () => { + const getTestRuleFixture = ( + logFileValidationEnabled: string + ): CIS3xQueryResponse => { + return { + queryawsCloudtrail: [ + { + id: cuid(), + logFileValidationEnabled, + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_32 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a trail has log file validation enabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Yes') + await testRule(data, Result.PASS) + }) + test('Security Issue when a trail has log file validation disabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('No') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.3 Ensure the S3 bucket used to store CloudTrail logs is not publicly accessible', () => { + const getTestRuleFixture = ( + effect: string, + key: string, + value: string[], + granteeUri?: string | undefined, + ): CIS3xQueryResponse => { + return { + queryawsCloudtrail: [ + { + id: cuid(), + s3: [ + { + aclGrants: [ + { + granteeUri + } + ], + policy: { + statement: 
[ + { + effect, + principal: [ + { + key, + value, + }, + ], + }, + ], + }, + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_33 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a policy contains a statement having an Effect set to Allow and a Principal not set to "*" or {"AWS" : "*"}', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Allow', 'Service', [ + 'cloudtrail.amazonaws.com', + ]) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when no exists any ACL Grantee set to Everyone or Any Authenticated User.', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Allow', 'Service', [ + 'cloudtrail.amazonaws.com', + ], 'http://acs.amazonaws.com/groups/s3/LogDelivery') + await testRule(data, Result.PASS) + }) + + test('Security Issue when a policy contains a statement having an Effect set to Allow and a Principal set to "*"', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Allow', '', ['*']) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when a policy contains a statement having an Effect set to Allow and a Principal set to {"AWS" : "*"}', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Allow', 'AWS', ['*']) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when exists an ACL Grantee set to Everyone.', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Allow', 'Service', [ + 'cloudtrail.amazonaws.com', + ], 'http://acs.amazonaws.com/groups/global/AllUsers') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when exists an ACL Grantee set to Any Authenticated User.', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Allow', 'Service', [ + 
'cloudtrail.amazonaws.com', + ], 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.4 Ensure CloudTrail trails are integrated with CloudWatch Logs', () => { + const getTestRuleFixture = ( + cloudWatchLogsLogGroupArn: string | null, + latestCloudWatchLogsDeliveryTime: string | null + ): CIS3xQueryResponse => { + return { + queryawsCloudtrail: [ + { + id: cuid(), + cloudWatchLogsLogGroupArn, + status: { + latestCloudWatchLogsDeliveryTime, + }, + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_34 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a trail has cloudwatch logs integrated with a delivery date no more than a day', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(cuid(), new Date().toISOString()) + await testRule(data, Result.PASS) + }) + + test('Security Issue when a trail has cloudwatch logs integrated with a delivery date more than a day', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(cuid(), '2021-11-20T16:18:21.724Z') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when a trail does not have cloudwatch logs integrated', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(null, null) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.5 Ensure AWS Config is enabled in all regions', () => { + const getTestRuleFixture = ( + allSupported: boolean, + includeGlobalResourceTypes: boolean, + recording: boolean, + lastStatus: string + ): CIS3xQueryResponse => { + return { + queryawsAccount: [ + { + id: cuid(), + configurationRecorders: [ + { + recordingGroup: { + allSupported, + includeGlobalResourceTypes, + }, + status: { + recording, + lastStatus, + }, + }, 
+ ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_35 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a configuration recorder is enabled in all regions', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, true, true, 'SUCCESS') + await testRule(data, Result.PASS) + }) + + test('Security Issue when a configuration recorder has recordingGroup object includes "allSupported": false', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(false, true, true, 'SUCCESS') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when a configuration recorder has recordingGroup object includes "includeGlobalResourceTypes": false', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, false, true, 'SUCCESS') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when a configuration recorder has status object includes "lastStatus" not "SUCCESS"', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, true, true, 'FAILED') + await testRule(data, Result.FAIL) + }) + + test('Security Issue when there not are any configurationRecorder', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, true, true, 'SUCCESS') + const account = data.queryawsAccount?.[0] as QueryawsAccount + account.configurationRecorders = [] + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.6 Ensure S3 bucket access logging is enabled on the CloudTrail S3 bucket', () => { + const getTestRuleFixture = ( + logging: string + ): CIS3xQueryResponse => { + return { + queryawsCloudtrail: [ + { + id: cuid(), + s3: [ + { + logging, + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: 
Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_36 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when a trails bucket has access logging enabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Enabled') + await testRule(data, Result.PASS) + }) + + test('Security Issue when a trails bucket has access logging disabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('Disabled') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.7 Ensure CloudTrail logs are encrypted at rest using KMS CMKs', () => { + const getTestRuleFixture = ( + kmsKeyId: string | null + ): CIS3xQueryResponse => { + return { + queryawsCloudtrail: [ + { + id: cuid(), + kmsKeyId, + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_37 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when cloudtrail logs are encrypted using a KMS key', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(cuid()) + await testRule(data, Result.PASS) + }) + + test('Security Issue when cloudtrail logs are not encrypted', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(null) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.8 Ensure rotation for customer created CMKs is enabled', () => { + const getTestRuleFixture = ( + keyManager: string, + keyRotationEnabled: boolean + ): CIS3xQueryResponse => { + return { + queryawsKms: [ + { + id: cuid(), + keyManager, + keyRotationEnabled + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await 
rulesEngine.processRule( + Aws_CIS_130_38 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when rotation is enabled with AWS as a manager', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('AWS', true) + await testRule(data, Result.PASS) + }) + + test('Security Issue when rotation is disabled with customer as a manager', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('CUSTOMER', false) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when rotation is disabled with AWS as a manager', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture('AWS', false) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.9 Ensure VPC flow logging is enabled in all VPCs', () => { + const getTestRuleFixture = ( + flowLog: FlowLog[], + ): CIS3xQueryResponse => { + return { + queryawsVpc: [ + { + id: cuid(), + flowLog + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_39 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when flow logging is enabled for each VPC', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture([{resourceId: cuid()}]) + await testRule(data, Result.PASS) + }) + + test('Security Issue when flow logging is disabled on one VPC', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture([]) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.10 Ensure that Object-level logging for write events is enabled for S3 bucket', () => { + const getTestRuleFixture = ( + includeManagementEvents: boolean, + readWriteType: string, + dataResources: DataResource[] + ): CIS3xQueryResponse => { + return { + queryawsAccount: [ + { + id: cuid(), + cloudtrail: 
[ + { + eventSelectors: [ + { + includeManagementEvents, + readWriteType, + dataResources, + }, + ], + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_310 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when S3 bucket object-level logging for write events is enabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, 'WriteOnly', [ + { type: 'AWS::S3::Object' }, + ]) + await testRule(data, Result.PASS) + }) + + test('Security Issue when S3 bucket object-level logging for write events is not enabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, 'WriteOnly', []) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS CIS 3.11 Ensure that Object-level logging for read events is enabled for S3 bucket', () => { + const getTestRuleFixture = ( + includeManagementEvents: boolean, + readWriteType: string, + dataResources: DataResource[] + ): CIS3xQueryResponse => { + return { + queryawsAccount: [ + { + id: cuid(), + cloudtrail: [ + { + eventSelectors: [ + { + includeManagementEvents, + readWriteType, + dataResources, + }, + ], + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: CIS3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_311 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when S3 bucket object-level logging for read events is enabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, 'ReadOnly', [ + { type: 'AWS::S3::Object' }, + ]) + await testRule(data, Result.PASS) + }) + + test('Security Issue when S3 bucket object-level logging for read events is 
not enabled', async () => { + const data: CIS3xQueryResponse = getTestRuleFixture(true, 'ReadOnly', []) + await testRule(data, Result.FAIL) + }) + }) +}) diff --git a/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-5.x.test.ts b/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-5.x.test.ts new file mode 100644 index 00000000..d28106b2 --- /dev/null +++ b/src/aws/cis-1.3.0/tests/aws-cis-1.3.0-5.x.test.ts @@ -0,0 +1,427 @@ +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' +import cuid from 'cuid' + +import Aws_CIS_130_51 from '../rules/aws-cis-1.3.0-5.1' +import Aws_CIS_130_52 from '../rules/aws-cis-1.3.0-5.2' +import Aws_CIS_130_53 from '../rules/aws-cis-1.3.0-5.3' + +const ipV4WildcardAddress = '0.0.0.0/0' +const ipV6WildcardAddress = '::/0' + +export interface InboundRule { + source?: string + toPort?: number | null + fromPort?: number | null + protocol?: string + allowOrDeny?: string +} + +export interface OutboundRule { + destination?: string + toPort?: number | null + fromPort?: number | null + protocol?: string +} + +export interface QueryawsSecurityGroup { + id: string + inboundRules?: InboundRule[] + outboundRules?: OutboundRule[] +} + +export interface QueryawsNetworkAcl { + id: string + inboundRules?: InboundRule[] + outboundRules?: OutboundRule[] +} + +export interface QueryResponse { + queryawsNetworkAcl?: QueryawsNetworkAcl[] + queryawsSecurityGroup: QueryawsSecurityGroup[] +} + +describe('CIS Amazon Web Services Foundations: 1.3.0', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'aws', + entityName: 'CIS', + }) + }) + + describe('AWS CIS 5.1 Ensure no Network ACLs allow ingress from 0.0.0.0/0 to remote server administration ports', () => { + const test51Rule = async ( + fromPort: number | undefined, + toPort: number | undefined, + sourceAddress: string, + expectedResult: Result, + includeRandomValidData = false + ): Promise => { + // Arrange + const validInboundRule = { + toPort: 123, + 
fromPort: 456, + source: '10.10.10.10/16', + allowOrDeny: 'allow' + } + + const data: QueryResponse = { + queryawsSecurityGroup: [], + queryawsNetworkAcl: [ + { + id: cuid(), + inboundRules: [ + { + toPort, + fromPort, + source: sourceAddress, + allowOrDeny: 'allow' + }, + ], + }, + ], + } + + if (includeRandomValidData) { + data.queryawsNetworkAcl?.[0].inboundRules?.push(validInboundRule) + data.queryawsNetworkAcl?.push({ + id: cuid(), + inboundRules: [validInboundRule, validInboundRule], + }) + } + + // Act + const [processedRule] = await rulesEngine.processRule(Aws_CIS_130_51 as Rule, { ...data }) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 22', async () => { + await test51Rule(22, 22, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 3389', async () => { + await test51Rule(3389, 3389, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wildcard address and port 80', async () => { + await test51Rule(80, 80, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wildcard address and port 80', async () => { + await test51Rule(80, 80, ipV6WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 22', async () => { + await test51Rule( + 100, + 200, + '10.10.10.10/16', + Result.PASS + ) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wildcard address and a port range not including the port 3389', async () => { + await test51Rule( + 1000, + 2000, + ipV4WildcardAddress, + Result.PASS + ) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wildcard address and a port range not including the port 22', async () => { + await test51Rule( 
+ 100, + 200, + ipV6WildcardAddress, + Result.PASS + ) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wildcard address and a port range not including the port 3389 (multiple values)', async () => { + await test51Rule( + 1000, + 2000, + ipV6WildcardAddress, + Result.PASS, + true + ) + }) + + test('Security Issue when IPv4 wildcard address and port 22', async () => { + await test51Rule(22, 22, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv6 wildcard address and port 3389', async () => { + await test51Rule(3389, 3389, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv4 wildcard address and port 22 (multiple values)', async () => { + await test51Rule( + 22, + 22, + ipV4WildcardAddress, + Result.FAIL, + true + ) + }) + + test('Security Issue when there is an inbound rule with IPv4 wildcard address and no port range is specified', async () => { + await test51Rule( + undefined, + undefined, + ipV4WildcardAddress, + Result.FAIL + ) + }) + + test('Security Issue when there is an inbound rule with IPv6 wildcard address and no port range is specified', async () => { + await test51Rule( + undefined, + undefined, + ipV6WildcardAddress, + Result.FAIL + ) + }) + + test('Security Issue when there is an inbound rule with IPv4 wildcard address and port range includes the port 22', async () => { + await test51Rule(0, 100, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wildcard address and port range includes the port 22', async () => { + await test51Rule(0, 100, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv4 wildcard address and port range includes the port 3389', async () => { + await test51Rule(3000, 4000, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wildcard address and port range includes the port 3389', async () => { + await 
test51Rule(3000, 4000, ipV6WildcardAddress, Result.FAIL) + }) + }) + + describe('AWS CIS 5.2 Ensure no security groups allow ingress from 0.0.0.0/0 to remote server administration ports', () => { + const testRule = async ( + fromPort: number | null , + toPort: number | null, + sourceAddress: string, + expectedResult: Result, + includeRandomValidData = false + ): Promise => { + // Arrange + const validInboundRule = { + toPort: 123, + fromPort: 456, + source: '10.10.10.10/16', + } + + const data: QueryResponse = { + queryawsSecurityGroup: [ + { + id: cuid(), + inboundRules: [ + { + toPort, + fromPort, + source: sourceAddress, + }, + ], + }, + ], + } + + if (includeRandomValidData) { + data.queryawsSecurityGroup?.[0].inboundRules?.push(validInboundRule) + data.queryawsSecurityGroup?.push({ + id: cuid(), + inboundRules: [validInboundRule, validInboundRule], + }) + } + + // Act + const [processedRule] = await rulesEngine.processRule(Aws_CIS_130_52 as Rule, { ...data }) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 22', async () => { + await testRule(22, 22, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 3389', async () => { + await testRule(3389, 3389, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wildcard address and port 80', async () => { + await testRule(80, 80, ipV6WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 22', async () => { + await testRule( + 100, + 200, + '10.10.10.10/16', + Result.PASS + ) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wildcard address and a port range not including the port 3389', async () => { + await testRule( + 1000, + 2000, + ipV4WildcardAddress, + 
Result.PASS + ) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wildcard address and a port range not including the port 22', async () => { + await testRule( + 100, + 200, + ipV6WildcardAddress, + Result.PASS + ) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wildcard address and a port range not including the port 3389 (multiple values)', async () => { + await testRule( + 1000, + 2000, + ipV6WildcardAddress, + Result.PASS, + true + ) + }) + + test('Security Issue when IPv4 wildcard address and port 22', async () => { + await testRule(22, 22, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv6 wildcard address and port 3389', async () => { + await testRule(3389, 3389, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv4 wildcard address and port 22 (multiple values)', async () => { + await testRule( + 22, + 22, + ipV4WildcardAddress, + Result.FAIL, + true + ) + }) + + test('Security Issue when IPv4 wildcard address and port 3389 (multiple values)', async () => { + await testRule( + 3389, + 3389, + ipV4WildcardAddress, + Result.FAIL, + true + ) + }) + + test('Security Issue when there is an inbound rule with IPv4 wildcard address and no port range is specified', async () => { + await testRule( + null, + null, + ipV4WildcardAddress, + Result.FAIL + ) + }) + + test('Security Issue when there is an inbound rule with IPv6 wildcard address and no port range is specified', async () => { + await testRule( + null, + null, + ipV6WildcardAddress, + Result.FAIL + ) + }) + + test('Security Issue when there is an inbound rule with IPv4 wildcard address and port range includes the port 22', async () => { + await testRule(0, 100, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wildcard address and port range includes the port 3389', async () => { + await testRule(3000, 4000, ipV6WildcardAddress, Result.FAIL) + }) + }) + + 
describe('AWS CIS 5.3 Ensure the default security group of every VPC restricts all traffic', () => { + const test53Rule = async ( + ingressSource: string, + egressDestination: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: QueryResponse = { + queryawsSecurityGroup: [ + { + id: cuid(), + inboundRules: [], + outboundRules: [], + }, + ], + } + if (ingressSource) { + data.queryawsSecurityGroup[0].inboundRules?.push({ + source: ingressSource as string, + }) + } + if (egressDestination) { + data.queryawsSecurityGroup[0].outboundRules?.push({ + destination: egressDestination as string, + }) + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_CIS_130_53 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is not an inbound/outbound rules with the wildcard addresses', async () => { + await test53Rule( + '10.10.10.10/16', + '2001:db8:3333:4444:5555:6666:7777:8888', + Result.PASS + ) + }) + + test('Security Issue when there is an inbound rule with a IPv4 wilcard address', async () => { + await test53Rule(ipV4WildcardAddress, '', Result.FAIL) + }) + test('Security Issue when there is an inbound rule with a IPv6 wilcard address', async () => { + await test53Rule(ipV6WildcardAddress, '', Result.FAIL) + }) + test('Security Issue when there is an outbound rule with a IPv4 wilcard address', async () => { + await test53Rule('', ipV4WildcardAddress, Result.FAIL) + }) + test('Security Issue when there is an outbound rule with a IPv6 wilcard address', async () => { + await test53Rule('', ipV6WildcardAddress, Result.FAIL) + }) + test('Security Issue when there is an inbound and an outbound rule with a IPv4 wilcard address', async () => { + await test53Rule( + ipV4WildcardAddress, + ipV4WildcardAddress, + Result.FAIL + ) + }) + test('Security Issue when there is an inbound and an outbound rule with a IPv6 wilcard address', async () => { + await 
test53Rule( + ipV6WildcardAddress, + ipV6WildcardAddress, + Result.FAIL + ) + }) + }) +}) diff --git a/src/aws/cis-1.4.0/.releaserc.yml b/src/aws/cis-1.4.0/.releaserc.yml index 70788a7a..cba7db8c 100644 --- a/src/aws/cis-1.4.0/.releaserc.yml +++ b/src/aws/cis-1.4.0/.releaserc.yml @@ -1,10 +1,13 @@ --- branches: - - name: main - - name: beta - prerelease: true - name: alpha + channel: alpha prerelease: true + - name: beta + channel: beta + prerelease: true + - name: main + plugins: - "@semantic-release/commit-analyzer" - "@semantic-release/release-notes-generator" @@ -12,24 +15,23 @@ plugins: - changelogFile: CHANGELOG.md - - "@semantic-release/git" - assets: - - CHANGELOG.md - - package.json - - - "@semantic-release/npm" - - npmPublish: false - - "@semantic-release/gitlab" + - CHANGELOG.md + - package.json + - - "@semrel-extra/npm" + - npmPublish: true + - "@semantic-release/github" verifyConditions: - "@semantic-release/changelog" - - "@semantic-release/gitlab" + - "@semantic-release/github" + - "@semrel-extra/npm" prepare: - "@semantic-release/changelog" - - "@semantic-release/npm" + - "@semrel-extra/npm" - - "@semantic-release/git" - - message: "chore(publish): ${nextRelease.version} \n\n${nextRelease.notes}" + - message: "chore(release): ${nextRelease.version} \n\n${nextRelease.notes}" publish: - - "@semantic-release/gitlab" -release: - noCi: true + - "@semantic-release/github" + - "@semrel-extra/npm" success: false fail: false -repositoryUrl: https://gitlab.com/auto-cloud/cloudgraph/policy-packs.git tagFormat: "${version}" diff --git a/src/aws/cis-1.4.0/README.md b/src/aws/cis-1.4.0/README.md index 19e46846..c3c36214 100644 --- a/src/aws/cis-1.4.0/README.md +++ b/src/aws/cis-1.4.0/README.md @@ -53,6 +53,8 @@ Policy Pack based on the [AWS Foundations 1.4.0](https://docs.aws.amazon.com/aud } ``` +## Available Ruleset + | Rule | Description | | ------------- | 
--------------------------------------------------------------------------------------------------------------------------- | | AWS CIS 1.1 | Maintain current contact details | diff --git a/src/aws/cis-1.4.0/rules/aws-cis-1.4.0-1.1.ts b/src/aws/cis-1.4.0/rules/aws-cis-1.4.0-1.1.ts index 3ffa67b5..fa86b663 100644 --- a/src/aws/cis-1.4.0/rules/aws-cis-1.4.0-1.1.ts +++ b/src/aws/cis-1.4.0/rules/aws-cis-1.4.0-1.1.ts @@ -1,22 +1,22 @@ export default { - id: 'aws-cis-1.4.0-1.', + id: 'aws-cis-1.4.0-1.', title: 'AWS CIS 1.1 Maintain current contact details', - + description: `Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization. - + An AWS account supports a number of contact details, and AWS will use these to contact the account owner if activity judged to be in breach of Acceptable Use Policy or indicative of likely security compromise is observed by the AWS Abuse team. Contact details should not be for a single individual, as circumstances may arise where that individual is unavailable. Email contact details should point to a mail alias which forwards email to multiple individuals within the organization; where feasible, phone contact details should point to a PABX hunt group or other call-forwarding system.`, - + audit: `This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:*Billing ) - + 1. Sign in to the AWS Management Console and open the Billing and Cost Management console at https://console.aws.amazon.com/billing/home#/. 2. On the navigation bar, choose your account name, and then choose My Account. 3. On the Account Settings page, review and verify the current details. 4. 
Under Contact Information, review and verify the current details.`, - - rationale: 'If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers' and AWS' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.', - + + rationale: 'If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior needs urgent mitigation, proactive measures may be taken, including throttling of traffic between the account exhibiting suspicious behavior and the AWS API endpoints and the Internet. This will result in impaired service to and from the account in question, so it is in both the customers\' and AWS\' best interests that prompt contact can be established. This is best achieved by setting AWS account contact details to point to resources which have multiple individuals as recipients, such as email aliases and PABX hunt groups.', + remediation: `This activity can only be performed via the AWS Console, with a user who has permission to read and write Billing information (aws-portal:*Billing ). - + 1. Sign in to the AWS Management Console and open the Billing and Cost Management console at https://console.aws.amazon.com/billing/home#/. 2. 
On the navigation bar, choose your account name, and then choose My Account. 3. On the Account Settings page, next to Account Settings, choose Edit. @@ -25,8 +25,8 @@ export default { 6. After you have made your changes, choose Done. 7. To edit your contact information, under Contact Information, choose Edit. 8. For the fields that you want to change, type your updated information, and then choose Update.`, - + references: ['https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/manage-account-payment.html#contact-info'], - + severity: 'high', } diff --git a/src/aws/nist-800-53-rev4/README.md b/src/aws/nist-800-53-rev4/README.md index 6706ae08..a5f3e2b8 100644 --- a/src/aws/nist-800-53-rev4/README.md +++ b/src/aws/nist-800-53-rev4/README.md @@ -166,6 +166,8 @@ Policy Pack based on the [800-53 Rev. 4](https://csrc.nist.gov/publications/deta | AWS NIST 10.8 | IAM password policies should require at least one uppercase character | | AWS NIST 11.1 | ECS task definitions should limit memory usage for containers | | AWS NIST 11.2 | ECS task definitions should set CPU limit for containers | +| AWS NIST 12.1 | CloudFront distributions should have geo-restrictions specified | +| AWS NIST 12.2 | EC2 instances should not have a public IP association (IPv4) | | AWS NIST 13.1 | IAM multi-factor authentication should be enabled for all IAM users that have a console password | | AWS NIST 13.2 | IAM should have hardware MFA enabled for the root account | | AWS NIST 13.3 | IAM should have MFA enabled for the root account | @@ -175,3 +177,9 @@ Policy Pack based on the [800-53 Rev. 
4](https://csrc.nist.gov/publications/deta | AWS NIST 15.2 | IAM roles used for trust relationships should have MFA or external IDs | | AWS NIST 15.3 | IAM root user access key should not exist | | AWS NIST 15.4 | IAM root user should not be used | +| AWS NIST 16.1 | API Gateway classic custom domains should use secure TLS protocol versions (1.2 and above) | +| AWS NIST 16.2 | API Gateway v2 custom domains should use secure TLS protocol versions (1.2 and above) | +| AWS NIST 16.3 | CloudFront distribution custom origins should use secure TLS protocol versions (1.2 and above) | +| AWS NIST 16.4 | CloudFront distribution viewer certificate should use secure TLS protocol versions (1.2 and above) | +| AWS NIST 16.5 | ELB HTTPS listeners should use secure TLS protocol versions (1.2 and above) | +| AWS NIST 16.6 | ELBv2 HTTPS listeners should use secure TLS protocol versions (1.2 and above) | \ No newline at end of file diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.1.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.1.ts index a7975178..996a195d 100644 --- a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.1.ts +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.1.ts @@ -1,60 +1,65 @@ export default { id: 'aws-nist-800-53-rev4-11.1', - title: 'ECS task definitions should limit memory usage for containers', + title: 'AWS NIST 11.1 ECS task definitions should limit memory usage for containers', - description: `'Limiting memory usage for your ECS tasks allows you to avoid running out of memory because ECS stops placing tasks on the instance, and Docker kills any containers that try to go over the hard limit. 
Having no limit on memory usage can lead to issues where one container can easily make the whole system unstable and as a result unusable.'`, + description: `Limiting memory usage for your ECS tasks allows you to avoid running out of memory because ECS stops placing tasks on the instance, and Docker kills any containers that try to go over the hard limit. Having no limit on memory usage can lead to issues where one container can easily make the whole system unstable and as a result unusable. + + Memory limits must be set through the *memory* property for each *ContainerDefinition* within the task definition. For more information about the *memory* property, see [ContainerDefinition](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html) in the ECS API Reference.`, audit: '', rationale: '', - remediation: `**AWS Console** - - - Navigate to ECS. + remediation: `**Console Remediation Steps** + + - Navigate to [ECS](https://console.aws.amazon.com/ecs/). - Select the Region that contains your task definition. - In the left pane, select Task Definitions. - Check the task definition and click Create new revision. - On the Create new revision of task definition page, make changes. For example, to change the existing container definitions (such as the container image, memory limits, or port mappings), select the container, make the changes, and then choose Update. - Select Create. - - If your task definition is used in a service, update your service with the updated task definition and deactivate the previous task definition. For more information, see Updating a service. - - **AWS CLI** + - If your task definition is used in a service, update your service with the updated task definition and deactivate the previous task definition. For more information, see [Updating a service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/update-service.html). 
+ + **CLI Remediation Steps** + - Create new task definition revision: - > aws ecs register-task-definition
- > --family \
- > [--task-role-arn \]
- > [--execution-role-arn \]
- > [--network-mode \]
- > --container-definitions \
- > [--volumes \]
- > [--placement-constraints \]
- > [--requires-compatibilities \]
- > [--cpu \]
- > [--memory \]
- > [--tags \]
- > [--pid-mode \]
- > [--ipc-mode \]
- > [--proxy-configuration \]
- > [--inference-accelerators \]
- > [--cli-input-json | --cli-input-yaml]
- > [--generate-cli-skeleton \]
- -- Update the service to use the new task definition: - > aws ecs update-service
- > [--cluster \]
- > --service \
- > [--desired-count \]
- > [--task-definition \]
- > [--capacity-provider-strategy \]
- > [--deployment-configuration \]
- > [--network-configuration \]
- > [--placement-constraints \]
- > [--placement-strategy \]
- > [--platform-version \]
- > [--force-new-deployment | --no-force-new-deployment]
- > [--health-check-grace-period-seconds \]
- > [--cli-input-json | --cli-input-yaml]
- > [--generate-cli-skeleton \]
`, + + aws ecs register-task-definition + --family + [--task-role-arn ] + [--execution-role-arn ] + [--network-mode ] + --container-definitions + [--volumes ] + [--placement-constraints ] + [--requires-compatibilities ] + [--cpu ] + [--memory ] + [--tags ] + [--pid-mode ] + [--ipc-mode ] + [--proxy-configuration ] + [--inference-accelerators ] + [--cli-input-json | --cli-input-yaml] + [--generate-cli-skeleton ] + + - Update the service to use the new task definition: + + aws ecs update-service + [--cluster ] + --service + [--desired-count ] + [--task-definition ] + [--capacity-provider-strategy ] + [--deployment-configuration ] + [--network-configuration ] + [--placement-constraints ] + [--placement-strategy ] + [--platform-version ] + [--force-new-deployment | --no-force-new-deployment] + [--health-check-grace-period-seconds ] + [--cli-input-json | --cli-input-yaml] + [--generate-cli-skeleton ]`, references: [ 'https://docs.aws.amazon.com/AmazonECS/latest/developerguide/update-service.html', @@ -76,6 +81,6 @@ export default { severity: 'medium', conditions: { path: '@.memory', - in: ["0", "256", "512"] + in: ['0', '256', '512'], }, } \ No newline at end of file diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.2.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.2.ts index 8b1bf325..ac59b49a 100644 --- a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.2.ts +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-11.2.ts @@ -1,60 +1,65 @@ export default { id: 'aws-nist-800-53-rev4-11.2', - title: 'ECS task definitions should set CPU limit for containers', + title: 'AWS NIST 11.2 ECS task definitions should set CPU limit for containers', - description: `'Unless specified, containers get access to all the CPU and memory capacity available on that host. 
Specifying CPU for ECS task definitions ensures that high priority containers are able to claim the CPU runtime they require.'`, + description: `Unless specified, containers get access to all the CPU and memory capacity available on that host. Specifying CPU for ECS task definitions ensures that high priority containers are able to claim the CPU runtime they require. + + A CPU limit must be set through the *cpu* property in the task definition. For more information about the *cpu* property, see [TaskDefinition](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_TaskDefinition.html) in the ECS API Reference.`, audit: '', rationale: '', - remediation: `**AWS Console** - - - Navigate to ECS. + remediation: `**Console Remediation Steps** + + - Navigate to [ECS](https://console.aws.amazon.com/ecs/). - Select the Region that contains your task definition. - In the left pane, select Task Definitions. - Check the task definition and click Create new revision. - On the Create new revision of task definition page, make changes. For example, to change the existing container definitions (such as the container image, memory limits, or port mappings), select the container, make the changes, and then choose Update. - Select Create. - - If your task definition is used in a service, update your service with the updated task definition and deactivate the previous task definition. For more information, see Updating a service. - - **AWS CLI** + - If your task definition is used in a service, update your service with the updated task definition and deactivate the previous task definition. For more information, see [Updating a service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/update-service.html). + + **CLI Remediation Steps** + - Create new task definition revision: - > aws ecs register-task-definition
- > --family \
- > [--task-role-arn \]
- > [--execution-role-arn \]
- > [--network-mode \]
- > --container-definitions \
- > [--volumes \]
- > [--placement-constraints \]
- > [--requires-compatibilities \]
- > [--cpu \]
- > [--memory \]
- > [--tags \]
- > [--pid-mode \]
- > [--ipc-mode \]
- > [--proxy-configuration \]
- > [--inference-accelerators \]
- > [--cli-input-json | --cli-input-yaml]
- > [--generate-cli-skeleton \] - + + aws ecs register-task-definition + --family + [--task-role-arn ] + [--execution-role-arn ] + [--network-mode ] + --container-definitions + [--volumes ] + [--placement-constraints ] + [--requires-compatibilities ] + [--cpu ] + [--memory ] + [--tags ] + [--pid-mode ] + [--ipc-mode ] + [--proxy-configuration ] + [--inference-accelerators ] + [--cli-input-json | --cli-input-yaml] + [--generate-cli-skeleton ] + - Update the service to use the new task definition: - > aws ecs update-service - > [--cluster \] - > --service \ - > [--desired-count \] - > [--task-definition \] - > [--capacity-provider-strategy \] - > [--deployment-configuration \] - > [--network-configuration \] - > [--placement-constraints \] - > [--placement-strategy \] - > [--platform-version \] - > [--force-new-deployment | --no-force-new-deployment] - > [--health-check-grace-period-seconds \] - > [--cli-input-json | --cli-input-yaml] - > [--generate-cli-skeleton \]`, + + aws ecs update-service + [--cluster ] + --service + [--desired-count ] + [--task-definition ] + [--capacity-provider-strategy ] + [--deployment-configuration ] + [--network-configuration ] + [--placement-constraints ] + [--placement-strategy ] + [--platform-version ] + [--force-new-deployment | --no-force-new-deployment] + [--health-check-grace-period-seconds ] + [--cli-input-json | --cli-input-yaml] + [--generate-cli-skeleton ]`, references: [ 'https://docs.aws.amazon.com/AmazonECS/latest/developerguide/update-service.html', @@ -75,6 +80,6 @@ export default { severity: 'medium', conditions: { path: '@.cpu', - in: ["0", "256", "512"] + in: ['0', '256', '512'], }, } \ No newline at end of file diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-12.1.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-12.1.ts new file mode 100644 index 00000000..aec5e1b4 --- /dev/null +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-12.1.ts @@ -0,0 +1,61 @@ +export default { + id: 
'aws-nist-800-53-rev4-12.1', + title: 'AWS NIST 12.1 CloudFront distributions should have geo-restrictions specified', + + description: `CloudFront distributions should enable geo-restriction when an organization needs to + prevent users in specific geographic locations from accessing content. For example, + if an organization has rights to distribute content in only one country, geo restriction should be + enabled to allow access only from users in the whitelisted country. Or if the organization cannot + distribute content in a particular country, geo restriction should deny access from users in the + blacklisted country.`, + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + + - Navigate to CloudFront. + - Select the distribution that you want to update. + - In the Distribution Settings pane, select the Restrictions tab > Edit. + - Enter the applicable values. For more information, refer to Restrictions. + - Choose Yes, Edit. + + **AWS CLI** + - Submit a GetDistributionConfig request to get the current configuration and an Etag header for the distribution. + - > get-distribution-config --id + - Update the returned XML to include the CloudFront should have geo-restrictions specified. + - Submit an UpdateDistribution request to update the configuration for your distribution. 
Refer to here for more information.`, + + references: [ + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesRestrictions', + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/georestrictions.html', + 'https://docs.aws.amazon.com/cli/latest/reference/cloudfront/update-distribution.html', + ], + gql: `{ + queryawsCloudfront { + id + arn + accountId + __typename + geoRestriction { + restrictionType + locations + } + } + }`, + resource: 'queryawsCloudfront[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.geoRestriction.restrictionType', + in: ['whitelist', 'blacklist'] + }, + { + path: '@.geoRestriction.locations', + isEmpty: false + }, + ], + }, +} \ No newline at end of file diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-12.2.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-12.2.ts new file mode 100644 index 00000000..e8637ca0 --- /dev/null +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-12.2.ts @@ -0,0 +1,69 @@ +export default { + id: 'aws-nist-800-53-rev4-12.2', + title: 'AWS NIST 12.2 EC2 instances should not have a public IP association (IPv4)', + + description: `EC2 instances are reachable over the internet even if you have protections such as + NACLs or security groups if a public IP address is associated with an instance. To minimize the risk + of unauthorized access to your instances, do not allow public IP associations unless absolutely necessary.`, + + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + + Modify the public IPv4 addressing attribute + - Navigate to the VPC console. + - In the navigation pane, choose Subnets. + - Select your subnet and choose Subnet Actions, Modify auto-assign IP settings. + - The Enable auto-assign public IPv4 address check box, if selected, requests a public IPv4 address for all instances launched into the selected subnet. 
Select or clear the check box as required, and then choose Save. + +Disable the public IP addressing feature + + - Navigate to EC2. + - Choose Launch Instance. + - Select an AMI and an instance type, and then choose Next: Configure Instance Details. + - On the Configure Instance Details page, for Network, select a VPC. The Auto-assign Public IP list is displayed. Choose Disable to override the default setting for the subnet. + + **AWS CLI** + + - Use the + > run-instances + - command with the + > --no-associate-public-ip-address, or + - Execute the + > modify-subnet-attribute + - command with + > --no-map-customer-owned-ip-on-launch`, + + references: [ + 'https://docs.aws.amazon.com/AmazonECS/latest/developerguide/update-service.html', + 'https://docs.aws.amazon.com/AmazonECS/latest/developerguide/update-task-definition.html', + 'https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ecs/register-task-definition.html', + 'https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ecs/update-service.html', + 'https://aws.amazon.com/blogs/containers/how-amazon-ecs-manages-cpu-and-memory-resources/', + ], + gql: `{ + queryawsEc2 { + id + arn + accountId + __typename + subnets { + autoAssignPublicIpv4Address + } + } + }`, + resource: 'queryawsEc2[*]', + severity: 'medium', + conditions: { + not: { + path: '@.subnets', + array_any: { + path: '[*].autoAssignPublicIpv4Address', + equal: 'Yes', + }, + }, + }, +} \ No newline at end of file diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.1.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.1.ts new file mode 100644 index 00000000..a0a59388 --- /dev/null +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.1.ts @@ -0,0 +1,71 @@ +export default { + id: 'aws-nist-800-53-rev4-16.1', + title: + 'AWS NIST 16.1 API Gateway classic custom domains should use secure TLS protocol versions (1.2 and above)', + + description: + 'The TLS (Transport Layer Security) protocol secures 
transmission of data over the internet using standard encryption technology. Encryption should be set with the latest version of TLS where possible. Versions prior to TLS 1.2 are deprecated and usage may pose security risks.', + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + + - Navigate to API Gateway. + - In the left navigation, select Custom Domain Names. + - Select the domain name to update. + - In Domain details, select Edit. + - In Minimum TLS version, select TLS 1.2 (recommended). + - Select Save. + + **AWS CLI** + + To update the API Gateway classic custom domains to use secure TLS protocol versions (1.2 and above): + + > aws apigateway update-domain-name \ + > --domain-name \ + > --patch-operations op='replace',path='/securityPolicy',value='TLS_1_2'`, + + references: [ + 'https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html#apigateway-custom-domain-tls-version-how-to', + 'https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html', + 'https://docs.aws.amazon.com/cli/latest/reference/apigateway/update-domain-name.html', + 'https://docs.aws.amazon.com/apigateway/api-reference/link-relation/domainname-update/', + ], + gql: `{ + queryawsApiGatewayRestApi { + id + arn + accountId + __typename + domainNames { + configurations { + securityPolicy + } + } + } + }`, + resource: 'queryawsApiGatewayRestApi[*]', + severity: 'medium', + conditions: { + or: [ + { + path: '@.domainNames', + isEmpty: true, + }, + { + not: { + path: '@.domainNames', + array_any: { + path: '[*].configurations', + array_any: { + path: '[*].securityPolicy', + equal: 'TLS_1_0', // The valid values are TLS_1_0 and TLS_1_2 + }, + }, + }, + }, + ], + }, +} diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.2.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.2.ts new file mode 100644 index 00000000..24d686e7 --- /dev/null +++ 
b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.2.ts @@ -0,0 +1,71 @@ +export default { + id: 'aws-nist-800-53-rev4-16.2', + title: + 'AWS NIST 16.2 API Gateway v2 custom domains should use secure TLS protocol versions (1.2 and above)', + + description: + 'The TLS (Transport Layer Security) protocol secures transmission of data over the internet using standard encryption technology. Encryption should be set with the latest version of TLS where possible. Versions prior to TLS 1.2 are deprecated and usage may pose security risks.', + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + + - Navigate to API Gateway. + - In the left navigation, select Custom Domain Names. + - Select the domain name to update. + - In Domain details, select Edit. + - In Minimum TLS version, select TLS 1.2 (recommended). + - Select Save. + + **AWS CLI** + + To update the API Gateway v2 custom domains to use secure TLS protocol versions (1.2 and above): + + > aws apigatewayv2 update-domain-name \ + > --domain-name \ + > --domain-name-configurations SecurityPolicy=TLS_1_2`, + + references: [ + 'https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html#apigateway-custom-domain-tls-version-how-to', + 'https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html', + 'https://docs.aws.amazon.com/cli/latest/reference/apigatewayv2/update-domain-name.html', + 'https://docs.aws.amazon.com/apigatewayv2/latest/api-reference/domainnames-domainname.html#domainnames-domainnamepatch', + ], + gql: `{ + queryawsApiGatewayHttpApi { + id + arn + accountId + __typename + domainNames { + configurations { + securityPolicy + } + } + } + }`, + resource: 'queryawsApiGatewayHttpApi[*]', + severity: 'medium', + conditions: { + or: [ + { + path: '@.domainNames', + isEmpty: true, + }, + { + not: { + path: '@.domainNames', + array_any: { + path: '[*].configurations', + array_any: { + path: '[*].securityPolicy', 
+ equal: 'TLS_1_0', // The valid values are TLS_1_0 and TLS_1_2 + }, + }, + }, + }, + ], + }, +} diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.3.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.3.ts new file mode 100644 index 00000000..261ad4e9 --- /dev/null +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.3.ts @@ -0,0 +1,65 @@ +export default { + id: 'aws-nist-800-53-rev4-16.3', + title: + 'AWS NIST 16.3 CloudFront distribution custom origins should use secure TLS protocol versions (1.2 and above)', + + description: + 'The TLS (Transport Layer Security) protocol secures transmission of data over the internet using standard encryption technology. Encryption should be set with the latest version of TLS where possible. Versions prior to TLS 1.2 are deprecated and usage may pose security risks.', + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + + - Navigate to AWS CloudFront. + - Select the Distribution. + - Select the Origins and Origin Groups tab. + - Select the checkbox for the Origin and select Edit. + - In the Minimum Origin SSL Protocol, select TLS protocol version TLSv1.2. + - Click Yes, Edit. 
+ + **AWS CLI** + + To update your CloudFront distribution custom origins to use secure TLS protocol versions (1.2 and above): + + > aws cloudfront update-distribution \ + > [--distribution-config ] \ + > --id \ + > [--if-match ] \ + > [--default-root-object ] \ + > [--cli-input-json ] \ + > [--generate-cli-skeleton ]`, + + references: [ + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValues-security-policy', + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-cloudfront-to-custom-origin.html#using-https-cloudfront-to-origin-certificate', + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html', + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html', + 'https://docs.aws.amazon.com/cli/latest/reference/cloudfront/update-distribution.html', + ], + gql: `{ + queryawsCloudfront { + id + arn + accountId + __typename + origins { + customOriginConfig { + originSslProtocols { + items + } + } + } + } + }`, + resource: 'queryawsCloudfront[*]', + severity: 'medium', + conditions: { + path: '@.origins', + array_all: { + path: '[*].customOriginConfig.originSslProtocols.items', + contains: 'TLSv1.2', + }, + }, +} diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.4.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.4.ts new file mode 100644 index 00000000..0b94eb60 --- /dev/null +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.4.ts @@ -0,0 +1,57 @@ +export default { + id: 'aws-nist-800-53-rev4-16.4', + title: + 'AWS NIST 16.4 CloudFront distribution viewer certificate should use secure TLS protocol versions (1.2 and above)', + + description: + 'The TLS (Transport Layer Security) protocol secures transmission of data over the internet using standard encryption technology. 
Encryption should be set with the latest version of TLS where possible. Versions prior to TLS 1.2 are deprecated and usage may pose security risks.', + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + + - Navigate to AWS CloudFront. + - Select the Distribution. + - On the General tab, click Edit. + - In the Security Policy, select TLS protocol version TLSv1.2_2018 or TLSv1.2_2019 (recommended). + - Click Yes, Edit. + + **AWS CLI** + + To update your CloudFront viewer certificate to use secure TLS protocol versions (1.2 and above): + + > aws cloudfront update-distribution \ + > [--distribution-config ] \ + > --id \ + > [--if-match ] \ + > [--default-root-object ] \ + > [--cli-input-json ] \ + > [--generate-cli-skeleton ]`, + + references: [ + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValues-security-policy', + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html', + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesViewerProtocolPolicy', + 'https://docs.aws.amazon.com/cli/latest/reference/cloudfront/update-distribution.html', + 'https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html#secure-connections-supported-ciphers', + ], + gql: `{ + queryawsCloudfront { + id + arn + accountId + __typename + viewerCertificate { + minimumProtocolVersion + } + } + }`, + resource: 'queryawsCloudfront[*]', + severity: 'medium', + conditions: { + path: '@.viewerCertificate.minimumProtocolVersion', + in: ['TLSv1.2_2018', 'TLSv1.2_2019', 'TLSv1.2_2021'], + }, +} diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.5.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.5.ts new file mode 100644 index 00000000..e97c2cfb --- /dev/null +++ 
b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.5.ts @@ -0,0 +1,74 @@ +export default { + id: 'aws-nist-800-53-rev4-16.5', + title: + 'AWS NIST 16.5 ELB HTTPS listeners should use secure TLS protocol versions (1.2 and above)', + + description: + 'The TLS (Transport Layer Security) protocol secures transmission of data over the internet using standard encryption technology. Encryption should be set with the latest version of TLS where possible. Versions prior to TLS 1.2 are deprecated and usage may pose security risks.', + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + - Navigate to AWS EC2. + - In the left navigation, select Load Balancers. + - Select the load balancer > select Listeners. + - Under SSL Certificate, select Change. + - Select your Certificate type from the following: + - Choose a certificate from ACM. Refer to What Is AWS Certificate Manager? for more information. + - Choose a certificate from IAM. Refer to Managing server certificates in IAM for more information. + - Upload a certificate to IAM. Refer to How can I upload and import an SSL certificate to AWS Identity and Access Management (IAM)? + - Click Save. + + **AWS CLI** + + Select your Certificate type from the following: + **To replace an SSL certificate with a certificate provided by ACM:** + - Use the following request-certificate command to request a new certificate: + > aws acm request-certificate --domain-name www.example.com + - Use the following set-load-balancer-listener-ssl-certificate command to set the certificate: + > aws elb set-load-balancer-listener-ssl-certificate --load-balancer-name my-load-balancer --load-balancer-port 443 --ssl-certificate-id arn:aws:acm:region:123456789012:certificate/12345678-1234-1234-1234-123456789012 + **To replace an SSL certificate with a certificate uploaded to IAM:** + - If you have an SSL certificate but have not uploaded it, refer to Uploading a server certificate in the IAM User Guide. 
+ - Use the following get-server-certificate command to get the ARN of the certificate: + > aws iam get-server-certificate --server-certificate-name my-new-certificate + - Use the following set-load-balancer-listener-ssl-certificate command to set the certificate: + > aws elb set-load-balancer-listener-ssl-certificate --load-balancer-name my-load-balancer --load-balancer-port 443 --ssl-certificate-id arn:aws:iam::123456789012:server-certificate/my-new-certificate`, + + references: [ + 'https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html#us-update-lb-SSLcert-console', + 'https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html#us-update-lb-SSLcert-cli', + ], + gql: `{ + queryawsElb { + id + arn + accountId + __typename + listeners { + loadBalancerProtocol + sslCertificateId + } + } + }`, + resource: 'queryawsElb[*]', + severity: 'medium', + conditions: { + not: { + path: '@.listeners', + array_any: { + and: [ + { + path: '[*].loadBalancerProtocol', + equal: 'HTTPS', + }, + { + path: '[*].sslCertificateId', + in: [null, ''], + }, + ], + }, + }, + }, +} diff --git a/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.6.ts b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.6.ts new file mode 100644 index 00000000..5fc5b437 --- /dev/null +++ b/src/aws/nist-800-53-rev4/rules/aws-nist-800-53-rev4-16.6.ts @@ -0,0 +1,81 @@ +export default { + id: 'aws-nist-800-53-rev4-16.6', + title: + 'AWS NIST 16.6 ELBv2 HTTPS listeners should use secure TLS protocol versions (1.2 and above)', + + description: + 'The TLS (Transport Layer Security) protocol secures transmission of data over the internet using standard encryption technology. Encryption should be set with the latest version of TLS where possible. Versions prior to TLS 1.2 are deprecated and usage may pose security risks.', + + audit: '', + + rationale: '', + + remediation: `**AWS Console** + + - Navigate to AWS EC2. 
+ - In the left navigation, select Load Balancers. + - Select the load balancer > select Listeners. + - Select the checkbox for the HTTPS listener and select Edit. + - For Security policy, choose a security policy. See Security Policies for more information. + - Click Update. + + **AWS CLI** + + - To update ELBv2 HTTPS listeners to use secure TLS protocol versions (1.2 and above): + > aws elbv2 modify-listener \ + > --listener-arn \ + > --protocol (string) \ + > --ssl-policy (string) \ + > --certificates (list)`, + + references: [ + 'https://docs.aws.amazon.com/elasticloadbalancing/latest/application/listener-update-certificates.html#update-security-policy', + 'https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies', + 'https://docs.aws.amazon.com/cli/latest/reference/elbv2/modify-listener.html', + ], + gql: `{ + queryawsAlb { + id + arn + accountId + __typename + listeners { + settings { + protocol + sslPolicy + } + } + } + }`, + resource: 'queryawsAlb[*]', + severity: 'medium', + conditions: { + not: { + path: '@.listeners', + array_any: { + and: [ + { + path: '[*].settings.protocol', + match: /^HTTPS.*$/, + }, + { + path: '[*].settings.sslPolicy', + notIn: [ + 'ELBSecurityPolicy-2016-08', + 'ELBSecurityPolicy-TLS-1-0-2015-04', + 'ELBSecurityPolicy-TLS-1-1-2017-01', + 'ELBSecurityPolicy-TLS-1-2-2017-01', + 'ELBSecurityPolicy-TLS-1-2-Ext-2018-06', + 'ELBSecurityPolicy-FS-2018-06', + 'ELBSecurityPolicy-FS-1-1-2019-08', + 'ELBSecurityPolicy-FS-1-2-2019-08', + 'ELBSecurityPolicy-FS-1-2-Res-2019-08', + 'ELBSecurityPolicy-2015-05', + 'ELBSecurityPolicy-FS-1-2-Res-2020-10', + ], + }, + ], + }, + }, + }, +} diff --git a/src/aws/nist-800-53-rev4/rules/index.ts b/src/aws/nist-800-53-rev4/rules/index.ts index 13bc6602..6bd86c9c 100644 --- a/src/aws/nist-800-53-rev4/rules/index.ts +++ b/src/aws/nist-800-53-rev4/rules/index.ts @@ -106,6 +106,8 @@ import Aws_NIST_800_53_107 from './aws-nist-800-53-rev4-10.7' import 
Aws_NIST_800_53_108 from './aws-nist-800-53-rev4-10.8' import Aws_NIST_800_53_111 from './aws-nist-800-53-rev4-11.1' import Aws_NIST_800_53_112 from './aws-nist-800-53-rev4-11.2' +import Aws_NIST_800_53_121 from './aws-nist-800-53-rev4-12.1' +import Aws_NIST_800_53_122 from './aws-nist-800-53-rev4-12.2' import Aws_NIST_800_53_131 from './aws-nist-800-53-rev4-13.1' import Aws_NIST_800_53_132 from './aws-nist-800-53-rev4-13.2' import Aws_NIST_800_53_133 from './aws-nist-800-53-rev4-13.3' @@ -115,6 +117,12 @@ import Aws_NIST_800_53_151 from './aws-nist-800-53-rev4-15.1' import Aws_NIST_800_53_152 from './aws-nist-800-53-rev4-15.2' import Aws_NIST_800_53_153 from './aws-nist-800-53-rev4-15.3' import Aws_NIST_800_53_154 from './aws-nist-800-53-rev4-15.4' +import Aws_NIST_800_53_161 from './aws-nist-800-53-rev4-16.1' +import Aws_NIST_800_53_162 from './aws-nist-800-53-rev4-16.2' +import Aws_NIST_800_53_163 from './aws-nist-800-53-rev4-16.3' +import Aws_NIST_800_53_164 from './aws-nist-800-53-rev4-16.4' +import Aws_NIST_800_53_165 from './aws-nist-800-53-rev4-16.5' +import Aws_NIST_800_53_166 from './aws-nist-800-53-rev4-16.6' export default [ Aws_NIST_800_53_11, @@ -225,6 +233,8 @@ export default [ Aws_NIST_800_53_108, Aws_NIST_800_53_111, Aws_NIST_800_53_112, + Aws_NIST_800_53_121, + Aws_NIST_800_53_122, Aws_NIST_800_53_131, Aws_NIST_800_53_132, Aws_NIST_800_53_133, @@ -234,4 +244,10 @@ export default [ Aws_NIST_800_53_152, Aws_NIST_800_53_153, Aws_NIST_800_53_154, + Aws_NIST_800_53_161, + Aws_NIST_800_53_162, + Aws_NIST_800_53_163, + Aws_NIST_800_53_164, + Aws_NIST_800_53_165, + Aws_NIST_800_53_166, ] diff --git a/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-11.x.test.ts b/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-11.x.test.ts index fa7f25d9..77154d8a 100644 --- a/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-11.x.test.ts +++ b/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-11.x.test.ts @@ -4,57 +4,14 @@ import cuid from 'cuid' import Aws_NIST_800_53_111 
from '../rules/aws-nist-800-53-rev4-11.1' import Aws_NIST_800_53_112 from '../rules/aws-nist-800-53-rev4-11.2' -export interface Logging { - enabled: boolean -} - -export interface DataResource { - type: string -} - -export interface EventSelector { - readWriteType?: string - includeManagementEvents?: boolean - dataResources?: DataResource[] -} - -export interface Cloudtrail { - isMultiRegionTrail?: string - eventSelectors?: EventSelector[] - includeGlobalServiceEvents?: string -} - -export interface QueryawsCloudfront { +export interface QueryawsEcsTaskDefinition { id: string - logging: Logging + memory?: string | null + cpu?: string | null } -export interface QueryawsAccount { - id: string - cloudtrail: Cloudtrail[] -} - -export interface QueryawsCloudtrail { - id: string - eventSelectors?: EventSelector[] -} - -export interface QueryawsAlb { - id: string - accessLogsEnabled: string -} - -export interface QueryawsElb { - id: string - accessLogs: string -} - -export interface NIS6xQueryResponse { - queryawsCloudfront?: QueryawsCloudfront[] - queryawsAccount?: QueryawsAccount[] - queryawsCloudtrail?: QueryawsCloudtrail[] - queryawsAlb?: QueryawsAlb[] - queryawsElb?: QueryawsElb[] +export interface NIST11xQueryResponse { + queryawsEcsTaskDefinition?: QueryawsEcsTaskDefinition[] } describe('AWS NIST 800-53: Rev. 4', () => { @@ -66,14 +23,15 @@ describe('AWS NIST 800-53: Rev. 4', () => { }) }) - //11.X - describe(' AWS 11.1 ECS task definitions should limit memory usage for containers', () => { - const getTestRuleFixture = (memory: string|null|undefined): any => { + describe('AWS NIST 11.1 ECS task definitions should limit memory usage for containers', () => { + const getTestRuleFixture = ( + memory: string | null + ): NIST11xQueryResponse => { return { queryawsEcsTaskDefinition: [ { id: cuid(), - memory + memory, }, ], } @@ -81,7 +39,7 @@ describe('AWS NIST 800-53: Rev. 
4', () => { // Act const testRule = async ( - data: any, + data: NIST11xQueryResponse, expectedResult: Result ): Promise => { // Act @@ -94,34 +52,29 @@ describe('AWS NIST 800-53: Rev. 4', () => { expect(processedRule.result).toBe(expectedResult) } - test('Container memory is within the acceptable limit', async () => { - const data: any = getTestRuleFixture('512') + test('No Security Issue when Container memory is within the acceptable limit (512)', async () => { + const data: NIST11xQueryResponse = getTestRuleFixture('512') await testRule(data, Result.PASS) }) - test('Container memory is within the acceptable limit', async () => { - const data: any = getTestRuleFixture('256') + test('No Security Issue when Container memory is within the acceptable limit (256)', async () => { + const data: NIST11xQueryResponse = getTestRuleFixture('256') await testRule(data, Result.PASS) }) - test('Container memory cannot be null or undefined', async () => { - const data: any = getTestRuleFixture(null) - await testRule(data, Result.FAIL) - }) - - test('Container memory cannot be null or undefined', async () => { - const data: any = getTestRuleFixture(undefined) + test('Security Issue when Container memory is not set', async () => { + const data: NIST11xQueryResponse = getTestRuleFixture(null) await testRule(data, Result.FAIL) }) }) - describe(' AWS 11.2 ECS task definitions should set CPU limit for containers', () => { - const getTestRuleFixture = (cpu: string|null|undefined): any => { + describe('AWS NIST 11.2 ECS task definitions should set CPU limit for containers', () => { + const getTestRuleFixture = (cpu: string | null): NIST11xQueryResponse => { return { queryawsEcsTaskDefinition: [ { id: cuid(), - cpu + cpu, }, ], } @@ -129,7 +82,7 @@ describe('AWS NIST 800-53: Rev. 4', () => { // Act const testRule = async ( - data: any, + data: NIST11xQueryResponse, expectedResult: Result ): Promise => { // Act @@ -142,25 +95,19 @@ describe('AWS NIST 800-53: Rev. 
4', () => { expect(processedRule.result).toBe(expectedResult) } - test('CPU limit is within the acceptable limit', async () => { - const data: any = getTestRuleFixture('512') + test('No Security Issue when CPU limit is within the acceptable limit (512)', async () => { + const data: NIST11xQueryResponse = getTestRuleFixture('512') await testRule(data, Result.PASS) }) - test('CPU limit is within the acceptable limit', async () => { - const data: any = getTestRuleFixture('256') + test('No Security Issue when CPU limit is within the acceptable limit (256)', async () => { + const data: NIST11xQueryResponse = getTestRuleFixture('256') await testRule(data, Result.PASS) }) - test('CPU limit cannot be null or undefined', async () => { - const data: any = getTestRuleFixture(null) - await testRule(data, Result.FAIL) - }) - - test('CPU limit cannot be null or undefined', async () => { - const data: any = getTestRuleFixture(undefined) + test('Security Issue when CPU limit is not set', async () => { + const data: NIST11xQueryResponse = getTestRuleFixture(null) await testRule(data, Result.FAIL) }) }) - }) diff --git a/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-12.x.test.ts b/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-12.x.test.ts new file mode 100644 index 00000000..3046c8a8 --- /dev/null +++ b/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-12.x.test.ts @@ -0,0 +1,129 @@ +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' +import cuid from 'cuid' + +import Aws_NIST_800_53_121 from '../rules/aws-nist-800-53-rev4-12.1' +import Aws_NIST_800_53_122 from '../rules/aws-nist-800-53-rev4-12.2' + +export interface GeoRestriction { + restrictionType: string + locations: string[] +} + +export interface Subnet { + autoAssignPublicIpv4Address: string +} +export interface QueryawsEc2 { + id: string + subnets: Subnet[] +} + +export interface QueryawsCloudfront { + id: string + geoRestriction: GeoRestriction +} + +export interface NIST12xQueryResponse { + 
queryawsCloudfront?: QueryawsCloudfront[] + queryawsEc2?: QueryawsEc2[] +} + +describe('AWS NIST 800-53: Rev. 4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'aws', + entityName: 'NIST', + }) + }) + + describe('AWS NIST 12.1 CloudFront distributions should have geo-restrictions specified', () => { + const getTestRuleFixture = ( + restrictionType: string, + locations: string[] + ): NIST12xQueryResponse => { + return { + queryawsCloudfront: [ + { + id: cuid(), + geoRestriction: { + restrictionType, + locations + }, + }, + ], + } + } + + // Act + const testRule = async ( + data: NIST12xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_121 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a restrictionType equal to whitelist and locations specified', async () => { + const data: NIST12xQueryResponse = getTestRuleFixture('whitelist', ['CA','US']) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a restrictionType equal to whitelist and locations specified', async () => { + const data: NIST12xQueryResponse = getTestRuleFixture('blacklist', ['CA','US']) + await testRule(data, Result.PASS) + }) + + test('Security Issue when there is an inbound rule without geoRestriction specified', async () => { + const data: NIST12xQueryResponse = getTestRuleFixture('none', []) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS NIST 12.2 EC2 instances should not have a public IP association (IPv4)', () => { + const getTestRuleFixture = (autoAssignPublicIpv4Address: string): NIST12xQueryResponse => { + return { + queryawsEc2: [ + { + id: cuid(), + subnets: [ + { + autoAssignPublicIpv4Address, + }, + ], + }, + ], + } + } + + // Act + const testRule = async 
( + data: NIST12xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_122 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when EC2 instances not have a public IP association (IPv4)', async () => { + const data: NIST12xQueryResponse = getTestRuleFixture('No') + await testRule(data, Result.PASS) + }) + + test('Security Issue when EC2 instances have a public IP association (IPv4)', async () => { + const data: NIST12xQueryResponse = getTestRuleFixture('Yes') + await testRule(data, Result.FAIL) + }) + }) +}) diff --git a/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-16.x.test.ts b/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-16.x.test.ts new file mode 100644 index 00000000..649f6e9c --- /dev/null +++ b/src/aws/nist-800-53-rev4/tests/nist-800-53-rev4-16.x.test.ts @@ -0,0 +1,406 @@ +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' +import cuid from 'cuid' + +import Aws_NIST_800_53_161 from '../rules/aws-nist-800-53-rev4-16.1' +import Aws_NIST_800_53_162 from '../rules/aws-nist-800-53-rev4-16.2' +import Aws_NIST_800_53_163 from '../rules/aws-nist-800-53-rev4-16.3' +import Aws_NIST_800_53_164 from '../rules/aws-nist-800-53-rev4-16.4' +import Aws_NIST_800_53_165 from '../rules/aws-nist-800-53-rev4-16.5' +import Aws_NIST_800_53_166 from '../rules/aws-nist-800-53-rev4-16.6' + +export interface ViewerCertificate { + minimumProtocolVersion: string +} + +export interface OriginSslProtocols { + items: string[] +} + +export interface CustomOriginConfig { + originSslProtocols: OriginSslProtocols +} +export interface Origin { + customOriginConfig: CustomOriginConfig +} + +export interface QueryawsCloudfront { + id: string + origins?: Origin[] + viewerCertificate?: ViewerCertificate +} + +export interface Configuration { + securityPolicy: string +} + +export interface DomainName { + configurations: 
Configuration[] +} + +export interface QueryawsApiGatewayRestApi { + id: string + domainNames: DomainName[] +} + +export interface QueryawsApiGatewayHttpApi { + id: string + domainNames: DomainName[] +} + +export interface Settings { + protocol: string + sslPolicy: string +} +export interface Listener { + settings?: Settings + loadBalancerProtocol?: string + sslCertificateId?: string | null +} + +export interface QueryawsAlb { + id: string + listeners: Listener[] +} + +export interface QueryawsElb { + id: string + listeners: Listener[] +} +export interface NIST16xQueryResponse { + queryawsCloudfront?: QueryawsCloudfront[] + queryawsApiGatewayRestApi?: QueryawsApiGatewayRestApi[] + queryawsApiGatewayHttpApi?: QueryawsApiGatewayHttpApi[] + queryawsElb?: QueryawsElb[] + queryawsAlb?: QueryawsAlb[] +} + +describe('AWS NIST 800-53: Rev. 4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'aws', + entityName: 'NIST', + }) + }) + + describe('AWS NIST 16.1 API Gateway classic custom domains should use secure TLS protocol versions (1.2 and above)', () => { + const getTestRuleFixture = ( + securityPolicy: string + ): NIST16xQueryResponse => { + return { + queryawsApiGatewayRestApi: [ + { + id: cuid(), + domainNames: [ + { + configurations: [ + { + securityPolicy, + }, + ], + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: NIST16xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_161 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when API Gateway classic custom domains use secure TLS protocol versions 1.2 and above', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('TLS_1_2') + await testRule(data, Result.PASS) + }) + + test('No Security Issue when API Gateway classic custom domains is not set', 
async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('') + const restApi = data + .queryawsApiGatewayRestApi?.[0] as QueryawsApiGatewayRestApi + restApi.domainNames = [] + await testRule(data, Result.PASS) + }) + + test('Security Issue when API Gateway classic custom domains use secure TLS protocol versions older than 1.2', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('TLS_1_0') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS NIST 16.2 API Gateway v2 custom domains should use secure TLS protocol versions (1.2 and above)', () => { + const getTestRuleFixture = ( + securityPolicy: string + ): NIST16xQueryResponse => { + return { + queryawsApiGatewayHttpApi: [ + { + id: cuid(), + domainNames: [ + { + configurations: [ + { + securityPolicy, + }, + ], + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: NIST16xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_162 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when API Gateway v2 custom domains use secure TLS protocol versions 1.2 and above', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('TLS_1_2') + await testRule(data, Result.PASS) + }) + + test('No Security Issue when API Gateway v2 custom domains is not set', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('') + const httpApi = data + .queryawsApiGatewayHttpApi?.[0] as QueryawsApiGatewayHttpApi + httpApi.domainNames = [] + await testRule(data, Result.PASS) + }) + + test('Security Issue when API Gateway v2 custom domains use secure TLS protocol versions older than 1.2', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('TLS_1_0') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS NIST 16.3 CloudFront distribution custom origins should use secure 
TLS protocol versions (1.2 and above)', () => { + const getTestRuleFixture = (items: string[]): NIST16xQueryResponse => { + return { + queryawsCloudfront: [ + { + id: cuid(), + origins: [ + { + customOriginConfig: { + originSslProtocols: { + items, + }, + }, + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: NIST16xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_163 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when CloudFront distribution custom origins use secure TLS protocol versions 1.2 and above', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture(['TLSv1.2']) + await testRule(data, Result.PASS) + }) + + test('Security Issue when CloudFront distribution custom origins use secure TLS protocol versions older than 1.2', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture(['TLSv1.1']) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS NIST 16.4 CloudFront distribution viewer certificate should use secure TLS protocol versions (1.2 and above)', () => { + const getTestRuleFixture = ( + minimumProtocolVersion: string + ): NIST16xQueryResponse => { + return { + queryawsCloudfront: [ + { + id: cuid(), + viewerCertificate: { + minimumProtocolVersion, + }, + }, + ], + } + } + + // Act + const testRule = async ( + data: NIST16xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_164 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when CloudFront distribution viewer certificate use secure TLS protocol versions TLSv1.2_2021 and above', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('TLSv1.2_2021') + await testRule(data, Result.PASS) + }) + + 
test('No Security Issue when CloudFront distribution viewer certificate use secure TLS protocol versions TLSv1.2_2021 and above', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('TLSv1.2_2019') + await testRule(data, Result.PASS) + }) + + test('Security Issue when CloudFront distribution viewer certificate use secure TLS protocol versions older than 1.2', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('TLSv1.1_2016') + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS NIST 16.5 ELB HTTPS listeners should use secure TLS protocol versions (1.2 and above)', () => { + const getTestRuleFixture = ( + loadBalancerProtocol: string, + sslCertificateId: string | null + ): NIST16xQueryResponse => { + return { + queryawsElb: [ + { + id: cuid(), + listeners: [ + { + loadBalancerProtocol, + sslCertificateId, + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: NIST16xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_165 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when ELB HTTPS listeners have an SSL certificate configured', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture( + 'HTTPS', + 'arn:aws:acm:us-east-1:632941798677:certificate/add09e29-e7ea-4b4b-a8ca-706fb1e97d29' + ) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when for ELB HTTP listeners', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('HTTP', null) + await testRule(data, Result.PASS) + }) + + test('Security Issue when ELB HTTPS listeners not have a SSL certificate configured', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture('HTTPS', null) + await testRule(data, Result.FAIL) + }) + }) + + describe('AWS NIST 16.6 ELBv2 HTTPS listeners should use secure TLS protocol versions (1.2 and 
above)', () => { + const getTestRuleFixture = ( + protocol: string, + sslPolicy: string + ): NIST16xQueryResponse => { + return { + queryawsAlb: [ + { + id: cuid(), + listeners: [ + { + settings: { + protocol, + sslPolicy, + }, + }, + ], + }, + ], + } + } + + // Act + const testRule = async ( + data: NIST16xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Aws_NIST_800_53_166 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when ELBv2 HTTPS listeners use secure TLS protocol versions 1.2 and above', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture( + 'HTTPS:443 arn:aws:elasticloadbalancing:us-east-1:632941798677:listener/app/autocloud-sandbox-ecs-alb/6abb1980e6ded2ce/e720d4895ea6678d', + 'ELBSecurityPolicy-2016-08' + ) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when ELBv2 HTTP listeners use secure TLS protocol versions 1.2 and above', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture( + 'HTTP:80 arn:aws:elasticloadbalancing:us-east-1:632941798677:listener/app/autocloud-sandbox-ecs-alb/6abb1980e6ded2ce/e720d4895ea6678d', + 'ELBSecurityPolicy-2016-08' + ) + await testRule(data, Result.PASS) + }) + + test('Security Issue when ELBv2 HTTPS listeners use a secure TLS protocol versions older than 1.2', async () => { + const data: NIST16xQueryResponse = getTestRuleFixture( + 'HTTPS:443 arn:aws:elasticloadbalancing:us-east-1:632941798677:listener/app/autocloud-sandbox-ecs-alb/6abb1980e6ded2ce/e720d4895ea6678d', + 'ELBSecurityPolicy-2010-08' + ) + await testRule(data, Result.FAIL) + }) + }) +}) diff --git a/src/azure/cis-1.3.1/rules/azure-cis-1.3.1-9.6.ts b/src/azure/cis-1.3.1/rules/azure-cis-1.3.1-9.6.ts index 00abec3c..5e1ddea5 100644 --- a/src/azure/cis-1.3.1/rules/azure-cis-1.3.1-9.6.ts +++ b/src/azure/cis-1.3.1/rules/azure-cis-1.3.1-9.6.ts @@ -1,6 +1,6 @@ 
export default { id: 'azure-cis-1.3.1-9.6', - title: 'Azure CIS 9.2 Ensure that \'PHP version\' is the latest, if used to run the web app (Manual)', + title: 'Azure CIS 9.6 Ensure that \'PHP version\' is the latest, if used to run the web app (Manual)', description: 'Periodically newer versions are released for PHP software either due to security flaws or to include additional functionality. Using the latest PHP version for web apps is recommended in order to take advantage of security fixes, if any, and/or additional functionalities of the newer version.', diff --git a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-3.7.ts b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-3.7.ts index a4e1d5e3..e7758251 100644 --- a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-3.7.ts +++ b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-3.7.ts @@ -34,7 +34,7 @@ export default { - When ALL TCP ports are allowed in a rule, PORT does not have any value set (*NULL*) - When ALL Protocols are allowed in a rule, PORT does not have any value set (*NULL*)`, - rationale: `GCP *Firewall Rule*s within a *VPC Network*. These rules apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication). For an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. This route simply defines the path to the Internet, to avoid the most general (0.0.0.0/0) destination IP Range specified from the Internet through RDP with the default *Port 3389*. Generic access from the Internet to a specific IP Range should be restricted.`, + rationale: 'GCP *Firewall Rule*s within a *VPC Network*. These rules apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. 
Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication). For an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. This route simply defines the path to the Internet, to avoid the most general (0.0.0.0/0) destination IP Range specified from the Internet through RDP with the default *Port 3389*. Generic access from the Internet to a specific IP Range should be restricted.', remediation: `**From the Console:** 1. Go to *VPC Network*. @@ -48,7 +48,7 @@ export default { 1. Update RDP Firewall rule with new *SOURCE_RANGE* from the below command: gcloud compute firewall-rules update FirewallName --allow=[PROTOCOL[:PORT[-PORT]],...] --source-ranges=[CIDR_RANGE,...]`, - references: [`https://cloud.google.com/vpc/docs/firewalls#blockedtraffic`], + references: ['https://cloud.google.com/vpc/docs/firewalls#blockedtraffic'], gql: `{ querygcpFirewall(filter: {direction:{eq: "INGRESS"}}){ id @@ -109,11 +109,11 @@ export default { and: [ { path: '[*].fromPort', - lessThanInclusive: 3986, + lessThanInclusive: 3389, }, { path: '[*].toPort', - greaterThanInclusive: 3986, + greaterThanInclusive: 3389, }, ], }, diff --git a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.5.ts b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.5.ts index f5400575..dfc5aab7 100644 --- a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.5.ts +++ b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.5.ts @@ -63,7 +63,7 @@ export default { You can prevent VMs from having serial port access enable by *Disable VM serial port access* organization policy: https://console.cloud.google.com/iam-admin/orgpolicies/compute-disableSerialPortAccess.`, references: [ - `https://cloud.google.com/compute/docs/instances/interacting-with-serial-console`, + 'https://cloud.google.com/compute/docs/instances/interacting-with-serial-console', ], gql: `{ querygcpVmInstance{ @@ -80,34 
+80,20 @@ export default { resource: 'querygcpVmInstance[*]', severity: 'medium', conditions: { - path: '@.metadata.items', - array_any: { - or: [ - { - and: [ - { - path: '[*].key', - equal: 'serial-port-enable', - }, - { - path: '[*].value', - equal: '0', - }, - ], - }, - { - and: [ - { - path: '[*].key', - equal: 'serial-port-enable', - }, - { - path: '[*].value', - equal: 'false', - }, - ], - }, - ], + not: { + path: '@.metadata.items', + array_any: { + and: [ + { + path: '[*].key', + equal: 'serial-port-enable', + }, + { + path: '[*].value', + in: ['1', 'true'], + }, + ], + }, }, }, } diff --git a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.6.ts b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.6.ts index 036a929c..4a1f6442 100644 --- a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.6.ts +++ b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.6.ts @@ -56,7 +56,9 @@ export default { resource: 'querygcpVmInstance[*]', severity: 'medium', conditions: { - path: '@.canIpForward', - equal: false, + not: { + path: '@.canIpForward', + equal: true, + }, }, } diff --git a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.9.ts b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.9.ts index 497d8a56..43af3e7c 100644 --- a/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.9.ts +++ b/src/gcp/cis-1.2.0/rules/gcp-cis-1.2.0-4.9.ts @@ -103,16 +103,8 @@ export default { array_any: { path: '[*].accessConfigs', array_any: { - and: [ - { - path: '[*].natIP', - notEqual: null, - }, - { - path: '[*].natIP', - notEqual: '', - }, - ], + path: '[*].natIP', + notIn: [null, ''], }, }, }, diff --git a/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-3.x.test.ts b/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-3.x.test.ts index f2cd4e74..62dc78fb 100644 --- a/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-3.x.test.ts +++ b/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-3.x.test.ts @@ -451,8 +451,8 @@ describe('CIS Google Cloud Platform Foundations: 1.2.0', () => { expect(processedRule.result).toBe(expectedResult) } - test('No Security Issue when there is an inbound rule with a random 
IPv4 address and port 3986', async () => { - await test37Rule(3986, 3986, '10.10.10.10/16', Result.PASS) + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 3389', async () => { + await test37Rule(3389, 3389, '10.10.10.10/16', Result.PASS) }) test('No Security Issue when there is an inbound rule with IPv4 wilcard address and port 80', async () => { @@ -463,32 +463,32 @@ describe('CIS Google Cloud Platform Foundations: 1.2.0', () => { await test37Rule(80, 80, ipV6WildcardAddress, Result.PASS) }) - test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 3986', async () => { + test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 3389', async () => { await test37Rule(1000, 2000, '10.10.10.10/16', Result.PASS) }) - test('No Security Issue when there is an inbound rule with IPv4 wilcard address and a port range not including the port 3986', async () => { + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and a port range not including the port 3389', async () => { await test37Rule(1000, 2000, ipV4WildcardAddress, Result.PASS) }) - test('No Security Issue when there is an inbound rule with IPv6 wilcard address and a port range not including the port 3986', async () => { + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and a port range not including the port 3389', async () => { await test37Rule(1000, 2000, ipV6WildcardAddress, Result.PASS) }) - test('Security Issue when IPv4 wilcard address and port 3986 and tcp protocol', async () => { - await test37Rule(3986, 3986, ipV4WildcardAddress, Result.FAIL) + test('Security Issue when IPv4 wilcard address and port 3389 and tcp protocol', async () => { + await test37Rule(3389, 3389, ipV4WildcardAddress, Result.FAIL) }) - test('Security Issue when IPv4 wilcard address and port 3986 and all protocol', async () => { 
- await test37Rule(3986, 3986, ipV4WildcardAddress, Result.FAIL, 'all') + test('Security Issue when IPv4 wilcard address and port 3389 and all protocol', async () => { + await test37Rule(3389, 3389, ipV4WildcardAddress, Result.FAIL, 'all') }) - test('Security Issue when IPv6 wilcard address and port 3986 and tcp protocol', async () => { - await test37Rule(3986, 3986, ipV6WildcardAddress, Result.FAIL) + test('Security Issue when IPv6 wilcard address and port 3389 and tcp protocol', async () => { + await test37Rule(3389, 3389, ipV6WildcardAddress, Result.FAIL) }) - test('Security Issue when IPv6 wilcard address and port 3986 and all protocol', async () => { - await test37Rule(3986, 3986, ipV6WildcardAddress, Result.FAIL, 'all') + test('Security Issue when IPv6 wilcard address and port 3389 and all protocol', async () => { + await test37Rule(3389, 3389, ipV6WildcardAddress, Result.FAIL, 'all') }) test('Security Issue when there is an inbound rule with IPv4 wilcard address and no port range is specified', async () => { @@ -499,11 +499,11 @@ describe('CIS Google Cloud Platform Foundations: 1.2.0', () => { await test37Rule(undefined, undefined, ipV6WildcardAddress, Result.FAIL) }) - test('Security Issue when there is an inbound rule with IPv4 wilcard address and port range includes the port 3986', async () => { + test('Security Issue when there is an inbound rule with IPv4 wilcard address and port range includes the port 3389', async () => { await test37Rule(0, 4000, ipV4WildcardAddress, Result.FAIL) }) - test('Security Issue when there is an inbound rule with IPv6 wilcard address and port range includes the port 3986', async () => { + test('Security Issue when there is an inbound rule with IPv6 wilcard address and port range includes the port 3389', async () => { await test37Rule(0, 4000, ipV6WildcardAddress, Result.FAIL) }) }) diff --git a/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-4.x.test.ts b/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-4.x.test.ts index 6bffed84..6a700175 
100644 --- a/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-4.x.test.ts +++ b/src/gcp/cis-1.2.0/tests/gcp-cis-1.2.0-4.x.test.ts @@ -1369,7 +1369,7 @@ describe('CIS Google Cloud Platform Foundations: 1.2.0', () => { await test45Rule(data, Result.PASS) }) - test('Security Security Issue when ¨serial-port-enable¨ is set to true', async () => { + test('Security Issue when ¨serial-port-enable¨ is set to true', async () => { const metadataItems: MetadataItem[] = [ { key: 'serial-port-enable', @@ -1380,29 +1380,7 @@ describe('CIS Google Cloud Platform Foundations: 1.2.0', () => { await test45Rule(data, Result.FAIL) }) - test('Security Security Issue when ¨serial-port-enable¨ is set to 1', async () => { - const metadataItems: MetadataItem[] = [ - { - key: 'serial-port-enable', - value: 'true', - }, - ] - const data: CIS4xQueryResponse = getTest45RuleFixture(metadataItems) - await test45Rule(data, Result.FAIL) - }) - - test('Security Security Issue when ¨serial-port-enable¨ is set to 1', async () => { - const metadataItems: MetadataItem[] = [ - { - key: 'serial-port-enable', - value: '1', - }, - ] - const data: CIS4xQueryResponse = getTest45RuleFixture(metadataItems) - await test45Rule(data, Result.FAIL) - }) - - test('Security Security Issue when metadata is empty', async () => { + test('Security Issue when ¨serial-port-enable¨ is set to 1', async () => { const metadataItems: MetadataItem[] = [ { key: 'serial-port-enable', @@ -1413,15 +1391,10 @@ describe('CIS Google Cloud Platform Foundations: 1.2.0', () => { await test45Rule(data, Result.FAIL) }) - test('Security Security Issue when metadata does NOT contain ¨serial-port-enable¨ key', async () => { - const metadataItems: MetadataItem[] = [ - { - key: 'dummy-key', - value: 'false', - }, - ] + test('No Security Issue when metadata is empty', async () => { + const metadataItems: MetadataItem[] = [] const data: CIS4xQueryResponse = getTest45RuleFixture(metadataItems) - await test45Rule(data, Result.FAIL) + await test45Rule(data, 
Result.PASS) }) }) diff --git a/src/gcp/nist-800-53-rev4/.npmignore b/src/gcp/nist-800-53-rev4/.npmignore new file mode 100644 index 00000000..f3dc48f6 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/.npmignore @@ -0,0 +1,8 @@ +rules/ +tests/ +*.bak +.* +jest.config.js +tsconfig.json +**/*.ts +!dist/index.d.ts diff --git a/src/gcp/nist-800-53-rev4/.releaserc.yml b/src/gcp/nist-800-53-rev4/.releaserc.yml new file mode 100644 index 00000000..cba7db8c --- /dev/null +++ b/src/gcp/nist-800-53-rev4/.releaserc.yml @@ -0,0 +1,37 @@ +--- +branches: + - name: alpha + channel: alpha + prerelease: true + - name: beta + channel: beta + prerelease: true + - name: main + +plugins: + - "@semantic-release/commit-analyzer" + - "@semantic-release/release-notes-generator" + - - "@semantic-release/changelog" + - changelogFile: CHANGELOG.md + - - "@semantic-release/git" + - assets: + - CHANGELOG.md + - package.json + - - "@semrel-extra/npm" + - npmPublish: true + - "@semantic-release/github" +verifyConditions: + - "@semantic-release/changelog" + - "@semantic-release/github" + - "@semrel-extra/npm" +prepare: + - "@semantic-release/changelog" + - "@semrel-extra/npm" + - - "@semantic-release/git" + - message: "chore(release): ${nextRelease.version} \n\n${nextRelease.notes}" +publish: + - "@semantic-release/github" + - "@semrel-extra/npm" +success: false +fail: false +tagFormat: "${version}" diff --git a/src/gcp/nist-800-53-rev4/README.md b/src/gcp/nist-800-53-rev4/README.md new file mode 100644 index 00000000..4d5ec337 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/README.md @@ -0,0 +1,92 @@ +# NIST 800-53 Rev. 4 for Google Cloud Services + +Policy Pack based on the [800-53 Rev. 4](https://csrc.nist.gov/publications/detail/sp/800-53/rev-4/archive/2015-01-22) benchmark provided by the [The National Institute of Standards and Technology (NIST)](https://www.nist.gov) + +## First Steps + +1. Install [Cloud Graph CLI](https://docs.cloudgraph.dev/quick-start). +2. 
Set up the [GCP Provider](https://www.npmjs.com/package/@cloudgraph/cg-provider-gcp) for CG with the `cg init gcp` command. +3. Add Policy Pack NIST 800-53 Rev. 4 for Google Cloud Services benchmark using `cg policy add gcp-nist-800-53-rev4` command. +4. Execute the ruleset using the scan command `cg scan gcp`. +5. Query the findings using the different options: + + 5a. Querying findings by provider: + + ```graphql + query { + querygcpFindings { + NISTFindings { + id + resourceId + result + } + } + } + ``` + + 5b. Querying findings by specific benchmark: + + ```graphql + query { + querygcpNISTFindings { + id + resourceId + result + } + } + ``` + + 5c. Querying findings by resource: + + ```graphql + query { + querygcpIamPolicy { + id + NISTFindings { + id + resourceId + result + } + } + } + ``` + +## Available Ruleset + +| Rule | Description | +| ------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| GCP NIST 1.1 | Compute instances should not use the default service account | +| GCP NIST 1.2 | Compute instances should not use the default service account with full access to all Cloud APIs | +| GCP NIST 1.3 | Compute instance "block-project-ssh-keys should be enabled | +| GCP NIST 1.4 | Compute instances should not have public IP addresses | +| GCP NIST 1.5 | Compute instances "Enable connecting to serial ports" should not be enabled | +| GCP NIST 1.6 | SQL database instances should not permit access from 0.0.0.0/0 | +| GCP NIST 1.7 | SQL database instances should not have public IPs | +| GCP NIST 2.1 | DNS managed zone DNSSEC should be enabled | +| GCP NIST 2.2 | DNS managed zone DNSSEC key-signing keys should not use RSASHA1 | +| GCP NIST 2.3 | DNS managed zone DNSSEC zone-signing keys should not use RSASHA1 | +| GCP NIST 3.1 | IAM default audit log config should not exempt any users | +| GCP NIST 3.2 | PostgreSQL database instance 'log_checkpoints' database flag should 
be set to 'on' | +| GCP NIST 3.3 | PostgreSQL database instance 'log_connections' database flag should be set to 'on' | +| GCP NIST 3.4 | PostgreSQL database instance 'log_disconnections' database flag should be set to 'on' | +| GCP NIST 3.5 | PostgreSQL database instance 'log_lock_waits' database flag should be set to 'on' | +| GCP NIST 3.6 | PostgreSQL database instance 'log_min_error_statement' database flag should be set appropriately | +| GCP NIST 3.7 | PostgreSQL database instance 'log_temp_files' database flag should be set to '0' (on) | +| GCP NIST 3.8 | PostgreSQL database instance 'log_min_duration_statement' database flag should be set to '-1' (disabled) | +| GCP NIST 3.9 | At least one project-level logging sink should be configured with an empty filter | +| GCP NIST 3.10 | Network subnet flow logs should be enabled | +| GCP NIST 4.1 | Compute instance disks should be encrypted with customer-supplied encryption keys (CSEKs) | +| GCP NIST 4.2 | SQL database instances should require incoming connections to use SSL | +| GCP NIST 5.1 | Logging metric filter and alert for project ownership assignments/changes should be configured | +| GCP NIST 5.2 | Logging metric filter and alert for audit configuration changes should be configured | +| GCP NIST 5.3 | Logging metric filter and alert for Custom Role changes should be configured | +| GCP NIST 5.4 | Logging metric filter and alert for network firewall rule changes should be configured | +| GCP NIST 5.5 | Logging metric filter and alert for network route changes should be configured | +| GCP NIST 5.6 | Logging metric filter and alert for network changes should be configured | +| GCP NIST 5.7 | Logging metric filter and alert for SQL instance configuration changes should be configured | +| GCP NIST 5.8 | Logging storage bucket retention policies and Bucket Lock should be configured | +| GCP NIST 6.1 | The default network for a project should be deleted | +| GCP NIST 6.2 | Network firewall rules should not permit 
ingress from 0.0.0.0/0 to port 22 (SSH) | +| GCP NIST 6.3 | Network firewall rules should not permit ingress from 0.0.0.0/0 to port 3389 (RDP) | +| GCP NIST 6.4 | Load balancer HTTPS or SSL proxy SSL policies should not have weak cipher suites | +| GCP NIST 6.5 | Compute instances "IP forwarding" should not be enabled | diff --git a/src/gcp/nist-800-53-rev4/index.ts b/src/gcp/nist-800-53-rev4/index.ts new file mode 100644 index 00000000..84d5e14f --- /dev/null +++ b/src/gcp/nist-800-53-rev4/index.ts @@ -0,0 +1,7 @@ +import PolicyPacksRules from './rules' + +export default { + provider: 'gcp', + entity: 'NIST', + rules: PolicyPacksRules, +} diff --git a/src/gcp/nist-800-53-rev4/jest.config.js b/src/gcp/nist-800-53-rev4/jest.config.js new file mode 100644 index 00000000..42987aab --- /dev/null +++ b/src/gcp/nist-800-53-rev4/jest.config.js @@ -0,0 +1,7 @@ +/** @type {import('@ts-jest/dist/types').InitialOptionsTsJest} */ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + testMatch: ['/tests/**/*.test.ts'], + testPathIgnorePatterns: ['/lib/', '/node_modules/'], +} diff --git a/src/gcp/nist-800-53-rev4/package.json b/src/gcp/nist-800-53-rev4/package.json new file mode 100644 index 00000000..3a46daff --- /dev/null +++ b/src/gcp/nist-800-53-rev4/package.json @@ -0,0 +1,64 @@ +{ + "name": "@cloudgraph/policy-pack-gcp-nist-800-53-rev4", + "description": "Policy pack implementing The National Institute of Standards and Technology 800-53 Rev. 
4 Benchmark for Google Cloud services", + "version": "0.0.1", + "author": "AutoCloud", + "license": "MPL-2.0", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "repository": { + "type": "git", + "url": "git+https://github.com/cloudgraphdev/cloudgraph-policy-packs.git", + "directory": "src/gcp/nist-800-53-rev4" + }, + "bugs": { + "url": "https://github.com/cloudgraphdev/cloudgraph-policy-packs/issues" + }, + "publishConfig": { + "access": "public" + }, + "directories": { + "test": "tests" + }, + "devDependencies": { + "@autocloud/eslint-config": "^0.1.0", + "@cloudgraph/sdk": "^0.18.1", + "@types/jest": "^27.4.0", + "@types/node": "^15.12.4", + "@types/pino": "^6.3.11", + "@typescript-eslint/eslint-plugin": "^4.28.5", + "@typescript-eslint/parser": "^4.28.5", + "cpx": "^1.5.0", + "cuid": "^2.1.8", + "eslint": "^7.25.0", + "eslint-config-airbnb-base": "14.2.1", + "eslint-config-prettier": "^6.11.0", + "eslint-plugin-import": "^2.22.1", + "eslint-plugin-prettier": "^3.4.0", + "jest": "^27.0.6", + "prettier": "^2.4.1", + "shx": "^0.3.3", + "ts-jest": "^27.0.4", + "tslib": "^1", + "typescript": "^4.3.5" + }, + "engines": { + "node": ">=16.0.0" + }, + "homepage": "https://www.cloudgraph.dev/", + "keywords": [ + "cloudgraph" + ], + "prettier": { + "semi": false, + "singleQuote": true + }, + "scripts": { + "build": "yarn prepack", + "clean": "rm -rf dist", + "lint": "eslint", + "prepack": "rm -rf dist && tsc -b", + "publish": "yarn npm publish", + "test": "NODE_ENV=test jest" + } +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.1.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.1.ts new file mode 100644 index 00000000..c95da131 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.1.ts @@ -0,0 +1,109 @@ +// GCP CIS 1.2.0 Rule equivalent 4.1 +export default { + id: 'gcp-nist-800-53-rev4-1.1', + title: + 'GCP NIST 1.1 Compute instances should not use the default service account', + description: `It 
is recommended to configure your instance to not use the default Compute Engine + service account because it has the Editor role on the project.`, + audit: `**From Console:** + + 1. Go to the *VM instances* page by visiting: https://console.cloud.google.com/compute/instances. + 2. Click on each instance name to go to its *VM instance details* page. + 3. Under the section *Service Account*, ensure that the default Compute Engine service account is not used. This account is named *[PROJECT_NUMBER]-compute@developer.gserviceaccount.com*. + + **From Command Line:** + + 1. List the instances in your project: + + gcloud compute instances list + + 2. Get the details on each instance: + + gcloud compute instances describe INSTANCE_NAME --zone ZONE + + 3. Ensure that the service account section does not have an email that matches the pattern used does not match the pattern *[PROJECT_NUMBER]-compute@developer.gserviceaccount.com*. + + **Exception:** + VMs created by GKE should be excluded. These VMs have names that start with *gke-* and + are labeled *goog-gke-node*.`, + rationale: `The default Compute Engine service account has the Editor role on the project, which allows read and write access to most Google Cloud Services. To defend against privilege escalations if your VM is compromised and prevent an attacker from gaining access to all of your project, it is recommended to not use the default Compute Engine service account. Instead, you should create a new service account and assigning only the permissions needed by your instance. + + The default Compute Engine service account is named *[PROJECT_NUMBER]- compute@developer.gserviceaccount.com*.`, + remediation: `**From Console:** + + 1. Go to the *VM instances* page by visiting:https://console.cloud.google.com/compute/instances. + 2. Click on the instance name to go to its *VM instance details* page. + 3. Click *STOP* and then click *EDIT*. + 4. 
Under the section *Service Account*, select a service account other than the default Compute Engine service account. You may first need to create a new service account. + 5. Click *Save* and then click *START*. + + **From Command Line:** + + 1. Stop the instance: + + gcloud compute instances stop INSTANCE_NAME + + 2. Update the instance: + + gcloud compute instances set-service-account INSTANCE_NAME --service-account=SERVICE_ACCOUNT + + 3. Restart the instance: + + gcloud compute instances start INSTANCE_NAME`, + references: [ + 'https://cloud.google.com/compute/docs/access/service-accounts', + 'https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances', + 'https://cloud.google.com/sdk/gcloud/reference/compute/instances/set-service-account', + ], + gql: `{ + querygcpVmInstance{ + __typename + id + project{ + id + } + name + labels{ + value + } + serviceAccounts{ + email + } + } + }`, + resource: 'querygcpVmInstance[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '@', + and: [ + { + path: '[*].name', + match: /^gke-.*$/, + }, + { + path: '[*].labels', + array_any: { + path: '[*].value', + equal: 'goog-gke-node', + }, + }, + ], + }, + { + jq: `[{ "defaultEmail" : (.project[].id | split("/") | .[1] + "-compute@developer.gserviceaccount.com")} + .serviceAccounts[]] + | [.[] | select(.defaultEmail == .email) ] + | {"match" : (length > 0)}`, + path: '@', + and: [ + { + path: '@.match', + notEqual: true, + }, + ], + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.2.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.2.ts new file mode 100644 index 00000000..a657cf58 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.2.ts @@ -0,0 +1,126 @@ +// GCP CIS 1.2.0 Rule equivalent 4.2 +export default { + id: 'gcp-nist-800-53-rev4-1.2', + title: + 'GCP NIST 1.2 Compute instances should not use the default service account with full access to all Cloud APIs', + 
description: `To support principle of least privileges and prevent potential privilege escalation it is + recommended that instances are not assigned to default service account Compute Engine + default service account with Scope Allow full access to all Cloud APIs.`, + audit: `**From Console:** + + 1. Go to the *VM instances* page by visiting: https://console.cloud.google.com/compute/instances. + 2. Click on each instance name to go to its *VM instance details* page. + 3. If the *Default Compute Engine service account* is selected under *Service Account*, ensure that *Cloud API access scopes* is not set to *Allow full access to all Cloud APIs*. + + **From Command Line:** + + 1. List Instances from project + + gcloud compute instances list + + 2. Get the details on each instance: + + gcloud compute instances describe INSTANCE_NAME --zone ZONE + + 3. Ensure that the instance is not configured to allow the https://www.googleapis.com/auth/cloud-platform scope for the default Compute Engine service account: + + serviceAccounts: + - email: [PROJECT_NUMBER]-compute@developer.gserviceaccount.com + scopes: + - https://www.googleapis.com/auth/cloud-platform + + **Exception:** Instances created by GKE should be excluded. These instances have names that + start with "gke-" and are labeled "goog-gke-node"`, + rationale: `Along with ability to optionally create, manage and use user managed custom service accounts, Google Compute Engine provides default service account *Compute Engine default service account* for an instances to access necessary cloud services. *Project Editor* role is assigned to *Compute Engine default service account* hence, This service account has almost all capabilities over all cloud services except billing. However, when *Compute Engine default service account* assigned to an instance it can operate in 3 scopes. + + 1. Allow default access: Allows only minimum access required to run an Instance (Least Privileges) + 2. 
Allow full access to all Cloud APIs: Allow full access to all the cloud APIs/Services (Too much access) + 3. Set access for each API: Allows Instance administrator to choose only those APIs that are needed to perform specific business functionality expected by instance + + When an instance is configured with *Compute Engine default service account* with Scope *Allow full access to all Cloud APIs*, based on IAM roles assigned to the user(s) accessing Instance, it may allow user to perform cloud operations/API calls that user is not supposed to perform leading to successful privilege escalation.`, + remediation: `**From Console:** + + 1. Go to the *VM instances* page by visiting: https://console.cloud.google.com/compute/instances. + 2. Click on the impacted VM instance. + 3. If the instance is not stopped, click the *Stop* button. Wait for the instance to be stopped. + 4. Next, click the *Edit* button. + 5. Scroll down to the *Service Account* section. + 6. Select a different service account or ensure that *Allow full access to all Cloud APIs* is not selected. + 7. Click the *Save* button to save your changes and then click *START*. + + **From Command Line:** + + 1. Stop the instance: + + gcloud compute instances stop INSTANCE_NAME + + 2. Update the instance: + + gcloud compute instances set-service-account INSTANCE_NAME --service- account=SERVICE_ACCOUNT --scopes [SCOPE1, SCOPE2...] + + 3. 
Restart the instance: + + gcloud compute instances start INSTANCE_NAME`, + references: [ + 'https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances', + 'https://cloud.google.com/compute/docs/access/service-accounts', + ], + gql: `{ + querygcpVmInstance{ + __typename + id + project{ + id + } + name + labels{ + value + } + serviceAccounts{ + email + scopes + } + } + }`, + resource: 'querygcpVmInstance[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '@', + and: [ + { + path: '[*].name', + match: /^gke-.*$/, + }, + { + path: '[*].labels', + array_any: { + path: '[*].value', + equal: 'goog-gke-node', + }, + }, + ], + }, + { + jq: `[{ "defaultEmail" : (.project[].id | split("/") | .[1] + "-compute@developer.gserviceaccount.com")} + .serviceAccounts[]] + | [.[] | select(.defaultEmail == .email) ] + | {"match" : (length > 0), "scopes": .[].scopes} // {"match" : false, "scopes": []}`, + path: '@', + and: [ + { + path: '@.match', + notEqual: true, + }, + { + path: '[*].scopes', + array_all: { + notEqual: 'https://www.googleapis.com/auth/cloud-platform', + }, + }, + ], + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.3.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.3.ts new file mode 100644 index 00000000..6b9336cf --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.3.ts @@ -0,0 +1,128 @@ +// GCP CIS 1.2.0 Rule equivalent 4.3 +export default { + id: 'gcp-nist-800-53-rev4-1.3', + title: + 'GCP NIST 1.3 Compute instance "block-project-ssh-keys should be enabled', + description: `It is recommended to use Instance specific SSH key(s) instead of using common/shared + project-wide SSH key(s) to access Instances.`, + audit: `**From Console:** + + 1. Go to the *VM instances* page by visiting https://console.cloud.google.com/compute/instances. It will list all the instances in your project. + 2. For every instance, click on the name of the instance. + 3. 
Under *SSH Keys*, ensure *Block project-wide SSH keys* is selected. + + **From Command Line:** + + 1. List all instances in a project: + + gcloud compute instances list + + 2. For every instance, get the instance metadata: + + gcloud compute instances describe INSTANCE_NAME + + 3. Ensure key: *block-project-ssh-keys* set to *value*: '*true*'. + + **Exception:** + Instances created by GKE should be excluded. These instances have names that start with + "gke-" and are labeled "goog-gke-node".`, + rationale: 'Project-wide SSH keys are stored in Compute/Project-meta-data. Project wide SSH keys can be used to login into all the instances within project. Using project-wide SSH keys eases the SSH key management but if compromised, poses the security risk which can impact all the instances within project. It is recommended to use Instance specific SSH keys which can limit the attack surface if the SSH keys are compromised.', + remediation: `**From Console:** + + 1. Go to the *VM instances* page by visiting: https://console.cloud.google.com/compute/instances. It will list all the instances in your project. + 2. Click on the name of the Impacted instance + 3. Click *Edit* in the toolbar + 4. Under SSH Keys, go to the *Block project-wide SSH keys* checkbox + 5. To block users with project-wide SSH keys from connecting to this instance, select *Block project-wide SSH keys* + 6. Click *Save* at the bottom of the page + 7. 
Repeat steps for every impacted Instance + + **From Command Line:** + Block project-wide public SSH keys, set the metadata value to *TRUE*: + + gcloud compute instances add-metadata INSTANCE_NAME --metadata block-project- ssh-keys=TRUE`, + references: [ + 'https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys', + ], + gql: `{ + querygcpVmInstance{ + __typename + id + project{ + id + } + name + labels{ + value + } + serviceAccounts{ + email + scopes + } + metadata{ + items{ + key + value + } + } + } + }`, + resource: 'querygcpVmInstance[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '@', + and: [ + { + path: '[*].name', + match: /^gke-.*$/, + }, + { + path: '[*].labels', + array_any: { + path: '[*].value', + equal: 'goog-gke-node', + }, + }, + ], + }, + { + path: '[*].metadata.items', + isEmpty: true + }, + { + and: [ + { + path: '[*].metadata.items', + array_any: { + and: [ + { + path: '[*].key', + equal: 'block-project-ssh-keys', + }, + { + path: '[*].value', + equal: 'true', + }, + ], + }, + }, + { + jq: `[{ "defaultEmail" : (.project[].id | split("/") | .[1] + "-compute@developer.gserviceaccount.com")} + .serviceAccounts[]] + | [.[] | select(.defaultEmail == .email) ] + | {"match" : (length > 0)} // {"match" : false}`, + path: '@', + and: [ + { + path: '@.match', + notEqual: true, + }, + ], + }, + ], + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.4.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.4.ts new file mode 100644 index 00000000..728e5b5c --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.4.ts @@ -0,0 +1,123 @@ +// GCP CIS 1.2.0 Rule equivalent 4.9 +export default { + id: 'gcp-nist-800-53-rev4-1.4', + title: + 'GCP NIST 1.4 Compute instances should not have public IP addresses', + description: + 'Compute instances should not be configured to have external IP addresses.', + audit: `**From Console:** + + 1. 
Go to the *VM instances* page by visiting: https://console.cloud.google.com/compute/instances. + 2. For every VM, ensure that there is no *External IP* configured. + + **From Command Line:** + + 1. List the instances in your project: + + gcloud compute instances list + + 2. For every instance, list its configuration: + + gcloud compute instances describe INSTANCE_NAME --zone=ZONE + + 3. The output should not contain an *accessConfigs* section under *networkInterfaces*. Note that the *natIP* value is present only for instances that are running or for instances that are stoped but have a static IP address. For instances that are stopped and are configured to have an ephemeral public IP address, the *natIP* field will not be present. Example output: + + networkInterfaces: + - accessConfigs: + - kind: compute#accessConfig + name: External NAT + networkTier: STANDARD + type: ONE_TO_ONE_NAT + + **Exception:** + + Instances created by GKE should be excluded because some of them have external IP + addresses and cannot be changed by editing the instance settings. Instances created by GKE + should be excluded. These instances have names that start with "gke-" and are labeled + "goog-gke-node".`, + rationale: 'To reduce your attack surface, Compute instances should not have public IP addresses. Instead, instances should be configured behind load balancers, to minimize the instance\'s exposure to the internet.', + remediation: `**From Console:** + + 1. Go to the *VM instances* page by visiting: https://console.cloud.google.com/compute/instances. + 2. Click on the instance name to go the the *Instance detail page*. + 3. Click *Edit*. + 4. For each Network interface, ensure that *External IP* is set to *None*. + 5. Click *Done* and then click *Save*. + + **From Command Line:** + + 1. Describe the instance properties: + + gcloud compute instances describe INSTANCE_NAME --zone=ZONE + + 2. Identify the access config name that contains the external IP address. 
This access config appears in the following format: + + networkInterfaces: + - accessConfigs: + - kind: compute#accessConfig + name: External NAT + natIP: 130.211.181.55 + type: ONE_TO_ONE_NAT + + 2. Delete the access config. + + gcloud compute instances delete-access-config INSTANCE_NAME --zone=ZONE -- access-config-name "ACCESS_CONFIG_NAME" + + + In the above example, the *ACCESS_CONFIG_NAME* is *External NAT*. The name of your access + config might be different. + + **Prevention:** + + You can configure the *Define allowed external IPs for VM instances* Organization Policy to prevent VMs from being configured with public IP addresses. Learn more at: https://console.cloud.google.com/orgpolicies/compute-vmExternalIpAccess`, + references: [ + 'https://cloud.google.com/load-balancing/docs/backend-service#backends_and_external_ip_addresses', + 'https://cloud.google.com/compute/docs/instances/connecting-advanced#sshbetweeninstances', + 'https://cloud.google.com/compute/docs/instances/connecting-to-instance', + 'https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address#unassign_ip', + 'https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints', + ], + gql: `{ + querygcpVmInstance { + id + __typename + name + networkInterfaces { + accessConfigs { + name + natIP + } + } + } + }`, + resource: 'querygcpVmInstance[*]', + severity: 'unknown', + conditions: { + not: { + and: [ + { + path: '@.name', + mismatch: /^gke-.*$/, + }, + { + path: '@.networkInterfaces', + array_any: { + path: '[*].accessConfigs', + array_any: { + and: [ + { + path: '[*].natIP', + notEqual: null, + }, + { + path: '[*].natIP', + notEqual: '', + }, + ], + }, + }, + }, + ], + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.5.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.5.ts new file mode 100644 index 00000000..47f31d73 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.5.ts @@ 
-0,0 +1,114 @@ +// GCP CIS 1.2.0 Rule equivalent 4.5 +export default { + id: 'gcp-nist-800-53-rev4-1.5', + title: + 'GCP NIST 1.5 Compute instances "Enable connecting to serial ports" should not be enabled', + description: `Interacting with a serial port is often referred to as the serial console, which is similar to + using a terminal window, in that input and output is entirely in text mode and there is no + graphical interface or mouse support. + + If you enable the interactive serial console on an instance, clients can attempt to connect to + that instance from any IP address. Therefore interactive serial console support should be + disabled.`, + audit: `**From Console:** + + 1. Login to Google Cloud console + 2. Go to Computer Engine + 3. Go to VM instances + 4. Click on the Specific VM + 5. Ensure *Enable connecting to serial ports* below *Remote access* block is + unselected. + + **From Command Line:** + Ensure the below command's output shows *null*: + + gcloud compute instances describe --zone= -- format="json(metadata.items[].key,metadata.items[].value)" + + or *key* and *value* properties from below command's json response are equal to *serial-port-enable* and *0* or *false* respectively. + + { + "metadata": { + "items": [ + { + "key": "serial-port-enable", + "value": "0" + } + ] + } + }`, + rationale: `A virtual machine instance has four virtual serial ports. Interacting with a serial port is similar to using a terminal window, in that input and output is entirely in text mode and there is no graphical interface or mouse support. The instance's operating system, BIOS, and other system-level entities often write output to the serial ports, and can accept input such as commands or answers to prompts. Typically, these system-level entities use the first serial port (port 1) and serial port 1 is often referred to as the serial console. + + The interactive serial console does not support IP-based access restrictions such as IP whitelists. 
If you enable the interactive serial console on an instance, clients can attempt to connect to that instance from any IP address. This allows anybody to connect to that instance if they know the correct SSH key, username, project ID, zone, and instance name. + + Therefore interactive serial console support should be disabled.`, + remediation: `**From Console:** + + 1. Login to Google Cloud console + 2. Go to Computer Engine + 3. Go to VM instances + 4. Click on the Specific VM + 5. Click *EDIT* + 6. Unselect *Enable connecting to serial ports* below *Remote access* block. + 7. Click *Save* + + **From Command Line:** + Use the below command to disable + + gcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE -- metadata=serial-port-enable=false + + or + + gcloud compute instances add-metadata INSTANCE_NAME --zone=ZONE -- metadata=serial-port-enable=0 + + **Prevention:** + You can prevent VMs from having serial port access enable by *Disable VM serial port + access* organization policy: https://console.cloud.google.com/iam-admin/orgpolicies/compute-disableSerialPortAccess.`, + references: [ + 'https://cloud.google.com/compute/docs/instances/interacting-with-serial-console', + ], + gql: `{ + querygcpVmInstance{ + __typename + id + metadata{ + items{ + key + value + } + } + } + }`, + resource: 'querygcpVmInstance[*]', + severity: 'medium', + conditions: { + path: '@.metadata.items', + array_any: { + or: [ + { + and: [ + { + path: '[*].key', + equal: 'serial-port-enable', + }, + { + path: '[*].value', + equal: '0', + }, + ], + }, + { + and: [ + { + path: '[*].key', + equal: 'serial-port-enable', + }, + { + path: '[*].value', + equal: 'false', + }, + ], + }, + ], + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.6.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.6.ts new file mode 100644 index 00000000..4902c93d --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.6.ts @@ -0,0 +1,78 @@ +// GCP CIS 
1.2.0 Rule equivalent 6.5 +export default { + id: 'gcp-nist-800-53-rev4-1.6', + title: + 'GCP NIST 1.6 SQL database instances should not permit access from 0.0.0.0/0', + description: `Database Server should accept connections only from trusted Network(s)/IP(s) and + restrict access from the world.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Click the instance name to open its *Instance details* page. + 3. Under the *Configuration* section click *Edit configurations*. + 4. Under *Configuration options* expand the *Connectivity* section. + 5. Ensure that no authorized network is configured to allow *0.0.0.0/0*. + + **From Command Line:** + + 1. List all Cloud SQL database Instances using the following command: + + gcloud sql instances list + + + 2. Get detailed configuration for every Cloud SQL database instance. + + gcloud sql instances describe INSTANCE_NAME + + Ensure that the section *settings: ipConfiguration : authorizedNetworks* does not have any parameter value containing *0.0.0.0/0*.`, + rationale: `To minimize attack surface on a Database server instance, only trusted/known and required IP(s) should be white-listed to connect to it. + + An authorized network should not have IPs/networks configured to *0.0.0.0/0* which will allow access to the instance from anywhere in the world. Note that authorized networks apply only to instances with public IPs.`, + remediation: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Click the instance name to open its *Instance details* page. + 3. Under the *Configuration* section click *Edit configuration*s + 4. Under *Configuration options* expand the *Connectivity* section. + 5. Click the *delete* icon for the authorized network *0.0.0.0/0*. + 6. Click *Save* to update the instance. 
+ + **From Command Line:** + + Update the authorized network list by dropping off any addresses. + + gcloud sql instances patch INSTANCE_NAME --authorized-networks=IP_ADDR1,IP_ADDR2... + + **Prevention:** + + To prevent new SQL instances from being configured to accept incoming connections from any IP addresses, set up a *Restrict Authorized Networks on Cloud SQL instances* Organization Policy at: https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictAuthorizedNetworks.`, + references: [ + 'https://cloud.google.com/sql/docs/mysql/configure-ip', + 'https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictAuthorizedNetworks', + 'https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints', + 'https://cloud.google.com/sql/docs/mysql/connection-org-policy', + ], + gql: `{ + querygcpSqlInstance { + id + __typename + name + settings { + ipConfiguration { + authorizedNetworks { + value + } + } + } + } + }`, + resource: 'querygcpSqlInstance[*]', + severity: 'high', + conditions: { + path: '@.settings.ipConfiguration.authorizedNetworks', + array_all: { + path: '[*].value', + notEqual: '0.0.0.0/0', + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.7.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.7.ts new file mode 100644 index 00000000..98aeafb7 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-1.7.ts @@ -0,0 +1,71 @@ +// GCP CIS 1.2.0 Rule equivalent 6.6 +export default { + id: 'gcp-nist-800-53-rev4-1.7', + title: + 'GCP NIST 1.7 SQL database instances should not have public IPs', + description: `It is recommended to configure Second Generation Sql instance to use private IPs instead of + public IPs.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console: https://console.cloud.google.com/sql/instances + 2. Ensure that every instance has a private IP address and no public IP address configured. 
+ + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. For every instance of type *instanceType: CLOUD_SQL_INSTANCE* with *backendType: SECOND_GEN*, get detailed configuration. Ignore instances of type *READ_REPLICA_INSTANCE* because these instances inherit their settings from the primary instance. Also, note that first generation instances cannot be configured to have a private IP address. + + gcloud sql instances describe INSTANCE_NAME + + 3. Ensure that the setting *ipAddresses* has an IP address configured of *type: PRIVATE* and has no IP address of type: PRIMARY. PRIMARY IP addresses are public addresses. An instance can have both a private and public address at the same time. Note also that you cannot use private IP with First Generation instances.`, + rationale: 'To lower the organization\'s attack surface, Cloud SQL databases should not have public IPs. Private IPs provide improved network security and lower latency for your application.', + remediation: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console: https://console.cloud.google.com/sql/instances + 2. Click the instance name to open its Instance details page. + 3. Select the *Connections* tab. + 4. Deselect the *Public IP* checkbox. + 5. Click *Save* to update the instance. + + **From Command Line:** + + 1. For every instance remove its public IP and assign a private IP instead: + + gcloud beta sql instances patch INSTANCE_NAME --network=VPC_NETWORK_NAME --no-assign-ip + + 2. 
Confirm the changes using the following command:: + + gcloud sql instances describe INSTANCE_NAME + + **Prevention:** + + To prevent new SQL instances from getting configured with public IP addresses, set up a *Restrict Public IP access on Cloud SQL instances* Organization policy at: https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictPublicIp.`, + references: [ + 'https://cloud.google.com/sql/docs/mysql/configure-private-ip', + 'https://cloud.google.com/sql/docs/mysql/private-ip', + 'https://cloud.google.com/resource-manager/docs/organization-policy/org-policy-constraints', + 'https://console.cloud.google.com/iam-admin/orgpolicies/sql-restrictPublicIp', + ], + gql: `{ + querygcpSqlInstance(filter:{instanceType:{eq: "CLOUD_SQL_INSTANCE"}, backendType:{eq: "SECOND_GEN"}}) { + id + __typename + name + ipAddresses{ + type + } + } + }`, + resource: 'querygcpSqlInstance[*]', + severity: 'unknown', + conditions: { + path: '@.ipAddresses', + array_all: { + path: '[*].type', + equal: 'PRIVATE', + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.1.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.1.ts new file mode 100644 index 00000000..85c689d9 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.1.ts @@ -0,0 +1,70 @@ +// GCP CIS 1.2.0 Rule equivalent 3.3 +export default { + id: 'gcp-nist-800-53-rev4-2.1', + title: 'GCP NIST 2.1 DNS managed zone DNSSEC should be enabled', + description: `Cloud Domain Name System (DNS) is a fast, reliable and cost-effective domain name system + that powers millions of domains on the internet. Domain Name System Security Extensions + (DNSSEC) in Cloud DNS enables domain owners to take easy steps to protect their domains + against DNS hijacking and man-in-the-middle and other attacks.`, + audit: `**From Console:** + + 1. Go to *Cloud DNS* by visiting https://console.cloud.google.com/net-services/dns/zones. + 2. 
For each zone of *Type Public*, ensure that *DNSSEC* is set to *On*. + + **From Command Line:** + + 1. List all the Managed Zones in a project: + + gcloud dns managed-zones list + + 2. For each zone of *VISIBILITY public*, get its metadata: + + gcloud dns managed-zones describe ZONE_NAME + + 3. Ensure that *dnssecConfig.state* property is *on*.`, + rationale: 'Domain Name System Security Extensions (DNSSEC) adds security to the DNS protocol by enabling DNS responses to be validated. Having a trustworthy DNS that translates a domain name like www.example.com into its associated IP address is an increasingly important building block of today’s web-based applications. Attackers can hijack this process of domain/IP lookup and redirect users to a malicious site through DNS hijacking and man-in- the-middle attacks. DNSSEC helps mitigate the risk of such attacks by cryptographically signing DNS records. As a result, it prevents attackers from issuing fake DNS responses that may misdirect browsers to nefarious websites.', + remediation: `**From Console:** + + 1. Go to *Cloud DNS* by visiting https://console.cloud.google.com/net-services/dns/zones. + 2. For each zone of *Type Public*, set *DNSSEC* to *On*. + + **From Command Line:** + Use the below command to enable *DNSSEC* for Cloud DNS Zone Name. 
+ + gcloud dns managed-zones update ZONE_NAME --dnssec-state on`, + references: [ + 'https://cloudplatform.googleblog.com/2017/11/DNSSEC-now-available-in-Cloud-DNS.html', + 'https://cloud.google.com/dns/dnssec-config#enabling', + 'https://cloud.google.com/dns/dnssec', + ], + gql: `{ + querygcpDnsManagedZone { + id + __typename + visibility + dnssecConfigState + } + }`, + resource: 'querygcpDnsManagedZone[*]', + severity: 'medium', + conditions: { + or: [ + { + path: '@.visibility', + equal: 'private', + }, + { + and: [ + { + path: '@.visibility', + equal: 'public', + }, + { + path: '@.dnssecConfigState', + equal: 'on', + }, + ], + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.2.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.2.ts new file mode 100644 index 00000000..8f58e315 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.2.ts @@ -0,0 +1,76 @@ +// GCP CIS 1.2.0 Rule equivalent 3.4 +export default { + id: 'gcp-nist-800-53-rev4-2.2', + title: + 'GCP NIST 2.2 DNS managed zone DNSSEC key-signing keys should not use RSASHA1', + description: `DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing + (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular + subsets of these algorithms. The algorithm used for key signing should be a recommended + one and it should be strong.`, + audit: `Currently there is no support to audit this setting through console. + + **From Command Line:** + Ensure the property algorithm for keyType keySigning is not using *RSASHA1*. + + gcloud dns managed-zones describe ZONENAME --format="json(dnsName,dnssecConfig.state,dnssecConfig.defaultKeySpecs)"`, + rationale: `Domain Name System Security Extensions (DNSSEC) algorithm numbers in this registry may be used in CERT RRs. Zonesigning (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. 
+ + The algorithm used for key signing should be a recommended one and it should be strong. When enabling DNSSEC for a managed zone, or creating a managed zone with DNSSEC, the user can select the DNSSEC signing algorithms and the denial-of-existence type. Changing the DNSSEC settings is only effective for a managed zone if DNSSEC is not already enabled. If there is a need to change the settings for a managed zone where it has been enabled, turn DNSSEC off and then re-enable it with different settings.`, + remediation: `1. If it is necessary to change the settings for a managed zone where it has been enabled, NSSEC must be turned off and re-enabled with different settings. To turn off DNSSEC, run the following command: + + gcloud dns managed-zones update ZONE_NAME --dnssec-state off + + +2. To update key-signing for a reported managed DNS Zone, run the following command: + + gcloud dns managed-zones update ZONE_NAME --dnssec-state on --ksk-algorithm KSK_ALGORITHM --ksk-key-length KSK_KEY_LENGTH --zsk-algorithm ZSK_ALGORITHM - -zsk-key-length ZSK_KEY_LENGTH --denial-of-existence DENIAL_OF_EXISTENCE`, + references: [ + 'https://cloud.google.com/dns/dnssec-advanced#advanced_signing_options', + ], + gql: `{ + querygcpDnsManagedZone { + id + __typename + visibility + dnssecConfigDefaultKeySpecs { + keyType + algorithm + } + } + }`, + resource: 'querygcpDnsManagedZone[*]', + severity: 'medium', + conditions: { + or: [ + { + path: '@.visibility', + equal: 'private', + }, + { + and: [ + { + path: '@.visibility', + equal: 'public', + }, + { + not: { + path: '@.dnssecConfigDefaultKeySpecs', + array_any: { + and: [ + { + path: '[*].keyType', + equal: 'keySigning', + }, + { + path: '[*].algorithm', + equal: 'rsasha1', + }, + ], + }, + }, + }, + ], + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.3.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.3.ts new file mode 100644 index 00000000..044a5842 --- /dev/null +++ 
b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-2.3.ts @@ -0,0 +1,79 @@ +// GCP CIS 1.2.0 Rule equivalent 3.5 +export default { + id: 'gcp-nist-800-53-rev4-2.3', + title: + 'GCP NIST 2.3 DNS managed zone DNSSEC zone-signing keys should not use RSASHA1', + description: `DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing + (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular + subsets of these algorithms. The algorithm used for key signing should be a recommended + one and it should be strong.`, + audit: `Currently there is no support to audit this setting through the console. + + **From Command Line:** + Ensure the property algorithm for keyType zone signing is not using RSASHA1. + + gcloud dns managed-zones describe --format="json(dnsName,dnssecConfig.state,dnssecConfig.defaultKeySpecs)"`, + rationale: `DNSSEC algorithm numbers in this registry may be used in CERT RRs. Zone signing (DNSSEC) and transaction security mechanisms (SIG(0) and TSIG) make use of particular subsets of these algorithms. + + The algorithm used for key signing should be a recommended one and it should be strong. When enabling DNSSEC for a managed zone, or creating a managed zone with DNSSEC, the DNSSEC signing algorithms and the denial-of-existence type can be selected. Changing the DNSSEC settings is only effective for a managed zone if DNSSEC is not already enabled. If the need exists to change the settings for a managed zone where it has been enabled, turn DNSSEC off and then re-enable it with different settings.`, + remediation: `1. If the need exists to change the settings for a managed zone where it has been + enabled, DNSSEC must be turned off and then re-enabled with different settings. To + turn off DNSSEC, run following command: + + gcloud dns managed-zones update ZONE_NAME --dnssec-state off + + +2. 
To update zone-signing for a reported managed DNS Zone, run the following + command: + + gcloud dns managed-zones update ZONE_NAME --dnssec-state on --ksk-algorithm KSK_ALGORITHM --ksk-key-length KSK_KEY_LENGTH --zsk-algorithm ZSK_ALGORITHM - -zsk-key-length ZSK_KEY_LENGTH --denial-of-existence DENIAL_OF_EXISTENCE`, + references: [ + 'https://cloud.google.com/dns/dnssec-advanced#advanced_signing_options', + ], + gql: `{ + querygcpDnsManagedZone { + id + __typename + visibility + dnssecConfigDefaultKeySpecs { + keyType + algorithm + } + } + }`, + resource: 'querygcpDnsManagedZone[*]', + severity: 'medium', + conditions: { + or: [ + { + path: '@.visibility', + equal: 'private', + }, + { + and: [ + { + path: '@.visibility', + equal: 'public', + }, + { + not: { + path: '@.dnssecConfigDefaultKeySpecs', + array_any: { + and: [ + { + path: '[*].keyType', + equal: 'zoneSigning', + }, + { + path: '[*].algorithm', + equal: 'rsasha1', + }, + ], + }, + }, + }, + ], + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.1.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.1.ts new file mode 100644 index 00000000..eb0aa52b --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.1.ts @@ -0,0 +1,167 @@ + +// GCP CIS 1.2.0 Rule equivalent 2.1 +export default { + id: 'gcp-nist-800-53-rev4-3.1', + title: + 'GCP NIST 3.1 IAM default audit log config should not exempt any users', + description: + 'It is recommended that Cloud Audit Logging is configured to track all admin activities and read, write access to user data.', + audit: `**From Console:** + + 1. Go to Audit Logs by visiting https://console.cloud.google.com/iam-admin/audit. + 2. Ensure that Admin Read, Data Write, and Data Read are enabled for all Google Cloud + services and that no exemptions are allowed. + + **From Command Line:** + + 1. 
List the Identity and Access Management (IAM) policies for the project, folder, or organization: + + gcloud organizations get-iam-policy ORGANIZATION_ID + gcloud resource-manager folders get-iam-policy FOLDER_ID + gcloud projects get-iam-policy PROJECT_ID + + 2. Policy should have a default auditConfigs section which has the logtype set to + DATA_WRITES and DATA_READ for all services. Note that projects inherit settings + from folders, which in turn inherit settings from the organization. When called, + projects get-iam-policy, the result shows only the policies set in the project, not the + policies inherited from the parent folder or organization. Nevertheless, if the parent + folder has Cloud Audit Logging enabled, the project does as well. + + Sample output for default audit configs may look like this: + + auditConfigs: + - auditLogConfigs: + - logType: ADMIN_READ + - logType: DATA_WRITE + - logType: DATA_READ + service: allServices + + 3. Any of the auditConfigs sections should not have parameter "exemptedMembers:" + set, which will ensure that Logging is enabled for all users and no user is exempted.`, + rationale: `Cloud Audit Logging maintains two audit logs for each project, folder, and organization: + Admin Activity and Data Access. + + 1. Admin Activity logs contain log entries for API calls or other administrative actions + that modify the configuration or metadata of resources. Admin Activity audit logs + are enabled for all services and cannot be configured. + 2. Data Access audit logs record API calls that create, modify, or read user-provided + data. These are disabled by default and should be enabled. + + There are three kinds of Data Access audit log information: + + - Admin read: Records operations that read metadata or configuration + information. Admin Activity audit logs record writes of metadata and + configuration information that cannot be disabled. + - Data read: Records operations that read user-provided data. 
+ - Data write: Records operations that write user-provided data. + + It is recommended to have an effective default audit config configured in such a way that: + + 1. logtype is set to DATA_READ (to log user activity tracking) and DATA_WRITES (to + log changes/tampering to user data). + 2. audit config is enabled for all the services supported by the Data Access audit logs feature. + 3. Logs should be captured for all users, i.e., there are no exempted users in any of the audit config sections. This will ensure overriding the audit config will not contradict the requirement.`, + remediation: `**From Console:** + + 1. Go to Audit Logs by visiting https://console.cloud.google.com/iam-admin/audit. + + + 2. Follow the steps at https://cloud.google.com/logging/docs/audit/configure-data- + access to enable audit logs for all Google Cloud services. Ensure that no exemptions + are allowed. + + **From Command Line:** + + 1. To read the project's IAM policy and store it in a file run a command: + + gcloud projects get-iam-policy PROJECT_ID > /tmp/project_policy.yaml + + Alternatively, the policy can be set at the organization or folder level. If setting the policy at the organization level, it is not necessary to also set it for each folder or project. + + gcloud organizations get-iam-policy ORGANIZATION_ID > /tmp/org_policy.yaml + gcloud resource-manager folders get-iam-policy FOLDER_ID > /tmp/folder_policy.yaml + + 2. Edit policy in /tmp/policy.yaml, adding or changing only the audit logs + configuration to: + + auditConfigs: + - auditLogConfigs: + - logType: DATA_WRITE + - logType: DATA_READ + service: allServices + + **Note:** exemptedMembers: is not set as audit logging should be enabled for all the users + + 3. 
To write new IAM policy run command: + + gcloud organizations set-iam-policy ORGANIZATION_ID /tmp/org_policy.yaml + gcloud resource-manager folders set-iam-policy FOLDER_ID /tmp/folder_policy.yaml + gcloud projects set-iam-policy PROJECT_ID /tmp/project_policy.yaml + + If the preceding command reports a conflict with another change, then repeat these steps, starting with the first step.`, + references: [ + 'https://cloud.google.com/logging/docs/audit/', + 'https://cloud.google.com/logging/docs/audit/configure-data-access', + ], + gql: `{ + querygcpIamPolicy{ + id + __typename + auditConfigs{ + auditLogConfigs{ + logType + exemptedMembers + } + service + exemptedMembers + } + } + }`, + resource: 'querygcpIamPolicy[*]', + severity: 'medium', + conditions: { + path: '@.auditConfigs', + array_any: { + and: [ + { + path: '[*].exemptedMembers', + isEmpty: true, + }, + { + path: '[*].service', + equal: 'allServices', + }, + { + path: '[*].auditLogConfigs', + array_any: { + and: [ + { + path: '[*].logType', + equal: 'DATA_WRITE', + }, + { + path: '[*].exemptedMembers', + isEmpty: true, + }, + ], + }, + }, + { + path: '[*].auditLogConfigs', + array_any: { + and: [ + { + path: '[*].logType', + equal: 'DATA_READ', + }, + { + path: '[*].exemptedMembers', + isEmpty: true, + }, + ], + }, + }, + ], + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.10.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.10.ts new file mode 100644 index 00000000..207cd2c4 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.10.ts @@ -0,0 +1,90 @@ +// GCP CIS 1.2.0 Rule equivalent 3.8 +export default { + id: 'gcp-nist-800-53-rev4-3.10', + title: + 'GCP NIST 3.10 Network subnet flow logs should be enabled', + description: `Flow Logs is a feature that enables users to capture information about the IP traffic going to + and from network interfaces in the organization's VPC Subnets. 
Once a flow log is created, + the user can view and retrieve its data in Stackdriver Logging. It is recommended that Flow + Logs be enabled for every business-critical VPC subnet.`, + + audit: `**From Console:** + + 1. Go to the VPC network GCP Console visiting https://console.cloud.google.com/networking/networks/list + 2. From the list of network subnets, + make sure for each subnet *Flow Logs* is set to *On* + + **From Command Line:** + + gcloud compute networks list --format json | \\ jq -r '.[].subnetworks | .[]' | \ + xargs -I {} gcloud compute networks subnets describe {} --format json | \ + jq -r '. | "Subnet: \\(.name) Purpose: \\(.purpose) VPC Flow Log Enabled: \\(has("enableFlowLogs"))"' + + The output of the above command will list each subnet, the subnet's purpose, and a *true* or *false* value if *Flow Logs* are enabled. + If the subnet's purpose is *PRIVATE* then *Flow Logs* should be *true*. + `, + rationale: `VPC networks and subnetworks not reserved for internal HTTP(S) load balancing provide logically isolated and secure network partitions where GCP resources can be launched. When Flow Logs are enabled for a subnet, VMs within that subnet start reporting on all Transmission Control Protocol (TCP) and User Datagram Protocol (UDP) flows. Each VM samples the TCP and UDP flows it sees, inbound and outbound, whether the flow is to or from another VM, a host in the on-premises datacenter, a Google service, or a host on the Internet. If two GCP VMs are communicating, and both are in subnets that have VPC Flow Logs enabled, both VMs report the flows. + + Flow Logs supports the following use cases: + + - Network monitoring + - Understanding network usage and optimizing network traffic expenses + - Network forensics + - Real-time security analysis + + Flow Logs provide visibility into network traffic for each VM inside the subnet and can be used to detect + anomalous traffic or provide insight during security workflows. 
+ + Note: Subnets reserved for use by internal HTTP(S) load balancers do not support VPC flow logs.`, + remediation: `**From Console:** + + 1. Go to the VPC network GCP Console visiting https://console.cloud.google.com/networking/networks/list + 2. Click the name of a subnet, The *Subnet details* page displays. + 3. Click the *EDIT* button. + 4. Set *Flow Logs* to *On*. + 5. Click Save. + + **From Command Line:** + To set Private Google access for a network subnet, run the following command: + + gcloud compute networks subnets update [SUBNET_NAME] --region [REGION] --enable-flow-logs`, + references: [ + 'https://cloud.google.com/vpc/docs/using-flow-logs#enabling_vpc_flow_logging', + 'https://cloud.google.com/vpc/', + ], + gql: `{ + querygcpNetwork{ + id + __typename + subnets{ + purpose + enableFlowLogs + } + } + }`, + resource: 'querygcpNetwork[*]', + severity: 'high', + conditions: { + path: '@.subnets', + array_all: { + or: [ + { + path: '[*].purpose', + notEqual: 'PRIVATE', + }, + { + and: [ + { + path: '[*].purpose', + equal: 'PRIVATE', + }, + { + path: '[*].enableFlowLogs', + equal: true, + }, + ], + }, + ], + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.2.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.2.ts new file mode 100644 index 00000000..a96bab1a --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.2.ts @@ -0,0 +1,106 @@ +// GCP CIS 1.2.0 Rule equivalent 6.2.1 +export default { + id: 'gcp-nist-800-53-rev4-3.2', + title: + "GCP NIST 3.2 PostgreSQL database instance 'log_checkpoints' database flag should be set to 'on'", + description: `Ensure that the log_checkpoints database flag for the Cloud SQL PostgreSQL instance is + set to on.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the instance to open its *Instance Overview* page. + 3. 
Ensure that the database flag *log_checkpoints* that has been set is listed under the *Database flags* section. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. Ensure that the below command returns on for every Cloud SQL PostgreSQL + database instance. + + gcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name=="log_checkpoints")|.value'`, + rationale: 'Enabling *log_checkpoints* causes checkpoints and restart points to be logged in the server log. Some statistics are included in the log messages, including the number of buffers written and the time spent writing them. This parameter can only be set in the postgresql.conf file or on the server command line. This recommendation is applicable to PostgreSQL database instances.', + remediation: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the PostgreSQL instance where the database flag needs to be enabled. + 3. Click *Edit*. + 4. Scroll down to the *Flags* section. + 5. To set a flag that has not been set on the instance before, click *Add item*, choose the flag *log_checkpoints* from the drop-down menu, and set its value. + 6. Click *Save*. + 7. Confirm the changes under *Flags* on the Overview page. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. Configure the *log_checkpoints* database flag for every Cloud SQL PosgreSQL database instance using the below command: + + gcloud sql instances patch INSTANCE_NAME --database-flags log_checkpoints=on + + **Note:** + + This command will overwrite all previously set database flags. To keep those and add new ones, include the values for all flags to be set on the instance. Any flag not specifically included is set to its default value. 
For flags that do not take a value, specify the flag name followed by an equals sign ("=").`, + references: [ + 'https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT', + 'https://cloud.google.com/sql/docs/postgres/flags#setting_a_database_flag', + ], + gql: `{ + querygcpProject{ + id + projectId + __typename + sqlInstances(filter:{ databaseVersion: {regexp: "/POSTGRES*/"}}){ + name + settings{ + databaseFlags{ + name + value + } + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '[*].sqlInstances', + isEmpty: true, + }, + { + path: '[*].sqlInstances', + array_all: { + path: '[*]', + and: [ + { + path: '[*].settings.databaseFlags', + isEmpty: false, + }, + { + path: '[*].settings.databaseFlags', + array_any: { + and: [ + { + path: '[*].name', + equal: 'log_checkpoints', + }, + { + path: '[*].value', + equal: 'on', + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.3.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.3.ts new file mode 100644 index 00000000..0e46868b --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.3.ts @@ -0,0 +1,108 @@ +// GCP CIS 1.2.0 Rule equivalent 6.2.3 +export default { + id: 'gcp-nist-800-53-rev4-3.3', + title: + "GCP NIST 3.3 PostgreSQL database instance 'log_connections' database flag should be set to 'on'", + description: `Enabling the log_connections setting causes each attempted connection to the server to + be logged, along with successful completion of client authentication. This parameter cannot + be changed after the session starts.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the instance to open its *Instance Overview* page. + 3. Go to the *Configuration* card. + 4. 
Under *Database flags*, check the value of *log_connections* flag to determine if it is configured as expected. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. Ensure the below command returns *on* for every Cloud SQL PostgreSQL database instance: + + gcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name=="log_connections")|.value' + `, + rationale: 'PostgreSQL does not log attempted connections by default. Enabling the *log_connections* setting will create log entries for each attempted connection as well as successful completion of client authentication which can be useful in troubleshooting issues and to determine any unusual connection attempts to the server. This recommendation is applicable to PostgreSQL database instances.', + remediation: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the PostgreSQL instance for which you want to enable the database flag. + 3. Click *Edit*. + 4. Scroll down to the *Flags* section. + 5. To set a flag that has not been set on the instance before, click *Add item*, choose the flag *log_connections* from the drop-down menu and set the value as *on*. + 6. Click *Save*. + 7. Confirm the changes under *Flags* on the Overview page. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. Configure the *log_connections* database flag for every Cloud SQL PosgreSQL database instance using the below command. + + gcloud sql instances patch INSTANCE_NAME --database-flags log_connections=on + + **Note:** + + This command will overwrite all previously set database flags. 
To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign ("=").`, + references: [ + 'https://cloud.google.com/sql/docs/postgres/flags', + 'https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT', + ], + gql: `{ + querygcpProject{ + id + projectId + __typename + sqlInstances(filter:{ databaseVersion: {regexp: "/POSTGRES*/"}}){ + name + settings{ + databaseFlags{ + name + value + } + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '[*].sqlInstances', + isEmpty: true, + }, + { + path: '[*].sqlInstances', + array_all: { + path: '[*]', + and: [ + { + path: '[*].settings.databaseFlags', + isEmpty: false, + }, + { + path: '[*].settings.databaseFlags', + array_any: { + and: [ + { + path: '[*].name', + equal: 'log_connections', + }, + { + path: '[*].value', + equal: 'on', + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.4.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.4.ts new file mode 100644 index 00000000..59302f5d --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.4.ts @@ -0,0 +1,106 @@ +// GCP CIS 1.2.0 Rule equivalent 6.2.4 +export default { + id: 'gcp-nist-800-53-rev4-3.4', + title: + "GCP NIST 3.4 PostgreSQL database instance 'log_disconnections' database flag should be set to 'on'", + description: `Enabling the log_disconnections setting logs the end of each session, including the + session duration.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the instance to open its *Instance Overview* page + 3. Go to the *Configuration* card. + 4. 
Under *Database flags*, check the value of *log_disconnections* flag is configured as expected. + + **From Command Line:** + + 1. List all Cloud SQL database Instances using the following command: + + gcloud sql instances list + + 2. Ensure the below command returns on for every Cloud SQL PostgreSQL database instance: + + gcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name=="log_disconnections")|.value'`, + rationale: 'PostgreSQL does not log session details such as duration and session end by default. Enabling the *log_disconnections* setting will create log entries at the end of each session which can be useful in troubleshooting issues and determine any unusual activity across a time period. The *log_disconnections* and *log_connections* work hand in hand and generally, the pair would be enabled/disabled together. This recommendation is applicable to PostgreSQL database instances.', + remediation: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the PostgreSQL instance where the database flag needs to be enabled. + 3. Click *Edit*. + 4. Scroll down to the *Flags* section. + 5. To set a flag that has not been set on the instance before, click *Add item*, choose the flag *log_disconnections* from the drop-down menu and set the value as *on*. + 6. Click *Save*. + 7. Confirm the changes under *Flags* on the Overview page. + + **From Command Line:** + + 1. List all Cloud SQL database Instances using the following command: + + gcloud sql instances list + + 2. Configure the *log_disconnections* database flag for every Cloud SQL PosgreSQL database instance using the below command: + + gcloud sql instances patch INSTANCE_NAME --database-flags log_disconnections=on + + **Note:** + + This command will overwrite all previously setdatabase flags. 
To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign ("=").`, + references: [ + 'https://cloud.google.com/sql/docs/postgres/flags', + 'https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT', + ], + gql: `{ + querygcpProject{ + id + projectId + __typename + sqlInstances(filter:{ databaseVersion: {regexp: "/POSTGRES*/"}}){ + name + settings{ + databaseFlags{ + name + value + } + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '[*].sqlInstances', + isEmpty: true, + }, + { + path: '[*].sqlInstances', + array_all: { + path: '[*]', + and: [ + { + path: '[*].settings.databaseFlags', + isEmpty: false, + }, + { + path: '[*].settings.databaseFlags', + array_any: { + and: [ + { + path: '[*].name', + equal: 'log_disconnections', + }, + { + path: '[*].value', + equal: 'on', + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.5.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.5.ts new file mode 100644 index 00000000..ed7c3867 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.5.ts @@ -0,0 +1,106 @@ +// GCP CIS 1.2.0 Rule equivalent 6.2.6 +export default { + id: 'gcp-nist-800-53-rev4-3.5', + title: + "GCP NIST 3.5 PostgreSQL database instance 'log_lock_waits' database flag should be set to 'on'", + description: `Enabling the log_lock_waits flag for a PostgreSQL instance creates a log for any session + waits that take longer than the alloted deadlock_timeout time to acquire a lock.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. 
Select the instance to open its Instance Overview page. + 3. Go to the *Configuration* card. + 4. Under *Database flags*, check if the value of the *log_lock_waits* flag is configured as expected. + + **From Command Line:** + + 1. List all Cloud SQL database Instances using the following command: + + gcloud sql instances list + + 2. Ensure the below command returns *on* for every Cloud SQL PostgreSQL database instance + + gcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name=="log_lock_waits")|.value'`, + rationale: 'The deadlock timeout defines the time to wait on a lock before checking for any conditions. Frequent run overs on deadlock timeout can be an indication of an underlying issue. Logging such waits on locks by enabling the *log_lock_waits* flag can be used to identify poor performance due to locking delays or if a specially-crafted SQL is attempting to starve resources through holding locks for excessive amounts of time. This recommendation is applicable to PostgreSQL database instances.', + remediation: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the PostgreSQL instance where the database flag needs to be enabled. + 3. Click *Edit*. + 4. Scroll down to the *Flags* section. + 5. To set a flag that has not been set on the instance before, click *Add item*, choose the flag *log_lock_waits* from the drop-down menu and set the value as *on*. + 6. Click *Save*. + 7. Confirm the changes under *Flags* on the Overview page. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. 
Configure the *log_lock_waits* database flag for every Cloud SQL PosgreSQL database instance using the below command: + + gcloud sql instances patch INSTANCE_NAME --database-flags log_lock_waits=on + + **Note:** + + This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign ("=").`, + references: [ + 'https://cloud.google.com/sql/docs/postgres/flags', + 'https://www.postgresql.org/docs/9.6/runtime-config-logging.html#GUC-LOG-MIN-DURATION-STATEMENT', + ], + gql: `{ + querygcpProject{ + id + projectId + __typename + sqlInstances(filter:{ databaseVersion: {regexp: "/POSTGRES*/"}}){ + name + settings{ + databaseFlags{ + name + value + } + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '[*].sqlInstances', + isEmpty: true, + }, + { + path: '[*].sqlInstances', + array_all: { + path: '[*]', + and: [ + { + path: '[*].settings.databaseFlags', + isEmpty: false, + }, + { + path: '[*].settings.databaseFlags', + array_any: { + and: [ + { + path: '[*].name', + equal: 'log_lock_waits', + }, + { + path: '[*].value', + equal: 'on', + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.6.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.6.ts new file mode 100644 index 00000000..303b44ec --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.6.ts @@ -0,0 +1,110 @@ +// GCP CIS 1.2.0 Rule equivalent 6.2.14 +export default { + id: 'gcp-nist-800-53-rev4-3.6', + title: + "GCP NIST 3.6 PostgreSQL database instance 'log_min_error_statement' database flag should be set appropriately", + description: `The log_min_error_statement flag defines the minimum message severity level that are + 
considered as an error statement. Messages for error statements are logged with the SQL + statement. Valid values include DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, INFO, NOTICE, + WARNING, ERROR, LOG, FATAL, and PANIC. Each severity level includes the subsequent levels + mentioned above. Ensure a value of ERROR or stricter is set.`, + audit: `**Using Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the instance to open its *Instance Overview* page + 3. Go to *Configuration* card + 4. Under *Database flags*, check the value of *log_min_error_statement* flag is configured as to *ERROR* or stricter. + + **Using Command Line:** + + 1. List all Cloud SQL database Instances + + gcloud sql instances list + + + 2. Use the below command for every Cloud SQL PostgreSQL database instance to verify the value of *log_min_error_statement* is set to *ERROR* or stricter. + + gcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name=="log_min_error_statement")|.value'`, + rationale: 'Auditing helps in troubleshooting operational problems and also permits forensic analysis. If *log_min_error_statement* is not set to the correct value, messages may not be classified as error messages appropriately. Considering general log messages as error messages would make is difficult to find actual errors and considering only stricter severity levels as error messages may skip actual errors to log their SQL statements. The *log_min_error_statement* flag should be set to *ERROR* or stricter. This recommendation is applicable to PostgreSQL database instances.', + remediation: `**Using Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the PostgreSQL instance for which you want to enable the database flag. + 3. Click *Edit*. + 4. 
Scroll down to the *Flags* section. + 5. To set a flag that has not been set on the instance before, click *Add item*, choose the flag *log_min_error_statement* from the drop-down menu and set appropriate value. + 6. Click *Save* to save your changes. + 7. Confirm your changes under *Flags* on the Overview page. + + **Using Command Line:** + + 1. List all Cloud SQL database Instances + + gcloud sql instances list + + 2. Configure the *log_min_error_statement* database flag for every Cloud SQL PosgreSQL database instance using the below command. + + gcloud sql instances patch INSTANCE_NAME --database-flags log_min_error_statement= + + **Note:** + + This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags you want set on the instance; any flag not specifically included is set to its default value. For flags that do not take a value, specify the flag name followed by an equals sign ("=").`, + references: [ + 'https://cloud.google.com/sql/docs/postgres/flags', + 'https://www.postgresql.org/docs/9.6/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHEN', + ], + gql: `{ + querygcpProject{ + id + projectId + __typename + sqlInstances(filter:{ databaseVersion: {regexp: "/POSTGRES*/"}}){ + name + settings{ + databaseFlags{ + name + value + } + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '[*].sqlInstances', + isEmpty: true, + }, + { + path: '[*].sqlInstances', + array_all: { + path: '[*]', + and: [ + { + path: '[*].settings.databaseFlags', + isEmpty: false, + }, + { + path: '[*].settings.databaseFlags', + array_any: { + and: [ + { + path: '[*].name', + equal: 'log_min_error_statement', + }, + { + path: '[*].value', + in: ['error', 'log', 'fatal', 'panic'], + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.7.ts 
b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.7.ts new file mode 100644 index 00000000..2773eec3 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.7.ts @@ -0,0 +1,110 @@ +// GCP CIS 1.2.0 Rule equivalent 6.2.15 +export default { + id: 'gcp-nist-800-53-rev4-3.7', + title: + "GCP NIST 3.7 PostgreSQL database instance 'log_temp_files' database flag should be set to '0' (on)", + description: `PostgreSQL can create a temporary file for actions such as sorting, hashing and temporary + query results when these operations exceed work_mem. The log_temp_files flag controls + logging names and the file size when it is deleted. Configuring log_temp_files to 0 causes + all temporary file information to be logged, while positive values log only files whose size is + greater than or equal to the specified number of kilobytes. A value of - 1 disables temporary + file information logging.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the instance to open its *Instance Overview* page + 3. Go to the *Configuration* card. + 4. Under *Database flags*, check that the value of *log_temp_files* flag is set to *0*. + + **From Command Line:** + + 1. List all Cloud SQL database Instances using the following command: + + gcloud sql instances list + + 2. Ensure that the below command returns *0* for every Cloud SQL PostgreSQL database instance + + gcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name=="log_temp_files")|.value'`, + rationale: 'If all temporary files are not logged, it may be more difficult to identify potential performance issues that may be due to either poor application coding or deliberate resource starvation attempts.', + remediation: `**From Console:** + + 1. 
Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the PostgreSQL instance where the database flag needs to be enabled. + 3. Click *Edit*. + 4. Scroll down to the *Flags* section. + 5. To set a flag that has not been set on the instance before, click *Add item*, choose the flag *log_temp_files* from the drop-down menu and set the value as *0*. + 6. Click *Save*. + 7. Confirm the changes under *Flags* on the Overview page. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. Configure the *log_temp_files* database flag for every Cloud SQL PosgreSQL database instance using the below command. + + gcloud sql instances patch INSTANCE_NAME --database-flags log_temp_files='0' + + **Note:** + + This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. 
For flags that do not take a value, specify the flag name followed by an equals sign ("=").`, + references: [ + 'https://cloud.google.com/sql/docs/postgres/flags', + 'https://www.postgresql.org/docs/9.6/runtime-config-logging.html#GUC-LOG-TEMP-FILES', + ], + gql: `{ + querygcpProject{ + id + projectId + __typename + sqlInstances(filter:{ databaseVersion: {regexp: "/POSTGRES*/"}}){ + name + settings{ + databaseFlags{ + name + value + } + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '[*].sqlInstances', + isEmpty: true, + }, + { + path: '[*].sqlInstances', + array_all: { + path: '[*]', + and: [ + { + path: '[*].settings.databaseFlags', + isEmpty: false, + }, + { + path: '[*].settings.databaseFlags', + array_any: { + and: [ + { + path: '[*].name', + equal: 'log_temp_files', + }, + { + path: '[*].value', + equal: '0', + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.8.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.8.ts new file mode 100644 index 00000000..b0b91b2f --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.8.ts @@ -0,0 +1,107 @@ +// GCP CIS 1.2.0 Rule equivalent 6.2.16 +export default { + id: 'gcp-nist-800-53-rev4-3.8', + title: + "GCP NIST 3.8 PostgreSQL database instance 'log_min_duration_statement' database flag should be set to '-1' (disabled)", + description: `The log_min_duration_statement flag defines the minimum amount of execution time of a + statement in milliseconds where the total duration of the statement is logged. Ensure that + log_min_duration_statement is disabled, i.e., a value of - 1 is set.`, + audit: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the instance to open its *Instance Overview* page. + 3. Go to the *Configuration* card. + 4. 
Under* Database flags*, check that the value of *log_min_duration_statement* flag is set to *-1*. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. Use the below command for every Cloud SQL PostgreSQL database instance to verify the value of *log_min_duration_statement* is set to *-1*. + + gcloud sql instances describe INSTANCE_NAME --format=json | jq '.settings.databaseFlags[] | select(.name=="log_min_duration_statement")|.value'`, + rationale: 'Logging SQL statements may include sensitive information that should not be recorded in logs. This recommendation is applicable to PostgreSQL database instances.', + remediation: `**From Console:** + + 1. Go to the Cloud SQL Instances page in the Google Cloud Console by visiting https://console.cloud.google.com/sql/instances. + 2. Select the PostgreSQL instance where the database flag needs to be enabled. + 3. Click *Edit*. + 4. Scroll down to the *Flags* section. + 5. To set a flag that has not been set on the instance before, click *Add item*, choose the flag *log_min_duration_statement* from the drop-down menu and set a value of *-1*. + 6. Click *Save*. + 7. Confirm the changes under *Flags* on the Overview page. + + **From Command Line:** + + 1. List all Cloud SQL database instances using the following command: + + gcloud sql instances list + + 2. Configure the *log_min_duration_statement* flag for every Cloud SQL PosgreSQL database instance using the below command: + + gcloud sql instances patch INSTANCE_NAME --database-flags log_min_duration_statement=-1 + + **Note:** + + This command will overwrite all database flags previously set. To keep those and add new ones, include the values for all flags to be set on the instance; any flag not specifically included is set to its default value. 
For flags that do not take a value, specify the flag name followed by an equals sign ("=").`, + references: [ + 'https://cloud.google.com/sql/docs/postgres/flags', + 'https://www.postgresql.org/docs/current/runtime-config-logging.html#RUNTIME-CONFIG-LOGGING-WHAT', + ], + gql: `{ + querygcpProject{ + id + projectId + __typename + sqlInstances(filter:{ databaseVersion: {regexp: "/POSTGRES*/"}}){ + name + settings{ + databaseFlags{ + name + value + } + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'medium', + conditions: { + path: '@', + or: [ + { + path: '[*].sqlInstances', + isEmpty: true, + }, + { + path: '[*].sqlInstances', + array_all: { + path: '[*]', + and: [ + { + path: '[*].settings.databaseFlags', + isEmpty: false, + }, + { + path: '[*].settings.databaseFlags', + array_any: { + and: [ + { + path: '[*].name', + equal: 'log_min_duration_statement', + }, + { + path: '[*].value', + equal: '-1', + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.9.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.9.ts new file mode 100644 index 00000000..7c656b2e --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-3.9.ts @@ -0,0 +1,82 @@ +// GCP CIS 1.2.0 Rule equivalent 2.2 +export default { + id: 'gcp-nist-800-53-rev4-3.9', + title: 'GCP NIST 3.9 At least one project-level logging sink should be configured with an empty filter', + description: `It is recommended to create a sink that will export copies of all the log entries. This can + help aggregate logs from multiple projects and export them to a Security Information and + Event Management (SIEM).`, + audit: `**From Console:** + + 1. Go to *Logging/Exports* by visiting https://console.cloud.google.com/logs/exports. + 2. For every sink, click the 3-dot button for Menu options and select *View Filter*. + 3. Ensure there is at least one sink with an *empty* sink filter. + 4. 
Additionally, ensure that the resource configured as *Destination* exists. + + **From Command Line:** + + 1. Ensure that a sink with an *empty filter* exists. List the sinks for the project, folder or organization. If sinks are configured at a folder or organization level, they do not need to be configured for each project: + + gcloud logging sinks list --folder=FOLDER_ID | --organization=ORGANIZATION_ID | --project=PROJECT_ID + + The output should list at least one sink with an *empty filter*. + + 2. Additionally, ensure that the resource configured as *Destination* exists. + + See https://cloud.google.com/sdk/gcloud/reference/beta/logging/sinks/list for more information.`, + rationale: 'Log entries are held in Cloud Logging. To aggregate logs, export them to a SIEM. To keep them longer, it is recommended to set up a log sink. Exporting involves writing a filter that selects the log entries to export, and choosing a destination in Cloud Storage, BigQuery, or Cloud Pub/Sub. The filter and destination are held in an object called a sink. To ensure all log entries are exported to sinks, ensure that there is no filter configured for a sink. Sinks can be created in projects, organizations, folders, and billing accounts.', + remediation: `**From Console:** + + 1. Go to *Logging/Logs* by visiting https://console.cloud.google.com/logs/viewer. + 2. Click the down arrow symbol on *Filter Bar* at the rightmost corner and select + *Convert to Advanced Filter*. + 3. This step converts *Filter Bar* to *Advanced Filter Bar*. + 4. Clear any text from the *Advanced Filter* field. This ensures that the *log-filter* is + set to empty and captures all the logs. + 5. Click *Submit Filter* and the result should display all logs. + 6. Click *Create Sink*, which opens a menu on the right. + 7. Fill out the fields and click *Create Sink*. + + For more information, see https://cloud.google.com/logging/docs/export/configure_export_v2#dest-create. 
+ + **From Command Line:** + To create a sink to export all log entries in a Google Cloud Storage bucket: + + gcloud logging sinks create storage.googleapis.com/DESTINATION_BUCKET_NAME + + Sinks can be created for a folder or organization, which will include all projects. + + gcloud logging sinks create storage.googleapis.com/DESTINATION_BUCKET_NAME --include-children -- folder=FOLDER_ID | --organization=ORGANIZATION_ID + + **Note:** + + 1. A sink created by the command-line above will export logs in storage buckets. However, sinks can be configured to export logs into BigQuery, or Cloud Pub/Sub, or *Custom Destination*. + 2. While creating a sink, the sink option *--log-filter* is not used to ensure the sink exports all log entries. + 3. A sink can be created at a folder or organization level that collects the logs of all the projects underneath bypassing the option *--include-children* in the cloud command.`, + references: [ + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/logging/quotas', + 'https://cloud.google.com/logging/docs/export/', + 'https://cloud.google.com/logging/docs/export/using_exported_logs', + 'https://cloud.google.com/logging/docs/export/configure_export_v2', + 'https://cloud.google.com/logging/docs/export/aggregated_exports', + 'https://cloud.google.com/sdk/gcloud/reference/beta/logging/sinks/list', + ], + gql: `{ + querygcpProject { + id + __typename + logSinks { + filter + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'high', + conditions: { + path: '@.logSinks', + array_any: { + path: '[*].filter', + equal: '', + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-4.1.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-4.1.ts new file mode 100644 index 00000000..b8cbabcd --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-4.1.ts @@ -0,0 +1,86 @@ +// GCP CIS 1.2.0 Rule equivalent 4.7 +export default { + id: 
'gcp-nist-800-53-rev4-4.1', + title: + 'GCP NIST 4.1 Compute instance disks should be encrypted with customer-supplied encryption keys (CSEKs)', + description: `Customer-Supplied Encryption Keys (CSEK) are a feature in Google Cloud Storage and + Google Compute Engine. If you supply your own encryption keys, Google uses your key to + protect the Google-generated keys used to encrypt and decrypt your data. By default, + Google Compute Engine encrypts all data at rest. Compute Engine handles and manages + this encryption for you without any additional actions on your part. However, if you + wanted to control and manage this encryption yourself, you can provide your own + encryption keys.`, + audit: `**From Console:** + + 1. Go to Compute Engine *Disks* by visiting: + https://console.cloud.google.com/compute/disks. + 2. Click on the disk for your critical VMs to see its configuration details. + 3. Ensure that *Encryption type* is set to *Customer supplied*. + + **From Command Line:** + Ensure *diskEncryptionKey* property in the below command's response is not null, and + contains key *sha256* with corresponding value + + gcloud compute disks describe DISK_NAME --zone ZONE -- format="json(diskEncryptionKey,name)"`, + rationale: `By default, Google Compute Engine encrypts all data at rest. Compute Engine handles and manages this encryption for you without any additional actions on your part. However, if you wanted to control and manage this encryption yourself, you can provide your own encryption keys. + + If you provide your own encryption keys, Compute Engine uses your key to protect the Google-generated keys used to encrypt and decrypt your data. Only users who can provide the correct key can use resources protected by a customer-supplied encryption key. + + Google does not store your keys on its servers and cannot access your protected data unless you provide the key. 
This also means that if you forget or lose your key, there is no way for Google to recover the key or to recover any data encrypted with the lost key. + + At least business critical VMs should have VM disks encrypted with CSEK.`, + remediation: `**Note:** Currently there is no way to update the encryption of an existing disk. Therefore you should create a new disk with *Encryption* set to *Customer supplied*. + + **From Console:** + + 1. Go to Compute Engine *Disks* by visiting: https://console.cloud.google.com/compute/disks. + 2. Click *CREATE DISK*. + 3. Set *Encryption type* to *Customer supplied*. + 4. Provide the *Key* in the box. + 5. Select *Wrapped key*. + 6. Click *Create*. + + **From Command Line:** + In the gcloud compute tool, encrypt a disk using the --csek-key-file flag during instance + creation. If you are using an RSA-wrapped key, use the gcloud beta component: + + gcloud (beta) compute instances create INSTANCE_NAME --csek-key-file + + + To encrypt a standalone persistent disk: + + gcloud (beta) compute disks create DISK_NAME --csek-key-file * is present with filter text: + + (protoPayload.serviceName="cloudresourcemanager.googleapis.com") + AND (ProjectOwnership OR projectOwnerInvitee) + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + + **Ensure that the prescribed Alerting Policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the Policies section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. 
For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for your organization. + 5. Ensure that the appropriate notifications channels have been set up. + + **From Command Line: + Ensure that the prescribed log metric is present:** + + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with filter set to: + + (protoPayload.serviceName="cloudresourcemanager.googleapis.com") + AND (ProjectOwnership OR projectOwnerInvitee) + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains an least one alert policy where: + + + - conditions.conditionThreshold.filter is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + rationale: `Project ownership has the highest level of privileges on a project. To avoid misuse of project resources, the project ownership assignment/change actions mentioned above should be monitored and alerted to concerned recipients. 
+ + - Sending project ownership invites + - Acceptance/Rejection of project ownership invite by user + - Adding 'role\\Owner' to a user/service-account + - Removing a user/Service account from 'role\\Owner'`, + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + 3. Clear any text and add: + + (protoPayload.serviceName="cloudresourcemanager.googleapis.com") + AND (ProjectOwnership OR projectOwnerInvitee) + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + + 4. Click *Submit Filter*. The logs display based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to **1** (default) and the *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the advanced logs query. + 6. Click *Create Metric*. + + **Create the display prescribed Alert Policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the desired metric and select *Create alert from Metric*. A new page opens. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. 
For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notifications channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create a prescribed Log Metric: + + + - Use the command: gcloud beta logging metrics create + - Reference for Command Usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create prescribed Alert Policy + + - Use the command: gcloud alpha monitoring policies create + - Reference for Command Usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'high', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.2.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.2.ts new file mode 100644 index 00000000..0fef551b --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.2.ts @@ -0,0 +1,139 @@ +const filterPatternRegex = + 
/\s*protoPayload.methodName\s*=\s*"SetIamPolicy"\s*AND\s*protoPayload.serviceData.policyDelta.auditConfigDeltas:*\s*/ + +// GCP CIS 1.2.0 Rule equivalent 2.5 +export default { + id: 'gcp-nist-800-53-rev4-5.2', + title: + 'GCP NIST 5.2 Logging metric filter and alert for audit configuration changes should be configured', + description: `Google Cloud Platform (GCP) services write audit log entries to the Admin Activity and Data + Access logs to help answer the questions of, "who did what, where, and when?" within GCP + projects. + + Cloud audit logging records information includes the identity of the API caller, the time of + the API call, the source IP address of the API caller, the request parameters, and the + response elements returned by GCP services. Cloud audit logging provides a history of GCP + API calls for an account, including API calls made via the console, SDKs, command-line + tools, and other GCP services.`, + audit: `**From Console: + Ensure the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with the filter text: + + protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:* + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to Alerting by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the Policies section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of 0 for greater than zero(0) seconds*, means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that appropriate notifications channels have been set up. 
+ + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:* + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\"logging.googleapis.com/user/\"* + - AND *enabled* is set to *true*`, + rationale: `Admin activity and data access logs produced by cloud audit logging enable security analysis, resource change tracking, and compliance auditing. + + Configuring the metric filter and alerts for audit configuration changes ensures the recommended state of audit configuration is maintained so that all activities in the project are audit-able at any point in time.`, + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + 3. Clear any text and add: + + protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:* + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. 
This will ensure that the log metric counts the number of log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create a prescribed Alert Policy:** + + 1. Identify the new metric the user just created, under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page opens. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notifications channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create a prescribed Log Metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + + Create prescribed Alert Policy + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + `https://cloud.google.com/logging/docs/logs-based-metrics/`, + `https://cloud.google.com/monitoring/custom-metrics/`, + `https://cloud.google.com/monitoring/alerts/`, + `https://cloud.google.com/logging/docs/reference/tools/gcloud-logging`, + `https://cloud.google.com/logging/docs/audit/configure-data-access#getiampolicy-setiampolicy`, + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + 
conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.3.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.3.ts new file mode 100644 index 00000000..650cd8b1 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.3.ts @@ -0,0 +1,145 @@ +const filterPatternRegex = + /\s*resource.type\s*=\s*"iam_role"\s*AND\s*protoPayload.methodName\s*=\s*"google.iam.admin.v1.CreateRole"\s*OR\s*protoPayload.methodName\s*=\s*"google.iam.admin.v1.DeleteRole"\s*OR\s*protoPayload.methodName\s*=\s*"google.iam.admin.v1.UpdateRole"\s*/ + +// GCP CIS 1.2.0 Rule equivalent 2.6 +export default { + id: 'gcp-nist-800-53-rev4-5.3', + title: + 'GCP NIST 5.3 Logging metric filter and alert for Custom Role changes should be configured', + description: `It is recommended that a metric filter and alarm be established for changes to Identity and + Access Management (IAM) role creation, deletion and updating activities.`, + audit: `**From Console: + Ensure that the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with filter text: + + resource.type="iam_role" + AND protoPayload.methodName = "google.iam.admin.v1.CreateRole" + OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" + OR protoPayload.methodName="google.iam.admin.v1.UpdateRole" + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to Alerting by visiting https://console.cloud.google.com/monitoring/alerting. + + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. 
Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that the appropriate notifications channels have been set up. + + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + resource.type="iam_role" AND protoPayload.methodName = "google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" OR protoPayload.methodName="google.iam.admin.v1.UpdateRole" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*.`, + rationale: `Google Cloud IAM provides predefined roles that give granular access to specific Google Cloud Platform resources and prevent unwanted access to other resources. However, to cater to organization-specific needs, Cloud IAM also provides the ability to create custom roles. Project owners and administrators with the Organization Role Administrator role or the IAM Role Administrator role can create custom roles. 
Monitoring role creation, deletion and updating activities will help in identifying any over-privileged role at early stages.`, + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + + 3. Clear any text and add: + + resource.type="iam_role" + AND protoPayload.methodName = "google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" + OR protoPayload.methodName="google.iam.admin.v1.UpdateRole" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the + user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* + (default) and *Type* to *Counter*. This ensures that the log metric counts the number of + log entries matching the advanced logs query. + 6. Click *Create Metric*. + + **Create a prescribed Alert Policy:** + + 1. Identify the new metric that was just created under the section *User-defined + Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the metric and select *Create alert + from Metric*. A new page displays. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold + and configuration that makes sense for the user's organization. For example, a + threshold of zero(0) for the most recent value ensures that a notification is triggered + for every owner change in the project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. 
+ + **From Command Line:** + Create the prescribed Log Metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed Alert Policy: + + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/iam/docs/understanding-custom-roles', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.4.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.4.ts new file mode 100644 index 00000000..6ad34330 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.4.ts @@ -0,0 +1,137 @@ +const filterPatternRegex = + /\s*resource.type\s*=\s*"gce_firewall_rule"\s*AND\s*protoPayload.methodName\s*=\s*"v1.compute.firewalls.patch"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.firewalls.insert"\s*/ + +// GCP CIS 1.2.0 Rule equivalent 2.7 +export default { + id: 'gcp-nist-800-53-rev4-5.4', + title: + 'GCP NIST 5.4 Logging metric filter and alert for network firewall rule changes should be configured', + description: `It is recommended that a 
metric filter and alarm be established for Virtual Private Cloud + (VPC) Network Firewall rule changes.`, + audit: `**From Console: + Ensure that the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure at least one metric ** is present with this filter text: + + resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert" + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that appropriate notification channels have been set up. + + + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. 
Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true* + `, + rationale: 'Monitoring for Create or Update Firewall rule events gives insight to network access changes and may reduce the time it takes to detect suspicious activity.', + remediation: ` + **From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + 3. Clear any text and add: + + resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert" + + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed Alert Policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select **Create alert from Metric**. A new page displays. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value ensures that a notification is triggered for every owner change in the project: + + Set "Aggregator" to "Count" + + Set "Configuration": + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. 
Configure the desired notifications channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create the prescribed Log Metric + + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed alert policy: + + + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create + `, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/vpc/docs/firewalls', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.5.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.5.ts new file mode 100644 index 00000000..4a52eb0e --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.5.ts @@ -0,0 +1,137 @@ +const filterPatternRegex = + /\s*resource.type\s*=\s*"gce_route"\s*AND\s*protoPayload.methodName\s*=\s*"beta.compute.routes.patch"\s*OR\s*protoPayload.methodName\s*=\s*"beta.compute.routes.insert"\s*/ + +// GCP CIS 1.2.0 Rule equivalent 2.8 +export default { + id: 'gcp-nist-800-53-rev4-5.5', + title: + 'GCP NIST 5.5 Logging metric filter and alert for 
network route changes should be configured', + description: `It is recommended that a metric filter and alarm be established for Virtual Private Cloud + (VPC) network route changes.`, + audit: `**From Console: + Ensure that the prescribed Log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with the filter text: + + resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert" + + **Ensure the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting: https://console.cloud.google.com/monitoring/alerting. + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of 0 for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alert thresholds make sense for the user's organization. + 5. Ensure that the appropriate notification channels have been set up. + + **From Command Line: + Ensure the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. 
Ensure that the output contains an least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + rationale: `Google Cloud Platform (GCP) routes define the paths network traffic takes from a VM instance to another destination. The other destination can be inside the organization VPC network (such as another VM) or outside of it. Every route consists of a destination and a next hop. Traffic whose destination IP is within the destination range is sent to the next hop for delivery. + + Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.`, + remediation: `**From Console: + Create the prescribed Log Metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter* + + 3. Clear any text and add: + + resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed alert policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page displays. + 3. Fill out the alert policy configuration and click *Save*. 
Choose the alerting threshold + and configuration that makes sense for the user's organization. For example, a + threshold of zero(0) for the most recent value ensures that a notification is triggered + for every owner change in the project: + + Set "Aggregator" to "Count" + + Set "Configuration": + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create the prescribed Log Metric: + + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed the alert policy: + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create + `, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/storage/docs/access-control/iam', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.6.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.6.ts new file mode 100644 index 00000000..bd145bbd --- /dev/null +++ 
b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.6.ts @@ -0,0 +1,153 @@ +const filterPatternRegex = + /\s*resource.type\s*=\s*gce_network\s*AND\s*protoPayload.methodName\s*=\s*"beta.compute.networks.insert"\s*OR\s*protoPayload.methodName\s*=\s*"beta.compute.networks.patch"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.networks.delete"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.networks.removePeering"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.networks.addPeering"\s*/ + +// GCP CIS 1.2.0 Rule equivalent 2.9 +export default { + id: 'gcp-nist-800-53-rev4-5.6', + title: + 'GCP NIST 5.6 Logging metric filter and alert for network changes should be configured', + description: `It is recommended that a metric filter and alarm be established for Virtual Private Cloud + (VPC) network changes.`, + audit: `**From Console: + Ensure the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure at least one metric ** is present with filter text: + + resource.type=gce_network + AND protoPayload.methodName="beta.compute.networks.insert" + OR protoPayload.methodName="beta.compute.networks.patch" + OR protoPayload.methodName="v1.compute.networks.delete" + OR protoPayload.methodName="v1.compute.networks.removePeering" + OR protoPayload.methodName="v1.compute.networks.addPeering" + + **Ensure the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + + + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of 0 for greater than 0 seconds* means that the alert will trigger for any new owner change. 
Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that appropriate notification channels have been set up. + + **From Command Line: + Ensure the log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with filter set to: + + resource.type=gce_network + AND protoPayload.methodName="beta.compute.networks.insert" + OR protoPayload.methodName="beta.compute.networks.patch" + OR protoPayload.methodName="v1.compute.networks.delete" + OR protoPayload.methodName="v1.compute.networks.removePeering" + OR protoPayload.methodName="v1.compute.networks.addPeering" + + 3. Note the value of the property *metricDescriptor.type for* the identified metric, in + the format l*ogging.googleapis.com/user/*. + + **Ensure the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + rationale: `It is possible to have more than one VPC within a project. In addition, it is also possible to create a peer connection between two VPCs enabling network traffic to route between VPCs. + + Monitoring changes to a VPC will help ensure VPC traffic flow is not getting impacted.`, + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + + 3. 
Clear any text and add: + + resource.type=gce_network + AND protoPayload.methodName="beta.compute.networks.insert" + OR protoPayload.methodName="beta.compute.networks.patch" + OR protoPayload.methodName="v1.compute.networks.delete" + OR protoPayload.methodName="v1.compute.networks.removePeering" + OR protoPayload.methodName="v1.compute.networks.addPeering" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* + (default) and *Type* to *Counter*. This ensures that the log metric counts the number of + log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed alert policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page appears. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of 0 for the most recent value will ensure that a notification is triggered for every owner change in the project: + + Set "Aggregator" to "Count" + + Set "Configuration": + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. 
+ + **From Command Line:** + Create the prescribed Log Metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed alert policy: + + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/vpc/docs/overview', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.7.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.7.ts new file mode 100644 index 00000000..b691e302 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.7.ts @@ -0,0 +1,143 @@ +const filterPatternRegex = + /\s*protoPayload.methodName\s*=\s*"cloudsql.instances.update"\s*/ + +// GCP CIS 1.2.0 Rule equivalent 2.11 +export default { + id: 'gcp-nist-800-53-rev4-5.7', + title: + 'GCP NIST 5.7 Logging metric filter and alert for SQL instance configuration changes should be configured', + description: `It is recommended that a metric filter and alarm be established for SQL instance + configuration changes.`, + audit: `**From Console: + Ensure the 
prescribed log metric is present:** + + 1. For each project that contains Cloud SQL instances, go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with the filter text: + + protoPayload.methodName="cloudsql.instances.update" + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that the appropriate notifications channels have been set up. + + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + protoPayload.methodName="cloudsql.instances.update" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. 
Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + rationale: `Monitoring changes to SQL instance configuration changes may reduce the time needed to detect and correct misconfigurations done on the SQL server. + + Below are a few of the configurable options which may the impact security posture of an SQL instance: + + - Enable auto backups and high availability: Misconfiguration may adversely impact business continuity, disaster recovery, and high availability + - Authorize networks: Misconfiguration may increase exposure to untrusted networks`, + remediation: `**From Console: + Create the prescribed Log Metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + + 3. Clear any text and add: + + protoPayload.methodName="cloudsql.instances.update" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed alert policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page appears. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. 
For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the user's project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create the prescribed log metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed alert policy: + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create + `, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/storage/docs/overview', + 'https://cloud.google.com/sql/docs/', + 'https://cloud.google.com/sql/docs/mysql/', + 'https://cloud.google.com/sql/docs/postgres/', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + name + filter + metricDescriptor { + type + } + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.8.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.8.ts new file mode 100644 index 
00000000..fbb9c874 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-5.8.ts @@ -0,0 +1,118 @@ +// GCP CIS 1.2.0 Rule equivalent 2.3 +export default { + id: 'gcp-nist-800-53-rev4-5.8', + title: + 'GCP NIST 5.8 Logging storage bucket retention policies and Bucket Lock should be configured', + description: `Enabling retention policies on log buckets will protect logs stored in cloud storage buckets + from being overwritten or accidentally deleted. It is recommended to set up retention + policies and configure Bucket Lock on all storage buckets that are used as log sinks.`, + audit: `**From Console:** + + 1. Open the Cloud Storage browser in the Google Cloud Console by visiting https://console.cloud.google.com/storage/browser. + 2. In the Column display options menu, make sure *Retention policy* is checked. + 3. In the list of buckets, the retention period of each bucket is found in the *Retention policy* column. If the retention policy is locked, an image of a lock appears directly to the left of the retention period. + + + **From Command Line:** + + 1. To list all sinks destined to storage buckets: + + gcloud logging sinks list --folder=FOLDER_ID | --organization=ORGANIZATION_ID | --project=PROJECT_ID + + 2. For every storage bucket listed above, verify that retention policies and Bucket Lock + are enabled: + + gsutil retention get gs://BUCKET_NAME + + For more information, see https://cloud.google.com/storage/docs/using-bucket-lock#view-policy.`, + rationale: `Logs can be exported by creating one or more sinks that include a log filter and a destination. As Cloud Logging receives new log entries, they are compared against each sink. If a log entry matches a sink's filter, then a copy of the log entry is written to the destination. + + Sinks can be configured to export logs in storage buckets. 
It is recommended to configure a data retention policy for these cloud storage buckets and to lock the data retention policy; thus permanently preventing the policy from being reduced or removed. This way, if the system is ever compromised by an attacker or a malicious insider who wants to cover their tracks, the activity logs are definitely preserved for forensics and security investigations.`, + remediation: `**From Console:** + + 1. If sinks are **not** configured, first follow the instructions in the recommendation: *Ensure that sinks are configured for all Log entries.* + 2. For each storage bucket configured as a sink, go to the Cloud Storage browser at https://console.cloud.google.com/storage/browser/. + 3. Select the Bucket Lock tab near the top of the page. + 4. In the Retention policy entry, click the Add Duration link. The *Set a retention policy* dialog box appears. + 5. Enter the desired length of time for the retention period and click *Save policy*. + 6. Set the *Lock status* for this retention policy to *Locked*. + + **From Command Line:** + + 1. To list all sinks destined to storage buckets: + + gcloud logging sinks list --folder=FOLDER_ID | --organization=ORGANIZATION_ID | --project=PROJECT_ID + + 2. 
For each storage bucket listed above, set a retention policy and lock it: + + gsutil retention set [TIME_DURATION] gs://[BUCKET_NAME] + gsutil retention lock gs://[BUCKET_NAME] + + For more information, visit https://cloud.google.com/storage/docs/using-bucket-lock#set-policy.`, + references: [ + 'https://cloud.google.com/storage/docs/bucket-lock', + 'https://cloud.google.com/storage/docs/using-bucket-lock', + ], + gql: `{ + querygcpProject { + id + __typename + logSinks { + destination + } + logBuckets { + name + retentionDays + locked + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'unknown', + conditions: { + jq: ` { + "id": .id, + "logSinks" : [ + { + "destination" : + .logSinks[].destination + | select(startswith("storage.googleapis.com/")) + | sub("storage.googleapis.com/"; "") , + "logBuckets" :.logBuckets + } + ] | map({ + "destination" : .destination, + "logBuckets" : [. as $parent | .logBuckets[] | select($parent.destination == .name)] + }) + }`, + path: '@', + and: [ + { + path: '[*].logSinks', + array_all: { + and: [ + { + path: '[*].logBuckets', + isEmpty: false, + }, + { + path: '[*].logBuckets', + array_any: { + and: [ + { + path: '[*].retentionDays', + greaterThan: 0, + }, + { + path: '[*].locked', + equal: true, + }, + ], + }, + }, + ], + }, + }, + ], + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.1.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.1.ts new file mode 100644 index 00000000..21deb331 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.1.ts @@ -0,0 +1,82 @@ +// GCP CIS 1.2.0 Rule equivalent 3.1 +export default { + id: 'gcp-nist-800-53-rev4-6.1', + title: + 'GCP NIST 6.1 The default network for a project should be deleted', + description: + 'To prevent use of default network, a project should not have a default network.', + audit: `**From Console:** + + 1. Go to the *VPC networks* page by visiting: https://console.cloud.google.com/networking/networks/list. + 2. 
Ensure that a network with the name *default* is not present. + + **From Command Line:** + + 1. Set the project name in the Google Cloud Shell: + + gcloud config set project PROJECT_ID + + 2. List the networks configured in that project: + + gcloud compute networks list + + It should not list *default* as one of the available networks in that project.`, + rationale: `The *default* network has a preconfigured network configuration and automatically generates the following insecure firewall rules: + + - default-allow-internal: Allows ingress connections for all protocols and ports among instances in the network. + - default-allow-ssh: Allows ingress connections on TCP port 22(SSH) from any source to any instance in the network. + - default-allow-rdp: Allows ingress connections on TCP port 3389(RDP) from any source to any instance in the network. + - default-allow-icmp: Allows ingress ICMP traffic from any source to any instance in the network. + + These automatically created firewall rules do not get audit logged and cannot be configured + to enable firewall rule logging. + + Furthermore, the default network is an auto mode network, which means that its subnets + use the same predefined range of IP addresses, and as a result, it's not possible to use Cloud + VPN or VPC Network Peering with the default network. + + Based on organization security and networking requirements, the organization should + create a new network and delete the *default* network.`, + remediation: `**From Console:** + + 1. Go to the *VPC networks* page by visiting: https://console.cloud.google.com/networking/networks/list. + 2. Click the network named *default*. + 3. On the network detail page, click *EDIT*. + 4. Click *DELETE VPC NETWORK*. + 5. If needed, create a new network to replace the default network. + + **From Command Line:** + For each Google Cloud Platform project, + + 1. Delete the default network: + + gcloud compute networks delete default + + 2. 
If needed, create a new network to replace it: + + gcloud compute networks create NETWORK_NAME + + **Prevention:** + The user can prevent the default network and its insecure default firewall rules from being created by setting up an Organization Policy to Skip default network creation at https://console.cloud.google.com/iam-admin/orgpolicies/compute-skipDefaultNetworkCreation.`, + references: [ + 'https://cloud.google.com/compute/docs/networking#firewall_rules', + 'https://cloud.google.com/compute/docs/reference/latest/networks/insert', + 'https://cloud.google.com/compute/docs/reference/latest/networks/delete', + 'https://cloud.google.com/vpc/docs/firewall-rules-logging', + 'https://cloud.google.com/vpc/docs/vpc#default-network', + 'https://cloud.google.com/sdk/gcloud/reference/compute/networks/delete', + ], + gql: `{ + querygcpNetwork { + id + __typename + name + } + }`, + resource: 'querygcpNetwork[*]', + severity: 'medium', + conditions: { + path: '@.name', + notEqual: 'default', + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.2.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.2.ts new file mode 100644 index 00000000..c2745e28 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.2.ts @@ -0,0 +1,130 @@ +// GCP CIS 1.2.0 Rule equivalent 3.6 +export default { + id: 'gcp-nist-800-53-rev4-6.2', + title: 'GCP NIST 6.2 Network firewall rules should not permit ingress from 0.0.0.0/0 to port 22 (SSH)', + description: `GCP Firewall Rules are specific to a VPC Network. Each rule either allows or denies + traffic when its conditions are met. Its conditions allow the user to specify the type of + traffic, such as ports and protocols, and the source or destination of the traffic, including IP + addresses, subnets, and instances. + + Firewall rules are defined at the VPC network level and are specific to the network in which + they are defined. The rules themselves cannot be shared among networks. 
Firewall rules + only support IPv4 traffic. When specifying a source for an ingress rule or a destination for + an egress rule by address, only an IPv4 address or IPv4 block in CIDR notation can be + used. Generic (0.0.0.0/0) incoming traffic from the internet to VPC or VM instance using + SSH on Port 22 can be avoided.`, + audit: `**From the Console:** + + 1. Go to *VPC network*. + 2. Go to the *Firewall Rules*. + 3. Ensure that *Port* is not equal to *22* and *Action* is not set to *Allow*. + 4. Ensure *IP Ranges* is not equal to *0.0.0.0/0* under *Source filters*. + + **From Command Line:** + + gcloud compute firewall-rules list --format=table'(name,direction,sourceRanges,allowed)' + + Ensure that there is no rule matching the below criteria: + + - *SOURCE_RANGES* is 0.0.0.0/0 + - AND *DIRECTION* is *INGRESS* + - AND IPProtocol is *tcp* or ALL + - AND *PORTS* is set to *22* or *range* *containing* *22* or *Null* *(not set)* + + Note: + + - When ALL TCP ports are allowed in a rule, PORT does not have any value set (*NULL*) + - When ALL Protocols are allowed in a rule, PORT does not have any value set (*NULL*)`, + rationale: 'GCP *Firewall Rules* within a *VPC Network* apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication). For an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. This route simply defines the path to the Internet, to avoid the most general *(0.0.0.0/0)* destination *IP Range* specified from the Internet through SSH with the default *Port 22*. Generic access from the Internet to a specific IP Range needs to be restricted.', + remediation: `**From the Console:** + + 1. Go to *VPC Network*. + 2. Go to the *Firewall Rules*. + 3. 
Click the *Firewall Rule* you want to modify. + 4. Click *Edit*. + 5. Modify *Source IP ranges* to specific *IP*. + 6. Click *Save*. + + **From Command Line:** + 1. Update the Firewall rule with the new *SOURCE_RANGE* from the below command: + + gcloud compute firewall-rules update FirewallName --allow=[PROTOCOL[:PORT[- PORT]],...] --source-ranges=[CIDR_RANGE,...] + `, + references: ['https://cloud.google.com/vpc/docs/firewalls#blockedtraffic'], + gql: `{ + querygcpFirewall(filter: {direction:{eq: "INGRESS"}}){ + id + name + __typename + sourceRanges + direction + allowed{ + ipProtocol + ports + } + } + }`, + resource: 'querygcpFirewall[*]', + severity: 'high', + conditions: { + not: { + path: '@', + and: [ + { + path: '[*].sourceRanges', + jq: 'map({"range": .})', + array_any: { + path: '[*].range', + in: ['0.0.0.0/0', '::/0'], + }, + }, + { + path: '[*].direction', + in: ['INGRESS'], + }, + { + path: '@.allowed', + jq: `[.[] + | { "ipProtocol": .ipProtocol} + + (if .ports | length > 0 then .ports[] else [""][] end | split("-") | {fromPort: (.[0]), toPort: (.[1] // .[0])}) ]`, + array_any: { + and: [ + { + path: '[*].ipProtocol', + in: ['tcp', 'all'], + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + equal: null, + }, + { + path: '[*].toPort', + equal: null, + }, + ], + }, + { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 22, + }, + { + path: '[*].toPort', + greaterThanInclusive: 22, + }, + ], + }, + ], + }, + ], + }, + }, + ], + }, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.3.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.3.ts new file mode 100644 index 00000000..a7776b7d --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.3.ts @@ -0,0 +1,129 @@ +// GCP CIS 1.2.0 Rule equivalent 3.7 +export default { + id: 'gcp-nist-800-53-rev4-6.3', + title: 'GCP NIST 6.3 Network firewall rules should not permit ingress from 0.0.0.0/0 to port 3389 (RDP)', + description: `GCP Firewall Rules 
are specific to a VPC Network. Each rule either allows or denies + traffic when its conditions are met. Its conditions allow users to specify the type of traffic, + such as ports and protocols, and the source or destination of the traffic, including IP + addresses, subnets, and instances. + + Firewall rules are defined at the VPC network level and are specific to the network in which + they are defined. The rules themselves cannot be shared among networks. Firewall rules + only support IPv4 traffic. When specifying a source for an ingress rule or a destination for + an egress rule by address, an IPv4 address or IPv4 block in CIDR notation can be used. + Generic (0.0.0.0/0) incoming traffic from the Internet to a VPC or VM instance using RDP + on Port 3389 can be avoided.`, + audit: `**From the Console:** + + 1. Go to *VPC network*. + 2. Go to the *Firewall Rules*. + 3. Ensure *Port* is not equal to *3389* and *Action* is not *Allow*. + 4. Ensure *IP Ranges* is not equal to *0.0.0.0/0* under *Source filters*. + + **From Command Line:** + + gcloud compute firewall-rules list -- format=table'(name,direction,sourceRanges,allowed.ports)' + + Ensure that there is no rule matching the below criteria: + + - *SOURCE_RANGES* is *0.0.0.0/0* + - AND *DIRECTION* is *INGRESS* + - AND IPProtocol is *TCP* or *ALL* + - AND *PORTS* is set to *3389* or *range containing 3389* or *Null (not set)* + + Note: + + - When ALL TCP ports are allowed in a rule, PORT does not have any value set (*NULL*) + - When ALL Protocols are allowed in a rule, PORT does not have any value set (*NULL*)`, + rationale: 'GCP *Firewall Rule*s within a *VPC Network*. These rules apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication). 
For an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. This route simply defines the path to the Internet, to avoid the most general (0.0.0.0/0) destination IP Range specified from the Internet through RDP with the default *Port 3389*. Generic access from the Internet to a specific IP Range should be restricted.', + remediation: `**From the Console:** + + 1. Go to *VPC Network*. + 2. Go to the *Firewall Rules*. + 3. Click the *Firewall Rule* to be modified. + 4. Click *Edit*. + 5. Modify *Source IP ranges* to specific *IP*. + 6. Click *Save*. + + **From Command Line:** + 1. Update RDP Firewall rule with new *SOURCE_RANGE* from the below command: + + gcloud compute firewall-rules update FirewallName --allow=[PROTOCOL[:PORT[-PORT]],...] --source-ranges=[CIDR_RANGE,...]`, + references: ['https://cloud.google.com/vpc/docs/firewalls#blockedtraffic'], + gql: `{ + querygcpFirewall(filter: {direction:{eq: "INGRESS"}}){ + id + name + __typename + sourceRanges + direction + allowed{ + ipProtocol + ports + } + } + }`, + resource: 'querygcpFirewall[*]', + severity: 'high', + conditions: { + not: { + path: '@', + and: [ + { + path: '[*].sourceRanges', + jq: 'map({"range": .})', + array_any: { + path: '[*].range', + in: ['0.0.0.0/0', '::/0'], + }, + }, + { + path: '[*].direction', + in: ['INGRESS'], + }, + { + path: '@.allowed', + jq: `[.[] + | { "ipProtocol": .ipProtocol} + + (if .ports | length > 0 then .ports[] else [""][] end | split("-") | {fromPort: (.[0]), toPort: (.[1] // .[0])}) ]`, + array_any: { + and: [ + { + path: '[*].ipProtocol', + in: ['tcp', 'all'], + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + equal: null, + }, + { + path: '[*].toPort', + equal: null, + }, + ], + }, + { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 3389, + }, + { + path: '[*].toPort', + greaterThanInclusive: 3389, + }, + ], + }, + ], + }, + ], + }, + }, + ], + }, + }, +} 
diff --git a/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.4.ts b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.4.ts new file mode 100644 index 00000000..d8ce2859 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/gcp-nist-800-53-rev4-6.4.ts @@ -0,0 +1,200 @@ +const targetHttpsAndSslProxyConditions = { + and: [ + { + path: '@.sslPolicy', + isEmpty: false, + }, + { + path: '@.sslPolicy', + array_all: { + or: [ + { + and: [ + { + path: '[*].profile', + equal: 'MODERN', + }, + { + path: '[*].minTlsVersion', + equal: 'TLS_1_2', + }, + ], + }, + { + and: [ + { + path: '[*].profile', + equal: 'RESTRICTED', + }, + ], + }, + { + and: [ + { + path: '[*].profile', + equal: 'CUSTOM', + }, + { + path: '[*].enabledFeatures', + array_all: { + path: '[*]', + notIn: [ + 'TLS_RSA_WITH_AES_128_GCM_SHA256', + 'TLS_RSA_WITH_AES_256_GCM_SHA384', + 'TLS_RSA_WITH_AES_128_CBC_SHA', + 'TLS_RSA_WITH_AES_256_CBC_SHA', + 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + ], + }, + }, + ], + }, + ], + }, + } + ] +} + +// GCP CIS 1.2.0 Rule equivalent 3.9 +export default { + id: 'gcp-nist-800-53-rev4-6.4', + title: + 'GCP NIST 6.4 Load balancer HTTPS or SSL proxy SSL policies should not have weak cipher suites', + description: `Secure Sockets Layer (SSL) policies determine what port Transport Layer Security (TLS) + features clients are permitted to use when connecting to load balancers. To prevent usage + of insecure features, SSL policies should use (a) at least TLS 1.2 with the MODERN profile; + or (b) the RESTRICTED profile, because it effectively requires clients to use TLS 1.2 + regardless of the chosen minimum TLS version; or (3) a CUSTOM profile that does not + support any of the following features: + + TLS_RSA_WITH_AES_128_GCM_SHA256 + + TLS_RSA_WITH_AES_256_GCM_SHA384 + + TLS_RSA_WITH_AES_128_CBC_SHA + + TLS_RSA_WITH_AES_256_CBC_SHA + + TLS_RSA_WITH_3DES_EDE_CBC_SHA`, + audit: `**From Console:** + + 1. 
See all load balancers by visiting https://console.cloud.google.com/net-services/loadbalancing/loadBalancers/list. + 2. For each load balancer for *SSL (Proxy)* or *HTTPS*, click on its name to go the *Load balancer details* page. + 3. Ensure that each target proxy entry in the *Frontend* table has an *SSL Policy* configured. + 4. Click on each SSL policy to go to its *SSL policy details* page. + 5. Ensure that the SSL policy satisfies one of the following conditions: + + + - has a *Min TLS* set to *TLS 1.2* and *Profile* set to *Modern* profile, or + - has *Profile* set to *Restricted*. Note that a Restricted profile effectively requires + clients to use TLS 1.2 regardless of the chosen minimum TLS version, or + - has *Profile* set to *Custom* and the following features are all disabled: + + TLS_RSA_WITH_AES_128_GCM_SHA256 + TLS_RSA_WITH_AES_256_GCM_SHA384 + TLS_RSA_WITH_AES_128_CBC_SHA + TLS_RSA_WITH_AES_256_CBC_SHA + TLS_RSA_WITH_3DES_EDE_CBC_SHA + + **From Command Line:** + + 1. List all TargetHttpsProxies and TargetSslProxies. + + gcloud compute target-https-proxies list + gcloud compute target-ssl-proxies list + + 2. For each target proxy, list its properties: + + gcloud compute target-https-proxies describe TARGET_HTTPS_PROXY_NAME + gcloud compute target-ssl-proxies describe TARGET_SSL_PROXY_NAME + + 3. Ensure that the *sslPolicy* field is present and identifies the name of the SSL policy: + + sslPolicy: https://www.googleapis.com/compute/v1/projects/PROJECT_ID/global/sslPolicies/SSL_POLICY_NAME + + If the *sslPolicy* field is missing from the configuration, it means that the GCP default policy is used, which is insecure. + + 4. Describe the SSL policy: + + gcloud compute ssl-policies describe SSL_POLICY_NAME + + 5. 
Ensure that the policy satisfies one of the following conditions: + + - has *Profile* set to *Modern* and *minTlsVersion* set to *TLS_1_2*, or + - has *Profile* set to *Restricted*, or + - has *Profile* set to *Custom* and *enabledFeatures* does not contain any of the following values: + + TLS_RSA_WITH_AES_128_GCM_SHA256 + TLS_RSA_WITH_AES_256_GCM_SHA384 + TLS_RSA_WITH_AES_128_CBC_SHA + TLS_RSA_WITH_AES_256_CBC_SHA + TLS_RSA_WITH_3DES_EDE_CBC_SHA`, + rationale: 'Load balancers are used to efficiently distribute traffic across multiple servers. Both SSL proxy and HTTPS load balancers are external load balancers, meaning they distribute traffic from the Internet to a GCP network. GCP customers can configure load balancer SSL policies with a minimum TLS version (1.0, 1.1, or 1.2) that clients can use to establish a connection, along with a profile (Compatible, Modern, Restricted, or Custom) that specifies permissible cipher suites. To comply with users using outdated protocols, GCP load balancers can be configured to permit insecure cipher suites. In fact, the GCP default SSL policy uses a minimum TLS version of 1.0 and a Compatible profile, which allows the widest range of insecure cipher suites. As a result, it is easy for customers to configure a load balancer without even knowing that they are permitting outdated cipher suites.', + remediation: `**From Console:** + If the TargetSSLProxy or TargetHttpsProxy does not have an SSL policy configured, create a new SSL policy. Otherwise, modify the existing insecure policy. + + 1. Navigate to the *SSL Policies* page by visiting: https://console.cloud.google.com/net-security/sslpolicies + 2. Click on the name of the insecure policy to go to its *SSL policy details* page. + 3. Click *EDIT*. + 4. Set *Minimum TLS version* to *TLS 1.2*. + 5. Set *Profile* to *Modern* or *Restricted*. + 6. 
Alternatively, if the user selects the profile *Custom*
`Compute Engine instance cannot forward a packet unless the source IP address of the + packet matches the IP address of the instance. Similarly, GCP won't deliver a packet whose + destination IP address is different than the IP address of the instance receiving the packet. + However, both capabilities are required if you want to use instances to help route packets. + + Forwarding of data packets should be disabled to prevent data loss or information + disclosure.`, + audit: `**From Console:** + + 1. Go to the *VM Instances* page by visiting: https://pantheon.corp.google.com/compute/instances. + 2. For every instance, click on its name to go to the *VM instance details* page. + 3. Under the Network interfaces section, ensure that *IP forwarding* is set to *Off* for every network interface. + + **From Command Line:** + + 1. List all instances: + + gcloud compute instances list --format='table(name,canIpForward)' + + 2. Ensure that *CAN_IP_FORWARD* column in the output of above command does not contain *True* for any VM instance. + + **Exception:** + Instances created by GKE should be excluded because they need to have IP forwarding enabled and cannot be changed. Instances created by GKE have names that start with "gke-".`, + rationale: 'Compute Engine instance cannot forward a packet unless the source IP address of the packet matches the IP address of the instance. Similarly, GCP won\'t deliver a packet whose destination IP address is different than the IP address of the instance receiving the packet. However, both capabilities are required if you want to use instances to help route packets. To enable this source and destination IP check, disable the canIpForward field, which allows an instance to send and receive packets with non-matching destination or source IPs.', + remediation: `**Note:** You only edit the *canIpForward* setting at instance creation time. Therefore, you need to + delete the instance and create a new one where *canIpForward* is set to *false*. 
+ + **From Console:** + + 1. Go to the *VM Instances* page by visiting: https://pantheon.corp.google.com/compute/instances. + 2. Select the *VM Instance* you want to remediate. + 3. Click the *Delete* button. + 4. On the 'VM Instances' page, click 'CREATE INSTANCE'. + 5. Create a new instance with the desired configuration. By default, the instance is configured to not allow IP forwarding. + + **From Command Line:** + + 1. Delete the instance: + + gcloud compute instances delete INSTANCE_NAME + + 2. Create a new instance to replace it, with *IP forwarding* set to *Off* + + gcloud compute instances create`, + references: ['https://cloud.google.com/vpc/docs/using-routes#canipforward'], + gql: `{ + querygcpVmInstance{ + __typename + id + canIpForward + } + }`, + resource: 'querygcpVmInstance[*]', + severity: 'medium', + conditions: { + path: '@.canIpForward', + equal: false, + }, +} diff --git a/src/gcp/nist-800-53-rev4/rules/index.ts b/src/gcp/nist-800-53-rev4/rules/index.ts new file mode 100644 index 00000000..ee500efb --- /dev/null +++ b/src/gcp/nist-800-53-rev4/rules/index.ts @@ -0,0 +1,73 @@ +import Gcp_NIST_800_53_11 from './gcp-nist-800-53-rev4-1.1' +import Gcp_NIST_800_53_12 from './gcp-nist-800-53-rev4-1.2' +import Gcp_NIST_800_53_13 from './gcp-nist-800-53-rev4-1.3' +import Gcp_NIST_800_53_14 from './gcp-nist-800-53-rev4-1.4' +import Gcp_NIST_800_53_15 from './gcp-nist-800-53-rev4-1.5' +import Gcp_NIST_800_53_16 from './gcp-nist-800-53-rev4-1.6' +import Gcp_NIST_800_53_17 from './gcp-nist-800-53-rev4-1.7' +import Gcp_NIST_800_53_21 from './gcp-nist-800-53-rev4-2.1' +import Gcp_NIST_800_53_22 from './gcp-nist-800-53-rev4-2.2' +import Gcp_NIST_800_53_23 from './gcp-nist-800-53-rev4-2.3' +import Gcp_NIST_800_53_31 from './gcp-nist-800-53-rev4-3.1' +import Gcp_NIST_800_53_32 from './gcp-nist-800-53-rev4-3.2' +import Gcp_NIST_800_53_33 from './gcp-nist-800-53-rev4-3.3' +import Gcp_NIST_800_53_34 from './gcp-nist-800-53-rev4-3.4' +import Gcp_NIST_800_53_35 from 
'./gcp-nist-800-53-rev4-3.5' +import Gcp_NIST_800_53_36 from './gcp-nist-800-53-rev4-3.6' +import Gcp_NIST_800_53_37 from './gcp-nist-800-53-rev4-3.7' +import Gcp_NIST_800_53_38 from './gcp-nist-800-53-rev4-3.8' +import Gcp_NIST_800_53_39 from './gcp-nist-800-53-rev4-3.9' +import Gcp_NIST_800_53_310 from './gcp-nist-800-53-rev4-3.10' +import Gcp_NIST_800_53_41 from './gcp-nist-800-53-rev4-4.1' +import Gcp_NIST_800_53_42 from './gcp-nist-800-53-rev4-4.2' +import Gcp_NIST_800_53_51 from './gcp-nist-800-53-rev4-5.1' +import Gcp_NIST_800_53_52 from './gcp-nist-800-53-rev4-5.2' +import Gcp_NIST_800_53_53 from './gcp-nist-800-53-rev4-5.3' +import Gcp_NIST_800_53_54 from './gcp-nist-800-53-rev4-5.4' +import Gcp_NIST_800_53_55 from './gcp-nist-800-53-rev4-5.5' +import Gcp_NIST_800_53_56 from './gcp-nist-800-53-rev4-5.6' +import Gcp_NIST_800_53_57 from './gcp-nist-800-53-rev4-5.7' +import Gcp_NIST_800_53_58 from './gcp-nist-800-53-rev4-5.8' +import Gcp_NIST_800_53_61 from './gcp-nist-800-53-rev4-6.1' +import Gcp_NIST_800_53_62 from './gcp-nist-800-53-rev4-6.2' +import Gcp_NIST_800_53_63 from './gcp-nist-800-53-rev4-6.3' +import Gcp_NIST_800_53_64 from './gcp-nist-800-53-rev4-6.4' +import Gcp_NIST_800_53_65 from './gcp-nist-800-53-rev4-6.5' + +export default [ + Gcp_NIST_800_53_11, + Gcp_NIST_800_53_12, + Gcp_NIST_800_53_13, + Gcp_NIST_800_53_14, + Gcp_NIST_800_53_15, + Gcp_NIST_800_53_16, + Gcp_NIST_800_53_17, + Gcp_NIST_800_53_21, + Gcp_NIST_800_53_22, + Gcp_NIST_800_53_23, + Gcp_NIST_800_53_31, + Gcp_NIST_800_53_32, + Gcp_NIST_800_53_33, + Gcp_NIST_800_53_34, + Gcp_NIST_800_53_35, + Gcp_NIST_800_53_36, + Gcp_NIST_800_53_37, + Gcp_NIST_800_53_38, + Gcp_NIST_800_53_39, + Gcp_NIST_800_53_310, + Gcp_NIST_800_53_41, + Gcp_NIST_800_53_42, + Gcp_NIST_800_53_51, + Gcp_NIST_800_53_52, + Gcp_NIST_800_53_53, + Gcp_NIST_800_53_54, + Gcp_NIST_800_53_55, + Gcp_NIST_800_53_56, + Gcp_NIST_800_53_57, + Gcp_NIST_800_53_58, + Gcp_NIST_800_53_61, + Gcp_NIST_800_53_62, + Gcp_NIST_800_53_63, + 
Gcp_NIST_800_53_64, + Gcp_NIST_800_53_65, +] \ No newline at end of file diff --git a/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-1.x.test.ts b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-1.x.test.ts new file mode 100644 index 00000000..97774375 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-1.x.test.ts @@ -0,0 +1,1466 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_NIST_800_53_11 from '../rules/gcp-nist-800-53-rev4-1.1' +import Gcp_NIST_800_53_12 from '../rules/gcp-nist-800-53-rev4-1.2' +import Gcp_NIST_800_53_13 from '../rules/gcp-nist-800-53-rev4-1.3' +import Gcp_NIST_800_53_14 from '../rules/gcp-nist-800-53-rev4-1.4' +import Gcp_NIST_800_53_15 from '../rules/gcp-nist-800-53-rev4-1.5' +import Gcp_NIST_800_53_16 from '../rules/gcp-nist-800-53-rev4-1.6' +import Gcp_NIST_800_53_17 from '../rules/gcp-nist-800-53-rev4-1.7' + +export interface DatabaseFlagsItem { + name: string + value: string | null +} + +export interface AuthorizedNetwork { + value: string +} + +export interface IpConfiguration { + requireSsl?: boolean | null + authorizedNetworks?: AuthorizedNetwork[] +} + +export interface BackupConfiguration { + enabled: boolean | null + startTime: string | null +} + +export interface Settings { + databaseFlags: DatabaseFlagsItem[] + ipConfiguration?: IpConfiguration + backupConfiguration?: BackupConfiguration +} + +export interface IpAddress { + type: string +} + +export interface ServiceAccount { + email: string + scopes?: string[] +} + +export interface Label { + value: string +} + +export interface Project { + id: string +} + +export interface MetadataItem { + key: string + value: string +} + +export interface Metadata { + items: MetadataItem[] +} + +export interface DiskEncryptionKey { + sha256: string | null +} + +export interface Disk { + diskEncryptionKey: DiskEncryptionKey | null +} + +export interface AccessConfigs { + natIP: string | null +} + +export interface 
NetworkInterfaces { + accessConfigs: AccessConfigs[] +} + +export interface ShieldedInstanceConfig { + enableIntegrityMonitoring: boolean + enableVtpm: boolean +} + +export interface ConfidentialInstanceConfig { + enableConfidentialCompute: boolean +} + +export interface QuerygcpVmInstance { + id: string + name?: string + shieldedInstanceConfig?: ShieldedInstanceConfig + confidentialInstanceConfig?: ConfidentialInstanceConfig + networkInterfaces?: NetworkInterfaces[] + canIpForward?: boolean + project?: Project[] + labels?: Label[] + metadata?: Metadata + serviceAccounts?: ServiceAccount[] + disks?: Disk[] +} + +export interface ComputeProject { + commonInstanceMetadata: Metadata +} + +export interface QuerygcpProject { + id: string + computeProject?: ComputeProject[] + vmInstances?: QuerygcpVmInstance[] +} + +export interface QuerygcpSqlInstance { + id?: string + name: string + settings: Settings + ipAddresses?: IpAddress[] +} + +export interface NIST1xQueryResponse { + querygcpVmInstance?: QuerygcpVmInstance[] + querygcpProject?: QuerygcpProject[] + querygcpSqlInstance?: QuerygcpSqlInstance[] +} + +describe('GCP NIST 800-53: Rev. 
4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ providerName: 'gcp', entityName: 'NIST'} ) + }) + + describe('GCP NIST 1.1 Compute instances should not use the default service account', () => { + const getTest41RuleFixture = ( + name: string, + projects: Project[], + labels: Label[], + serviceAccounts: ServiceAccount[] + ): NIST1xQueryResponse => { + return { + querygcpVmInstance: [ + { + id: cuid(), + name, + project: projects, + labels, + serviceAccounts, + }, + ], + } + } + + const test41Rule = async ( + data: NIST1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_11 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when the vm name starts with "gke-", it has a "goog-gke-node" label and the service account is the default compute service account', async () => { + const projectId = 123456789 + const name = 'gke-test' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'goog-gke-node', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, Result.PASS) + }) + + test('No Security Issue when the vm name starts with "gke-", it does NOT have a "goog-gke-node" label but the service account is NOT the default compute service account', async () => { + const name = 'gke-test' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, 
Result.PASS) + }) + + test('No Security Issue when the vm name does NOT start with "gke-", it has a "goog-gke-node" label but the service account is NOT the default compute service account', async () => { + const name = 'dummy' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [ + { + value: 'goog-gke-node', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, Result.PASS) + }) + + test('No Security Issue when the vm name does NOT start with "gke-", it does NOT have a "goog-gke-node" label but the service account is NOT the default compute service account', async () => { + const name = 'dummy' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, Result.PASS) + }) + + test('Security Issue when the vm name does NOT start with "gke-", it does NOT have a "goog-gke-node" label and the service account is the default compute service account', async () => { + const projectId = 123456789 + const name = 'dummy' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, Result.FAIL) + }) + + test('Security Issue when the vm name does start with "gke-", it does NOT have a "goog-gke-node" label and the service account is the default compute 
service account', async () => { + const name = 'gke-test' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, Result.FAIL) + }) + + test('Security Issue when the vm name does start with "gke-", it does NOT have any label and the service account is the default compute service account', async () => { + const name = 'gke-test' + const labels: Label[] = [] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, Result.FAIL) + }) + + test('Security Issue when the vm name does NOT start with "gke-", it does have a "goog-gke-node" label and the service account is the default compute service account', async () => { + const name = 'dummy' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + { + value: 'goog-gke-node', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const data: NIST1xQueryResponse = getTest41RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test41Rule(data, Result.FAIL) + }) + }) + + describe('GCP NIST 1.2 Compute instances should not use the default service account with full access to all Cloud APIs', () => { + const getTest42RuleFixture = ( + name: string, + projects: Project[], + labels: Label[], + 
serviceAccounts: ServiceAccount[] + ): NIST1xQueryResponse => { + return { + querygcpVmInstance: [ + { + id: cuid(), + name, + project: projects, + labels, + serviceAccounts, + }, + ], + } + } + + const test42Rule = async ( + data: NIST1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_12 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test(`No Security Issue when the vm name starts with "gke-", + it has a "goog-gke-node" label + and the service account is the default compute service account + but it does NOT have the "cloud-platform" scope`, async () => { + const projectId = 123456789 + const name = 'gke-test' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'goog-gke-node', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.PASS) + }) + + test(`No Security Issue when the vm name starts with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT the default compute service account, + and it has the "cloud-platform" scope`, async () => { + const name = 'gke-test' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + scopes: [], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.PASS) + }) + + test('No Security Issue when the vm name does NOT start with "gke-", it has a "goog-gke-node" label but the service account is NOT the default compute 
service account', async () => { + const name = 'dummy' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [ + { + value: 'goog-gke-node', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.PASS) + }) + + test(`No Security Issue when the vm name does NOT start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT the default compute service account + and it has the "cloud-platform" scope`, async () => { + const name = 'dummy' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.PASS) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is the default compute service account, + and it has the "cloud-platform" scope`, async () => { + const projectId = 123456789 + const name = 'dummy' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with 
"gke-", + it does NOT have a "goog-gke-node" label, + the service account is the default compute service account, + and it has the "cloud-platform" scope`, async () => { + const name = 'gke-test' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with "gke-", + it does NOT have any label, + the service account is the default compute service account, + and it has the "cloud-platform" scope`, async () => { + const name = 'gke-test' + const labels: Label[] = [] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + scopes: ['https://www.googleapis.com/auth/cloud-platform'], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does have a "goog-gke-node" label + the service account is the default compute service account + and it has the "cloud-platform" scope`, async () => { + const name = 'dummy' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + { + value: 'goog-gke-node', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + scopes: 
['https://www.googleapis.com/auth/cloud-platform'], + }, + ] + const data: NIST1xQueryResponse = getTest42RuleFixture( + name, + projects, + labels, + serviceAccounts + ) + await test42Rule(data, Result.FAIL) + }) + }) + + describe('GCP NIST 1.3 Compute instance "block-project-ssh-keys should be enabled', () => { + const getTest43RuleFixture = ( + name: string, + projects: Project[], + labels: Label[], + serviceAccounts: ServiceAccount[], + metadataItems: MetadataItem[] + ): NIST1xQueryResponse => { + return { + querygcpVmInstance: [ + { + id: cuid(), + name, + project: projects, + labels, + serviceAccounts, + metadata: { + items: metadataItems, + }, + }, + ], + } + } + + const test43Rule = async ( + data: NIST1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_13 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test(`No Security Issue when the vm name starts with "gke-", + it has a "goog-gke-node" label + and the service account is the default compute service account + and it does have the "block-project-ssh-keys" set to true`, async () => { + const projectId = 123456789 + const name = 'gke-test' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'goog-gke-node', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.PASS) + }) + + test(`No Security Issue when the vm name starts with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT the default compute service account, + and it does have the 
"block-project-ssh-keys" set to true`, async () => { + const name = 'gke-test' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.PASS) + }) + + test(`No Security Issue when the vm name does NOT start with "gke-", + it has a "goog-gke-node" label, + the service account is NOT the default compute service account + and it does have the "block-project-ssh-keys" set to true`, async () => { + const name = 'dummy' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [ + { + value: 'goog-gke-node', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.PASS) + }) + + test(`No Security Issue when the vm name does NOT start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT the default compute service account + and it does have the "block-project-ssh-keys" set to true`, async () => { + const name = 'dummy' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + 
serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.PASS) + }) + + test(`No Security Issue when the vm name does NOT start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT the default compute service account + and it not have metadata`, async () => { + const name = 'dummy' + const projects: Project[] = [{ id: 'projects/dummy-id' }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy-compute@test.com', + }, + ] + const metadataItems: MetadataItem[] = [] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.PASS) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is the default compute service account, + and it does have the "block-project-ssh-keys" set to true`, async () => { + const projectId = 123456789 + const name = 'dummy' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is the default compute service account, + and it does have the "block-project-ssh-keys" set to true`, async () => { + const name = 'gke-test' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: 
`projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with "gke-", + it does NOT have any label, + the service account is the default compute service account, + and it does have the "block-project-ssh-keys" set to true`, async () => { + const name = 'gke-test' + const labels: Label[] = [] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does have a "goog-gke-node" label + the service account is the default compute service account + and it does have the "block-project-ssh-keys" set to true`, async () => { + const name = 'dummy' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + { + value: 'goog-gke-node', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: `${projectId}-compute@developer.gserviceaccount.com`, + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + 
serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT default compute service account, + and it does have the "block-project-ssh-keys" set to false`, async () => { + const projectId = 123456789 + const name = 'dummy' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT the default compute service account, + and it does have the "block-project-ssh-keys" set to false`, async () => { + const name = 'gke-test' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with "gke-", + it does NOT have any label, + the service account is NOT the default compute service account, + and it does have the "block-project-ssh-keys" set to false`, async () => { + const name = 'gke-test' + const labels: Label[] = [] + const projectId = 
123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does have a "goog-gke-node" label + the service account is NOT the default compute service account + and it does have the "block-project-ssh-keys" set to false`, async () => { + const name = 'dummy' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + { + value: 'goog-gke-node', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'block-project-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT default compute service account, + and the "block-project-ssh-keys" key is not present`, async () => { + const projectId = 123456789 + const name = 'dummy' + const projects: Project[] = [{ id: `projects/${projectId}` }] + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'dummy-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + 
metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with "gke-", + it does NOT have a "goog-gke-node" label, + the service account is NOT the default compute service account, + and the "block-project-ssh-keys" key is not present`, async () => { + const name = 'gke-test' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + ] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'dummy-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does start with "gke-", + it does NOT have any label, + the service account is NOT the default compute service account, + and the "block-project-ssh-keys" key is not present`, async () => { + const name = 'gke-test' + const labels: Label[] = [] + const projectId = 123456789 + const projects: Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'dummy-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + + test(`Security Issue when the vm name does NOT start with "gke-", + it does have a "goog-gke-node" label + the service account is NOT the default compute service account + and the "block-project-ssh-keys" key is not present`, async () => { + const name = 'dummy' + const labels: Label[] = [ + { + value: 'dummy-label', + }, + { + value: 'goog-gke-node', + }, + ] + const projectId = 123456789 + const projects: 
Project[] = [{ id: `projects/${projectId}` }] + const serviceAccounts: ServiceAccount[] = [ + { + email: 'dummy@test.com', + }, + ] + const metadataItems: MetadataItem[] = [ + { + key: 'dummy-ssh-keys', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest43RuleFixture( + name, + projects, + labels, + serviceAccounts, + metadataItems + ) + await test43Rule(data, Result.FAIL) + }) + }) + + describe('GCP NIST 1.4 Compute instances should not have public IP addresses', () => { + const test49Rule = async ( + instanceName: string, + natIP: string | null, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST1xQueryResponse = { + querygcpVmInstance: [ + { + id: cuid(), + name: instanceName, + networkInterfaces: [ + { + accessConfigs: [ + { + natIP, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_14 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with an instance cretaed by GKE with natIP', async () => { + await test49Rule('gke-instance-1', '34.69.30.133', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with an instance cretaed by GKE without natIp', async () => { + await test49Rule('gke-instance-1', '34.69.30.133', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random instance without natIP', async () => { + await test49Rule('instance-1', null, Result.PASS) + }) + + test('Security Issue when there is an inbound rule with a random instance with natIP', async () => { + await test49Rule('instance-1', '34.69.30.133', Result.FAIL) + }) + }) + + describe('GCP NIST 1.5 Compute instances "Enable connecting to serial ports" should not be enabled', () => { + const getTest45RuleFixture = ( + metadataItems: MetadataItem[] + ): NIST1xQueryResponse => { + return { + querygcpVmInstance: [ + { + id: cuid(), + 
name: 'dummy-project-name', + project: [], + labels: [], + serviceAccounts: [], + metadata: { + items: metadataItems, + }, + }, + ], + } + } + + const test45Rule = async ( + data: NIST1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_15 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when ¨block-project-ssh-keys¨ is set to false', async () => { + const metadataItems: MetadataItem[] = [ + { + key: 'serial-port-enable', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest45RuleFixture(metadataItems) + await test45Rule(data, Result.PASS) + }) + + test('No Security Issue when ¨serial-port-enable¨ is set to 0', async () => { + const metadataItems: MetadataItem[] = [ + { + key: 'serial-port-enable', + value: '0', + }, + ] + const data: NIST1xQueryResponse = getTest45RuleFixture(metadataItems) + await test45Rule(data, Result.PASS) + }) + + test('Security Security Issue when ¨serial-port-enable¨ is set to true', async () => { + const metadataItems: MetadataItem[] = [ + { + key: 'serial-port-enable', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest45RuleFixture(metadataItems) + await test45Rule(data, Result.FAIL) + }) + + test('Security Security Issue when ¨serial-port-enable¨ is set to 1', async () => { + const metadataItems: MetadataItem[] = [ + { + key: 'serial-port-enable', + value: 'true', + }, + ] + const data: NIST1xQueryResponse = getTest45RuleFixture(metadataItems) + await test45Rule(data, Result.FAIL) + }) + + test('Security Security Issue when ¨serial-port-enable¨ is set to 1', async () => { + const metadataItems: MetadataItem[] = [ + { + key: 'serial-port-enable', + value: '1', + }, + ] + const data: NIST1xQueryResponse = getTest45RuleFixture(metadataItems) + await test45Rule(data, Result.FAIL) + }) + + test('Security Security Issue when metadata is 
empty', async () => { + const metadataItems: MetadataItem[] = [ + { + key: 'serial-port-enable', + value: '1', + }, + ] + const data: NIST1xQueryResponse = getTest45RuleFixture(metadataItems) + await test45Rule(data, Result.FAIL) + }) + + test('Security Security Issue when metadata does NOT contain ¨serial-port-enable¨ key', async () => { + const metadataItems: MetadataItem[] = [ + { + key: 'dummy-key', + value: 'false', + }, + ] + const data: NIST1xQueryResponse = getTest45RuleFixture(metadataItems) + await test45Rule(data, Result.FAIL) + }) + }) + + describe('GCP NIST 1.6 SQL database instances should not permit access from 0.0.0.0/0', () => { + const getRuleFixture = (): NIST1xQueryResponse => { + return { + querygcpSqlInstance: [ + { + id: cuid(), + name: 'test-sql-instance', + settings: { + ipConfiguration: { + authorizedNetworks: [ + { value: '192.168.0.0/24' }, + { value: '192.168.1.0/24' }, + ], + }, + databaseFlags: [], + }, + }, + ], + } + } + + const testRule = async ( + data: NIST1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_16 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test("No Security Issue when authorizedNetworks is NOT set to '0.0.0.0/0'", async () => { + const data: NIST1xQueryResponse = getRuleFixture() + await testRule(data, Result.PASS) + }) + + test('No Security Issue when authorizedNetworks is empty', async () => { + const data: NIST1xQueryResponse = getRuleFixture() + const sqlInstance = data.querygcpSqlInstance?.[0] as QuerygcpSqlInstance + sqlInstance.settings = { + ipConfiguration: { + authorizedNetworks: [], + }, + databaseFlags: [], + } + await testRule(data, Result.PASS) + }) + + test("Security Issue when authorizedNetworks is set to '0.0.0.0/0'", async () => { + const data: NIST1xQueryResponse = getRuleFixture() + const sqlInstance = data.querygcpSqlInstance?.[0] as 
QuerygcpSqlInstance + sqlInstance.settings = { + ipConfiguration: { + authorizedNetworks: [{ value: '0.0.0.0/0' }], + }, + databaseFlags: [], + } + await testRule(data, Result.FAIL) + }) + }) + + describe('GCP NIST 1.7 SQL database instances should not have public IPs', () => { + const getRuleFixture = (): NIST1xQueryResponse => { + return { + querygcpSqlInstance: [ + { + id: cuid(), + name: 'test-sql-instance', + ipAddresses: [ + { + type: 'PRIVATE', + }, + ], + settings: { + databaseFlags: [], + }, + }, + ], + } + } + + const testRule = async ( + data: NIST1xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_17 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when ipAddresses are PRIVATE', async () => { + const data: NIST1xQueryResponse = getRuleFixture() + await testRule(data, Result.PASS) + }) + + test('No Security Issue when ipAddresses are empty', async () => { + const data: NIST1xQueryResponse = getRuleFixture() + const sqlInstance = data.querygcpSqlInstance?.[0] as QuerygcpSqlInstance + sqlInstance.ipAddresses = [] + await testRule(data, Result.PASS) + }) + + test('Security Issue when ipAddresses are PUBLIC', async () => { + const data: NIST1xQueryResponse = getRuleFixture() + const sqlInstance = data.querygcpSqlInstance?.[0] as QuerygcpSqlInstance + sqlInstance.ipAddresses = [ + { + type: 'PUBLIC', + }, + ] + await testRule(data, Result.FAIL) + }) + + test('Security Issue when ipAddresses are PRIVATE and PUBLIC', async () => { + const data: NIST1xQueryResponse = getRuleFixture() + const sqlInstance = data.querygcpSqlInstance?.[0] as QuerygcpSqlInstance + sqlInstance.ipAddresses = [ + { + type: 'PRIVATE', + }, + { + type: 'PUBLIC', + }, + ] + await testRule(data, Result.FAIL) + }) + }) +}) diff --git a/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-2.x.test.ts 
b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-2.x.test.ts new file mode 100644 index 00000000..042dbeb5 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-2.x.test.ts @@ -0,0 +1,180 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_NIST_800_53_21 from '../rules/gcp-nist-800-53-rev4-2.1' +import Gcp_NIST_800_53_22 from '../rules/gcp-nist-800-53-rev4-2.2' +import Gcp_NIST_800_53_23 from '../rules/gcp-nist-800-53-rev4-2.3' + +export interface DnssecConfigDefaultKeySpecs { + keyType: string + algorithm: string +} + +export interface QuerygcpDnsManagedZone { + id: string + visibility?: string + dnssecConfigState?: string + dnssecConfigDefaultKeySpecs?: DnssecConfigDefaultKeySpecs[] +} + +export interface NIST2xQueryResponse { + querygcpDnsManagedZone?: QuerygcpDnsManagedZone[] +} + +describe('GCP NIST 800-53: Rev. 4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'gcp', + entityName: 'NIST', + }) + }) + + describe('GCP NIST 2.1 DNS managed zone DNSSEC should be enabled', () => { + const test33Rule = async ( + visibility: string, + dnssecConfigState: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST2xQueryResponse = { + querygcpDnsManagedZone: [ + { + id: cuid(), + visibility, + dnssecConfigState, + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_21 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with visibility public and dnssecConfigState is enabled', async () => { + await test33Rule('public', 'on', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with visibility private and dnssecConfigState is not enabled', async () => { + await test33Rule('private', 'off', Result.PASS) + }) + + test('Security Issue when 
there is an inbound rule with visibility public and dnssecConfigState is not enabled', async () => { + await test33Rule('public', 'off', Result.FAIL) + }) + }) + + describe('GCP NIST 2.2 DNS managed zone DNSSEC key-signing keys should not use RSASHA1', () => { + const test34Rule = async ( + visibility: string, + keyType: string, + algorithm: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST2xQueryResponse = { + querygcpDnsManagedZone: [ + { + id: cuid(), + visibility, + dnssecConfigDefaultKeySpecs: [ + { + keyType: 'keySigning', + algorithm: 'rsasha512', + }, + { + keyType: 'keyTest', + algorithm: 'rsasha1', + }, + { + keyType, + algorithm, + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_22 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with visibility public and keyType keySigning and algorithm type different to rsasha1', async () => { + await test34Rule('public', 'keySigning', 'rsasha256', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with visibility private and keyType keySigning and algorithm type rsasha1', async () => { + await test34Rule('private', 'keySigning', 'rsasha256', Result.PASS) + }) + + test('Security Issue when there is an inbound rule with visibility public and keyType keySigning and algorithm type rsasha1', async () => { + await test34Rule('public', 'keySigning', 'rsasha1', Result.FAIL) + }) + }) + + describe('GCP NIST 2.3 DNS managed zone DNSSEC zone-signing keys should not use RSASHA1', () => { + const test35Rule = async ( + visibility: string, + keyType: string, + algorithm: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST2xQueryResponse = { + querygcpDnsManagedZone: [ + { + id: cuid(), + visibility, + dnssecConfigDefaultKeySpecs: [ + { + keyType: 'zoneSigning', + algorithm: 
'rsasha512', + }, + { + keyType: 'keyTest', + algorithm: 'rsasha1', + }, + { + keyType, + algorithm, + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_23 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with visibility public and keyType zoneSigning and algorithm type different to rsasha1', async () => { + await test35Rule('public', 'zoneSigning', 'rsasha256', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with visibility private and keyType zoneSigning and algorithm type rsasha1', async () => { + await test35Rule('private', 'zoneSigning', 'rsasha256', Result.PASS) + }) + + test('Security Issue when there is an inbound rule with visibility public and keyType zoneSigning and algorithm type rsasha1', async () => { + await test35Rule('public', 'zoneSigning', 'rsasha1', Result.FAIL) + }) + }) +}) diff --git a/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-3.x.test.ts b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-3.x.test.ts new file mode 100644 index 00000000..26930931 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-3.x.test.ts @@ -0,0 +1,1084 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_NIST_800_53_31 from '../rules/gcp-nist-800-53-rev4-3.1' +import Gcp_NIST_800_53_32 from '../rules/gcp-nist-800-53-rev4-3.2' +import Gcp_NIST_800_53_33 from '../rules/gcp-nist-800-53-rev4-3.3' +import Gcp_NIST_800_53_34 from '../rules/gcp-nist-800-53-rev4-3.4' +import Gcp_NIST_800_53_35 from '../rules/gcp-nist-800-53-rev4-3.5' +import Gcp_NIST_800_53_36 from '../rules/gcp-nist-800-53-rev4-3.6' +import Gcp_NIST_800_53_37 from '../rules/gcp-nist-800-53-rev4-3.7' +import Gcp_NIST_800_53_38 from '../rules/gcp-nist-800-53-rev4-3.8' +import Gcp_NIST_800_53_39 from '../rules/gcp-nist-800-53-rev4-3.9' +import 
Gcp_NIST_800_53_310 from '../rules/gcp-nist-800-53-rev4-3.10' + +export interface DatabaseFlagsItem { + name: string + value: string | null +} + +export interface AuthorizedNetwork { + value: string +} + +export interface IpConfiguration { + requireSsl?: boolean | null + authorizedNetworks?: AuthorizedNetwork[] +} + +export interface BackupConfiguration { + enabled: boolean | null + startTime: string | null +} + +export interface Settings { + databaseFlags: DatabaseFlagsItem[] + ipConfiguration?: IpConfiguration + backupConfiguration?: BackupConfiguration +} + +export interface IpAddress { + type: string +} + +export interface SqlInstances { + id?: string + name: string + settings: Settings + ipAddresses?: IpAddress[] +} + +export interface LogBucket { + name: string + retentionDays: number + locked: boolean +} + +export interface LogSink { + filter?: string + destination?: string +} + +export interface QuerygcpProject { + id: string + sqlInstances: SqlInstances[] + logSinks?: LogSink[] + logBuckets?: LogBucket[] +} + +export interface AuditLogConfig { + logType: string + exemptedMembers: string[] +} + +export interface AuditConfig { + auditLogConfigs: AuditLogConfig[] + service: string + exemptedMembers: string[] +} + +export interface QuerygcpIamPolicy { + id: string + auditConfigs: AuditConfig[] +} + +export interface GcpNetworkSubnet { + purpose: string + enableFlowLogs: boolean | null +} + +export interface QuerygcpNetwork { + id: string + subnets?: GcpNetworkSubnet[] + name?: string + ipV4Range?: string | null +} +export interface NIST3xQueryResponse { + querygcpProject?: QuerygcpProject[] + querygcpSqlInstance?: SqlInstances[] + querygcpIamPolicy?: QuerygcpIamPolicy[] + querygcpNetwork?: QuerygcpNetwork[] +} + +describe('GCP NIST 800-53: Rev. 
4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ providerName: 'gcp', entityName: 'NIST'} ) + }) + + describe('GCP NIST 3.1 IAM default audit log config should not exempt any users', () => { + const getTestRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpIamPolicy: [ + { + id: cuid(), + auditConfigs: [ + { + auditLogConfigs: [ + { + logType: 'DATA_WRITE', + exemptedMembers: [], + }, + { + logType: 'DATA_READ', + exemptedMembers: [], + }, + ], + service: 'allServices', + exemptedMembers: [], + }, + ], + }, + ], + } + } + + const test21Rule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_31 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is a auditConfig with logtype set to DATA_WRITES and DATA_READ for all services, and exemptedMembers is empty', async () => { + const data: NIST3xQueryResponse = getTestRuleFixture() + await test21Rule(data, Result.PASS) + }) + + test('Security Issue when there is a auditConfig with logtype set to DATA_WRITES and DATA_READ for all services, and exemptedMembers is NOT empty', async () => { + let data: NIST3xQueryResponse = { + querygcpIamPolicy: [ + { + id: cuid(), + auditConfigs: [ + { + auditLogConfigs: [ + { + logType: 'DATA_WRITES', + exemptedMembers: [], + }, + { + logType: 'DATA_READ', + exemptedMembers: [], + }, + ], + service: 'allServices', + exemptedMembers: ['dummy-member'], + }, + ], + }, + ], + } + await test21Rule(data, Result.FAIL) + + data = { + querygcpIamPolicy: [ + { + id: cuid(), + auditConfigs: [ + { + auditLogConfigs: [ + { + logType: 'DATA_WRITES', + exemptedMembers: ['dummy-member'], + }, + { + logType: 'DATA_READ', + exemptedMembers: [], + }, + ], + service: 'allServices', + exemptedMembers: [], + }, + ], + }, + ], + } + await 
test21Rule(data, Result.FAIL) + data = { + querygcpIamPolicy: [ + { + id: cuid(), + auditConfigs: [ + { + auditLogConfigs: [ + { + logType: 'DATA_WRITES', + exemptedMembers: [], + }, + { + logType: 'DATA_READ', + exemptedMembers: ['dummy-member'], + }, + ], + service: 'allServices', + exemptedMembers: [], + }, + ], + }, + ], + } + await test21Rule(data, Result.FAIL) + }) + + test('Security Issue when there is a auditConfig without logtype set to DATA_WRITES', async () => { + const data: NIST3xQueryResponse = { + querygcpIamPolicy: [ + { + id: cuid(), + auditConfigs: [ + { + auditLogConfigs: [ + { + logType: 'DATA_READ', + exemptedMembers: [], + }, + ], + service: 'allServices', + exemptedMembers: [], + }, + ], + }, + ], + } + await test21Rule(data, Result.FAIL) + }) + + test('Security Issue when there is a auditConfig without logtype set to DATA_READ', async () => { + const data: NIST3xQueryResponse = { + querygcpIamPolicy: [ + { + id: cuid(), + auditConfigs: [ + { + auditLogConfigs: [ + { + logType: 'DATA_WRITES', + exemptedMembers: [], + }, + ], + service: 'allServices', + exemptedMembers: [], + }, + ], + }, + ], + } + await test21Rule(data, Result.FAIL) + }) + + test('Security Issue when there is a auditConfig with logtype set to DATA_WRITES and DATA_READ NOT set to allServices', async () => { + const data: NIST3xQueryResponse = { + querygcpIamPolicy: [ + { + id: cuid(), + auditConfigs: [ + { + auditLogConfigs: [ + { + logType: 'DATA_WRITE', + exemptedMembers: [], + }, + { + logType: 'DATA_READ', + exemptedMembers: [], + }, + ], + service: 'dummy-service', + exemptedMembers: [], + }, + ], + }, + ], + } + await test21Rule(data, Result.FAIL) + }) + }) + + describe("GCP NIST 3.2 PostgreSQL database instance 'log_checkpoints' database flag should be set to 'on'", () => { + const getRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [ + { + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + 
name: 'log_checkpoints', + value: 'on', + }, + ], + }, + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_32 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is NO POSTGRES instances', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances = [] + await testRule(data, Result.PASS) + }) + + test("No Security Issue when all POSTGRES instances have the 'log_checkpoints' set to 'on'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances.push({ + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'dummy_key', + value: 'on', + }, + { + name: 'log_checkpoints', + value: 'on', + }, + ], + }, + }) + await testRule(data, Result.PASS) + }) + + test('Security Issue when the POSTGRES instances have no database flags', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do NOT have a 'log_checkpoints' database flag", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [ + { + name: 'dummy_key', + value: 'on', + }, + ] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do have a 'log_checkpoints' database flag set to 'off'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = 
data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = 'off' + await testRule(data, Result.FAIL) + }) + }) + + describe("GGCP NIST 3.3 PostgreSQL database instance 'log_connections' database flag should be set to 'on'", () => { + const getRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [ + { + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'log_connections', + value: 'on', + }, + ], + }, + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_33 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is NO POSTGRES instances', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances = [] + await testRule(data, Result.PASS) + }) + + test("No Security Issue when all POSTGRES instances have the 'log_connections' set to 'on'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances.push({ + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'dummy_key', + value: 'on', + }, + { + name: 'log_connections', + value: 'on', + }, + ], + }, + }) + await testRule(data, Result.PASS) + }) + + test('Security Issue when the POSTGRES instances have no database flags', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do NOT have a 'log_connections' database flag", async () 
=> { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [ + { + name: 'dummy_key', + value: 'on', + }, + ] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do have a 'log_connections' database flag set to 'off'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = 'off' + await testRule(data, Result.FAIL) + }) + }) + + describe("GCP NIST 3.4 PostgreSQL database instance 'log_disconnections' database flag should be set to 'on'", () => { + const getRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [ + { + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'log_disconnections', + value: 'on', + }, + ], + }, + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_34 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is NO POSTGRES instances', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances = [] + await testRule(data, Result.PASS) + }) + + test("No Security Issue when all POSTGRES instances have the 'log_disconnections' set to 'on'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances.push({ + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'dummy_key', + value: 'on', + }, + { + name: 'log_disconnections', + value: 'on', + 
}, + ], + }, + }) + await testRule(data, Result.PASS) + }) + + test('Security Issue when the POSTGRES instances have no database flags', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do NOT have a 'log_disconnections' database flag", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [ + { + name: 'dummy_key', + value: 'on', + }, + ] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do have a 'log_disconnections' database flag set to 'off'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = 'off' + await testRule(data, Result.FAIL) + }) + }) + + describe("GCP NIST 3.5 PostgreSQL database instance 'log_lock_waits' database flag should be set to 'on'", () => { + const getRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [ + { + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'log_lock_waits', + value: 'on', + }, + ], + }, + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_35 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is NO POSTGRES instances', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances 
= [] + await testRule(data, Result.PASS) + }) + + test("No Security Issue when all POSTGRES instances have the 'log_lock_waits' set to 'on'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances.push({ + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'dummy_key', + value: 'on', + }, + { + name: 'log_lock_waits', + value: 'on', + }, + ], + }, + }) + await testRule(data, Result.PASS) + }) + + test('Security Issue when the POSTGRES instances have no database flags', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do NOT have a 'log_lock_waits' database flag", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [ + { + name: 'dummy_key', + value: 'on', + }, + ] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do have a 'log_lock_waits' database flag set to 'off'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = 'off' + await testRule(data, Result.FAIL) + }) + }) + + describe("GCP NIST 3.6 PostgreSQL database instance 'log_min_error_statement' database flag should be set appropriately", () => { + const getRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [ + { + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'log_min_error_statement', + value: 'error', + }, + ], + }, + }, + ], + }, + ], + } + } + + const testRule = 
async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_36 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is NO POSTGRES instances', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances = [] + await testRule(data, Result.PASS) + }) + + test("No Security Issue when all POSTGRES instances have the 'log_min_error_statement' set to any value: ['error', 'log', 'fatal', 'panic']", async () => { + const validValues = ['error', 'log', 'fatal', 'panic'] + for (const validValue of validValues) { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = validValue + project.sqlInstances.push({ + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'dummy_key', + value: 'on', + }, + { + name: 'log_min_error_statement', + value: validValue, + }, + ], + }, + }) + await testRule(data, Result.PASS) + } + }) + + test('Security Issue when the POSTGRES instances have no database flags', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do NOT have a 'log_min_error_statement' database flag", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [ + { + name: 'dummy_key', + value: 'off', + }, + ] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do have a 
'log_min_error_statement' database flag set to an invalid value", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = 'dummy' + await testRule(data, Result.FAIL) + }) + }) + + describe("GCP NIST 3.7 PostgreSQL database instance 'log_temp_files' database flag should be set to '0' (on)", () => { + const getRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [ + { + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'log_temp_files', + value: '0', + }, + ], + }, + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_37 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is NO POSTGRES instances', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances = [] + await testRule(data, Result.PASS) + }) + + test("No Security Issue when all POSTGRES instances have the 'log_temp_files' set to '0'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances.push({ + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'dummy_key', + value: 'on', + }, + { + name: 'log_temp_files', + value: '0', + }, + ], + }, + }) + await testRule(data, Result.PASS) + }) + + test('Security Issue when the POSTGRES instances have no database flags', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [] + 
await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do NOT have a 'log_temp_files' database flag", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [ + { + name: 'dummy_key', + value: 'off', + }, + ] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do have a 'log_temp_files' database flag set to '1'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = '1' + await testRule(data, Result.FAIL) + }) + }) + + describe("GCP NIST 3.8 PostgreSQL database instance 'log_min_duration_statement' database flag should be set to '-1' (disabled)", () => { + const getRuleFixture = (): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [ + { + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'log_min_duration_statement', + value: '-1', + }, + ], + }, + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_38 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is NO POSTGRES instances', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances = [] + await testRule(data, Result.PASS) + }) + + test("No Security Issue when all POSTGRES instances have the 'log_min_duration_statement' set to '-1'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + 
project.sqlInstances.push({ + name: 'test-postgres-instance', + settings: { + databaseFlags: [ + { + name: 'dummy_key', + value: 'on', + }, + { + name: 'log_min_duration_statement', + value: '-1', + }, + ], + }, + }) + await testRule(data, Result.PASS) + }) + + test('Security Issue when the POSTGRES instances have no database flags', async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do NOT have a 'log_min_duration_statement' database flag", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags = [ + { + name: 'dummy_key', + value: '-1', + }, + ] + await testRule(data, Result.FAIL) + }) + + test("Security Issue when the POSTGRES instances do have a 'log_min_duration_statement' database flag set to '100'", async () => { + const data: NIST3xQueryResponse = getRuleFixture() + const project = data.querygcpProject?.[0] as QuerygcpProject + project.sqlInstances[0].settings.databaseFlags[0].value = '100' + await testRule(data, Result.FAIL) + }) + }) + + describe('GCP NIST 3.9 At least one project-level logging sink should be configured with an empty filter', () => { + const getTestRuleFixture = (filter: string): NIST3xQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + sqlInstances: [], + logSinks: [ + { + filter: 'dummy filter', + }, + { + filter, + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST3xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_39 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is a logSink 
with an empty filter', async () => { + const data: NIST3xQueryResponse = getTestRuleFixture('') + await testRule(data, Result.PASS) + }) + + test('Security Issue when there is a logSink with an empty filter', async () => { + const data: NIST3xQueryResponse = getTestRuleFixture('dummy-filter') + await testRule(data, Result.FAIL) + }) + }) + + + describe('GCP NIST 3.10 Network subnet flow logs should be enabled', () => { + const testRule = async ( + subnets: GcpNetworkSubnet[], + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST3xQueryResponse = { + querygcpNetwork: [ + { + id: cuid(), + subnets, + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_310 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when all PRIVATE subnets have enableFlowLogs set to true', async () => { + const subnets: GcpNetworkSubnet[] = [ + { + purpose: 'PRIVATE', + enableFlowLogs: true, + }, + { + purpose: 'PRIVATE', + enableFlowLogs: true, + }, + { + purpose: 'DUMMY', + enableFlowLogs: null, + }, + { + purpose: 'DUMMY', + enableFlowLogs: true, + }, + { + purpose: 'DUMMY', + enableFlowLogs: false, + }, + ] + await testRule(subnets, Result.PASS) + }) + + test('Security Issue when at least 1 PRIVATE subnet has enableFlowLogs set to false', async () => { + const subnets: GcpNetworkSubnet[] = [ + { + purpose: 'PRIVATE', + enableFlowLogs: true, + }, + { + purpose: 'PRIVATE', + enableFlowLogs: false, + }, + ] + await testRule(subnets, Result.FAIL) + }) + test('Security Issue when at least 1 PRIVATE subnet has enableFlowLogs set to null', async () => { + const subnets: GcpNetworkSubnet[] = [ + { + purpose: 'PRIVATE', + enableFlowLogs: true, + }, + { + purpose: 'PRIVATE', + enableFlowLogs: null, + }, + ] + await testRule(subnets, Result.FAIL) + }) + }) +}) diff --git a/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-4.x.test.ts 
b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-4.x.test.ts new file mode 100644 index 00000000..5c6c9292 --- /dev/null +++ b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-4.x.test.ts @@ -0,0 +1,244 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_NIST_800_53_41 from '../rules/gcp-nist-800-53-rev4-4.1' +import Gcp_NIST_800_53_42 from '../rules/gcp-nist-800-53-rev4-4.2' + + +export interface ServiceAccount { + email: string + scopes?: string[] +} + +export interface Label { + value: string +} + +export interface Project { + id: string +} + +export interface MetadataItem { + key: string + value: string +} + +export interface Metadata { + items: MetadataItem[] +} + +export interface DiskEncryptionKey { + sha256: string | null +} + +export interface Disk { + diskEncryptionKey: DiskEncryptionKey | null +} + +export interface AccessConfigs { + natIP: string | null +} + +export interface NetworkInterfaces { + accessConfigs: AccessConfigs[] +} + +export interface ShieldedInstanceConfig { + enableIntegrityMonitoring: boolean + enableVtpm: boolean +} + +export interface ConfidentialInstanceConfig { + enableConfidentialCompute: boolean +} + +export interface QuerygcpVmInstance { + id: string + name?: string + shieldedInstanceConfig?: ShieldedInstanceConfig + confidentialInstanceConfig?: ConfidentialInstanceConfig + networkInterfaces?: NetworkInterfaces[] + canIpForward?: boolean + project?: Project[] + labels?: Label[] + metadata?: Metadata + serviceAccounts?: ServiceAccount[] + disks?: Disk[] +} + +export interface DatabaseFlagsItem { + name: string + value: string | null +} + +export interface AuthorizedNetwork { + value: string +} + +export interface IpConfiguration { + requireSsl?: boolean | null + authorizedNetworks?: AuthorizedNetwork[] +} + +export interface BackupConfiguration { + enabled: boolean | null + startTime: string | null +} + +export interface Settings { + databaseFlags: DatabaseFlagsItem[] + 
ipConfiguration?: IpConfiguration + backupConfiguration?: BackupConfiguration +} + +export interface IpAddress { + type: string +} + +export interface QuerygcpSqlInstance { + id?: string + name: string + settings: Settings + ipAddresses?: IpAddress[] +} + +export interface NIST4xQueryResponse { + querygcpVmInstance?: QuerygcpVmInstance[] + querygcpSqlInstance?: QuerygcpSqlInstance[] +} + +describe('GCP NIST 800-53: Rev. 4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ providerName: 'gcp', entityName: 'NIST'} ) + }) + + describe('GCP NIST 4.1 Compute instance disks should be encrypted with customer-supplied encryption keys (CSEKs)', () => { + const getTestRuleFixture = (disk: Disk): NIST4xQueryResponse => { + return { + querygcpVmInstance: [ + { + id: cuid(), + name: 'dummy-project-name', + project: [], + labels: [], + serviceAccounts: [], + disks: [disk], + }, + ], + } + } + + const testRule = async ( + data: NIST4xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_41 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when disk has a sha256 key', async () => { + const disk: Disk = { + diskEncryptionKey: { + sha256: 'dummy', + }, + } + + const data: NIST4xQueryResponse = getTestRuleFixture(disk) + await testRule(data, Result.PASS) + }) + + test('Security Issue when disk diskEncryptionKey is null', async () => { + const disk: Disk = { + diskEncryptionKey: null, + } + + const data: NIST4xQueryResponse = getTestRuleFixture(disk) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when disk diskEncryptionKey sha256 is null', async () => { + const disk: Disk = { + diskEncryptionKey: { + sha256: null, + }, + } + + const data: NIST4xQueryResponse = getTestRuleFixture(disk) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when 
disk diskEncryptionKey sha256 is empty', async () => { + const disk: Disk = { + diskEncryptionKey: { + sha256: '', + }, + } + + const data: NIST4xQueryResponse = getTestRuleFixture(disk) + await testRule(data, Result.FAIL) + }) + }) + + describe('GCP CIS 6.4 Ensure that the Cloud SQL database instance requires all incoming connections to use SSL', () => { + const getRuleFixture = (): NIST4xQueryResponse => { + return { + querygcpSqlInstance: [ + { + id: cuid(), + name: 'test-sql-instance', + settings: { + ipConfiguration: { + requireSsl: true, + }, + databaseFlags: [], + }, + }, + ], + } + } + + const testRule = async ( + data: NIST4xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_42 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when requireSsl is set to true', async () => { + const data: NIST4xQueryResponse = getRuleFixture() + await testRule(data, Result.PASS) + }) + + test('Security Issue when requireSsl is set to false', async () => { + const data: NIST4xQueryResponse = getRuleFixture() + const sqlInstance = data.querygcpSqlInstance?.[0] as QuerygcpSqlInstance + const ipConfiguration = sqlInstance.settings + .ipConfiguration as IpConfiguration + ipConfiguration.requireSsl = false + await testRule(data, Result.FAIL) + }) + + test('Security Issue when requireSsl is set to null', async () => { + const data: NIST4xQueryResponse = getRuleFixture() + const sqlInstance = data.querygcpSqlInstance?.[0] as QuerygcpSqlInstance + const ipConfiguration = sqlInstance.settings + .ipConfiguration as IpConfiguration + ipConfiguration.requireSsl = null + await testRule(data, Result.FAIL) + }) + }) +}) diff --git a/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-5.x.test.ts b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-5.x.test.ts new file mode 100644 index 00000000..62349150 --- /dev/null +++ 
b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-5.x.test.ts @@ -0,0 +1,888 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_NIST_800_53_51 from '../rules/gcp-nist-800-53-rev4-5.1' +import Gcp_NIST_800_53_52 from '../rules/gcp-nist-800-53-rev4-5.2' +import Gcp_NIST_800_53_53 from '../rules/gcp-nist-800-53-rev4-5.3' +import Gcp_NIST_800_53_54 from '../rules/gcp-nist-800-53-rev4-5.4' +import Gcp_NIST_800_53_55 from '../rules/gcp-nist-800-53-rev4-5.5' +import Gcp_NIST_800_53_56 from '../rules/gcp-nist-800-53-rev4-5.6' +import Gcp_NIST_800_53_57 from '../rules/gcp-nist-800-53-rev4-5.7' +import Gcp_NIST_800_53_58 from '../rules/gcp-nist-800-53-rev4-5.8' + +const Gcp_NIST_800_53_51_Filter = + '( protoPayload.serviceName="cloudresourcemanager.googleapis.com" ) AND ( ProjectOwnership OR projectOwnerInvitee ) OR ( protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner" ) OR ( protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner" )' +const Gcp_NIST_800_53_52_Filter = + 'protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:*' +const Gcp_NIST_800_53_53_Filter = + 'resource.type="iam_role" AND protoPayload.methodName="google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" OR protoPayload.methodName="google.iam.admin.v1.UpdateRole"' +const Gcp_NIST_800_53_54_Filter = + 'resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert"' +const Gcp_NIST_800_53_55_Filter = + 'resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert"' +const Gcp_NIST_800_53_56_Filter = + 'resource.type=gce_network AND 
protoPayload.methodName="beta.compute.networks.insert" OR protoPayload.methodName="beta.compute.networks.patch" OR protoPayload.methodName="v1.compute.networks.delete" OR protoPayload.methodName="v1.compute.networks.removePeering" OR protoPayload.methodName="v1.compute.networks.addPeering"' +const Gcp_NIST_800_53_57_Filter = + 'protoPayload.methodName="cloudsql.instances.update"' + +export interface MetricDescriptor { + type: string +} + +export interface LogMetric { + filter: string + name?: string + metricDescriptor?: MetricDescriptor +} + +export interface Project { + logMetrics?: LogMetric[] +} + +export interface Enabled { + value: boolean +} + +export interface QuerygcpAlertPolicy { + id: string + enabled?: Enabled + project?: Project[] +} + +export interface DnsPolicy { + enableLogging: boolean +} + +export interface QuerygcpNetwork { + id: string + dnsPolicies?: DnsPolicy[] +} + +export interface LogBucket { + name: string + retentionDays: number + locked: boolean +} + +export interface LogSink { + filter?: string + destination?: string +} + +export interface QuerygcpProject { + id: string + logSinks: LogSink[] + logBuckets?: LogBucket[] +} + +export interface AuditLogConfig { + logType: string + exemptedMembers: string[] +} + +export interface AuditConfig { + auditLogConfigs: AuditLogConfig[] + service: string + exemptedMembers: string[] +} + +export interface QuerygcpIamPolicy { + id: string + auditConfigs: AuditConfig[] +} + +export interface NIST5xQueryResponse { + querygcpAlertPolicy?: QuerygcpAlertPolicy[] + querygcpNetwork?: QuerygcpNetwork[] + querygcpProject?: QuerygcpProject[] + querygcpIamPolicy?: QuerygcpIamPolicy[] +} + +describe('GCP NIST 800-53: Rev. 
4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ providerName: 'gcp', entityName: 'NIST'} ) + }) + + describe('GCP NIST 5.1 Logging metric filter and alert for project ownership assignments/changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST5xQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_51 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_NIST_800_53_51_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_NIST_800_53_51_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_NIST_800_53_51_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('GCP NIST 5.2 Logging metric filter and alert 
for audit configuration changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST5xQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_52 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_NIST_800_53_52_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_NIST_800_53_52_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_NIST_800_53_52_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('GCP NIST 5.3 Logging metric filter and alert for Custom Role changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + 
const data: NIST5xQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_53 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_NIST_800_53_53_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_NIST_800_53_53_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_NIST_800_53_53_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('GCP NIST 5.4 Logging metric filter and alert for network firewall rule changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST5xQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + 
metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_54 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_NIST_800_53_54_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_NIST_800_53_54_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_NIST_800_53_54_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('GCP NIST 5.5 Logging metric filter and alert for network route changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST5xQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], 
+ } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_55 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_NIST_800_53_55_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_NIST_800_53_55_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_NIST_800_53_55_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('GCP NIST 5.6 Logging metric filter and alert for network changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST5xQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_56 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are 
metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_NIST_800_53_56_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_NIST_800_53_56_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_NIST_800_53_56_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('GCP NIST 5.7 Logging metric filter and alert for SQL instance configuration changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST5xQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_57 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_NIST_800_53_57_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is 
not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_NIST_800_53_57_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_NIST_800_53_57_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('GCP NIST 5.8 Logging storage bucket retention policies and Bucket Lock should be configured', () => { + const getTestRuleFixture = ( + querygcpProjects: QuerygcpProject[] + ): NIST5xQueryResponse => { + return { + querygcpProject: querygcpProjects, + } + } + + const testRule = async ( + data: NIST5xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_58 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when all Sinks destined to storage buckets have retention policies and Bucket Lock are enabled', async () => { + const projectData = [ + { + id: cuid(), + logSinks: [ + { + destination: 'logging.googleapis.com/projects/dummy', + }, + { + destination: 'storage.googleapis.com/projects/storage-project', + }, + ], + logBuckets: [ + { + name: 'projects/dummy', + retentionDays: 30, + locked: false, + }, + { + name: 'projects/storage-project', + retentionDays: 30, + locked: true, + }, + ], + }, + ] + const data: NIST5xQueryResponse = getTestRuleFixture(projectData) + await testRule(data, Result.PASS) + }) + + test('No Security Issue when all Sinks destined to storage buckets have retention policies and Bucket Lock are enabled (multiple sinks)', async () => { + const projectData = [ + { + id: cuid(), + logSinks: [ 
+ { + destination: 'storage.googleapis.com/projects/storage-project', + }, + { + destination: 'storage.googleapis.com/projects/storage-project2', + }, + ], + logBuckets: [ + { + name: 'projects/storage-project', + retentionDays: 30, + locked: true, + }, + { + name: 'projects/storage-project2', + retentionDays: 30, + locked: true, + }, + ], + }, + ] + const data: NIST5xQueryResponse = getTestRuleFixture(projectData) + await testRule(data, Result.PASS) + }) + + test('Security Issue when no Sinks destined to storage buckets', async () => { + const projectData = [ + { + id: cuid(), + logSinks: [ + { + destination: 'storage.googleapis.com/projects/storage-project', + }, + ], + logBuckets: [], + }, + ] + const data: NIST5xQueryResponse = getTestRuleFixture(projectData) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when the Sinks destined to storage buckets have NO retention policies', async () => { + const projectData = [ + { + id: cuid(), + logSinks: [ + { + destination: 'storage.googleapis.com/projects/storage-project', + }, + ], + logBuckets: [ + { + name: 'projects/storage-project', + retentionDays: 0, + locked: true, + }, + ], + }, + ] + const data: NIST5xQueryResponse = getTestRuleFixture(projectData) + await testRule(data, Result.FAIL) + }) + + test('Security Issue when the Sinks destined to storage buckets have Bucket Lock set to false', async () => { + const projectData = [ + { + id: cuid(), + logSinks: [ + { + destination: 'storage.googleapis.com/projects/storage-project', + }, + ], + logBuckets: [ + { + name: 'projects/storage-project', + retentionDays: 30, + locked: false, + }, + ], + }, + ] + const data: NIST5xQueryResponse = getTestRuleFixture(projectData) + await testRule(data, Result.FAIL) + }) + }) + +}) diff --git a/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-6.x.test.ts b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-6.x.test.ts new file mode 100644 index 00000000..c04b123f --- /dev/null +++ 
b/src/gcp/nist-800-53-rev4/tests/nist-800-53-rev4-6.x.test.ts @@ -0,0 +1,595 @@ +/* eslint-disable max-len */ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' +import 'jest' + +import Gcp_NIST_800_53_61 from '../rules/gcp-nist-800-53-rev4-6.1' +import Gcp_NIST_800_53_62 from '../rules/gcp-nist-800-53-rev4-6.2' +import Gcp_NIST_800_53_63 from '../rules/gcp-nist-800-53-rev4-6.3' +import Gcp_NIST_800_53_64 from '../rules/gcp-nist-800-53-rev4-6.4' +import Gcp_NIST_800_53_65 from '../rules/gcp-nist-800-53-rev4-6.5' + + +const ipV4WildcardAddress = '0.0.0.0/0' +const ipV6WildcardAddress = '::/0' + +export interface ServiceAccount { + email: string + scopes?: string[] +} + +export interface Label { + value: string +} + +export interface Project { + id: string +} + +export interface MetadataItem { + key: string + value: string +} + +export interface Metadata { + items: MetadataItem[] +} + +export interface DiskEncryptionKey { + sha256: string | null +} + +export interface Disk { + diskEncryptionKey: DiskEncryptionKey | null +} + +export interface AccessConfigs { + natIP: string | null +} + +export interface NetworkInterfaces { + accessConfigs: AccessConfigs[] +} + +export interface ShieldedInstanceConfig { + enableIntegrityMonitoring: boolean + enableVtpm: boolean +} + +export interface ConfidentialInstanceConfig { + enableConfidentialCompute: boolean +} + +export interface QuerygcpVmInstance { + id: string + name?: string + shieldedInstanceConfig?: ShieldedInstanceConfig + confidentialInstanceConfig?: ConfidentialInstanceConfig + networkInterfaces?: NetworkInterfaces[] + canIpForward?: boolean + project?: Project[] + labels?: Label[] + metadata?: Metadata + serviceAccounts?: ServiceAccount[] + disks?: Disk[] +} + +export interface Allowed { + ipProtocol: string + ports: string[] +} + +export interface QuerygcpFirewall { + id: string + sourceRanges: string[] + direction: string + allowed?: Allowed[] +} + +export interface 
GcpNetworkSubnet { + purpose: string + enableFlowLogs: boolean | null +} + +export interface QuerygcpNetwork { + id: string + subnets?: GcpNetworkSubnet[] + name?: string + ipV4Range?: string | null +} + +export interface SslPolicy { + profile: string + enabledFeatures?: string[] + minTlsVersion: string +} + +export interface TargetHttpsProxy { + sslPolicy?: SslPolicy[] +} + +export interface TargetSslProxy { + sslPolicy?: SslPolicy[] +} + +export interface QuerygcpTargetSslProxy { + id: string + sslPolicy?: SslPolicy[] +} +export interface QuerygcpTargetHttpsProxy { + id: string + sslPolicy?: SslPolicy[] +} + +export interface NIST6xQueryResponse { + querygcpFirewall?: QuerygcpFirewall[] + querygcpNetwork?: QuerygcpNetwork[] + querygcpTargetSslProxy?: QuerygcpTargetSslProxy[] + querygcpTargetHttpsProxy?: QuerygcpTargetHttpsProxy[] + querygcpVmInstance?: QuerygcpVmInstance[] +} + +describe('GCP NIST 800-53: Rev. 4', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'gcp', + entityName: 'NIST', + }) + }) + + describe('GCP NIST 6.1 The default network for a project should be deleted', () => { + const testRule = async ( + networkName: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: NIST6xQueryResponse = { + querygcpNetwork: [ + { + id: cuid(), + name: networkName, + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_61 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a network name that is not equal to default', async () => { + await testRule('test-network', Result.PASS) + }) + + test('Security Issue when there is an inbound rule with a network name that is equal to default', async () => { + await testRule('default', Result.FAIL) + }) + }) + + describe('GCP NIST 6.2 Network firewall rules should not permit ingress from 
0.0.0.0/0 to port 22 (SSH)', () => { + const testRule = async ( + fromPort: number | undefined, + toPort: number | undefined, + sourceAddress: string, + expectedResult: Result, + protocol?: string + ): Promise => { + // Arrange + const data: NIST6xQueryResponse = { + querygcpFirewall: [ + { + id: cuid(), + sourceRanges: [sourceAddress], + direction: 'INGRESS', + allowed: [ + { + ipProtocol: 'icmp', + ports: [], + }, + { + ipProtocol: protocol || 'tcp', + ports: fromPort && toPort ? [`${fromPort}-${toPort}`] : [], + }, + { + ipProtocol: 'udp', + ports: ['0-65535'], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_62 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 22', async () => { + await testRule(22, 22, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and port 80', async () => { + await testRule(80, 80, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and port 80', async () => { + await testRule(80, 80, ipV6WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 22', async () => { + await testRule(1000, 2000, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and a port range not including the port 22', async () => { + await testRule(1000, 2000, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and a port range not including the port 22', async () => { + await testRule(1000, 2000, ipV6WildcardAddress, Result.PASS) + }) + + test('Security Issue when IPv4 wilcard address and port 22 and 
tcp protocol', async () => { + await testRule(22, 22, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv4 wilcard address and port 22 and all protocol', async () => { + await testRule(22, 22, ipV4WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when IPv6 wilcard address and port 22 and tcp protocol', async () => { + await testRule(22, 22, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv6 wilcard address and port 22 and all protocol', async () => { + await testRule(22, 22, ipV6WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and port range includes the port 22', async () => { + await testRule(0, 1000, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and port range includes the port 22', async () => { + await testRule(0, 1000, ipV6WildcardAddress, Result.FAIL) + }) + }) + + describe('GCP NIST 6.3 Network firewall rules should not permit ingress from 0.0.0.0/0 to port 3389 (RDP)', () => { + const testRule = async ( + fromPort: number | undefined, + toPort: number | undefined, + sourceAddress: string, + expectedResult: Result, + protocol?: string + ): Promise => { + // Arrange + const data: NIST6xQueryResponse = { + querygcpFirewall: [ + { + id: cuid(), + sourceRanges: [sourceAddress], + direction: 'INGRESS', + allowed: [ + { + ipProtocol: 'icmp', + ports: [], + }, + { + ipProtocol: protocol || 'tcp', + ports: fromPort && toPort ? 
[`${fromPort}-${toPort}`] : [], + }, + { + ipProtocol: 'udp', + ports: ['0-65535'], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_63 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 3389', async () => { + await testRule(3389, 3389, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and port 80', async () => { + await testRule(80, 80, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and port 80', async () => { + await testRule(80, 80, ipV6WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 3389', async () => { + await testRule(1000, 2000, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and a port range not including the port 3389', async () => { + await testRule(1000, 2000, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and a port range not including the port 3389', async () => { + await testRule(1000, 2000, ipV6WildcardAddress, Result.PASS) + }) + + test('Security Issue when IPv4 wilcard address and port 3389 and tcp protocol', async () => { + await testRule(3389, 3389, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv4 wilcard address and port 3389 and all protocol', async () => { + await testRule(3389, 3389, ipV4WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when IPv6 wilcard address and port 3389 and tcp protocol', async () => { + await testRule(3389, 3389, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv6 
wilcard address and port 3389 and all protocol', async () => { + await testRule(3389, 3389, ipV6WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and port range includes the port 3389', async () => { + await testRule(0, 4000, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and port range includes the port 3389', async () => { + await testRule(0, 4000, ipV6WildcardAddress, Result.FAIL) + }) + }) + + describe('GCP NIST 6.4 Load balancer HTTPS or SSL proxy SSL policies should not have weak cipher suites', () => { + + const getTestRuleAFixture = (): NIST6xQueryResponse => { + return { + querygcpTargetHttpsProxy: [ + { + id: cuid(), + sslPolicy: [ + { + profile: 'MODERN', + minTlsVersion: 'TLS_1_2', + }, + ], + }, + ], + } + } + + const getTestRuleBFixture = (): NIST6xQueryResponse => { + return { + querygcpTargetSslProxy: [ + { + id: cuid(), + sslPolicy: [ + { + profile: 'MODERN', + minTlsVersion: 'TLS_1_2', + }, + ], + }, + ], + } + } + + const testRule = async ( + data: NIST6xQueryResponse, + expectedResult: Result, + rule?: any + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule(rule as Rule, { + ...data, + }) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + describe('querygcpTargetHttpsProxy query:', () => { + let targetHttpsProxyRule: Rule + beforeAll(() => { + const { queries, ...ruleMetadata} = Gcp_NIST_800_53_64 + const query = queries.shift() + 
targetHttpsProxyRule = { + ...ruleMetadata, + ...query + } as Rule + }) + + test('No Security Issue when proxies and ssl policies are secure', async () => { + const data: NIST6xQueryResponse = getTestRuleAFixture() + await testRule(data, Result.PASS, targetHttpsProxyRule) + }) + + test('Security Issue when proxies not have ssl policy', async () => { + const data: NIST6xQueryResponse = getTestRuleAFixture() + const targetHttpsProxy = data + .querygcpTargetHttpsProxy?.[0] as QuerygcpTargetHttpsProxy + targetHttpsProxy.sslPolicy = [] + await testRule(data, Result.FAIL, targetHttpsProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with MODERN ssl policy and VERSION is NOT TLS_1_2', async () => { + const data: NIST6xQueryResponse = getTestRuleAFixture() + const targetHttpsProxy = data + .querygcpTargetHttpsProxy?.[0] as QuerygcpTargetHttpsProxy + targetHttpsProxy.sslPolicy = targetHttpsProxy.sslPolicy?.map( + ({ minTlsVersion, ...p }) => { + return { + ...p, + minTlsVersion: 'dummy', + } + } + ) + await testRule(data, Result.FAIL, targetHttpsProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with CUSTOM ssl policy and enabledFeatures contains invalid values', async () => { + const invalidEnabledFeatureValues = [ + 'TLS_RSA_WITH_AES_128_GCM_SHA256', + 'TLS_RSA_WITH_AES_256_GCM_SHA384', + 'TLS_RSA_WITH_AES_128_CBC_SHA', + 'TLS_RSA_WITH_AES_256_CBC_SHA', + 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + ] + for (const invalidEnabledFeatureValue of invalidEnabledFeatureValues) { + const data: NIST6xQueryResponse = getTestRuleAFixture() + const targetHttpsProxy = data + .querygcpTargetHttpsProxy?.[0] as QuerygcpTargetHttpsProxy + targetHttpsProxy.sslPolicy = targetHttpsProxy.sslPolicy?.map( + ({ enabledFeatures, profile, ...p }) => { + return { + ...p, + profile: 'CUSTOM', + enabledFeatures: [invalidEnabledFeatureValue], + } + } + ) + await testRule(data, Result.FAIL, targetHttpsProxyRule) + } + }) + }) + + describe('querygcpTargetSslProxy query:', () => { + let 
targetSslProxyRule: Rule + beforeAll(() => { + const { queries, ...ruleMetadata} = Gcp_NIST_800_53_64 + const query = queries.shift() + targetSslProxyRule = { + ...ruleMetadata, + ...query + } as Rule + }) + + test('No Security Issue when proxies and ssl policies are secure', async () => { + const data: NIST6xQueryResponse = getTestRuleBFixture() + await testRule(data, Result.PASS, targetSslProxyRule) + }) + + test('Security Issue when proxies not have ssl policy', async () => { + const data: NIST6xQueryResponse = getTestRuleBFixture() + const targetSslProxy = data + .querygcpTargetSslProxy?.[0] as QuerygcpTargetHttpsProxy + targetSslProxy.sslPolicy = [] + await testRule(data, Result.FAIL, targetSslProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with MODERN ssl policy and VERSION is NOT TLS_1_2', async () => { + const data: NIST6xQueryResponse = getTestRuleBFixture() + const targetSslProxy = data + .querygcpTargetSslProxy?.[0] as QuerygcpTargetHttpsProxy + targetSslProxy.sslPolicy = targetSslProxy.sslPolicy?.map( + ({ minTlsVersion, ...p }) => { + return { + ...p, + minTlsVersion: 'dummy', + } + } + ) + await testRule(data, Result.FAIL, targetSslProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with CUSTOM ssl policy and enabledFeatures contains invalid values', async () => { + const invalidEnabledFeatureValues = [ + 'TLS_RSA_WITH_AES_128_GCM_SHA256', + 'TLS_RSA_WITH_AES_256_GCM_SHA384', + 'TLS_RSA_WITH_AES_128_CBC_SHA', + 'TLS_RSA_WITH_AES_256_CBC_SHA', + 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + ] + for (const invalidEnabledFeatureValue of invalidEnabledFeatureValues) { + const data: NIST6xQueryResponse = getTestRuleBFixture() + const targetSslProxy = data + .querygcpTargetSslProxy?.[0] as QuerygcpTargetHttpsProxy + targetSslProxy.sslPolicy = targetSslProxy.sslPolicy?.map( + ({ enabledFeatures, profile, ...p }) => { + return { + ...p, + profile: 'CUSTOM', + enabledFeatures: [invalidEnabledFeatureValue], + } + } + ) + await testRule(data, Result.FAIL, 
targetSslProxyRule) + } + }) + }) + }) + + describe('GCP NIST 6.5 Compute instances "IP forwarding" should not be enabled', () => { + const getTestRuleFixture = ( + canIpForward: boolean + ): NIST6xQueryResponse => { + return { + querygcpVmInstance: [ + { + id: cuid(), + name: 'dummy-project-name', + canIpForward, + project: [], + labels: [], + }, + ], + } + } + + const test46Rule = async ( + data: NIST6xQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_NIST_800_53_65 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when canIpForward is false', async () => { + const data: NIST6xQueryResponse = getTestRuleFixture(false) + await test46Rule(data, Result.PASS) + }) + + test('Security Issue when canIpForward is true', async () => { + const data: NIST6xQueryResponse = getTestRuleFixture(true) + await test46Rule(data, Result.FAIL) + }) + }) +}) diff --git a/src/gcp/nist-800-53-rev4/tsconfig.json b/src/gcp/nist-800-53-rev4/tsconfig.json new file mode 100644 index 00000000..5ade62cc --- /dev/null +++ b/src/gcp/nist-800-53-rev4/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "declaration": true, + "importHelpers": true, + "module": "commonjs", + "outDir": "dist", + "rootDir": "./", + "strict": true, + "target": "es2020", + "lib": ["esnext.array", "ES2020.Promise"], + "allowSyntheticDefaultImports": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true + }, + "include": [ + "**/*" + ], + "exclude": ["dist", "./tests"] +} diff --git a/src/gcp/pci-dss-3.2.1/README.md b/src/gcp/pci-dss-3.2.1/README.md index 4a598cef..406ca4f1 100644 --- a/src/gcp/pci-dss-3.2.1/README.md +++ b/src/gcp/pci-dss-3.2.1/README.md @@ -51,10 +51,22 @@ Policy Pack based on the [PCI DSS version 3.2.1](https://www.pcisecuritystandard } ``` - + +| Rule | Description | +| ------------------ | 
----------------------------------------------------------------------------------------------------------------------------- | +| iam-check-1 | IAM users should not have both KMS admin and any of the KMS encrypter/decrypter roles | +| iam-check-2 | IAM users should not have project-level "Service Account User" or "Service Account Token Creator" roles | +| monitoring-check-1 | Logging metric filter and alert for audit configuration changes should be configured | +| monitoring-check-2 | Logging metric filter and alert for Custom Role changes should be configured | +| monitoring-check-3 | Logging metric filter and alert for network changes should be configured | +| monitoring-check-4 | Logging metric filter and alert for network firewall rule changes should be configured | +| monitoring-check-5 | Logging metric filter and alert for network route changes should be configured | +| monitoring-check-6 | Logging metric filter and alert for project ownership assignments/changes should be configured | +| monitoring-check-7 | Logging metric filter and alert for SQL instance configuration changes should be configured | +| monitoring-check-8 | Logging metric filter and alert for Storage IAM permission changes should be configured | +| networking-check-1 | Network firewall rules should not permit ingress from 0.0.0.0/0 to port 22 (SSH) | +| networking-check-2 | Network firewall rules should not permit ingress from 0.0.0.0/0 to port 3389 (RDP) | +| networking-check-3 | The default network for a project should be deleted | +| networking-check-4 | Load balancer HTTPS or SSL proxy SSL policies should not have weak cipher suites | +| logging-check-1 | At least one project-level logging sink should be configured with an empty filter | diff --git a/src/gcp/pci-dss-3.2.1/rules/index.ts b/src/gcp/pci-dss-3.2.1/rules/index.ts index 44842b4f..68c2d524 100644 --- a/src/gcp/pci-dss-3.2.1/rules/index.ts +++ b/src/gcp/pci-dss-3.2.1/rules/index.ts @@ -1,3 +1,33 @@ +import Gcp_PCI_DSS_321_IAM_1 
from './pci-dss-3.2.1-iam-check-1' +import Gcp_PCI_DSS_321_IAM_2 from './pci-dss-3.2.1-iam-check-2' +import Gcp_PCI_DSS_321_Logging_1 from './pci-dss-3.2.1-logging-check-1' +import Gcp_PCI_DSS_321_Monitoring_1 from './pci-dss-3.2.1-monitoring-check-1' +import Gcp_PCI_DSS_321_Monitoring_2 from './pci-dss-3.2.1-monitoring-check-2' +import Gcp_PCI_DSS_321_Monitoring_3 from './pci-dss-3.2.1-monitoring-check-3' +import Gcp_PCI_DSS_321_Monitoring_4 from './pci-dss-3.2.1-monitoring-check-4' +import Gcp_PCI_DSS_321_Monitoring_5 from './pci-dss-3.2.1-monitoring-check-5' +import Gcp_PCI_DSS_321_Monitoring_6 from './pci-dss-3.2.1-monitoring-check-6' +import Gcp_PCI_DSS_321_Monitoring_7 from './pci-dss-3.2.1-monitoring-check-7' +import Gcp_PCI_DSS_321_Monitoring_8 from './pci-dss-3.2.1-monitoring-check-8' +import Gcp_PCI_DSS_321_Networking_1 from './pci-dss-3.2.1-networking-check-1' +import Gcp_PCI_DSS_321_Networking_2 from './pci-dss-3.2.1-networking-check-2' +import Gcp_PCI_DSS_321_Networking_3 from './pci-dss-3.2.1-networking-check-3' +import Gcp_PCI_DSS_321_Networking_4 from './pci-dss-3.2.1-networking-check-4' + export default [ - // TODO: Add rules to export + Gcp_PCI_DSS_321_IAM_1, + Gcp_PCI_DSS_321_IAM_2, + Gcp_PCI_DSS_321_Logging_1, + Gcp_PCI_DSS_321_Monitoring_1, + Gcp_PCI_DSS_321_Monitoring_2, + Gcp_PCI_DSS_321_Monitoring_3, + Gcp_PCI_DSS_321_Monitoring_4, + Gcp_PCI_DSS_321_Monitoring_5, + Gcp_PCI_DSS_321_Monitoring_6, + Gcp_PCI_DSS_321_Monitoring_7, + Gcp_PCI_DSS_321_Monitoring_8, + Gcp_PCI_DSS_321_Networking_1, + Gcp_PCI_DSS_321_Networking_2, + Gcp_PCI_DSS_321_Networking_3, + Gcp_PCI_DSS_321_Networking_4, ] \ No newline at end of file diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-iam-check-1.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-iam-check-1.ts new file mode 100644 index 00000000..8b45a8ce --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-iam-check-1.ts @@ -0,0 +1,67 @@ +// GCP CIS 1.2.0 Rule equivalent 1.11 +export default { + id: 
'gcp-pci-dss-3.2.1-iam-check-1', + title: 'IAM check 1: IAM users should not have both KMS admin and any of the KMS encrypter/decrypter roles', + + description: 'It is recommended that the principle of "Separation of Duties" is enforced while assigning KMS related roles to users.', + + audit: `**From Console:** + + 1. Go to IAM & Admin/IAM by visiting https://console.cloud.google.com/iam-admin/iam + + 2. Ensure no member has the roles Cloud KMS Admin and any of the Cloud KMS CryptoKey Encrypter/Decrypter, Cloud KMS CryptoKey Encrypter, Cloud KMS CryptoKey Decrypter assigned. + + **From Command Line:** + + 1. List all users and role assignments: + + gcloud projects get-iam-policy PROJECT_ID + + 2. Ensure that there are no common users found in the member section for roles cloudkms.admin and any one of Cloud KMS CryptoKey Encrypter/Decrypter, Cloud KMS CryptoKey Encrypter, Cloud KMS CryptoKey Decrypter`, + + rationale: `The built-in/predefined IAM role Cloud KMS Admin allows the user/identity to create, delete, and manage service account(s). The built-in/predefined IAM role Cloud KMS CryptoKey Encrypter/Decrypter allows the user/identity (with adequate privileges on concerned resources) to encrypt and decrypt data at rest using an encryption key(s). + + The built-in/predefined IAM role Cloud KMS CryptoKey Encrypter allows the user/identity (with adequate privileges on concerned resources) to encrypt data at rest using an encryption key(s). The built-in/predefined IAM role Cloud KMS CryptoKey Decrypter allows the user/identity (with adequate privileges on concerned resources) to decrypt data at rest using an encryption key(s). + + Separation of duties is the concept of ensuring that one individual does not have all necessary permissions to be able to complete a malicious action. In Cloud KMS, this could be an action such as using a key to access and decrypt data a user should not normally have access to. 
Separation of duties is a business control typically used in larger organizations, meant to help avoid security or privacy incidents and errors. It is considered best practice. + + No user(s) should have Cloud KMS Admin and any of the Cloud KMS CryptoKey Encrypter/Decrypter, Cloud KMS CryptoKey Encrypter, Cloud KMS CryptoKey Decrypter roles assigned at the same time`, + + remediation: `**From Console:** + + 1. Go to IAM & Admin/IAM using https://console.cloud.google.com/iam-admin/iam + + 2. For any member having Cloud KMS Admin and any of the Cloud KMS CryptoKey Encrypter/Decrypter, Cloud KMS CryptoKey Encrypter, Cloud KMS CryptoKey Decrypter roles granted assigned, click the Delete Bin icon to remove the role from the member.`, + + references: ['https://cloud.google.com/kms/docs/separation-of-duties'], + gql: `{ + querygcpIamPolicy { + id + __typename + bindings { + role + members + } + } + }`, + resource: 'querygcpIamPolicy[*]', + severity: 'unknown', + conditions: { + jq: `[({"member" : .bindings[].members[], "roles" : .bindings[].role}) ] + | group_by(.member) + | map({ "member" : .[].member, "roles" : map(.roles) }) + | [.[] + | select(.roles + | contains(["roles/cloudkms.admin", "roles/cloudkms.cryptoKeyEncrypterDecrypter"]) + or contains(["roles/cloudkms.admin", "roles/cloudkms.cryptoKeyEncrypter"]) + or contains(["roles/cloudkms.admin", "roles/cloudkms.cryptoKeyDecrypter"]))] + | {"userHasInvalidRoles": ( (. 
| length) > 0)}`, + path: '@', + and: [ + { + path: '@.userHasInvalidRoles', + notEqual: true, + }, + ], + }, +} \ No newline at end of file diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-iam-check-2.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-iam-check-2.ts new file mode 100644 index 00000000..1d491878 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-iam-check-2.ts @@ -0,0 +1,119 @@ +// GCP CIS 1.2.0 Rule equivalent 1.6 +export default { + id: 'gcp-pci-dss-3.2.1-iam-check-2', + title: + 'IAM check 2: IAM users should not have project-level "Service Account User" or "Service Account Token Creator" roles', + + description: `It is recommended to assign the Service Account User (iam.serviceAccountUser) and + Service Account Token Creator (iam.serviceAccountTokenCreator) roles to a user for + a specific service account rather than assigning the role to a user at project level.`, + + audit: `**From Console:** + + 1. Go to the IAM page in the GCP Console by visiting https://console.cloud.google.com/iam-admin/iam + 2. Click on the filter table text bar, Type *Role: Service Account User*. + 3. Ensure no user is listed as a result of the filter. + 4. Click on the filter table text bar, Type *Role: Service Account Token Creator*. + 5. Ensure no user is listed as a result of the filter. + + **From Command Line:** + To ensure IAM users are not assigned Service Account User role at the project level: + + gcloud projects get-iam-policy PROJECT_ID --format json | jq '.bindings[].role' | grep "roles/iam.serviceAccountUser" + + gcloud projects get-iam-policy PROJECT_ID --format json | jq '.bindings[].role' | grep "roles/iam.serviceAccountTokenCreator" + + These commands should not return any output.`, + + rationale: `A service account is a special Google account that belongs to an application or a virtual machine (VM), instead of to an individual end-user. 
Application/VM-Instance uses the service account to call the service's Google API so that users aren't directly involved. In addition to being an identity, a service account is a resource that has IAM policies attached to it. These policies determine who can use the service account. + + Users with IAM roles to update the App Engine and Compute Engine instances (such as App Engine Deployer or Compute Instance Admin) can effectively run code as the service accounts used to run these instances, and indirectly gain access to all the resources for which the service accounts have access. Similarly, SSH access to a Compute Engine instance may also provide the ability to execute code as that instance/Service account. + + Based on business needs, there could be multiple user-managed service accounts configured for a project. Granting the *iam.serviceAccountUser* or *iam.serviceAccountTokenCreator* roles to a user for a project gives the user access to all service accounts in the project, including service accounts that may be created in the future. This can result in elevation of privileges by using service accounts and corresponding *Compute Engine instances*. + + In order to implement *least privileges* best practices, IAM users should not be assigned the *Service Account User* or *Service Account Token Creator* roles at the project level. Instead, these roles should be assigned to a user for a specific service account, giving that user access to the service account. The *Service Account User* allows a user to bind a service account to a long-running job service, whereas the *Service Account Token Creator* role allows a user to directly impersonate (or assert) the identity of a service account.`, + + remediation: `**From Console:** + + 1. Go to the IAM page in the GCP Console by visiting: https://console.cloud.google.com/iam-admin/iam. + 2. Click on the filter table text bar. Type *Role: Service Account User* + 3.
Click the *Delete Bin* icon in front of the role *Service Account User* for every user listed as a result of a filter. + 4. Click on the filter table text bar. Type *Role: Service Account Token Creator* + 5. Click the *Delete Bin* icon in front of the role *Service Account Token Creator* for every user listed as a result of a filter. + + **From Command Line:** + + 1. Using a text editor, remove the bindings with the *roles/iam.serviceAccountUser* or *roles/iam.serviceAccountTokenCreator*. + + For example, you can use the iam.json file shown below as follows: + + { + "bindings": [ + { + "members": ["serviceAccount:our-project-123@appspot.gserviceaccount.com"], + "role": "roles/appengine.appViewer" + }, + { + "members": ["user:email1@gmail.com"], + "role": "roles/owner" + }, + { + "members": [ + "serviceAccount:our-project-123@appspot.gserviceaccount.com", + "serviceAccount:123456789012-compute@developer.gserviceaccount.com" + ], + "role": "roles/editor" + } + ], + "etag": "BwUjMhCsNvY=" + } + + 2. 
Update the project's IAM policy: + + gcloud projects set-iam-policy PROJECT_ID iam.json`, + + references: [ + 'https://cloud.google.com/iam/docs/service-accounts', + 'https://cloud.google.com/iam/docs/granting-roles-to-service-accounts', + 'https://cloud.google.com/iam/docs/understanding-roles', + 'https://cloud.google.com/iam/docs/granting-changing-revoking-access', + 'https://console.cloud.google.com/iam-admin/iam', + ], + gql: `{ + querygcpProject { + id + __typename + iamPolicies { + bindings { + role + members + } + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'unknown', + conditions: { + not: { + path: '@.iamPolicies', + array_any: { + path: '[*].bindings', + array_any: { + and: [ + { + path: '[*].members', + match: /user.*$/, + }, + { + path: '[*].role', + in: [ + 'roles/iam.serviceAccountUser', + 'roles/iam.serviceAccountTokenCreator', + ], + }, + ], + }, + }, + }, + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-logging-check-1.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-logging-check-1.ts new file mode 100644 index 00000000..adf3e075 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-logging-check-1.ts @@ -0,0 +1,87 @@ +// GCP CIS 1.2.0 Rule equivalent 2.2 +export default { + id: 'gcp-pci-dss-3.2.1-logging-check-1', + title: 'Logging check 1: At least one project-level logging sink should be configured with an empty filter', + + description: `It is recommended to create a sink that will export copies of all the log entries. This can + help aggregate logs from multiple projects and export them to a Security Information and + Event Management (SIEM).`, + + audit: `**From Console:** + + 1. Go to *Logging/Exports* by visiting https://console.cloud.google.com/logs/exports. + 2. For every sink, click the 3-dot button for Menu options and select *View Filter*. + 3. Ensure there is at least one sink with an *empty* sink filter. + 4. Additionally, ensure that the resource configured as *Destination* exists. 
+ + **From Command Line:** + + 1. Ensure that a sink with an *empty filter* exists. List the sinks for the project, folder or organization. If sinks are configured at a folder or organization level, they do not need to be configured for each project: + + gcloud logging sinks list --folder=FOLDER_ID | --organization=ORGANIZATION_ID | --project=PROJECT_ID + + The output should list at least one sink with an *empty filter*. + + 2. Additionally, ensure that the resource configured as *Destination* exists. + + See https://cloud.google.com/sdk/gcloud/reference/beta/logging/sinks/list for more information.`, + + rationale: 'Log entries are held in Cloud Logging. To aggregate logs, export them to a SIEM. To keep them longer, it is recommended to set up a log sink. Exporting involves writing a filter that selects the log entries to export, and choosing a destination in Cloud Storage, BigQuery, or Cloud Pub/Sub. The filter and destination are held in an object called a sink. To ensure all log entries are exported to sinks, ensure that there is no filter configured for a sink. Sinks can be created in projects, organizations, folders, and billing accounts.', + + remediation: `**From Console:** + + 1. Go to *Logging/Logs* by visiting https://console.cloud.google.com/logs/viewer. + 2. Click the down arrow symbol on *Filter Bar* at the rightmost corner and select + *Convert to Advanced Filter*. + 3. This step converts *Filter Bar* to *Advanced Filter Bar*. + 4. Clear any text from the *Advanced Filter* field. This ensures that the *log-filter* is + set to empty and captures all the logs. + 5. Click *Submit Filter* and the result should display all logs. + 6. Click *Create Sink*, which opens a menu on the right. + 7. Fill out the fields and click *Create Sink*. + + For more information, see https://cloud.google.com/logging/docs/export/configure_export_v2#dest-create. 
+ + **From Command Line:** + To create a sink to export all log entries in a Google Cloud Storage bucket: + + gcloud logging sinks create SINK_NAME storage.googleapis.com/DESTINATION_BUCKET_NAME + + Sinks can be created for a folder or organization, which will include all projects. + + gcloud logging sinks create SINK_NAME storage.googleapis.com/DESTINATION_BUCKET_NAME --include-children --folder=FOLDER_ID | --organization=ORGANIZATION_ID + + **Note:** + + 1. A sink created by the command-line above will export logs in storage buckets. However, sinks can be configured to export logs into BigQuery, or Cloud Pub/Sub, or *Custom Destination*. + 2. While creating a sink, the sink option *--log-filter* is not used to ensure the sink exports all log entries. + 3. A sink can be created at a folder or organization level that collects the logs of all the projects underneath by passing the option *--include-children* in the cloud command.`, + + references: [ + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/logging/quotas', + 'https://cloud.google.com/logging/docs/export/', + 'https://cloud.google.com/logging/docs/export/using_exported_logs', + 'https://cloud.google.com/logging/docs/export/configure_export_v2', + 'https://cloud.google.com/logging/docs/export/aggregated_exports', + 'https://cloud.google.com/sdk/gcloud/reference/beta/logging/sinks/list', + ], + gql: `{ + querygcpProject { + id + __typename + logSinks { + filter + } + } + }`, + resource: 'querygcpProject[*]', + severity: 'high', + conditions: { + path: '@.logSinks', + array_any: { + path: '[*].filter', + equal: '', + }, + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-1.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-1.ts new file mode 100644 index 00000000..d961bb58 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-1.ts @@ -0,0 +1,139 @@ +// GCP CIS 1.2.0 Rule equivalent 2.5 +const filterPatternRegex =
/\s*protoPayload.methodName\s*=\s*"SetIamPolicy"\s*AND\s*protoPayload.serviceData.policyDelta.auditConfigDeltas:*\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-1', + title: + 'Monitoring check 1: Logging metric filter and alert for audit configuration changes should be configured', + description: `Google Cloud Platform (GCP) services write audit log entries to the Admin Activity and Data + Access logs to help answer the questions of, "who did what, where, and when?" within GCP + projects. + + Cloud audit logging records information includes the identity of the API caller, the time of + the API call, the source IP address of the API caller, the request parameters, and the + response elements returned by GCP services. Cloud audit logging provides a history of GCP + API calls for an account, including API calls made via the console, SDKs, command-line + tools, and other GCP services.`, + audit: `**From Console: + Ensure the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with the filter text: + + protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:* + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to Alerting by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the Policies section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of 0 for greater than zero(0) seconds*, means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that appropriate notifications channels have been set up. 
+ + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:* + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + rationale: `Admin activity and data access logs produced by cloud audit logging enable security analysis, resource change tracking, and compliance auditing. + + Configuring the metric filter and alerts for audit configuration changes ensures the recommended state of audit configuration is maintained so that all activities in the project are audit-able at any point in time.`, + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + 3. Clear any text and add: + + protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:* + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. 
This will ensure that the log metric counts the number of log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create a prescribed Alert Policy:** + + 1. Identify the new metric the user just created, under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page opens. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notifications channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create a prescribed Log Metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + + Create prescribed Alert Policy + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/logging/docs/audit/configure-data-access#getiampolicy-setiampolicy', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + 
conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-2.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-2.ts new file mode 100644 index 00000000..99d2e0a4 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-2.ts @@ -0,0 +1,145 @@ +// GCP CIS 1.2.0 Rule equivalent 2.6 +const filterPatternRegex = + /\s*resource.type\s*=\s*"iam_role"\s*AND\s*protoPayload.methodName\s*=\s*"google.iam.admin.v1.CreateRole"\s*OR\s*protoPayload.methodName\s*=\s*"google.iam.admin.v1.DeleteRole"\s*OR\s*protoPayload.methodName\s*=\s*"google.iam.admin.v1.UpdateRole"\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-2', + title: + 'Monitoring check 2: Logging metric filter and alert for Custom Role changes should be configured', + description: `It is recommended that a metric filter and alarm be established for changes to Identity and + Access Management (IAM) role creation, deletion and updating activities.`, + audit: `**From Console: + Ensure that the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with filter text: + + resource.type="iam_role" + AND protoPayload.methodName = "google.iam.admin.v1.CreateRole" + OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" + OR protoPayload.methodName="google.iam.admin.v1.UpdateRole" + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to Alerting by visiting https://console.cloud.google.com/monitoring/alerting. + + 4. 
Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that the appropriate notifications channels have been set up. + + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + resource.type="iam_role" AND protoPayload.methodName = "google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" OR protoPayload.methodName="google.iam.admin.v1.UpdateRole" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*.`, + rationale: 'Google Cloud IAM provides predefined roles that give granular access to specific Google Cloud Platform resources and prevent unwanted access to other resources. However, to cater to organization-specific needs, Cloud IAM also provides the ability to create custom roles. Project owners and administrators with the Organization Role Administrator role or the IAM Role Administrator role can create custom roles. 
Monitoring role creation, deletion and updating activities will help in identifying any over-privileged role at early stages.', + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + + 3. Clear any text and add: + + resource.type="iam_role" + AND protoPayload.methodName = "google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" + OR protoPayload.methodName="google.iam.admin.v1.UpdateRole" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the + user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* + (default) and *Type* to *Counter*. This ensures that the log metric counts the number of + log entries matching the advanced logs query. + 6. Click *Create Metric*. + + **Create a prescribed Alert Policy:** + + 1. Identify the new metric that was just created under the section *User-defined + Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the metric and select *Create alert + from Metric*. A new page displays. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold + and configuration that makes sense for the user's organization. For example, a + threshold of zero(0) for the most recent value ensures that a notification is triggered + for every owner change in the project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. 
+ + **From Command Line:** + Create the prescribed Log Metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed Alert Policy: + + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/iam/docs/understanding-custom-roles', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-3.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-3.ts new file mode 100644 index 00000000..5d07e76f --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-3.ts @@ -0,0 +1,153 @@ +// GCP CIS 1.2.0 Rule equivalent 2.9 +const filterPatternRegex = + 
/\s*resource.type\s*=\s*gce_network\s*AND\s*protoPayload.methodName\s*=\s*"beta.compute.networks.insert"\s*OR\s*protoPayload.methodName\s*=\s*"beta.compute.networks.patch"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.networks.delete"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.networks.removePeering"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.networks.addPeering"\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-3', + title: + 'Monitoring check 3: Logging metric filter and alert for network changes should be configured', + description: `It is recommended that a metric filter and alarm be established for Virtual Private Cloud + (VPC) network changes.`, + audit: `**From Console: + Ensure the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure at least one metric ** is present with filter text: + + resource.type=gce_network + AND protoPayload.methodName="beta.compute.networks.insert" + OR protoPayload.methodName="beta.compute.networks.patch" + OR protoPayload.methodName="v1.compute.networks.delete" + OR protoPayload.methodName="v1.compute.networks.removePeering" + OR protoPayload.methodName="v1.compute.networks.addPeering" + + **Ensure the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + + + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of 0 for greater than 0 seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that appropriate notification channels have been set up. 
+ + **From Command Line: + Ensure the log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with filter set to: + + resource.type=gce_network + AND protoPayload.methodName="beta.compute.networks.insert" + OR protoPayload.methodName="beta.compute.networks.patch" + OR protoPayload.methodName="v1.compute.networks.delete" + OR protoPayload.methodName="v1.compute.networks.removePeering" + OR protoPayload.methodName="v1.compute.networks.addPeering" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in + the format *logging.googleapis.com/user/*. + + **Ensure the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + rationale: `It is possible to have more than one VPC within a project. In addition, it is also possible to create a peer connection between two VPCs enabling network traffic to route between VPCs. + + Monitoring changes to a VPC will help ensure VPC traffic flow is not getting impacted.`, + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + + 3. 
Clear any text and add: + + resource.type=gce_network + AND protoPayload.methodName="beta.compute.networks.insert" + OR protoPayload.methodName="beta.compute.networks.patch" + OR protoPayload.methodName="v1.compute.networks.delete" + OR protoPayload.methodName="v1.compute.networks.removePeering" + OR protoPayload.methodName="v1.compute.networks.addPeering" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* + (default) and *Type* to *Counter*. This ensures that the log metric counts the number of + log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed alert policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page appears. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of 0 for the most recent value will ensure that a notification is triggered for every owner change in the project: + + Set "Aggregator" to "Count" + + Set "Configuration": + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. 
+ + **From Command Line:** + Create the prescribed Log Metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed alert policy: + + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/vpc/docs/overview', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-4.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-4.ts new file mode 100644 index 00000000..0b9e339a --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-4.ts @@ -0,0 +1,137 @@ +// GCP CIS 1.2.0 Rule equivalent 2.7 +const filterPatternRegex = + /\s*resource.type\s*=\s*"gce_firewall_rule"\s*AND\s*protoPayload.methodName\s*=\s*"v1.compute.firewalls.patch"\s*OR\s*protoPayload.methodName\s*=\s*"v1.compute.firewalls.insert"\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-4', + title: + 'Monitoring check 4: Logging metric filter and alert for network firewall rule changes should be configured', + description: `It is 
recommended that a metric filter and alarm be established for Virtual Private Cloud + (VPC) network firewall rule changes.`, + audit: `**From Console: + Ensure that the prescribed log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure at least one metric ** is present with this filter text: + + resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert" + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that appropriate notification channels have been set up. + + + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. 
Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true* + `, + rationale: 'Monitoring for Create or Update Firewall rule events gives insight to network access changes and may reduce the time it takes to detect suspicious activity.', + remediation: ` + **From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based* Metrics by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + 3. Clear any text and add: + + resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert" + + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed Alert Policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select **Create alert from Metric**. A new page displays. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value ensures that a notification is triggered for every owner change in the project: + + Set "Aggregator" to "Count" + + Set "Configuration": + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. 
Configure the desired notifications channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create the prescribed Log Metric + + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed alert policy: + + + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create + `, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/vpc/docs/firewalls', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-5.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-5.ts new file mode 100644 index 00000000..94a96133 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-5.ts @@ -0,0 +1,139 @@ +// GCP CIS 1.2.0 Rule equivalent 2.8 +const filterPatternRegex = + /\s*resource.type\s*=\s*"gce_route"\s*AND\s*protoPayload.methodName\s*=\s*"beta.compute.routes.patch"\s*OR\s*protoPayload.methodName\s*=\s*"beta.compute.routes.insert"\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-5', + title: + 'Monitoring check 5: 
Logging metric filter and alert for network route changes should be configured', + description: `It is recommended that a metric filter and alarm be established for Virtual Private Cloud + (VPC) network route changes.`, + audit: `**From Console: + Ensure that the prescribed Log metric is present:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with the filter text: + + resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert" + + **Ensure the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting: https://console.cloud.google.com/monitoring/alerting. + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of 0 for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alert thresholds make sense for the user's organization. + 5. Ensure that the appropriate notification channels have been set up. + + **From Command Line: + Ensure the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to: + + resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. 
List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + + rationale: `Google Cloud Platform (GCP) routes define the paths network traffic takes from a VM instance to another destination. The other destination can be inside the organization VPC network (such as another VM) or outside of it. Every route consists of a destination and a next hop. Traffic whose destination IP is within the destination range is sent to the next hop for delivery. + + Monitoring changes to route tables will help ensure that all VPC traffic flows through an expected path.`, + + remediation: `**From Console: + Create the prescribed Log Metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter* + + 3. Clear any text and add: + + resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed alert policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page displays. + 3. 
Fill out the alert policy configuration and click *Save*. Choose the alerting threshold + and configuration that makes sense for the user's organization. For example, a + threshold of zero(0) for the most recent value ensures that a notification is triggered + for every owner change in the project: + + Set "Aggregator" to "Count" + + Set "Configuration": + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create the prescribed Log Metric: + + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed the alert policy: + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create + `, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/storage/docs/access-control/iam', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-6.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-6.ts new file mode 100644 index 
00000000..88f3bc83 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-6.ts @@ -0,0 +1,158 @@ +// GCP CIS 1.2.0 Rule equivalent 2.4 +const filterPatternRegex = + /\s*\(\s*protoPayload.serviceName\s*=\s*"cloudresourcemanager.googleapis.com"\s*\)\s*AND\s*\(\s*ProjectOwnership\s*OR\s*projectOwnerInvitee\s*\)\s*OR\s*\(\s*protoPayload.serviceData.policyDelta.bindingDeltas.action\s*=\s*"REMOVE"\s*AND\s*protoPayload.serviceData.policyDelta.bindingDeltas.role\s*=\s*"roles\/owner"\s*\)\s*OR\s*\(\s*protoPayload.serviceData.policyDelta.bindingDeltas.action\s*=\s*"ADD"\s*AND\s*protoPayload.serviceData.policyDelta.bindingDeltas.role\s*=\s*"roles\/owner"\s*\)\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-6', + title: + 'Monitoring check 6: Logging metric filter and alert for project ownership assignments/changes should be configured', + + description: `In order to prevent unnecessary project ownership assignments to users/service-accounts + and further misuses of projects and resources, all roles/Owner assignments should be + monitored.`, + + audit: `**From Console: + Ensure that the prescribed log metric is present:** + + 1. Go to *Logging/Log-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the User-defined Metrics section, ensure that at least one metric ** is present with filter text: + + (protoPayload.serviceName="cloudresourcemanager.googleapis.com") + AND (ProjectOwnership OR projectOwnerInvitee) + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + + **Ensure that the prescribed Alerting Policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + 4. 
Under the Policies section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for your organization. + 5. Ensure that the appropriate notifications channels have been set up. + + **From Command Line: + Ensure that the prescribed log metric is present:** + + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with filter set to: + + (protoPayload.serviceName="cloudresourcemanager.googleapis.com") + AND (ProjectOwnership OR projectOwnerInvitee) + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains an least one alert policy where: + + + - conditions.conditionThreshold.filter is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + + rationale: `Project ownership has the highest level of privileges on a project. To avoid misuse of project resources, the project ownership assignment/change actions mentioned above should be monitored and alerted to concerned recipients. 
+ + - Sending project ownership invites + - Acceptance/Rejection of project ownership invite by user + - Adding 'role/Owner' to a user/service-account + - Removing a user/Service account from 'role/Owner'`, + + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + 3. Clear any text and add: + + (protoPayload.serviceName="cloudresourcemanager.googleapis.com") + AND (ProjectOwnership OR projectOwnerInvitee) + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + OR (protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" + AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner") + + 4. Click *Submit Filter*. The logs display based on the filter text entered by the user. + 5. In the *Metric Editor* menu on the right, fill out the name field. Set *Units* to **1** (default) and the *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed Alert Policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the desired metric and select *Create alert from Metric*. A new page opens. + 3. Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. 
For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notifications channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create a prescribed Log Metric: + + + - Use the command: gcloud beta logging metrics create + - Reference for Command Usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create prescribed Alert Policy + + - Use the command: gcloud alpha monitoring policies create + - Reference for Command Usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + filter + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'high', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-7.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-7.ts new file mode 100644 index 00000000..bb32795f --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-7.ts @@ -0,0 +1,148 @@ +// GCP CIS 1.2.0 Rule equivalent 2.11 +const filterPatternRegex = + 
/\s*protoPayload.methodName\s*=\s*"cloudsql.instances.update"\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-7', + title: + 'Monitoring check 7: Logging metric filter and alert for SQL instance configuration changes should be configured', + + description: `It is recommended that a metric filter and alarm be established for SQL instance + configuration changes.`, + + audit: `**From Console: + Ensure the prescribed log metric is present:** + + 1. For each project that contains Cloud SQL instances, go to L*ogging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure that at least one metric ** is present with the filter text: + + protoPayload.methodName="cloudsql.instances.update" + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the Pol*i*cies section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of zero(0) for greater than zero(0) seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that the appropriate notifications channels have been set up. + + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. Ensure that the output contains at least one metric with the filter set to + + protoPayload.methodName="cloudsql.instances.update" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure that the prescribed alerting policy is present:** + + 4. 
List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + + rationale: `Monitoring changes to SQL instance configuration changes may reduce the time needed to detect and correct misconfigurations done on the SQL server. + + Below are a few of the configurable options which may the impact security posture of an SQL instance: + + - Enable auto backups and high availability: Misconfiguration may adversely impact business continuity, disaster recovery, and high availability + - Authorize networks: Misconfiguration may increase exposure to untrusted networks`, + + remediation: `**From Console: + Create the prescribed Log Metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + + 3. Clear any text and add: + + protoPayload.methodName="cloudsql.instances.update" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the *Metric Editor* menu on right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed alert policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page appears. + 3. Fill out the alert policy configuration and click *Save*. 
Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the user's project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notification channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create the prescribed log metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed alert policy: + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create + `, + + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/storage/docs/overview', + 'https://cloud.google.com/sql/docs/', + 'https://cloud.google.com/sql/docs/mysql/', + 'https://cloud.google.com/sql/docs/postgres/', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + name + filter + metricDescriptor { + type + } + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-8.ts 
b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-8.ts new file mode 100644 index 00000000..3c14eb7d --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-monitoring-check-8.ts @@ -0,0 +1,135 @@ +// GCP CIS 1.2.0 Rule equivalent 2.10 +const filterPatternRegex = + /\s*resource.type\s*=\s*gcs_bucket\s*AND\s*protoPayload.methodName\s*=\s*"storage.setIamPermissions"\s*/ + +export default { + id: 'gcp-pci-dss-3.2.1-monitoring-check-8', + title: + 'Monitoring check 8: Logging metric filter and alert for Storage IAM permission changes should be configured', + description: `It is recommended that a metric filter and alarm be established for Cloud Storage Bucket + IAM changes.`, + audit: `**From Console: + Ensure the prescribed log metric is present:** + + 1. For each project that contains cloud storage buckets, go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics. + 2. In the *User-defined Metrics* section, ensure at least one metric ** is present with the filter text: + + resource.type=gcs_bucket AND protoPayload.methodName="storage.setIamPermissions" + + **Ensure that the prescribed alerting policy is present:** + + 3. Go to *Alerting* by visiting https://console.cloud.google.com/monitoring/alerting. + 4. Under the *Policies* section, ensure that at least one alert policy exists for the log metric above. Clicking on the policy should show that it is configured with a condition. For example, *Violates when: Any logging.googleapis.com/user/ stream is above a threshold of 0 for greater than 0 seconds* means that the alert will trigger for any new owner change. Verify that the chosen alerting thresholds make sense for the user's organization. + 5. Ensure that the appropriate notifications channels have been set up. + + **From Command Line: + Ensure that the prescribed log metric is present:** + + 1. List the log metrics: + + gcloud beta logging metrics list --format json + + 2. 
Ensure that the output contains at least one metric with the filter set to: + + resource.type=gcs_bucket AND protoPayload.methodName="storage.setIamPermissions" + + 3. Note the value of the property *metricDescriptor.type* for the identified metric, in the format *logging.googleapis.com/user/*. + + **Ensure the prescribed alerting policy is present:** + + 4. List the alerting policies: + + gcloud alpha monitoring policies list --format json + + 5. Ensure that the output contains at least one alert policy where: + + - *conditions.conditionThreshold.filter* is set to *metric.type=\\"logging.googleapis.com/user/\\"* + - AND *enabled* is set to *true*`, + rationale: 'Monitoring changes to cloud storage bucket permissions may reduce the time needed to detect and correct permissions on sensitive cloud storage buckets and objects inside the bucket.', + remediation: `**From Console: + Create the prescribed log metric:** + + 1. Go to *Logging/Logs-based Metrics* by visiting https://console.cloud.google.com/logs/metrics and click "CREATE METRIC". + 2. Click the down arrow symbol on the *Filter Bar* at the rightmost corner and select *Convert to Advanced Filter*. + 3. Clear any text and add: + + resource.type=gcs_bucket AND protoPayload.methodName="storage.setIamPermissions" + + 4. Click *Submit Filter*. Display logs appear based on the filter text entered by the user. + 5. In the Metric Editor menu on the right, fill out the name field. Set *Units* to *1* (default) and *Type* to *Counter*. This ensures that the log metric counts the number of log entries matching the user's advanced logs query. + 6. Click *Create Metric*. + + **Create the prescribed Alert Policy:** + + 1. Identify the newly created metric under the section *User-defined Metrics* at https://console.cloud.google.com/logs/metrics. + 2. Click the 3-dot icon in the rightmost column for the new metric and select *Create alert from Metric*. A new page appears. + 3. 
Fill out the alert policy configuration and click *Save*. Choose the alerting threshold and configuration that makes sense for the user's organization. For example, a threshold of zero(0) for the most recent value will ensure that a notification is triggered for every owner change in the project: + + Set 'Aggregator' to 'Count' + + Set 'Configuration': + + - Condition: above + + - Threshold: 0 + + - For: most recent value + + 4. Configure the desired notifications channels in the section *Notifications*. + 5. Name the policy and click *Save*. + + **From Command Line:** + Create the prescribed Log Metric: + + - Use the command: *gcloud beta logging metrics create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/beta/logging/metrics/create + + Create the prescribed alert policy: + + - Use the command: *gcloud alpha monitoring policies create* + - Reference for command usage: https://cloud.google.com/sdk/gcloud/reference/alpha/monitoring/policies/create`, + references: [ + 'https://cloud.google.com/logging/docs/logs-based-metrics/', + 'https://cloud.google.com/monitoring/custom-metrics/', + 'https://cloud.google.com/monitoring/alerts/', + 'https://cloud.google.com/logging/docs/reference/tools/gcloud-logging', + 'https://cloud.google.com/storage/docs/overview', + 'https://cloud.google.com/storage/docs/access-control/iam-roles', + ], + gql: `{ + querygcpAlertPolicy { + id + __typename + enabled { + value + } + project { + logMetrics { + name + filter + metricDescriptor { + type + } + } + } + } + }`, + resource: 'querygcpAlertPolicy[*]', + severity: 'medium', + conditions: { + and: [ + { + path: '@.enabled.value', + equal: true, + }, + { + path: '@.project', + jq: '[.[].logMetrics[] | select( "logging.googleapis.com/user/" + .name == .metricDescriptor.type)]', + array_any: { + path: '[*].filter', + match: filterPatternRegex, + }, + }, + ], + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-1.ts 
b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-1.ts new file mode 100644 index 00000000..22c47f5b --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-1.ts @@ -0,0 +1,134 @@ +// GCP CIS 1.2.0 Rule equivalent 3.6 +export default { + id: 'gcp-pci-dss-3.2.1-networking-check-1', + title: 'Networking check 1: Network firewall rules should not permit ingress from 0.0.0.0/0 to port 22 (SSH)', + + description: `GCP Firewall Rules are specific to a VPC Network. Each rule either allows or denies + traffic when its conditions are met. Its conditions allow the user to specify the type of + traffic, such as ports and protocols, and the source or destination of the traffic, including IP + addresses, subnets, and instances. + + Firewall rules are defined at the VPC network level and are specific to the network in which + they are defined. The rules themselves cannot be shared among networks. Firewall rules + only support IPv4 traffic. When specifying a source for an ingress rule or a destination for + an egress rule by address, only an IPv4 address or IPv4 block in CIDR notation can be + used. Generic (0.0.0.0/0) incoming traffic from the internet to VPC or VM instance using + SSH on Port 22 can be avoided.`, + + audit: `**From the Console:** + + 1. Go to *VPC network*. + 2. Go to the *Firewall Rules*. + 3. Ensure that *Port* is not equal to *22* and *Action* is not set to *Allow*. + 4. Ensure *IP Ranges* is not equal to *0.0.0.0/0* under *Source filters*. 
+ + **From Command Line:** + + gcloud compute firewall-rules list --format=table'(name,direction,sourceRanges,allowed)' + + Ensure that there is no rule matching the below criteria: + + - *SOURCE_RANGES* is 0.0.0.0/0 + - AND *DIRECTION* is *INGRESS* + - AND IPProtocol is *tcp* or ALL + - AND *PORTS* is set to *22* or *range* *containing* *22* or *Null* *(not set)* + + Note: + + - When ALL TCP ports are allowed in a rule, PORT does not have any value set (*NULL*) + - When ALL Protocols are allowed in a rule, PORT does not have any value set (*NULL*)`, + + rationale: 'GCP *Firewall Rules* within a *VPC Network* apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication). For an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. This route simply defines the path to the Internet, to avoid the most general *(0.0.0.0/0)* destination *IP Range* specified from the Internet through SSH with the default *Port 22*. Generic access from the Internet to a specific IP Range needs to be restricted.', + + remediation: `**From the Console:** + + 1. Go to *VPC Network*. + 2. Go to the *Firewall Rules*. + 3. Click the *Firewall Rule* you want to modify. + 4. Click *Edit*. + 5. Modify *Source IP ranges* to specific *IP*. + 6. Click *Save*. + + **From Command Line:** + 1. Update the Firewall rule with the new *SOURCE_RANGE* from the below command: + + gcloud compute firewall-rules update FirewallName --allow=[PROTOCOL[:PORT[- PORT]],...] --source-ranges=[CIDR_RANGE,...] 
+ `, + references: ['https://cloud.google.com/vpc/docs/firewalls#blockedtraffic'], + gql: `{ + querygcpFirewall(filter: {direction:{eq: "INGRESS"}}){ + id + name + __typename + sourceRanges + direction + allowed{ + ipProtocol + ports + } + } + }`, + resource: 'querygcpFirewall[*]', + severity: 'high', + conditions: { + not: { + path: '@', + and: [ + { + path: '[*].sourceRanges', + jq: 'map({"range": .})', + array_any: { + path: '[*].range', + in: ['0.0.0.0/0', '::/0'], + }, + }, + { + path: '[*].direction', + in: ['INGRESS'], + }, + { + path: '@.allowed', + jq: `[.[] + | { "ipProtocol": .ipProtocol} + + (if .ports | length > 0 then .ports[] else [""][] end | split("-") | {fromPort: (.[0]), toPort: (.[1] // .[0])}) ]`, + array_any: { + and: [ + { + path: '[*].ipProtocol', + in: ['tcp', 'all'], + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + equal: null, + }, + { + path: '[*].toPort', + equal: null, + }, + ], + }, + { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 22, + }, + { + path: '[*].toPort', + greaterThanInclusive: 22, + }, + ], + }, + ], + }, + ], + }, + }, + ], + }, + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-2.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-2.ts new file mode 100644 index 00000000..ccdbf88d --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-2.ts @@ -0,0 +1,133 @@ +// GCP CIS 1.2.0 Rule equivalent 3.7 +export default { + id: 'gcp-pci-dss-3.2.1-networking-check-2', + title: 'Networking check 2: Network firewall rules should not permit ingress from 0.0.0.0/0 to port 3389 (RDP)', + + description: `GCP Firewall Rules are specific to a VPC Network. Each rule either allows or denies + traffic when its conditions are met. Its conditions allow users to specify the type of traffic, + such as ports and protocols, and the source or destination of the traffic, including IP + addresses, subnets, and instances. 
+ + Firewall rules are defined at the VPC network level and are specific to the network in which + they are defined. The rules themselves cannot be shared among networks. Firewall rules + only support IPv4 traffic. When specifying a source for an ingress rule or a destination for + an egress rule by address, an IPv4 address or IPv4 block in CIDR notation can be used. + Generic (0.0.0.0/0) incoming traffic from the Internet to a VPC or VM instance using RDP + on Port 3389 can be avoided.`, + + audit: `**From the Console:** + + 1. Go to *VPC network*. + 2. Go to the *Firewall Rules*. + 3. Ensure *Port* is not equal to *3389* and *Action* is not *Allow*. + 4. Ensure *IP Ranges* is not equal to *0.0.0.0/0* under *Source filters*. + + **From Command Line:** + + gcloud compute firewall-rules list -- format=table'(name,direction,sourceRanges,allowed.ports)' + + Ensure that there is no rule matching the below criteria: + + - *SOURCE_RANGES* is *0.0.0.0/0* + - AND *DIRECTION* is *INGRESS* + - AND IPProtocol is *TCP* or *ALL* + - AND *PORTS* is set to *3389* or *range containing 3389* or *Null (not set)* + + Note: + + - When ALL TCP ports are allowed in a rule, PORT does not have any value set (*NULL*) + - When ALL Protocols are allowed in a rule, PORT does not have any value set (*NULL*)`, + rationale: 'GCP *Firewall Rule*s within a *VPC Network*. These rules apply to outgoing (egress) traffic from instances and incoming (ingress) traffic to instances in the network. Egress and ingress traffic flows are controlled even if the traffic stays within the network (for example, instance-to-instance communication). For an instance to have outgoing Internet access, the network must have a valid Internet gateway route or custom route whose destination IP is specified. This route simply defines the path to the Internet, to avoid the most general (0.0.0.0/0) destination IP Range specified from the Internet through RDP with the default *Port 3389*. 
Generic access from the Internet to a specific IP Range should be restricted.', + + remediation: `**From the Console:** + + 1. Go to *VPC Network*. + 2. Go to the *Firewall Rules*. + 3. Click the *Firewall Rule* to be modified. + 4. Click *Edit*. + 5. Modify *Source IP ranges* to specific *IP*. + 6. Click *Save*. + + **From Command Line:** + 1. Update RDP Firewall rule with new *SOURCE_RANGE* from the below command: + + gcloud compute firewall-rules update FirewallName --allow=[PROTOCOL[:PORT[-PORT]],...] --source-ranges=[CIDR_RANGE,...]`, + + references: ['https://cloud.google.com/vpc/docs/firewalls#blockedtraffic'], + gql: `{ + querygcpFirewall(filter: {direction:{eq: "INGRESS"}}){ + id + name + __typename + sourceRanges + direction + allowed{ + ipProtocol + ports + } + } + }`, + resource: 'querygcpFirewall[*]', + severity: 'high', + conditions: { + not: { + path: '@', + and: [ + { + path: '[*].sourceRanges', + jq: 'map({"range": .})', + array_any: { + path: '[*].range', + in: ['0.0.0.0/0', '::/0'], + }, + }, + { + path: '[*].direction', + in: ['INGRESS'], + }, + { + path: '@.allowed', + jq: `[.[] + | { "ipProtocol": .ipProtocol} + + (if .ports | length > 0 then .ports[] else [""][] end | split("-") | {fromPort: (.[0]), toPort: (.[1] // .[0])}) ]`, + array_any: { + and: [ + { + path: '[*].ipProtocol', + in: ['tcp', 'all'], + }, + { + or: [ + { + and: [ + { + path: '[*].fromPort', + equal: null, + }, + { + path: '[*].toPort', + equal: null, + }, + ], + }, + { + and: [ + { + path: '[*].fromPort', + lessThanInclusive: 3389, + }, + { + path: '[*].toPort', + greaterThanInclusive: 3389, + }, + ], + }, + ], + }, + ], + }, + }, + ], + }, + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-3.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-3.ts new file mode 100644 index 00000000..a0ece29b --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-3.ts @@ -0,0 +1,87 @@ +// GCP CIS 1.2.0 Rule equivalent 3.1 
+export default { + id: 'gcp-pci-dss-3.2.1-networking-check-3', + title: + 'Networking check 3: The default network for a project should be deleted', + + description: + 'To prevent use of default network, a project should not have a default network.', + + audit: `**From Console:** + + 1. Go to the *VPC networks* page by visiting: https://console.cloud.google.com/networking/networks/list. + 2. Ensure that a network with the name *default* is not present. + + **From Command Line:** + + 1. Set the project name in the Google Cloud Shell: + + gcloud config set project PROJECT_ID + + 2. List the networks configured in that project: + + gcloud compute networks list + + It should not list *default* as one of the available networks in that project.`, + + rationale: `The *default* network has a preconfigured network configuration and automatically generates the following insecure firewall rules: + + - default-allow-internal: Allows ingress connections for all protocols and ports among instances in the network. + - default-allow-ssh: Allows ingress connections on TCP port 22(SSH) from any source to any instance in the network. + - default-allow-rdp: Allows ingress connections on TCP port 3389(RDP) from any source to any instance in the network. + - default-allow-icmp: Allows ingress ICMP traffic from any source to any instance in the network. + + These automatically created firewall rules do not get audit logged and cannot be configured + to enable firewall rule logging. + + Furthermore, the default network is an auto mode network, which means that its subnets + use the same predefined range of IP addresses, and as a result, it's not possible to use Cloud + VPN or VPC Network Peering with the default network. + + Based on organization security and networking requirements, the organization should + create a new network and delete the *default* network.`, + + remediation: `**From Console:** + + 1. 
Go to the *VPC networks* page by visiting: https://console.cloud.google.com/networking/networks/list. + 2. Click the network named *default*. + 3. On the network detail page, click *EDIT*. + 4. Click *DELETE VPC NETWORK*. + 5. If needed, create a new network to replace the default network. + + **From Command Line:** + For each Google Cloud Platform project, + + 1. Delete the default network: + + gcloud compute networks delete default + + 2. If needed, create a new network to replace it: + + gcloud compute networks create NETWORK_NAME + + **Prevention:** + The user can prevent the default network and its insecure default firewall rules from being created by setting up an Organization Policy to Skip default network creation at https://console.cloud.google.com/iam-admin/orgpolicies/compute-skipDefaultNetworkCreation.`, + + references: [ + 'https://cloud.google.com/compute/docs/networking#firewall_rules', + 'https://cloud.google.com/compute/docs/reference/latest/networks/insert', + 'https://cloud.google.com/compute/docs/reference/latest/networks/delete', + 'https://cloud.google.com/vpc/docs/firewall-rules-logging', + 'https://cloud.google.com/vpc/docs/vpc#default-network', + 'https://cloud.google.com/sdk/gcloud/reference/compute/networks/delete', + ], + gql: `{ + querygcpNetwork { + id + __typename + name + } + }`, + resource: 'querygcpNetwork[*]', + severity: 'medium', + conditions: { + path: '@.name', + notEqual: 'default', + }, +} diff --git a/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-4.ts b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-4.ts new file mode 100644 index 00000000..89b7ba7f --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/rules/pci-dss-3.2.1-networking-check-4.ts @@ -0,0 +1,204 @@ +const targetHttpsAndSslProxyConditions = { + and: [ + { + path: '@.sslPolicy', + isEmpty: false, + }, + { + path: '@.sslPolicy', + array_all: { + or: [ + { + and: [ + { + path: '[*].profile', + equal: 'MODERN', + }, + { + path: '[*].minTlsVersion', + 
equal: 'TLS_1_2', + }, + ], + }, + { + and: [ + { + path: '[*].profile', + equal: 'RESTRICTED', + }, + ], + }, + { + and: [ + { + path: '[*].profile', + equal: 'CUSTOM', + }, + { + path: '[*].enabledFeatures', + array_all: { + path: '[*]', + notIn: [ + 'TLS_RSA_WITH_AES_128_GCM_SHA256', + 'TLS_RSA_WITH_AES_256_GCM_SHA384', + 'TLS_RSA_WITH_AES_128_CBC_SHA', + 'TLS_RSA_WITH_AES_256_CBC_SHA', + 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + ], + }, + }, + ], + }, + ], + }, + } + ] +} + +// GCP CIS 1.2.0 Rule equivalent 3.9 +export default { + id: 'gcp-pci-dss-3.2.1-networking-check-4', + title: + 'Networking check 4: Load balancer HTTPS or SSL proxy SSL policies should not have weak cipher suites', + + description: `Secure Sockets Layer (SSL) policies determine what port Transport Layer Security (TLS) + features clients are permitted to use when connecting to load balancers. To prevent usage + of insecure features, SSL policies should use (a) at least TLS 1.2 with the MODERN profile; + or (b) the RESTRICTED profile, because it effectively requires clients to use TLS 1.2 + regardless of the chosen minimum TLS version; or (3) a CUSTOM profile that does not + support any of the following features: + + TLS_RSA_WITH_AES_128_GCM_SHA256 + + TLS_RSA_WITH_AES_256_GCM_SHA384 + + TLS_RSA_WITH_AES_128_CBC_SHA + + TLS_RSA_WITH_AES_256_CBC_SHA + + TLS_RSA_WITH_3DES_EDE_CBC_SHA`, + + audit: `**From Console:** + + 1. See all load balancers by visiting https://console.cloud.google.com/net-services/loadbalancing/loadBalancers/list. + 2. For each load balancer for *SSL (Proxy)* or *HTTPS*, click on its name to go the *Load balancer details* page. + 3. Ensure that each target proxy entry in the *Frontend* table has an *SSL Policy* configured. + 4. Click on each SSL policy to go to its *SSL policy details* page. + 5. 
Ensure that the SSL policy satisfies one of the following conditions: + + + - has a *Min TLS* set to *TLS 1.2* and *Profile* set to *Modern* profile, or + - has *Profile* set to *Restricted*. Note that a Restricted profile effectively requires + clients to use TLS 1.2 regardless of the chosen minimum TLS version, or + - has *Profile* set to *Custom* and the following features are all disabled: + + TLS_RSA_WITH_AES_128_GCM_SHA256 + TLS_RSA_WITH_AES_256_GCM_SHA384 + TLS_RSA_WITH_AES_128_CBC_SHA + TLS_RSA_WITH_AES_256_CBC_SHA + TLS_RSA_WITH_3DES_EDE_CBC_SHA + + **From Command Line:** + + 1. List all TargetHttpsProxies and TargetSslProxies. + + gcloud compute target-https-proxies list + gcloud compute target-ssl-proxies list + + 2. For each target proxy, list its properties: + + gcloud compute target-https-proxies describe TARGET_HTTPS_PROXY_NAME + gcloud compute target-ssl-proxies describe TARGET_SSL_PROXY_NAME + + 3. Ensure that the *sslPolicy* field is present and identifies the name of the SSL policy: + + sslPolicy: https://www.googleapis.com/compute/v1/projects/PROJECT_ID/global/sslPolicies/SSL_POLICY_NAME + + If the *sslPolicy* field is missing from the configuration, it means that the GCP default policy is used, which is insecure. + + 4. Describe the SSL policy: + + gcloud compute ssl-policies describe SSL_POLICY_NAME + + 5. Ensure that the policy satisfies one of the following conditions: + + - has *Profile* set to *Modern* and *minTlsVersion* set to *TLS_1_2*, or + - has *Profile* set to *Restricted*, or + - has *Profile* set to *Custom* and *enabledFeatures* does not contain any of the following values: + + TLS_RSA_WITH_AES_128_GCM_SHA256 + TLS_RSA_WITH_AES_256_GCM_SHA384 + TLS_RSA_WITH_AES_128_CBC_SHA + TLS_RSA_WITH_AES_256_CBC_SHA + TLS_RSA_WITH_3DES_EDE_CBC_SHA`, + + rationale: 'Load balancers are used to efficiently distribute traffic across multiple servers. 
Both SSL proxy and HTTPS load balancers are external load balancers, meaning they distribute traffic from the Internet to a GCP network. GCP customers can configure load balancer SSL policies with a minimum TLS version (1.0, 1.1, or 1.2) that clients can use to establish a connection, along with a profile (Compatible, Modern, Restricted, or Custom) that specifies permissible cipher suites. To comply with users using outdated protocols, GCP load balancers can be configured to permit insecure cipher suites. In fact, the GCP default SSL policy uses a minimum TLS version of 1.0 and a Compatible profile, which allows the widest range of insecure cipher suites. As a result, it is easy for customers to configure a load balancer without even knowing that they are permitting outdated cipher suites.', + + remediation: `**From Console:** + If the TargetSSLProxy or TargetHttpsProxy does not have an SSL policy configured, create a new SSL policy. Otherwise, modify the existing insecure policy. + + 1. Navigate to the *SSL Policies* page by visiting: https://console.cloud.google.com/net-security/sslpolicies + 2. Click on the name of the insecure policy to go to its *SSL policy details* page. + 3. Click *EDIT*. + 4. Set *Minimum TLS version* to *TLS 1.2*. + 5. Set *Profile* to *Modern* or *Restricted*. + 6. Alternatively, if the user selects the profile *Custom*, make sure that the following features are disabled: + + TLS_RSA_WITH_AES_128_GCM_SHA256 + TLS_RSA_WITH_AES_256_GCM_SHA384 + TLS_RSA_WITH_AES_128_CBC_SHA + TLS_RSA_WITH_AES_256_CBC_SHA + TLS_RSA_WITH_3DES_EDE_CBC_SHA + + **From Command Line:** + + 1. For each insecure SSL policy, update it to use secure ciphers: + + gcloud compute ssl-policies update NAME [--profile COMPATIBLE|MODERN|RESTRICTED|CUSTOM] --min-tls-version 1.2 [--custom-features FEATURES] + + + 2. If the target proxy has a GCP default SSL policy, use the following command corresponding to the proxy type to update it. 
+ + gcloud compute target-ssl-proxies update TARGET_SSL_PROXY_NAME --ssl-policy SSL_POLICY_NAME + gcloud compute target-https-proxies update TARGET_HTTPS_PROXY_NAME --ssl-policy SSL_POLICY_NAME + `, + references: [ + 'https://cloud.google.com/load-balancing/docs/use-ssl-policies', + 'https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-52r2.pdf', + ], + severity: 'medium', + queries: [ + { + gql: `{ + querygcpTargetHttpsProxy { + id + projectId + __typename + sslPolicy { + profile + enabledFeatures + minTlsVersion + } + } + }`, + resource: 'querygcpTargetHttpsProxy[*]', + conditions: targetHttpsAndSslProxyConditions + }, + { + gql: `{ + querygcpTargetSslProxy { + id + projectId + __typename + sslPolicy { + profile + enabledFeatures + minTlsVersion + } + } + }`, + resource: 'querygcpTargetSslProxy[*]', + conditions: targetHttpsAndSslProxyConditions + }, + ], +} diff --git a/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-iam-checks.test.ts b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-iam-checks.test.ts new file mode 100644 index 00000000..fbb86119 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-iam-checks.test.ts @@ -0,0 +1,174 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_PCI_DSS_321_IAM_1 from '../rules/pci-dss-3.2.1-iam-check-1' +import Gcp_PCI_DSS_321_IAM_2 from '../rules/pci-dss-3.2.1-iam-check-2' + +export interface Bindings { + members: string[] + role?: string +} + +export interface IamPolicy { + kmsCryptoKey?: string + bindings: Bindings[] +} + +export interface ApiKey { + id: string +} + +export interface QuerygcpProject { + id: string + iamPolicies?: IamPolicy[] + apiKeys?: ApiKey[] +} + +export interface QuerygcpIamPolicy { + id: string + bindings: Bindings[] +} + +export interface PCIQueryResponse { + querygcpIamPolicy?: QuerygcpIamPolicy[] + querygcpProject?: QuerygcpProject[] +} + +describe('PCI Data Security Standard: 3.2.1', () => { + let rulesEngine: Engine + 
beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ providerName: 'gcp', entityName: 'PCI'} ) + }) + + + describe('IAM check 1: IAM users should not have both KMS admin and any of the KMS encrypter/decrypter roles', () => { + const getTestRuleFixture = ( + role: string, + members: string[] + ): PCIQueryResponse => { + return { + querygcpIamPolicy: [ + { + id: cuid(), + bindings: [ + { + role: 'roles/cloudkms.admin', + members: ['user:user1@autocloud.dev'] + }, + { + role, + members, + }, + ], + }, + ], + } + } + + const test111Rule = async ( + data: PCIQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_IAM_1 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a user account with kms admin role and without any cryptoKey roles', async () => { + const data: PCIQueryResponse = getTestRuleFixture( + 'roles/editor', ['user:user1@autocloud.dev'] + ) + await test111Rule(data, Result.PASS) + }) + + test('Security Issue when there is an inbound rule with a user account with kms admin role and cryptoKeyEncrypterDecrypter role', async () => { + const data: PCIQueryResponse = getTestRuleFixture( + 'roles/cloudkms.cryptoKeyEncrypterDecrypter', ['user:user1@autocloud.dev'] + ) + await test111Rule(data, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with a user account with kms admin role and cryptoKeyEncrypter role', async () => { + const data: PCIQueryResponse = getTestRuleFixture( + 'roles/cloudkms.cryptoKeyEncrypter', ['user:user1@autocloud.dev'] + ) + await test111Rule(data, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with a user account with kms admin role and cryptoKeyDecrypter role', async () => { + const data: PCIQueryResponse = getTestRuleFixture( + 'roles/cloudkms.cryptoKeyDecrypter', 
['user:user1@autocloud.dev'] + ) + await test111Rule(data, Result.FAIL) + }) + + }) + + describe('IAM check 2: IAM users should not have project-level "Service Account User" or "Service Account Token Creator" roles', () => { + const getTestRuleFixture = ( + role: string, + projectMembers: string[] + ): PCIQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + iamPolicies: [ + { + bindings: [ + { + role, + members: projectMembers, + }, + ], + }, + ], + }, + ], + } + } + + const test16Rule = async ( + data: PCIQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_IAM_2 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with user accounts with securityReviewer role', async () => { + const data: PCIQueryResponse = getTestRuleFixture( + 'roles/iam.securityReviewer', + ['user:user1@autocloud.dev', 'user:user2@autocloud.dev'] + ) + await test16Rule(data, Result.PASS) + }) + + test('Security Issue when there is an inbound rule with user accounts with serviceAccountUser role', async () => { + const data: PCIQueryResponse = getTestRuleFixture( + 'roles/iam.serviceAccountUser', + ['user:user1@autocloud.dev', 'user:user2@autocloud.dev'] + ) + await test16Rule(data, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with user accounts with serviceAccountTokenCreator role', async () => { + const data: PCIQueryResponse = getTestRuleFixture( + 'roles/iam.serviceAccountTokenCreator', + ['user:user1@autocloud.dev', 'user:user2@autocloud.dev'] + ) + await test16Rule(data, Result.FAIL) + }) + }) +}) diff --git a/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-logging-checks.test.ts b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-logging-checks.test.ts new file mode 100644 index 00000000..89d9d478 --- /dev/null +++ 
b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-logging-checks.test.ts @@ -0,0 +1,70 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_PCI_DSS_321_Logging_1 from '../rules/pci-dss-3.2.1-logging-check-1' + +export interface LogSink { + filter?: string + destination?: string +} + +export interface QuerygcpProject { + id: string + logSinks: LogSink[] +} + +export interface PCIQueryResponse { + querygcpProject?: QuerygcpProject[] +} + +describe('PCI Data Security Standard: 3.2.1', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ providerName: 'gcp', entityName: 'PCI'} ) + }) + + describe('Logging check 1: At least one project-level logging sink should be configured with an empty filter', () => { + const getTestRuleFixture = (filter: string): PCIQueryResponse => { + return { + querygcpProject: [ + { + id: cuid(), + logSinks: [ + { + filter: 'dummy filter', + }, + { + filter, + }, + ], + }, + ], + } + } + + const test22Rule = async ( + data: PCIQueryResponse, + expectedResult: Result + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Logging_1 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is a logSink with an empty filter', async () => { + const data: PCIQueryResponse = getTestRuleFixture('') + await test22Rule(data, Result.PASS) + }) + + test('Security Issue when there is a logSink with an empty filter', async () => { + const data: PCIQueryResponse = getTestRuleFixture('dummy-filter') + await test22Rule(data, Result.FAIL) + }) + }) + +}) diff --git a/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-monitoring-checks.test.ts b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-monitoring-checks.test.ts new file mode 100644 index 00000000..385d4951 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-monitoring-checks.test.ts @@ -0,0 
+1,837 @@ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' + +import Gcp_PCI_DSS_321_Monitoring_1 from '../rules/pci-dss-3.2.1-monitoring-check-1' +import Gcp_PCI_DSS_321_Monitoring_2 from '../rules/pci-dss-3.2.1-monitoring-check-2' +import Gcp_PCI_DSS_321_Monitoring_3 from '../rules/pci-dss-3.2.1-monitoring-check-3' +import Gcp_PCI_DSS_321_Monitoring_4 from '../rules/pci-dss-3.2.1-monitoring-check-4' +import Gcp_PCI_DSS_321_Monitoring_5 from '../rules/pci-dss-3.2.1-monitoring-check-5' +import Gcp_PCI_DSS_321_Monitoring_6 from '../rules/pci-dss-3.2.1-monitoring-check-6' +import Gcp_PCI_DSS_321_Monitoring_7 from '../rules/pci-dss-3.2.1-monitoring-check-7' +import Gcp_PCI_DSS_321_Monitoring_8 from '../rules/pci-dss-3.2.1-monitoring-check-8' + +const Gcp_PCI_DSS_321_Monitoring_1_Filter = + 'protoPayload.methodName="SetIamPolicy" AND protoPayload.serviceData.policyDelta.auditConfigDeltas:*' +const Gcp_PCI_DSS_321_Monitoring_2_Filter = + 'resource.type="iam_role" AND protoPayload.methodName="google.iam.admin.v1.CreateRole" OR protoPayload.methodName="google.iam.admin.v1.DeleteRole" OR protoPayload.methodName="google.iam.admin.v1.UpdateRole"' +const Gcp_PCI_DSS_321_Monitoring_3_Filter = + 'resource.type=gce_network AND protoPayload.methodName="beta.compute.networks.insert" OR protoPayload.methodName="beta.compute.networks.patch" OR protoPayload.methodName="v1.compute.networks.delete" OR protoPayload.methodName="v1.compute.networks.removePeering" OR protoPayload.methodName="v1.compute.networks.addPeering"' +const Gcp_PCI_DSS_321_Monitoring_4_Filter = + 'resource.type="gce_firewall_rule" AND protoPayload.methodName="v1.compute.firewalls.patch" OR protoPayload.methodName="v1.compute.firewalls.insert"' +const Gcp_PCI_DSS_321_Monitoring_5_Filter = + 'resource.type="gce_route" AND protoPayload.methodName="beta.compute.routes.patch" OR protoPayload.methodName="beta.compute.routes.insert"' +const Gcp_PCI_DSS_321_Monitoring_6_Filter = + 
'( protoPayload.serviceName="cloudresourcemanager.googleapis.com" ) AND ( ProjectOwnership OR projectOwnerInvitee ) OR ( protoPayload.serviceData.policyDelta.bindingDeltas.action="REMOVE" AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner" ) OR ( protoPayload.serviceData.policyDelta.bindingDeltas.action="ADD" AND protoPayload.serviceData.policyDelta.bindingDeltas.role="roles/owner" )' +const Gcp_PCI_DSS_321_Monitoring_7_Filter = + 'protoPayload.methodName="cloudsql.instances.update"' +const Gcp_PCI_DSS_321_Monitoring_8_Filter = + 'resource.type=gcs_bucket AND protoPayload.methodName="storage.setIamPermissions"' + + +export interface MetricDescriptor { + type: string +} + +export interface LogMetric { + filter: string + name?: string + metricDescriptor?: MetricDescriptor +} + +export interface Project { + logMetrics?: LogMetric[] +} + +export interface Enabled { + value: boolean +} + +export interface QuerygcpAlertPolicy { + id: string + enabled?: Enabled + project?: Project[] +} + +export interface DnsPolicy { + enableLogging: boolean +} + +export interface QuerygcpNetwork { + id: string + dnsPolicies?: DnsPolicy[] +} + +export interface LogBucket { + name: string + retentionDays: number + locked: boolean +} + +export interface LogSink { + filter?: string + destination?: string +} + +export interface QuerygcpProject { + id: string + logSinks: LogSink[] + logBuckets?: LogBucket[] +} + +export interface AuditLogConfig { + logType: string + exemptedMembers: string[] +} + +export interface AuditConfig { + auditLogConfigs: AuditLogConfig[] + service: string + exemptedMembers: string[] +} + +export interface QuerygcpIamPolicy { + id: string + auditConfigs: AuditConfig[] +} + +export interface PCIQueryResponse { + querygcpAlertPolicy?: QuerygcpAlertPolicy[] + querygcpNetwork?: QuerygcpNetwork[] + querygcpProject?: QuerygcpProject[] + querygcpIamPolicy?: QuerygcpIamPolicy[] +} + +describe('PCI Data Security Standard: 3.2.1', () => { + let rulesEngine: 
Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ providerName: 'gcp', entityName: 'PCI'} ) + }) + + describe('Monitoring check 1: Logging metric filter and alert for audit configuration changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_1 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_1_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_1_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_1_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('Monitoring check 2: Logging metric filter and 
alert for Custom Role changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_2 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_2_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_2_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_2_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('Monitoring check 3: Logging metric filter and alert for network changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: 
Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_3 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_3_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_3_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_3_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('Monitoring check 4: Logging metric filter and alert for network firewall rule changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + 
logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_4 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_4_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_4_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_4_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('Monitoring check 5: Logging metric filter and alert for network route changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: 
metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_5 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_5_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_5_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_5_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('Monitoring check 6: Logging metric filter and alert for project ownership assignments/changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await 
rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_6 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_6_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_6_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_6_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('Monitoring check 7: Logging metric filter and alert for SQL instance configuration changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_7 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No 
Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_7_Filter, + 'log-metric-1', + 'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_7_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_7_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) + + describe('Monitoring check 8: Logging metric filter and alert for Storage IAM permission changes should be configured', () => { + const testRule = async ( + enabled: boolean, + filter: string, + metricName: string, + metricType: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpAlertPolicy: [ + { + id: cuid(), + enabled: { + value: enabled, + }, + project: [ + { + logMetrics: [ + { + filter: 'dummy test filter', + name: 'dummy test name', + metricDescriptor: { + type: 'logging.googleapis.com/user/dummy-test-name', + }, + }, + { + filter, + name: metricName, + metricDescriptor: { + type: `logging.googleapis.com/user/${metricType}`, + }, + }, + ], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Monitoring_8 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there are metric filters and alerts exist', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_8_Filter, + 'log-metric-1', + 
'log-metric-1', + Result.PASS + ) + }) + + test('Security Issue when there metric filters is not found', async () => { + await testRule( + true, + 'dummy metric filter value', + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters but not aletrs', async () => { + await testRule( + false, + Gcp_PCI_DSS_321_Monitoring_8_Filter, + 'log-metric-1', + 'log-metric-1', + Result.FAIL + ) + }) + + test('Security Issue when there are metric filters and aletrs but metric desciptor type not match with metric name', async () => { + await testRule( + true, + Gcp_PCI_DSS_321_Monitoring_8_Filter, + 'log-metric-1', + 'log-metric-2', + Result.FAIL + ) + }) + }) +}) diff --git a/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-networking-checks.test.ts b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-networking-checks.test.ts new file mode 100644 index 00000000..92144504 --- /dev/null +++ b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1-networking-checks.test.ts @@ -0,0 +1,500 @@ +/* eslint-disable max-len */ +import cuid from 'cuid' +import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' +import 'jest' + +import Gcp_PCI_DSS_321_Networking_1 from '../rules/pci-dss-3.2.1-networking-check-1' +import Gcp_PCI_DSS_321_Networking_2 from '../rules/pci-dss-3.2.1-networking-check-2' +import Gcp_PCI_DSS_321_Networking_3 from '../rules/pci-dss-3.2.1-networking-check-3' +import Gcp_PCI_DSS_321_Networking_4 from '../rules/pci-dss-3.2.1-networking-check-4' + +const ipV4WildcardAddress = '0.0.0.0/0' +const ipV6WildcardAddress = '::/0' + +export interface Allowed { + ipProtocol: string + ports: string[] +} + +export interface QuerygcpFirewall { + id: string + sourceRanges: string[] + direction: string + allowed?: Allowed[] +} + +export interface GcpNetworkSubnet { + purpose: string + enableFlowLogs: boolean | null +} + +export interface QuerygcpNetwork { + id: string + subnets?: GcpNetworkSubnet[] + name?: string + ipV4Range?: string | null +} + 
+export interface DnssecConfigDefaultKeySpecs { + keyType: string + algorithm: string +} + +export interface QuerygcpDnsManagedZone { + id: string + visibility?: string + dnssecConfigState?: string + dnssecConfigDefaultKeySpecs?: DnssecConfigDefaultKeySpecs[] +} + +export interface SslPolicy { + profile: string + enabledFeatures?: string[] + minTlsVersion: string +} + +export interface TargetHttpsProxy { + sslPolicy?: SslPolicy[] +} + +export interface TargetSslProxy { + sslPolicy?: SslPolicy[] +} + +export interface QuerygcpTargetSslProxy { + id: string + sslPolicy?: SslPolicy[] +} +export interface QuerygcpTargetHttpsProxy { + id: string + sslPolicy?: SslPolicy[] +} + +export interface PCIQueryResponse { + querygcpFirewall?: QuerygcpFirewall[] + querygcpNetwork?: QuerygcpNetwork[] + querygcpTargetSslProxy?: QuerygcpTargetSslProxy[] + querygcpTargetHttpsProxy?: QuerygcpTargetHttpsProxy[] +} + +describe('PCI Data Security Standard: 3.2.1', () => { + let rulesEngine: Engine + beforeAll(() => { + rulesEngine = new CloudGraph.RulesEngine({ + providerName: 'gcp', + entityName: 'PCI', + }) + }) + + describe('Networking check 1: Network firewall rules should not permit ingress from 0.0.0.0/0 to port 22 (SSH)', () => { + const testRule = async ( + fromPort: number | undefined, + toPort: number | undefined, + sourceAddress: string, + expectedResult: Result, + protocol?: string + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpFirewall: [ + { + id: cuid(), + sourceRanges: [sourceAddress], + direction: 'INGRESS', + allowed: [ + { + ipProtocol: 'icmp', + ports: [], + }, + { + ipProtocol: protocol || 'tcp', + ports: fromPort && toPort ? 
[`${fromPort}-${toPort}`] : [], + }, + { + ipProtocol: 'udp', + ports: ['0-65535'], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Networking_1 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 22', async () => { + await testRule(22, 22, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and port 80', async () => { + await testRule(80, 80, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and port 80', async () => { + await testRule(80, 80, ipV6WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 22', async () => { + await testRule(1000, 2000, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and a port range not including the port 22', async () => { + await testRule(1000, 2000, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and a port range not including the port 22', async () => { + await testRule(1000, 2000, ipV6WildcardAddress, Result.PASS) + }) + + test('Security Issue when IPv4 wilcard address and port 22 and tcp protocol', async () => { + await testRule(22, 22, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv4 wilcard address and port 22 and all protocol', async () => { + await testRule(22, 22, ipV4WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when IPv6 wilcard address and port 22 and tcp protocol', async () => { + await testRule(22, 22, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv6 wilcard address and 
port 22 and all protocol', async () => { + await testRule(22, 22, ipV6WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and port range includes the port 22', async () => { + await testRule(0, 1000, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and port range includes the port 22', async () => { + await testRule(0, 1000, ipV6WildcardAddress, Result.FAIL) + }) + }) + + describe('Networking check 2: Network firewall rules should not permit ingress from 0.0.0.0/0 to port 3389 (RDP)', () => { + const testRule = async ( + fromPort: number | undefined, + toPort: number | undefined, + sourceAddress: string, + expectedResult: Result, + protocol?: string + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpFirewall: [ + { + id: cuid(), + sourceRanges: [sourceAddress], + direction: 'INGRESS', + allowed: [ + { + ipProtocol: 'icmp', + ports: [], + }, + { + ipProtocol: protocol || 'tcp', + ports: fromPort && toPort ? 
[`${fromPort}-${toPort}`] : [], + }, + { + ipProtocol: 'udp', + ports: ['0-65535'], + }, + ], + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Networking_2 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a random IPv4 address and port 3389', async () => { + await testRule(3389, 3389, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and port 80', async () => { + await testRule(80, 80, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and port 80', async () => { + await testRule(80, 80, ipV6WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with a random IPv4 and a port range not including the port 3389', async () => { + await testRule(1000, 2000, '10.10.10.10/16', Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv4 wilcard address and a port range not including the port 3389', async () => { + await testRule(1000, 2000, ipV4WildcardAddress, Result.PASS) + }) + + test('No Security Issue when there is an inbound rule with IPv6 wilcard address and a port range not including the port 3389', async () => { + await testRule(1000, 2000, ipV6WildcardAddress, Result.PASS) + }) + + test('Security Issue when IPv4 wilcard address and port 3389 and tcp protocol', async () => { + await testRule(3389, 3389, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when IPv4 wilcard address and port 3389 and all protocol', async () => { + await testRule(3389, 3389, ipV4WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when IPv6 wilcard address and port 3389 and tcp protocol', async () => { + await testRule(3389, 3389, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue 
when IPv6 wilcard address and port 3389 and all protocol', async () => { + await testRule(3389, 3389, ipV6WildcardAddress, Result.FAIL, 'all') + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and no port range is specified', async () => { + await testRule(undefined, undefined, ipV6WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv4 wilcard address and port range includes the port 3389', async () => { + await testRule(0, 4000, ipV4WildcardAddress, Result.FAIL) + }) + + test('Security Issue when there is an inbound rule with IPv6 wilcard address and port range includes the port 3389', async () => { + await testRule(0, 4000, ipV6WildcardAddress, Result.FAIL) + }) + }) + + describe('Networking check 3: The default network for a project should be deleted', () => { + const testRule = async ( + networkName: string, + expectedResult: Result + ): Promise => { + // Arrange + const data: PCIQueryResponse = { + querygcpNetwork: [ + { + id: cuid(), + name: networkName, + }, + ], + } + + // Act + const [processedRule] = await rulesEngine.processRule( + Gcp_PCI_DSS_321_Networking_3 as Rule, + { ...data } + ) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + test('No Security Issue when there is an inbound rule with a network name that is not equal to default', async () => { + await testRule('test-network', Result.PASS) + }) + + test('Security Issue when there is an inbound rule with a network name that is equal to default', async () => { + await testRule('default', Result.FAIL) + }) + }) + + describe('Networking check 4: Load balancer HTTPS or SSL proxy SSL policies should not have weak cipher suites', () => { + const getTestRuleAFixture = (): PCIQueryResponse => { + 
return { + querygcpTargetHttpsProxy: [ + { + id: cuid(), + sslPolicy: [ + { + profile: 'MODERN', + minTlsVersion: 'TLS_1_2', + }, + ], + }, + ], + } + } + + const getTestRuleBFixture = (): PCIQueryResponse => { + return { + querygcpTargetSslProxy: [ + { + id: cuid(), + sslPolicy: [ + { + profile: 'MODERN', + minTlsVersion: 'TLS_1_2', + }, + ], + }, + ], + } + } + + const testRule = async ( + data: PCIQueryResponse, + expectedResult: Result, + rule?: Rule + ): Promise => { + // Act + const [processedRule] = await rulesEngine.processRule(rule as Rule, { + ...data, + }) + + // Asserts + expect(processedRule.result).toBe(expectedResult) + } + + describe('querygcpTargetHttpsProxy query:', () => { + let targetHttpsProxyRule: Rule + beforeAll(() => { + const { queries, ...ruleMetadata } = Gcp_PCI_DSS_321_Networking_4 + const query = queries.shift() + targetHttpsProxyRule = { + ...ruleMetadata, + ...query, + } as Rule + }) + + test('No Security Issue when proxies and ssl policies are secure', async () => { + const data: PCIQueryResponse = getTestRuleAFixture() + await testRule(data, Result.PASS, targetHttpsProxyRule) + }) + + test('Security Issue when proxies not have ssl policy', async () => { + const data: PCIQueryResponse = getTestRuleAFixture() + const targetHttpsProxy = data + .querygcpTargetHttpsProxy?.[0] as QuerygcpTargetHttpsProxy + targetHttpsProxy.sslPolicy = [] + await testRule(data, Result.FAIL, targetHttpsProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with MODERN ssl policy and VERSION is NOT TLS_1_2', async () => { + const data: PCIQueryResponse = getTestRuleAFixture() + const targetHttpsProxy = data + .querygcpTargetHttpsProxy?.[0] as QuerygcpTargetHttpsProxy + targetHttpsProxy.sslPolicy = targetHttpsProxy.sslPolicy?.map( + ({ minTlsVersion, ...p }) => { + return { + ...p, + minTlsVersion: 'dummy', + } + } + ) + await testRule(data, Result.FAIL, targetHttpsProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with CUSTOM ssl policy and 
enabledFeatures contains invalid values', async () => { + const invalidEnabledFeatureValues = [ + 'TLS_RSA_WITH_AES_128_GCM_SHA256', + 'TLS_RSA_WITH_AES_256_GCM_SHA384', + 'TLS_RSA_WITH_AES_128_CBC_SHA', + 'TLS_RSA_WITH_AES_256_CBC_SHA', + 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + ] + for (const invalidEnabledFeatureValue of invalidEnabledFeatureValues) { + const data: PCIQueryResponse = getTestRuleAFixture() + const targetHttpsProxy = data + .querygcpTargetHttpsProxy?.[0] as QuerygcpTargetHttpsProxy + targetHttpsProxy.sslPolicy = targetHttpsProxy.sslPolicy?.map( + ({ enabledFeatures, profile, ...p }) => { + return { + ...p, + profile: 'CUSTOM', + enabledFeatures: [invalidEnabledFeatureValue], + } + } + ) + await testRule(data, Result.FAIL, targetHttpsProxyRule) + } + }) + }) + + describe('querygcpTargetSslProxy query:', () => { + let targetSslProxyRule: Rule + beforeAll(() => { + const { queries, ...ruleMetadata } = Gcp_PCI_DSS_321_Networking_4 + const query = queries.shift() + targetSslProxyRule = { + ...ruleMetadata, + ...query, + } as Rule + }) + + test('No Security Issue when proxies and ssl policies are secure', async () => { + const data: PCIQueryResponse = getTestRuleBFixture() + await testRule(data, Result.PASS, targetSslProxyRule) + }) + + test('Security Issue when proxies not have ssl policy', async () => { + const data: PCIQueryResponse = getTestRuleBFixture() + const targetSslProxy = data + .querygcpTargetSslProxy?.[0] as QuerygcpTargetHttpsProxy + targetSslProxy.sslPolicy = [] + await testRule(data, Result.FAIL, targetSslProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with MODERN ssl policy and VERSION is NOT TLS_1_2', async () => { + const data: PCIQueryResponse = getTestRuleBFixture() + const targetSslProxy = data + .querygcpTargetSslProxy?.[0] as QuerygcpTargetHttpsProxy + targetSslProxy.sslPolicy = targetSslProxy.sslPolicy?.map( + ({ minTlsVersion, ...p }) => { + return { + ...p, + minTlsVersion: 'dummy', + } + } + ) + await testRule(data, 
Result.FAIL, targetSslProxyRule) + }) + + test('Security Issue when HTTPS-PROXY with CUSTOM ssl policy and enabledFeatures contains invalid values', async () => { + const invalidEnabledFeatureValues = [ + 'TLS_RSA_WITH_AES_128_GCM_SHA256', + 'TLS_RSA_WITH_AES_256_GCM_SHA384', + 'TLS_RSA_WITH_AES_128_CBC_SHA', + 'TLS_RSA_WITH_AES_256_CBC_SHA', + 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + ] + for (const invalidEnabledFeatureValue of invalidEnabledFeatureValues) { + const data: PCIQueryResponse = getTestRuleBFixture() + const targetSslProxy = data + .querygcpTargetSslProxy?.[0] as QuerygcpTargetHttpsProxy + targetSslProxy.sslPolicy = targetSslProxy.sslPolicy?.map( + ({ enabledFeatures, profile, ...p }) => { + return { + ...p, + profile: 'CUSTOM', + enabledFeatures: [invalidEnabledFeatureValue], + } + } + ) + await testRule(data, Result.FAIL, targetSslProxyRule) + } + }) + }) + }) +}) diff --git a/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1.test.ts b/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1.test.ts deleted file mode 100644 index 9f42e299..00000000 --- a/src/gcp/pci-dss-3.2.1/tests/pci-dss-3.2.1.test.ts +++ /dev/null @@ -1,18 +0,0 @@ -import CloudGraph, { Rule, Result, Engine } from '@cloudgraph/sdk' - -describe('PCI Data Security Standard: 3.2.1', () => { - let rulesEngine: Engine - beforeAll(() => { - rulesEngine = new CloudGraph.RulesEngine({ - providerName: 'gcp', - entityName: 'PCI', - }) - }) - - // TODO: Change once we have real checks - describe("Dummy Check", () => { - test('Dummy Test', async () => { - expect('PASS').toBe(Result.PASS) - }) - }) -}) diff --git a/yarn.lock b/yarn.lock index 247b937b..e7bab7c5 100644 --- a/yarn.lock +++ b/yarn.lock @@ -420,12 +420,12 @@ __metadata: linkType: hard "@babel/types@npm:^7.8.3": - version: 7.17.10 - resolution: "@babel/types@npm:7.17.10" + version: 7.17.12 + resolution: "@babel/types@npm:7.17.12" dependencies: "@babel/helper-validator-identifier": ^7.16.7 to-fast-properties: ^2.0.0 - checksum: 
40cfc3f43a3ab7374df8ee6844793f804c65e7bea0fd1b090886b425106ba26e16e8fa698ae4b2caf2746083fe3e62f03f12997a5982e0d131700f17cbdcfca1 + checksum: 5e522081587a0073a577fd05010ab917dbd2acea7aa06027ec42f90894ed1f8df2f03b9bb0713638153839b56a7be8dcf4b8ab2e55796c730a30ca9f0df1ba5c languageName: node linkType: hard @@ -632,6 +632,33 @@ __metadata: languageName: unknown linkType: soft +"@cloudgraph/policy-pack-gcp-nist-800-53-rev4@workspace:src/gcp/nist-800-53-rev4": + version: 0.0.0-use.local + resolution: "@cloudgraph/policy-pack-gcp-nist-800-53-rev4@workspace:src/gcp/nist-800-53-rev4" + dependencies: + "@autocloud/eslint-config": ^0.1.0 + "@cloudgraph/sdk": ^0.18.1 + "@types/jest": ^27.4.0 + "@types/node": ^15.12.4 + "@types/pino": ^6.3.11 + "@typescript-eslint/eslint-plugin": ^4.28.5 + "@typescript-eslint/parser": ^4.28.5 + cpx: ^1.5.0 + cuid: ^2.1.8 + eslint: ^7.25.0 + eslint-config-airbnb-base: 14.2.1 + eslint-config-prettier: ^6.11.0 + eslint-plugin-import: ^2.22.1 + eslint-plugin-prettier: ^3.4.0 + jest: ^27.0.6 + prettier: ^2.4.1 + shx: ^0.3.3 + ts-jest: ^27.0.4 + tslib: ^1 + typescript: ^4.3.5 + languageName: unknown + linkType: soft + "@cloudgraph/policy-pack-gcp-pci-dss-3.2.1@workspace:src/gcp/pci-dss-3.2.1": version: 0.0.0-use.local resolution: "@cloudgraph/policy-pack-gcp-pci-dss-3.2.1@workspace:src/gcp/pci-dss-3.2.1" @@ -713,6 +740,13 @@ __metadata: languageName: node linkType: hard +"@colors/colors@npm:1.5.0": + version: 1.5.0 + resolution: "@colors/colors@npm:1.5.0" + checksum: d64d5260bed1d5012ae3fc617d38d1afc0329fec05342f4e6b838f46998855ba56e0a73833f4a80fa8378c84810da254f76a8a19c39d038260dc06dc4e007425 + languageName: node + linkType: hard + "@eslint/eslintrc@npm:^0.4.3": version: 0.4.3 resolution: "@eslint/eslintrc@npm:0.4.3" @@ -1088,9 +1122,9 @@ __metadata: languageName: node linkType: hard -"@npmcli/arborist@npm:^5.0.0, @npmcli/arborist@npm:^5.0.3": - version: 5.1.1 - resolution: "@npmcli/arborist@npm:5.1.1" +"@npmcli/arborist@npm:^5.0.0, 
@npmcli/arborist@npm:^5.0.4": + version: 5.2.0 + resolution: "@npmcli/arborist@npm:5.2.0" dependencies: "@isaacs/string-locale-compare": ^1.1.0 "@npmcli/installed-package-contents": ^1.0.7 @@ -1128,7 +1162,7 @@ __metadata: walk-up-path: ^1.0.0 bin: arborist: bin/index.js - checksum: e6a989d3743d47444405aad943abcbb87d075184afd394cd968cc1abfe59ab0fad737f2c2417ceebf40b98d640309e8eb94c1e0fb72a6db88a52c4ff5c123ca7 + checksum: e466133cb564619f1544b53ed48632082e90d294a2c7f31103bc685b029c4ba6cb63cea845212148f28b5328ad42fd137936e3395039028b1bd84ed542b9108c languageName: node linkType: hard @@ -1139,7 +1173,7 @@ __metadata: languageName: node linkType: hard -"@npmcli/config@npm:^4.0.1": +"@npmcli/config@npm:^4.1.0": version: 4.1.0 resolution: "@npmcli/config@npm:4.1.0" dependencies: @@ -1251,15 +1285,6 @@ __metadata: languageName: node linkType: hard -"@npmcli/package-json@npm:^1.0.1": - version: 1.0.1 - resolution: "@npmcli/package-json@npm:1.0.1" - dependencies: - json-parse-even-better-errors: ^2.3.1 - checksum: 08b66c8ddb1d6b678975a83006d2fe5070b3013bcb68ea9d54c0142538a614596ddfd1143183fbb8f82c5cecf477d98f3c4e473ef34df3bbf3814e97e37e18d3 - languageName: node - linkType: hard - "@npmcli/package-json@npm:^2.0.0": version: 2.0.0 resolution: "@npmcli/package-json@npm:2.0.0" @@ -2177,13 +2202,6 @@ __metadata: languageName: node linkType: hard -"ansistyles@npm:~0.1.3": - version: 0.1.3 - resolution: "ansistyles@npm:0.1.3" - checksum: 0072507f97e46cc3cb71439f1c0935ceec5c8bca812ebb5034b9f8f6a9ee7d65cdc150c375b8d56643fc8305a08542f6df3a1cd6c80e32eba0b27c4e72da4efd - languageName: node - linkType: hard - "anymatch@npm:^1.3.0": version: 1.3.2 resolution: "anymatch@npm:1.3.2" @@ -2753,13 +2771,6 @@ __metadata: languageName: node linkType: hard -"builtins@npm:^1.0.3": - version: 1.0.3 - resolution: "builtins@npm:1.0.3" - checksum: 47ce94f7eee0e644969da1f1a28e5f29bd2e48b25b2bbb61164c345881086e29464ccb1fb88dbc155ea26e8b1f5fc8a923b26c8c1ed0935b67b644d410674513 - languageName: node - 
linkType: hard - "builtins@npm:^5.0.0": version: 5.0.1 resolution: "builtins@npm:5.0.1" @@ -2769,7 +2780,7 @@ __metadata: languageName: node linkType: hard -"cacache@npm:^16.0.0, cacache@npm:^16.0.2, cacache@npm:^16.0.6": +"cacache@npm:^16.0.0, cacache@npm:^16.0.2, cacache@npm:^16.0.6, cacache@npm:^16.0.7": version: 16.0.7 resolution: "cacache@npm:16.0.7" dependencies: @@ -3044,6 +3055,19 @@ __metadata: languageName: node linkType: hard +"cli-table3@npm:^0.6.2": + version: 0.6.2 + resolution: "cli-table3@npm:0.6.2" + dependencies: + "@colors/colors": 1.5.0 + string-width: ^4.2.0 + dependenciesMeta: + "@colors/colors": + optional: true + checksum: 2f82391698b8a2a2a5e45d2adcfea5d93e557207f90455a8d4c1aac688e9b18a204d9eb4ba1d322fa123b17d64ea3dc5e11de8b005529f3c3e7dbeb27cb4d9be + languageName: node + linkType: hard + "cli-width@npm:^3.0.0": version: 3.0.0 resolution: "cli-width@npm:3.0.0" @@ -5077,7 +5101,7 @@ __metadata: languageName: node linkType: hard -"glob@npm:^7.0.0, glob@npm:^7.0.5, glob@npm:^7.1.1, glob@npm:^7.1.2, glob@npm:^7.1.3, glob@npm:^7.1.4, glob@npm:^7.2.0": +"glob@npm:^7.0.0, glob@npm:^7.0.5, glob@npm:^7.1.1, glob@npm:^7.1.2, glob@npm:^7.1.3, glob@npm:^7.1.4": version: 7.2.0 resolution: "glob@npm:7.2.0" dependencies: @@ -5092,16 +5116,15 @@ __metadata: linkType: hard "glob@npm:^8.0.1": - version: 8.0.1 - resolution: "glob@npm:8.0.1" + version: 8.0.3 + resolution: "glob@npm:8.0.3" dependencies: fs.realpath: ^1.0.0 inflight: ^1.0.4 inherits: 2 minimatch: ^5.0.1 once: ^1.3.0 - path-is-absolute: ^1.0.0 - checksum: 7ac782f3ef1c08005884447479e68ceb0ad56997eb2003e1e9aefae71bad3cb48eb7c49190d3d6736f2ffcd8af4985d53a46831b3d5e0052cc5756822a38b61a + checksum: 50bcdea19d8e79d8de5f460b1939ffc2b3299eac28deb502093fdca22a78efebc03e66bf54f0abc3d3d07d8134d19a32850288b7440d77e072aa55f9d33b18c5 languageName: node linkType: hard @@ -5195,13 +5218,20 @@ __metadata: languageName: node linkType: hard -"graceful-fs@npm:^4.1.10, graceful-fs@npm:^4.1.11, graceful-fs@npm:^4.1.2, 
graceful-fs@npm:^4.1.5, graceful-fs@npm:^4.1.6, graceful-fs@npm:^4.2.0, graceful-fs@npm:^4.2.4, graceful-fs@npm:^4.2.6, graceful-fs@npm:^4.2.9": +"graceful-fs@npm:^4.1.10, graceful-fs@npm:^4.1.11, graceful-fs@npm:^4.1.2, graceful-fs@npm:^4.1.5, graceful-fs@npm:^4.1.6, graceful-fs@npm:^4.2.0, graceful-fs@npm:^4.2.4": version: 4.2.9 resolution: "graceful-fs@npm:4.2.9" checksum: 68ea4e07ff2c041ada184f9278b830375f8e0b75154e3f080af6b70f66172fabb4108d19b3863a96b53fc068a310b9b6493d86d1291acc5f3861eb4b79d26ad6 languageName: node linkType: hard +"graceful-fs@npm:^4.2.10, graceful-fs@npm:^4.2.6": + version: 4.2.10 + resolution: "graceful-fs@npm:4.2.10" + checksum: 3f109d70ae123951905d85032ebeae3c2a5a7a997430df00ea30df0e3a6c60cf6689b109654d6fdacd28810a053348c4d14642da1d075049e6be1ba5216218da + languageName: node + linkType: hard + "graphql@npm:^16.2.0": version: 16.2.0 resolution: "graphql@npm:16.2.0" @@ -5581,13 +5611,6 @@ __metadata: languageName: node linkType: hard -"ini@npm:^2.0.0": - version: 2.0.0 - resolution: "ini@npm:2.0.0" - checksum: e7aadc5fb2e4aefc666d74ee2160c073995a4061556b1b5b4241ecb19ad609243b9cceafe91bae49c219519394bbd31512516cb22a3b1ca6e66d869e0447e84e - languageName: node - linkType: hard - "ini@npm:^3.0.0": version: 3.0.0 resolution: "ini@npm:3.0.0" @@ -5595,7 +5618,7 @@ __metadata: languageName: node linkType: hard -"init-package-json@npm:^3.0.1": +"init-package-json@npm:^3.0.2": version: 3.0.2 resolution: "init-package-json@npm:3.0.2" dependencies: @@ -5678,9 +5701,9 @@ __metadata: linkType: hard "ip@npm:^1.1.5": - version: 1.1.5 - resolution: "ip@npm:1.1.5" - checksum: 30133981f082a060a32644f6a7746e9ba7ac9e2bc07ecc8bbdda3ee8ca9bec1190724c390e45a1ee7695e7edfd2a8f7dda2c104ec5f7ac5068c00648504c7e5a + version: 1.1.8 + resolution: "ip@npm:1.1.8" + checksum: a2ade53eb339fb0cbe9e69a44caab10d6e3784662285eb5d2677117ee4facc33a64679051c35e0dfdb1a3983a51ce2f5d2cb36446d52e10d01881789b76e28fb languageName: node linkType: hard @@ -7358,9 +7381,9 @@ __metadata: 
linkType: hard "lru-cache@npm:^7.4.4, lru-cache@npm:^7.5.1, lru-cache@npm:^7.7.1": - version: 7.9.0 - resolution: "lru-cache@npm:7.9.0" - checksum: c91a293a103d11ea4f07de4122ba4f73d8203d0de51852fb612b1764296aebf623a3e11dddef1b3aefdc8d71af97d52b222dad5459dcb967713bbab9a19fed7d + version: 7.10.1 + resolution: "lru-cache@npm:7.10.1" + checksum: e8b190d71ed0fcd7b29c71a3e9b01f851c92d1ef8865ff06b5581ca991db1e5e006920ed4da8b56da1910664ed51abfd76c46fb55e82ac252ff6c970ff910d72 languageName: node linkType: hard @@ -7399,7 +7422,7 @@ __metadata: languageName: node linkType: hard -"make-fetch-happen@npm:^10.0.3, make-fetch-happen@npm:^10.0.6": +"make-fetch-happen@npm:^10.0.3, make-fetch-happen@npm:^10.0.6, make-fetch-happen@npm:^10.1.3": version: 10.1.3 resolution: "make-fetch-happen@npm:10.1.3" dependencies: @@ -7662,11 +7685,11 @@ __metadata: linkType: hard "minimatch@npm:^5.0.1": - version: 5.0.1 - resolution: "minimatch@npm:5.0.1" + version: 5.1.0 + resolution: "minimatch@npm:5.1.0" dependencies: brace-expansion: ^2.0.1 - checksum: b34b98463da4754bc526b244d680c69d4d6089451ebe512edaf6dd9eeed0279399cfa3edb19233513b8f830bf4bfcad911dddcdf125e75074100d52f724774f0 + checksum: 15ce53d31a06361e8b7a629501b5c75491bc2b59712d53e802b1987121d91b433d73fcc5be92974fde66b2b51d8fb28d75a9ae900d249feb792bb1ba2a4f0a90 languageName: node linkType: hard @@ -8050,12 +8073,12 @@ __metadata: languageName: node linkType: hard -"npm-audit-report@npm:^2.1.5": - version: 2.1.5 - resolution: "npm-audit-report@npm:2.1.5" +"npm-audit-report@npm:^3.0.0": + version: 3.0.0 + resolution: "npm-audit-report@npm:3.0.0" dependencies: chalk: ^4.0.0 - checksum: 9199c4331a29b478b7adbafe1bf463943f65cfd840f62ffe9e6263f0ae64d77725ea102126b3892ef3379a6770a6fe11e1f68ab4cb196c0045db2e1aeafc593d + checksum: 3927972c14e1d9fd21a6ab2d3c2d651e20346ff9a784ea2fcdc2b1e3b3e23994fc0e8961c3c9f4aea857e3a995a556a77f4f0250dbaf6238c481c609ed912a92 languageName: node linkType: hard @@ -8078,15 +8101,6 @@ __metadata: languageName: node 
linkType: hard -"npm-install-checks@npm:^4.0.0": - version: 4.0.0 - resolution: "npm-install-checks@npm:4.0.0" - dependencies: - semver: ^7.1.1 - checksum: 8308ff48e61e0863d7f148f62543e1f6c832525a7d8002ea742d5e478efa8b29bf65a87f9fb82786e15232e4b3d0362b126c45afdceed4c051c0d3c227dd0ace - languageName: node - linkType: hard - "npm-install-checks@npm:^5.0.0": version: 5.0.0 resolution: "npm-install-checks@npm:5.0.0" @@ -8103,7 +8117,7 @@ __metadata: languageName: node linkType: hard -"npm-package-arg@npm:^9.0.0, npm-package-arg@npm:^9.0.1": +"npm-package-arg@npm:^9.0.0, npm-package-arg@npm:^9.0.1, npm-package-arg@npm:^9.0.2": version: 9.0.2 resolution: "npm-package-arg@npm:9.0.2" dependencies: @@ -8128,7 +8142,7 @@ __metadata: languageName: node linkType: hard -"npm-pick-manifest@npm:^7.0.0": +"npm-pick-manifest@npm:^7.0.0, npm-pick-manifest@npm:^7.0.1": version: 7.0.1 resolution: "npm-pick-manifest@npm:7.0.1" dependencies: @@ -8140,7 +8154,7 @@ __metadata: languageName: node linkType: hard -"npm-profile@npm:^6.0.2": +"npm-profile@npm:^6.0.3": version: 6.0.3 resolution: "npm-profile@npm:6.0.3" dependencies: @@ -8150,7 +8164,7 @@ __metadata: languageName: node linkType: hard -"npm-registry-fetch@npm:^13.0.0, npm-registry-fetch@npm:^13.0.1": +"npm-registry-fetch@npm:^13.0.0, npm-registry-fetch@npm:^13.0.1, npm-registry-fetch@npm:^13.1.1": version: 13.1.1 resolution: "npm-registry-fetch@npm:13.1.1" dependencies: @@ -8199,33 +8213,32 @@ __metadata: languageName: node linkType: hard -"npm@npm:^8.3.0": - version: 8.5.5 - resolution: "npm@npm:8.5.5" +"npm@npm:^8.3.0, npm@npm:^8.8.0": + version: 8.10.0 + resolution: "npm@npm:8.10.0" dependencies: "@isaacs/string-locale-compare": ^1.1.0 - "@npmcli/arborist": ^5.0.3 + "@npmcli/arborist": ^5.0.4 "@npmcli/ci-detect": ^2.0.0 - "@npmcli/config": ^4.0.1 - "@npmcli/map-workspaces": ^2.0.2 - "@npmcli/package-json": ^1.0.1 + "@npmcli/config": ^4.1.0 + "@npmcli/fs": ^2.1.0 + "@npmcli/map-workspaces": ^2.0.3 + "@npmcli/package-json": 
^2.0.0 "@npmcli/run-script": ^3.0.1 abbrev: ~1.1.1 - ansicolors: ~0.3.2 - ansistyles: ~0.1.3 archy: ~1.0.0 - cacache: ^16.0.2 + cacache: ^16.0.7 chalk: ^4.1.2 chownr: ^2.0.0 cli-columns: ^4.0.0 - cli-table3: ^0.6.1 + cli-table3: ^0.6.2 columnify: ^1.6.0 fastest-levenshtein: ^1.0.12 - glob: ^7.2.0 - graceful-fs: ^4.2.9 + glob: ^8.0.1 + graceful-fs: ^4.2.10 hosted-git-info: ^5.0.0 - ini: ^2.0.0 - init-package-json: ^3.0.1 + ini: ^3.0.0 + init-package-json: ^3.0.2 is-cidr: ^4.0.2 json-parse-even-better-errors: ^2.3.1 libnpmaccess: ^6.0.2 @@ -8239,7 +8252,7 @@ __metadata: libnpmsearch: ^5.0.2 libnpmteam: ^4.0.2 libnpmversion: ^3.0.1 - make-fetch-happen: ^10.0.6 + make-fetch-happen: ^10.1.3 minipass: ^3.1.6 minipass-pipeline: ^1.2.4 mkdirp: ^1.0.4 @@ -8247,41 +8260,41 @@ __metadata: ms: ^2.1.2 node-gyp: ^9.0.0 nopt: ^5.0.0 - npm-audit-report: ^2.1.5 - npm-install-checks: ^4.0.0 - npm-package-arg: ^9.0.1 - npm-pick-manifest: ^7.0.0 - npm-profile: ^6.0.2 - npm-registry-fetch: ^13.0.1 + npm-audit-report: ^3.0.0 + npm-install-checks: ^5.0.0 + npm-package-arg: ^9.0.2 + npm-pick-manifest: ^7.0.1 + npm-profile: ^6.0.3 + npm-registry-fetch: ^13.1.1 npm-user-validate: ^1.0.1 - npmlog: ^6.0.1 + npmlog: ^6.0.2 opener: ^1.5.2 - pacote: ^13.0.5 - parse-conflict-json: ^2.0.1 - proc-log: ^2.0.0 + pacote: ^13.3.0 + parse-conflict-json: ^2.0.2 + proc-log: ^2.0.1 qrcode-terminal: ^0.12.0 read: ~1.0.7 - read-package-json: ^5.0.0 + read-package-json: ^5.0.1 read-package-json-fast: ^2.0.3 readdir-scoped-modules: ^1.1.0 rimraf: ^3.0.2 - semver: ^7.3.5 - ssri: ^8.0.1 + semver: ^7.3.7 + ssri: ^9.0.0 tar: ^6.1.11 text-table: ~0.2.0 tiny-relative-date: ^1.3.0 - treeverse: ^1.0.4 - validate-npm-package-name: ~3.0.0 + treeverse: ^2.0.0 + validate-npm-package-name: ^4.0.0 which: ^2.0.2 write-file-atomic: ^4.0.1 bin: npm: bin/npm-cli.js npx: bin/npx-cli.js - checksum: f48fbac8c76a0afa709aaeb3ffeb2d6886b88577f6f7f54e91bc0d6169f6ec90f402bbd6ab7e643347970d95d43860f35b0dc0343664222def47cc6042ccf74a + 
checksum: 7c819c3b72a4ea872f2c7cb4ff520a871d0f77eec81fd25005cb858b4ed4bff5d83bc442d5d7a980e8186849ab72d2de985370102fdbe4053cdc85645aa5e65d languageName: node linkType: hard -"npmlog@npm:^6.0.0, npmlog@npm:^6.0.1, npmlog@npm:^6.0.2": +"npmlog@npm:^6.0.0, npmlog@npm:^6.0.2": version: 6.0.2 resolution: "npmlog@npm:6.0.2" dependencies: @@ -8686,7 +8699,7 @@ __metadata: languageName: node linkType: hard -"pacote@npm:^13.0.3, pacote@npm:^13.0.5": +"pacote@npm:^13.0.3, pacote@npm:^13.0.5, pacote@npm:^13.3.0": version: 13.3.0 resolution: "pacote@npm:13.3.0" dependencies: @@ -8726,7 +8739,7 @@ __metadata: languageName: node linkType: hard -"parse-conflict-json@npm:^2.0.1": +"parse-conflict-json@npm:^2.0.1, parse-conflict-json@npm:^2.0.2": version: 2.0.2 resolution: "parse-conflict-json@npm:2.0.2" dependencies: @@ -8936,6 +8949,7 @@ __metadata: "@semantic-release/npm": ^9.0.1 "@semrel-extra/npm": ^1.2.0 lodash: ^4.17.21 + npm: ^8.8.0 semantic-release: ^19.0.2 languageName: unknown linkType: soft @@ -9011,7 +9025,7 @@ __metadata: languageName: node linkType: hard -"proc-log@npm:^2.0.0": +"proc-log@npm:^2.0.0, proc-log@npm:^2.0.1": version: 2.0.1 resolution: "proc-log@npm:2.0.1" checksum: f6f23564ff759097db37443e6e2765af84979a703d2c52c1b9df506ee9f87caa101ba49d8fdc115c1a313ec78e37e8134704e9069e6a870f3499d98bb24c436f @@ -9224,7 +9238,7 @@ __metadata: languageName: node linkType: hard -"read-package-json@npm:^5.0.0": +"read-package-json@npm:^5.0.0, read-package-json@npm:^5.0.1": version: 5.0.1 resolution: "read-package-json@npm:5.0.1" dependencies: @@ -10133,15 +10147,6 @@ __metadata: languageName: node linkType: hard -"ssri@npm:^8.0.1": - version: 8.0.1 - resolution: "ssri@npm:8.0.1" - dependencies: - minipass: ^3.1.1 - checksum: bc447f5af814fa9713aa201ec2522208ae0f4d8f3bda7a1f445a797c7b929a02720436ff7c478fb5edc4045adb02b1b88d2341b436a80798734e2494f1067b36 - languageName: node - linkType: hard - "ssri@npm:^9.0.0": version: 9.0.0 resolution: "ssri@npm:9.0.0" @@ -10709,13 +10714,6 
@@ __metadata: languageName: node linkType: hard -"treeverse@npm:^1.0.4": - version: 1.0.4 - resolution: "treeverse@npm:1.0.4" - checksum: 712640acd811060ff552a3c761f700d18d22a4da544d31b4e290817ac4bbbfcfe33b58f85e7a5787e6ff7351d3a9100670721a289ca14eb87b36ad8a0c20ebd8 - languageName: node - linkType: hard - "treeverse@npm:^2.0.0": version: 2.0.0 resolution: "treeverse@npm:2.0.0" @@ -11170,15 +11168,6 @@ __metadata: languageName: node linkType: hard -"validate-npm-package-name@npm:~3.0.0": - version: 3.0.0 - resolution: "validate-npm-package-name@npm:3.0.0" - dependencies: - builtins: ^1.0.3 - checksum: ce4c68207abfb22c05eedb09ff97adbcedc80304a235a0844f5344f1fd5086aa80e4dbec5684d6094e26e35065277b765c1caef68bcea66b9056761eddb22967 - languageName: node - linkType: hard - "w3c-hr-time@npm:^1.0.2": version: 1.0.2 resolution: "w3c-hr-time@npm:1.0.2"