diff --git a/.gitignore b/.gitignore
index 79ba03f1..1ab00b0d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
.idea
bundle.tar.gz
opa
+.vscode/
+.DS_Store
diff --git a/checks/cloud/aws/accessanalyzer/accessanalyzer.go b/checks/cloud/aws/accessanalyzer/accessanalyzer.go
new file mode 100644
index 00000000..3af3b838
--- /dev/null
+++ b/checks/cloud/aws/accessanalyzer/accessanalyzer.go
@@ -0,0 +1 @@
+package accessanalyzer
diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer.go b/checks/cloud/aws/accessanalyzer/enable_access_analyzer.go
deleted file mode 100755
index 77f5afdf..00000000
--- a/checks/cloud/aws/accessanalyzer/enable_access_analyzer.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package accessanalyzer
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/framework"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-)
-
-var CheckEnableAccessAnalyzer = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0175",
- Provider: providers.AWSProvider,
- Service: "accessanalyzer",
- ShortCode: "enable-access-analyzer",
- Frameworks: map[framework.Framework][]string{
- framework.CIS_AWS_1_4: {"1.20"},
- },
- Summary: "Enable IAM Access analyzer for IAM policies about all resources in each region.",
- Impact: "Reduced visibility of externally shared resources.",
- Resolution: "Enable IAM Access analyzer across all regions.",
- Explanation: `
-AWS IAM Access Analyzer helps you identify the resources in your organization and
-accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity.
-This lets you identify unintended access to your resources and data. Access Analyzer
-identifies resources that are shared with external principals by using logic-based reasoning
-to analyze the resource-based policies in your AWS environment. IAM Access Analyzer
-continuously monitors all policies for S3 bucket, IAM roles, KMS(Key Management Service)
-keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues.
-`,
- Links: []string{
- "https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html",
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- var enabled bool
- for _, analyzer := range s.AWS.AccessAnalyzer.Analyzers {
- if analyzer.Active.IsTrue() {
- enabled = true
- break
- }
- }
- if !enabled {
- results.Add(
- "Access Analyzer is not enabled.",
- trivyTypes.NewUnmanagedMetadata(),
- )
- } else {
- results.AddPassed(trivyTypes.NewUnmanagedMetadata())
- }
- return
- },
-)
diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer.rego b/checks/cloud/aws/accessanalyzer/enable_access_analyzer.rego
new file mode 100644
index 00000000..a6851b44
--- /dev/null
+++ b/checks/cloud/aws/accessanalyzer/enable_access_analyzer.rego
@@ -0,0 +1,45 @@
+# METADATA
+# title: Enable IAM Access analyzer for IAM policies about all resources in each region.
+# description: |
+# AWS IAM Access Analyzer helps you identify the resources in your organization and
+# accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity.
+# This lets you identify unintended access to your resources and data. Access Analyzer
+# identifies resources that are shared with external principals by using logic-based reasoning
+# to analyze the resource-based policies in your AWS environment. IAM Access Analyzer
+# continuously monitors all policies for S3 bucket, IAM roles, KMS(Key Management Service)
+# keys, AWS Lambda functions, and Amazon SQS(Simple Queue Service) queues.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html
+# custom:
+# id: AVD-AWS-0175
+# avd_id: AVD-AWS-0175
+# provider: aws
+# service: accessanalyzer
+# severity: LOW
+# short_code: enable-access-analyzer
+# recommended_action: Enable IAM Access analyzer across all regions.
+# frameworks:
+# cis-aws-1.4:
+# - "1.20"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: accessanalyzer
+# provider: aws
+package builtin.aws.accessanalyzer.aws0175
+
+import rego.v1
+
+deny contains res if {
+ not has_active_analyzer
+ res := result.new("Access Analyzer is not enabled.", {})
+}
+
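+# has_active_analyzer is satisfied if at least one analyzer in the account state is active.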
+has_active_analyzer if {
+ some analyzer in input.aws.accessanalyzer.analyzers
+ analyzer.active.value
+}
diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.go b/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.go
deleted file mode 100644
index ecfedd49..00000000
--- a/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package accessanalyzer
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/accessanalyzer"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestASCheckNoSecretsInUserData(t *testing.T) {
- tests := []struct {
- name string
- input accessanalyzer.AccessAnalyzer
- expected bool
- }{
- {
- name: "No analyzers enabled",
- input: accessanalyzer.AccessAnalyzer{},
- expected: true,
- },
- {
- name: "Analyzer disabled",
- input: accessanalyzer.AccessAnalyzer{
- Analyzers: []accessanalyzer.Analyzer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ARN: trivyTypes.String("arn:aws:accessanalyzer:us-east-1:123456789012:analyzer/test", trivyTypes.NewTestMetadata()),
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Active: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "Analyzer enabled",
- input: accessanalyzer.AccessAnalyzer{
- Analyzers: []accessanalyzer.Analyzer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ARN: trivyTypes.String("arn:aws:accessanalyzer:us-east-1:123456789012:analyzer/test", trivyTypes.NewTestMetadata()),
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Active: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.AccessAnalyzer = test.input
- results := CheckEnableAccessAnalyzer.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAccessAnalyzer.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.rego b/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.rego
new file mode 100644
index 00000000..1e37b2b2
--- /dev/null
+++ b/checks/cloud/aws/accessanalyzer/enable_access_analyzer_test.rego
@@ -0,0 +1,26 @@
+package builtin.aws.accessanalyzer.aws0175_test
+
+import rego.v1
+
+import data.builtin.aws.accessanalyzer.aws0175 as check
+import data.lib.test
+
+test_disallow_no_analyzers if {
+ r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": []}}}
+ test.assert_equal_message("Access Analyzer is not enabled.", r)
+}
+
+test_disallow_analyzer_disabled if {
+ r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": [{"active": {"value": false}}]}}}
+ test.assert_equal_message("Access Analyzer is not enabled.", r)
+}
+
+test_allow_one_of_analyzer_disabled if {
+ r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": [{"active": {"value": false}}, {"active": {"value": true}}]}}}
+ test.assert_empty(r)
+}
+
+test_allow_analyzer_enabled if {
+ r := check.deny with input as {"aws": {"accessanalyzer": {"analyzers": [{"active": {"value": true}}]}}}
+ test.assert_empty(r)
+}
diff --git a/checks/cloud/aws/apigateway/apigateway.go b/checks/cloud/aws/apigateway/apigateway.go
new file mode 100644
index 00000000..cd97df5a
--- /dev/null
+++ b/checks/cloud/aws/apigateway/apigateway.go
@@ -0,0 +1 @@
+package apigateway
diff --git a/checks/cloud/aws/apigateway/enable_access_logging.go b/checks/cloud/aws/apigateway/enable_access_logging.go
deleted file mode 100755
index ee698b0f..00000000
--- a/checks/cloud/aws/apigateway/enable_access_logging.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package apigateway
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableAccessLogging = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0001",
- Provider: providers.AWSProvider,
- Service: "api-gateway",
- ShortCode: "enable-access-logging",
- Summary: "API Gateway stages for V1 and V2 should have access logging enabled",
- Impact: "Logging provides vital information about access and usage",
- Resolution: "Enable logging for API Gateway stages",
- Explanation: `API Gateway stages should have access log settings block configured to track all access to a particular stage. This should be applied to both v1 and v2 gateway stages.`,
- Links: []string{
- "https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-logging.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableAccessLoggingGoodExamples,
- BadExamples: terraformEnableAccessLoggingBadExamples,
- Links: terraformEnableAccessLoggingLinks,
- RemediationMarkdown: terraformEnableAccessLoggingRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableAccessLoggingGoodExamples,
- BadExamples: cloudFormationEnableAccessLoggingBadExamples,
- Links: cloudFormationEnableAccessLoggingLinks,
- RemediationMarkdown: cloudFormationEnableAccessLoggingRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, api := range s.AWS.APIGateway.V1.APIs {
- for _, stage := range api.Stages {
- if stage.Metadata.IsUnmanaged() {
- continue
- }
- if stage.AccessLogging.CloudwatchLogGroupARN.IsEmpty() {
- results.Add(
- "Access logging is not configured.",
- stage.AccessLogging.CloudwatchLogGroupARN,
- )
- } else {
- results.AddPassed(&api)
- }
- }
- }
- for _, api := range s.AWS.APIGateway.V2.APIs {
- for _, stage := range api.Stages {
- if stage.Metadata.IsUnmanaged() {
- continue
- }
- if stage.AccessLogging.CloudwatchLogGroupARN.IsEmpty() {
- results.Add(
- "Access logging is not configured.",
- stage.AccessLogging.CloudwatchLogGroupARN,
- )
- } else {
- results.AddPassed(&api)
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/apigateway/enable_access_logging.rego b/checks/cloud/aws/apigateway/enable_access_logging.rego
new file mode 100644
index 00000000..25d10e62
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_access_logging.rego
@@ -0,0 +1,49 @@
+# METADATA
+# title: API Gateway stages for V1 and V2 should have access logging enabled
+# description: |
+# API Gateway stages should have access log settings block configured to track all access to a particular stage. This should be applied to both v1 and v2 gateway stages.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-logging.html
+# custom:
+# id: AVD-AWS-0001
+# avd_id: AVD-AWS-0001
+# provider: aws
+# service: api-gateway
+# severity: MEDIUM
+# short_code: enable-access-logging
+# recommended_action: Enable logging for API Gateway stages
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: api-gateway
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/apigatewayv2_stage#access_log_settings
+# good_examples: checks/cloud/aws/apigateway/enable_access_logging.tf.go
+# bad_examples: checks/cloud/aws/apigateway/enable_access_logging.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/apigateway/enable_access_logging.cf.go
+# bad_examples: checks/cloud/aws/apigateway/enable_access_logging.cf.go
+package builtin.aws.apigateway.aws0001
+
+import rego.v1
+
+deny contains res if {
+ some stage in apis[_].stages
+ stage.__defsec_metadata.managed
+
+ arn := stage.accesslogging.cloudwatchloggrouparn
+ arn.value == "" # TODO: check if unresolvable?
+
+ res := result.new("Access logging is not configured.", arn)
+}
+
+# TODO: use map?
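+# apis is the union of all API Gateway v1 and v2 APIs present in the input.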
+apis contains input.aws.apigateway.v1.apis[_]
+
+apis contains input.aws.apigateway.v2.apis[_]
diff --git a/checks/cloud/aws/apigateway/enable_access_logging_test.go b/checks/cloud/aws/apigateway/enable_access_logging_test.go
deleted file mode 100644
index 390805d7..00000000
--- a/checks/cloud/aws/apigateway/enable_access_logging_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package apigateway
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- v1 "github.com/aquasecurity/trivy/pkg/iac/providers/aws/apigateway/v1"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableAccessLogging(t *testing.T) {
- tests := []struct {
- name string
- input v1.APIGateway
- expected bool
- }{
- {
- name: "API Gateway stage with no log group ARN",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- AccessLogging: v1.AccessLogging{
- Metadata: trivyTypes.NewTestMetadata(),
- CloudwatchLogGroupARN: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "API Gateway stage with log group ARN",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- AccessLogging: v1.AccessLogging{
- Metadata: trivyTypes.NewTestMetadata(),
- CloudwatchLogGroupARN: trivyTypes.String("log-group-arn", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.APIGateway.V1 = test.input
- results := CheckEnableAccessLogging.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAccessLogging.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/apigateway/enable_access_logging_test.rego b/checks/cloud/aws/apigateway/enable_access_logging_test.rego
new file mode 100644
index 00000000..6a65f15f
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_access_logging_test.rego
@@ -0,0 +1,17 @@
+package builtin.aws.apigateway.aws0001_test
+
+import rego.v1
+
+import data.builtin.aws.apigateway.aws0001 as check
+import data.lib.test
+
+test_disallow_api_gateway_without_log_group_arn if {
+ r := check.deny with input as {"aws": {"apigateway": {"v1": {"apis": [{"stages": [{"__defsec_metadata": {"managed": true}, "accesslogging": {"cloudwatchloggrouparn": {"value": ""}}}]}]}}}}
+ test.assert_equal_message("Access logging is not configured.", r)
+}
+
+test_allow_api_gateway_with_log_group_arn if {
+ test.assert_empty(check.deny) with input as {"aws": {"apigateway": {"v1": {"apis": [{"stages": [{"__defsec_metadata": {"managed": true}, "accesslogging": {"cloudwatchloggrouparn": {"value": "log-group-arn"}}}]}]}}}}
+}
+
+# TODO add test for v2
diff --git a/checks/cloud/aws/apigateway/enable_cache.go b/checks/cloud/aws/apigateway/enable_cache.go
deleted file mode 100644
index cf7a0052..00000000
--- a/checks/cloud/aws/apigateway/enable_cache.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package apigateway
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableCache = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0190",
- Provider: providers.AWSProvider,
- Service: "api-gateway",
- ShortCode: "enable-cache",
- Summary: "Ensure that response caching is enabled for your Amazon API Gateway REST APIs.",
- Impact: "Reduce the number of calls made to your API endpoint and also improve the latency of requests to your API with response caching.",
- Resolution: "Enable cache",
- Explanation: "A REST API in API Gateway is a collection of resources and methods that are integrated with backend HTTP endpoints, Lambda functions, or other AWS services. You can enable API caching in Amazon API Gateway to cache your endpoint responses. With caching, you can reduce the number of calls made to your endpoint and also improve the latency of requests to your API.",
- Links: []string{"https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-caching.html"},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableCacheGoodExamples,
- BadExamples: terraformEnableCacheBadExamples,
- Links: terraformEnableCacheLinks,
- RemediationMarkdown: terraformEnableCacheRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, api := range s.AWS.APIGateway.V1.APIs {
- if api.Metadata.IsUnmanaged() {
- continue
- }
- for _, stage := range api.Stages {
- if stage.Metadata.IsUnmanaged() {
- continue
- }
- for _, settings := range stage.RESTMethodSettings {
- if settings.Metadata.IsUnmanaged() {
- continue
- }
- if settings.CacheEnabled.IsFalse() {
- results.Add(
- "Cache data is not enabled.",
- settings.CacheEnabled,
- )
- } else {
- results.AddPassed(&settings)
- }
-
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/apigateway/enable_cache.rego b/checks/cloud/aws/apigateway/enable_cache.rego
new file mode 100644
index 00000000..32105db4
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_cache.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: Ensure that response caching is enabled for your Amazon API Gateway REST APIs.
+# description: |
+# A REST API in API Gateway is a collection of resources and methods that are integrated with backend HTTP endpoints, Lambda functions, or other AWS services. You can enable API caching in Amazon API Gateway to cache your endpoint responses. With caching, you can reduce the number of calls made to your endpoint and also improve the latency of requests to your API.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-caching.html
+# custom:
+# id: AVD-AWS-0190
+# avd_id: AVD-AWS-0190
+# provider: aws
+# service: api-gateway
+# severity: LOW
+# short_code: enable-cache
+# recommended_action: Enable cache
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: api-gateway
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method_settings#cache_enabled
+# good_examples: checks/cloud/aws/apigateway/enable_cache.tf.go
+# bad_examples: checks/cloud/aws/apigateway/enable_cache.tf.go
+package builtin.aws.apigateway.aws0190
+
+import rego.v1
+
+deny contains res if {
+ some api in input.aws.apigateway.v1.apis
+ api.__defsec_metadata.managed
+ some stage in api.stages
+ stage.__defsec_metadata.managed
+ some settings in stage.restmethodsettings
+ settings.__defsec_metadata.managed
+ not settings.cacheenabled.value
+ res := result.new("Cache data is not enabled.", settings.cacheenabled)
+}
diff --git a/checks/cloud/aws/apigateway/enable_cache_encryption.go b/checks/cloud/aws/apigateway/enable_cache_encryption.go
deleted file mode 100755
index 95942361..00000000
--- a/checks/cloud/aws/apigateway/enable_cache_encryption.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package apigateway
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableCacheEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0002",
- Provider: providers.AWSProvider,
- Service: "api-gateway",
- ShortCode: "enable-cache-encryption",
- Summary: "API Gateway must have cache enabled",
- Impact: "Data stored in the cache that is unencrypted may be vulnerable to compromise",
- Resolution: "Enable cache encryption",
- Explanation: `Method cache encryption ensures that any sensitive data in the cache is not vulnerable to compromise in the event of interception`,
- Links: []string{},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableCacheEncryptionGoodExamples,
- BadExamples: terraformEnableCacheEncryptionBadExamples,
- Links: terraformEnableCacheEncryptionLinks,
- RemediationMarkdown: terraformEnableCacheEncryptionRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, api := range s.AWS.APIGateway.V1.APIs {
- if api.Metadata.IsUnmanaged() {
- continue
- }
- for _, stage := range api.Stages {
- if stage.Metadata.IsUnmanaged() {
- continue
- }
- for _, settings := range stage.RESTMethodSettings {
- if settings.Metadata.IsUnmanaged() {
- continue
- }
- if settings.CacheEnabled.IsFalse() {
- continue
- }
- if settings.CacheDataEncrypted.IsFalse() {
- results.Add(
- "Cache data is not encrypted.",
- settings.CacheDataEncrypted,
- )
- } else {
- results.AddPassed(&settings)
- }
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/apigateway/enable_cache_encryption.rego b/checks/cloud/aws/apigateway/enable_cache_encryption.rego
new file mode 100644
index 00000000..83d259fd
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_cache_encryption.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: API Gateway must have cache enabled
+# description: |
+# Method cache encryption ensures that any sensitive data in the cache is not vulnerable to compromise in the event of interception
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# custom:
+# id: AVD-AWS-0002
+# avd_id: AVD-AWS-0002
+# provider: aws
+# service: api-gateway
+# severity: MEDIUM
+# short_code: enable-cache-encryption
+# recommended_action: Enable cache encryption
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: api-gateway
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method_settings#cache_data_encrypted
+# good_examples: checks/cloud/aws/apigateway/enable_cache_encryption.tf.go
+# bad_examples: checks/cloud/aws/apigateway/enable_cache_encryption.tf.go
+package builtin.aws.apigateway.aws0002
+
+import rego.v1
+
+deny contains res if {
+ some api in input.aws.apigateway.v1.apis
+ api.__defsec_metadata.managed
+ some stage in api.stages
+ stage.__defsec_metadata.managed
+ some settings in stage.restmethodsettings
+ settings.__defsec_metadata.managed
+ settings.cacheenabled.value
+ not settings.cachedataencrypted.value
+ res := result.new("Cache data is not encrypted.", settings.cachedataencrypted)
+}
diff --git a/checks/cloud/aws/apigateway/enable_cache_encryption_test.go b/checks/cloud/aws/apigateway/enable_cache_encryption_test.go
deleted file mode 100644
index 70fbec68..00000000
--- a/checks/cloud/aws/apigateway/enable_cache_encryption_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package apigateway
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- v1 "github.com/aquasecurity/trivy/pkg/iac/providers/aws/apigateway/v1"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableCacheEncryption(t *testing.T) {
- tests := []struct {
- name string
- input v1.APIGateway
- expected bool
- }{
- {
- name: "API Gateway stage with unencrypted cache",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RESTMethodSettings: []v1.RESTMethodSettings{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- CacheDataEncrypted: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- CacheEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "API Gateway stage with encrypted cache",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RESTMethodSettings: []v1.RESTMethodSettings{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- CacheDataEncrypted: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- CacheEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "API Gateway stage with caching disabled",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RESTMethodSettings: []v1.RESTMethodSettings{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- CacheDataEncrypted: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- CacheEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.APIGateway.V1 = test.input
- results := CheckEnableCacheEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableCacheEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/apigateway/enable_cache_encryption_test.rego b/checks/cloud/aws/apigateway/enable_cache_encryption_test.rego
new file mode 100644
index 00000000..f59af1b3
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_cache_encryption_test.rego
@@ -0,0 +1,28 @@
+package builtin.aws.apigateway.aws0002_test
+
+import rego.v1
+
+import data.builtin.aws.apigateway.aws0002 as check
+import data.lib.test
+
+test_allow_api_gateway_with_cache_encryption if {
+ test.assert_empty(check.deny) with input as build_input(true)
+}
+
+test_disallow_api_gateway_without_cache_encryption if {
+ r := check.deny with input as build_input(false)
+
+ test.assert_equal_message("Cache data is not encrypted.", r)
+}
+
+build_input(cachedataencrypted) := {"aws": {"apigateway": {"v1": {"apis": [{
+ "__defsec_metadata": {"managed": true},
+ "stages": [{
+ "__defsec_metadata": {"managed": true},
+ "restmethodsettings": [{
+ "__defsec_metadata": {"managed": true},
+ "cacheenabled": {"value": true},
+ "cachedataencrypted": {"value": cachedataencrypted},
+ }],
+ }],
+}]}}}}
diff --git a/checks/cloud/aws/apigateway/enable_cache_test.go b/checks/cloud/aws/apigateway/enable_cache_test.go
deleted file mode 100644
index d3083807..00000000
--- a/checks/cloud/aws/apigateway/enable_cache_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package apigateway
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- v1 "github.com/aquasecurity/trivy/pkg/iac/providers/aws/apigateway/v1"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableCache(t *testing.T) {
- tests := []struct {
- name string
- input v1.APIGateway
- expected bool
- }{
- {
- name: "API Gateway stage with caching disabled",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RESTMethodSettings: []v1.RESTMethodSettings{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- CacheEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: true,
- },
-
- {
- name: "API Gateway stage with caching enabled",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RESTMethodSettings: []v1.RESTMethodSettings{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- CacheEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.APIGateway.V1 = test.input
- results := CheckEnableCache.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableCache.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/apigateway/enable_cache_test.rego b/checks/cloud/aws/apigateway/enable_cache_test.rego
new file mode 100644
index 00000000..babd7cb7
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_cache_test.rego
@@ -0,0 +1,17 @@
+package builtin.aws.apigateway.aws0190_test
+
+import rego.v1
+
+import data.builtin.aws.apigateway.aws0190 as check
+import data.lib.test
+
+test_allow_cache_enabled if {
+ test.assert_empty(check.deny) with input as build_input(true)
+}
+
+test_disallow_cache_disabled if {
+ r := check.deny with input as build_input(false)
+ test.assert_equal_message("Cache data is not enabled.", r)
+}
+
+build_input(cacheenabled) := {"aws": {"apigateway": {"v1": {"apis": [{"__defsec_metadata": {"managed": true}, "stages": [{"__defsec_metadata": {"managed": true}, "restmethodsettings": [{"__defsec_metadata": {"managed": true}, "cacheenabled": {"value": cacheenabled}}]}]}]}}}}
diff --git a/checks/cloud/aws/apigateway/enable_tracing.go b/checks/cloud/aws/apigateway/enable_tracing.go
deleted file mode 100755
index 9d3b30b3..00000000
--- a/checks/cloud/aws/apigateway/enable_tracing.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package apigateway
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableTracing = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0003",
- Provider: providers.AWSProvider,
- Service: "api-gateway",
- ShortCode: "enable-tracing",
- Summary: "API Gateway must have X-Ray tracing enabled",
- Impact: "Without full tracing enabled it is difficult to trace the flow of logs",
- Resolution: "Enable tracing",
- Explanation: `X-Ray tracing enables end-to-end debugging and analysis of all API Gateway HTTP requests.`,
- Links: []string{},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableTracingGoodExamples,
- BadExamples: terraformEnableTracingBadExamples,
- Links: terraformEnableTracingLinks,
- RemediationMarkdown: terraformEnableTracingRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, api := range s.AWS.APIGateway.V1.APIs {
- if api.Metadata.IsUnmanaged() {
- continue
- }
- for _, stage := range api.Stages {
- if stage.Metadata.IsUnmanaged() {
- continue
- }
- if stage.XRayTracingEnabled.IsFalse() {
- results.Add(
- "X-Ray tracing is not enabled,",
- stage.XRayTracingEnabled,
- )
- } else {
- results.AddPassed(&stage)
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/apigateway/enable_tracing.rego b/checks/cloud/aws/apigateway/enable_tracing.rego
new file mode 100644
index 00000000..90ed49c6
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_tracing.rego
@@ -0,0 +1,38 @@
+# METADATA
+# title: API Gateway must have X-Ray tracing enabled
+# description: |
+# X-Ray tracing enables end-to-end debugging and analysis of all API Gateway HTTP requests.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# custom:
+# id: AVD-AWS-0003
+# avd_id: AVD-AWS-0003
+# provider: aws
+# service: api-gateway
+# severity: LOW
+# short_code: enable-tracing
+# recommended_action: Enable tracing
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: api-gateway
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_stage#xray_tracing_enabled
+# good_examples: checks/cloud/aws/apigateway/enable_tracing.tf.go
+# bad_examples: checks/cloud/aws/apigateway/enable_tracing.tf.go
+package builtin.aws.apigateway.aws0003
+
+import rego.v1
+
+deny contains res if {
+ some api in input.aws.apigateway.v1.apis
+ api.__defsec_metadata.managed
+ some stage in api.stages
+ stage.__defsec_metadata.managed
+ not stage.xraytracingenabled.value
+ res := result.new("X-Ray tracing is not enabled.", stage.xraytracingenabled)
+}
diff --git a/checks/cloud/aws/apigateway/enable_tracing_test.go b/checks/cloud/aws/apigateway/enable_tracing_test.go
deleted file mode 100644
index 1a82b01f..00000000
--- a/checks/cloud/aws/apigateway/enable_tracing_test.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package apigateway
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- v1 "github.com/aquasecurity/trivy/pkg/iac/providers/aws/apigateway/v1"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableTracing(t *testing.T) {
- tests := []struct {
- name string
- input v1.APIGateway
- expected bool
- }{
- {
- name: "API Gateway stage with X-Ray tracing disabled",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- XRayTracingEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "API Gateway stage with X-Ray tracing enabled",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Stages: []v1.Stage{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- XRayTracingEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.APIGateway.V1 = test.input
- results := CheckEnableTracing.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableTracing.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/apigateway/enable_tracing_test.rego b/checks/cloud/aws/apigateway/enable_tracing_test.rego
new file mode 100644
index 00000000..a0ee1ec7
--- /dev/null
+++ b/checks/cloud/aws/apigateway/enable_tracing_test.rego
@@ -0,0 +1,23 @@
+package builtin.aws.apigateway.aws0003_test
+
+import rego.v1
+
+import data.builtin.aws.apigateway.aws0003 as check
+import data.lib.test
+
+test_allow_tracing_enabled if {
+ test.assert_empty(check.deny) with input as build_input(true)
+}
+
+test_disallow_tracing_disabled if {
+ r := check.deny with input as build_input(false)
+ test.assert_equal_message("X-Ray tracing is not enabled.", r)
+}
+
+build_input(xraytracingenabled) := {"aws": {"apigateway": {"v1": {"apis": [{
+ "__defsec_metadata": {"managed": true},
+ "stages": [{
+ "__defsec_metadata": {"managed": true},
+		"xraytracingenabled": {"value": xraytracingenabled},
+ }],
+}]}}}}
diff --git a/checks/cloud/aws/apigateway/no_public_access.go b/checks/cloud/aws/apigateway/no_public_access.go
deleted file mode 100755
index 4a51871b..00000000
--- a/checks/cloud/aws/apigateway/no_public_access.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package apigateway
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- v1 "github.com/aquasecurity/trivy/pkg/iac/providers/aws/apigateway/v1"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckNoPublicAccess = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0004",
- Provider: providers.AWSProvider,
- Service: "api-gateway",
- ShortCode: "no-public-access",
- Summary: "No unauthorized access to API Gateway methods",
- Impact: "API gateway methods can be accessed without authorization.",
- Resolution: "Use and authorization method or require API Key",
- Explanation: `API Gateway methods should generally be protected by authorization or api key. OPTION verb calls can be used without authorization`,
- Links: []string{},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformNoPublicAccessGoodExamples,
- BadExamples: terraformNoPublicAccessBadExamples,
- Links: terraformNoPublicAccessLinks,
- RemediationMarkdown: terraformNoPublicAccessRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, api := range s.AWS.APIGateway.V1.APIs {
- if api.Metadata.IsUnmanaged() {
- continue
- }
- for _, resource := range api.Resources {
- for _, method := range resource.Methods {
- if method.HTTPMethod.EqualTo("OPTION") {
- continue
- }
- if method.APIKeyRequired.IsTrue() {
- continue
- }
- if method.AuthorizationType.EqualTo(v1.AuthorizationNone) {
- results.Add(
- "Authorization is not enabled for this method.",
- method.AuthorizationType,
- )
- } else {
- results.AddPassed(&method)
- }
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/apigateway/no_public_access.rego b/checks/cloud/aws/apigateway/no_public_access.rego
new file mode 100644
index 00000000..428d246c
--- /dev/null
+++ b/checks/cloud/aws/apigateway/no_public_access.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: No unauthorized access to API Gateway methods
+# description: |
+# API Gateway methods should generally be protected by authorization or api key. OPTION verb calls can be used without authorization
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# custom:
+# id: AVD-AWS-0004
+# avd_id: AVD-AWS-0004
+# provider: aws
+# service: api-gateway
+# severity: LOW
+# short_code: no-public-access
+#   recommended_action: Use an authorization method or require API Key
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: api-gateway
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method#authorization
+# good_examples: checks/cloud/aws/apigateway/no_public_access.tf.go
+# bad_examples: checks/cloud/aws/apigateway/no_public_access.tf.go
+package builtin.aws.apigateway.aws0004
+
+import rego.v1
+
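+# An authorization type of "NONE" means the method can be invoked without any authorizer.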
+authorization_none := "NONE"
+
+deny contains res if {
+ some api in input.aws.apigateway.v1.apis
+ api.__defsec_metadata.managed
+ some method in api.resources[_].methods
+ method.httpmethod.value != "OPTION"
+ not method.apikeyrequired.value
+ method.authorizationtype.value == authorization_none
+
+ res := result.new("Authorization is not enabled for this method.", method.authorizationtype)
+}
diff --git a/checks/cloud/aws/apigateway/no_public_access_test.go b/checks/cloud/aws/apigateway/no_public_access_test.go
deleted file mode 100644
index 043b5345..00000000
--- a/checks/cloud/aws/apigateway/no_public_access_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package apigateway
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- v1 "github.com/aquasecurity/trivy/pkg/iac/providers/aws/apigateway/v1"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckNoPublicAccess(t *testing.T) {
- tests := []struct {
- name string
- input v1.APIGateway
- expected bool
- }{
- {
- name: "API GET method without authorization",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Resources: []v1.Resource{
- {
- Methods: []v1.Method{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- HTTPMethod: trivyTypes.String("GET", trivyTypes.NewTestMetadata()),
- APIKeyRequired: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- AuthorizationType: trivyTypes.String(v1.AuthorizationNone, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "API OPTION method without authorization",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Resources: []v1.Resource{
- {
- Methods: []v1.Method{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- HTTPMethod: trivyTypes.String("OPTION", trivyTypes.NewTestMetadata()),
- APIKeyRequired: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- AuthorizationType: trivyTypes.String(v1.AuthorizationNone, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "API GET method with IAM authorization",
- input: v1.APIGateway{
- APIs: []v1.API{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Resources: []v1.Resource{
- {
- Methods: []v1.Method{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- HTTPMethod: trivyTypes.String("GET", trivyTypes.NewTestMetadata()),
- APIKeyRequired: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- AuthorizationType: trivyTypes.String(v1.AuthorizationIAM, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.APIGateway.V1 = test.input
- results := CheckNoPublicAccess.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckNoPublicAccess.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/apigateway/no_public_access_test.rego b/checks/cloud/aws/apigateway/no_public_access_test.rego
new file mode 100644
index 00000000..cd6c0bc5
--- /dev/null
+++ b/checks/cloud/aws/apigateway/no_public_access_test.rego
@@ -0,0 +1,26 @@
+package builtin.aws.apigateway.aws0004_test
+
+import rego.v1
+
+import data.builtin.aws.apigateway.aws0004 as check
+import data.lib.test
+
+test_disallow_get_method_without_auth if {
+ r := check.deny with input as input_with_method({"httpmethod": {"value": "GET"}, "authorizationtype": {"value": "NONE"}})
+
+ test.assert_equal_message("Authorization is not enabled for this method.", r)
+}
+
+test_allow_option_method if {
+ test.assert_empty(check.deny) with input as input_with_method({"httpmethod": {"value": "OPTION"}})
+}
+
+test_allow_get_method_with_auth if {
+	test.assert_empty(check.deny) with input as input_with_method({"httpmethod": {"value": "GET"}, "authorizationtype": {"value": "AWS_IAM"}})
+}
+
+test_allow_if_api_required if {
+	test.assert_empty(check.deny) with input as input_with_method({"httpmethod": {"value": "GET"}, "apikeyrequired": {"value": true}, "authorizationtype": {"value": "NONE"}})
+}
+
+input_with_method(method) := {"aws": {"apigateway": {"v1": {"apis": [{"__defsec_metadata": {"managed": true}, "resources": [{"methods": [method]}]}]}}}}
diff --git a/checks/cloud/aws/apigateway/use_secure_tls_policy.go b/checks/cloud/aws/apigateway/use_secure_tls_policy.go
deleted file mode 100755
index 000e607a..00000000
--- a/checks/cloud/aws/apigateway/use_secure_tls_policy.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package apigateway
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckUseSecureTlsPolicy = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0005",
- Provider: providers.AWSProvider,
- Service: "api-gateway",
- ShortCode: "use-secure-tls-policy",
- Summary: "API Gateway domain name uses outdated SSL/TLS protocols.",
- Impact: "Outdated SSL policies increase exposure to known vulnerabilities",
- Resolution: "Use the most modern TLS/SSL policies available",
- Explanation: `You should not use outdated/insecure TLS versions for encryption. You should be using TLS v1.2+.`,
- Links: []string{
- "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformUseSecureTlsPolicyGoodExamples,
- BadExamples: terraformUseSecureTlsPolicyBadExamples,
- Links: terraformUseSecureTlsPolicyLinks,
- RemediationMarkdown: terraformUseSecureTlsPolicyRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, domain := range s.AWS.APIGateway.V1.DomainNames {
- if domain.SecurityPolicy.NotEqualTo("TLS_1_2") {
- results.Add(
- "Domain name is configured with an outdated TLS policy.",
- domain.SecurityPolicy,
- )
- } else {
- results.AddPassed(&domain)
- }
- }
- for _, domain := range s.AWS.APIGateway.V2.DomainNames {
- if domain.SecurityPolicy.NotEqualTo("TLS_1_2") {
- results.Add(
- "Domain name is configured with an outdated TLS policy.",
- domain.SecurityPolicy,
- )
- } else {
- results.AddPassed(&domain)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/apigateway/use_secure_tls_policy.rego b/checks/cloud/aws/apigateway/use_secure_tls_policy.rego
new file mode 100644
index 00000000..4f846a69
--- /dev/null
+++ b/checks/cloud/aws/apigateway/use_secure_tls_policy.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: API Gateway domain name uses outdated SSL/TLS protocols.
+# description: |
+# You should not use outdated/insecure TLS versions for encryption. You should be using TLS v1.2+.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html
+# custom:
+# id: AVD-AWS-0005
+# avd_id: AVD-AWS-0005
+# provider: aws
+# service: api-gateway
+# severity: HIGH
+# short_code: use-secure-tls-policy
+# recommended_action: Use the most modern TLS/SSL policies available
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: api-gateway
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_domain_name#security_policy
+# good_examples: checks/cloud/aws/apigateway/use_secure_tls_policy.tf.go
+# bad_examples: checks/cloud/aws/apigateway/use_secure_tls_policy.tf.go
+package builtin.aws.apigateway.aws0005
+
+import rego.v1
+
+deny contains res if {
+ some domain in domainnames
+ domain.securitypolicy.value != "TLS_1_2"
+ res := result.new("Domain name is configured with an outdated TLS policy.", domain.securitypolicy)
+}
+
+# TODO: use map?
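+# domainnames is the union of API Gateway v1 and v2 custom domain names present in the input.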
+domainnames contains input.aws.apigateway.v1.domainnames[_]
+
+domainnames contains input.aws.apigateway.v2.domainnames[_]
diff --git a/checks/cloud/aws/apigateway/use_secure_tls_policy_test.go b/checks/cloud/aws/apigateway/use_secure_tls_policy_test.go
deleted file mode 100644
index ec072a42..00000000
--- a/checks/cloud/aws/apigateway/use_secure_tls_policy_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package apigateway
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- v1 "github.com/aquasecurity/trivy/pkg/iac/providers/aws/apigateway/v1"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckUseSecureTlsPolicy(t *testing.T) {
- tests := []struct {
- name string
- input v1.APIGateway
- expected bool
- }{
- {
- name: "API Gateway domain name with TLS version 1.0",
- input: v1.APIGateway{
- DomainNames: []v1.DomainName{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- SecurityPolicy: trivyTypes.String("TLS_1_0", trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "API Gateway domain name with TLS version 1.2",
- input: v1.APIGateway{
- DomainNames: []v1.DomainName{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- SecurityPolicy: trivyTypes.String("TLS_1_2", trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.APIGateway.V1 = test.input
- results := CheckUseSecureTlsPolicy.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckUseSecureTlsPolicy.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/apigateway/use_secure_tls_policy_test.rego b/checks/cloud/aws/apigateway/use_secure_tls_policy_test.rego
new file mode 100644
index 00000000..d0b2b118
--- /dev/null
+++ b/checks/cloud/aws/apigateway/use_secure_tls_policy_test.rego
@@ -0,0 +1,20 @@
+package builtin.aws.apigateway.aws0005_test
+
+import rego.v1
+
+import data.builtin.aws.apigateway.aws0005 as check
+import data.lib.test
+
+test_allow_with_tls_1_2 if {
+ test.assert_empty(check.deny) with input as {"aws": {"apigateway": {"v1": {"domainnames": [{"securitypolicy": {"value": "TLS_1_2"}}]}}}}
+}
+
+test_disallow_with_tls_1_0 if {
+ r := check.deny with input as {"aws": {"apigateway": {"v1": {"domainnames": [{"securitypolicy": {"value": "TLS_1_0"}}]}}}}
+ test.assert_equal_message("Domain name is configured with an outdated TLS policy.", r)
+}
+
+test_disallow_api_v2 if {
+	r := check.deny with input as {"aws": {"apigateway": {"v2": {"domainnames": [{"securitypolicy": {"value": "TLS_1_0"}}]}, "v1": {"domainnames": [{"securitypolicy": {"value": "TLS_1_2"}}]}}}}
+ test.assert_equal_message("Domain name is configured with an outdated TLS policy.", r)
+}
diff --git a/checks/cloud/aws/athena/athena.go b/checks/cloud/aws/athena/athena.go
new file mode 100644
index 00000000..b1603249
--- /dev/null
+++ b/checks/cloud/aws/athena/athena.go
@@ -0,0 +1 @@
+package athena
diff --git a/checks/cloud/aws/athena/enable_at_rest_encryption.rego b/checks/cloud/aws/athena/enable_at_rest_encryption.rego
new file mode 100644
index 00000000..5ed71972
--- /dev/null
+++ b/checks/cloud/aws/athena/enable_at_rest_encryption.rego
@@ -0,0 +1,53 @@
+# METADATA
+# title: Athena databases and workgroup configurations are created unencrypted at rest by default, they should be encrypted
+# description: |
+#   Athena databases and workspace result sets should be encrypted at rest. These databases and query sets are generally derived from data in S3 buckets and should have the same level of at rest protection.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/athena/latest/ug/encryption.html
+# custom:
+# id: AVD-AWS-0006
+# avd_id: AVD-AWS-0006
+# provider: aws
+# service: athena
+# severity: HIGH
+# short_code: enable-at-rest-encryption
+# recommended_action: Enable encryption at rest for Athena databases and workgroup configurations
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: athena
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_workgroup#encryption_configuration
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_database#encryption_configuration
+# good_examples: checks/cloud/aws/athena/enable_at_rest_encryption.tf.go
+# bad_examples: checks/cloud/aws/athena/enable_at_rest_encryption.tf.go
+# cloudformation:
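+# Flag managed method settings where caching is enabled but the cached data is not encrypted.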
+# good_examples: checks/cloud/aws/athena/enable_at_rest_encryption.cf.go
+# bad_examples: checks/cloud/aws/athena/enable_at_rest_encryption.cf.go
+package builtin.aws.athena.aws0006
+
+import rego.v1
+
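+# An empty encryption type means no at-rest encryption is configured.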
+encryption_type_none := ""
+
+deny contains res if {
+ some workgroup in input.aws.athena.workgroups
+ is_encryption_type_none(workgroup.encryption)
+ res := result.new("Workgroup does not have encryption configured.", workgroup)
+}
+
+deny contains res if {
+ some database in input.aws.athena.databases
+ is_encryption_type_none(database.encryption)
+ res := result.new("Database does not have encryption configured.", database)
+}
+
+is_encryption_type_none(encryption) if {
+ encryption.type.value == encryption_type_none
+}
diff --git a/checks/cloud/aws/athena/enable_at_rest_encryption_test.rego b/checks/cloud/aws/athena/enable_at_rest_encryption_test.rego
new file mode 100644
index 00000000..4272ac39
--- /dev/null
+++ b/checks/cloud/aws/athena/enable_at_rest_encryption_test.rego
@@ -0,0 +1,26 @@
+package builtin.aws.athena.aws0006_test
+
+import rego.v1
+
+import data.builtin.aws.athena.aws0006 as check
+import data.lib.test
+
+test_disallow_database_unencrypted if {
+ inp := {"aws": {"athena": {"databases": [{"encryption": {"type": {"value": ""}}}]}}}
+ test.assert_equal_message("Database does not have encryption configured.", check.deny) with input as inp
+}
+
+test_disallow_workgroup_unencrypted if {
+ inp := {"aws": {"athena": {"workgroups": [{"encryption": {"type": {"value": ""}}}]}}}
+ test.assert_equal_message("Workgroup does not have encryption configured.", check.deny) with input as inp
+}
+
+test_allow_database_encrypted if {
+ inp := {"aws": {"athena": {"databases": [{"encryption": {"type": {"value": "SSE_S3"}}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_workgroup_encrypted if {
+ inp := {"aws": {"athena": {"workgroups": [{"encryption": {"type": {"value": "SSE_S3"}}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/athena/no_encryption_override.rego b/checks/cloud/aws/athena/no_encryption_override.rego
new file mode 100644
index 00000000..df45ddcd
--- /dev/null
+++ b/checks/cloud/aws/athena/no_encryption_override.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Athena workgroups should enforce configuration to prevent client disabling encryption
+# description: |
+# Athena workgroup configuration should be enforced to prevent client side changes to disable encryption settings.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/athena/latest/ug/manage-queries-control-costs-with-workgroups.html
+# custom:
+# id: AVD-AWS-0007
+# avd_id: AVD-AWS-0007
+# provider: aws
+# service: athena
+# severity: HIGH
+# short_code: no-encryption-override
+# recommended_action: Enforce the configuration to prevent client overrides
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: athena
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/athena_workgroup#configuration
+# good_examples: checks/cloud/aws/athena/no_encryption_override.tf.go
+# bad_examples: checks/cloud/aws/athena/no_encryption_override.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/athena/no_encryption_override.cf.go
+# bad_examples: checks/cloud/aws/athena/no_encryption_override.cf.go
+package builtin.aws.athena.aws0007
+
+import rego.v1
+
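+# The workgroup must enforce its configuration so clients cannot override encryption settings.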
+deny contains res if {
+ some workgroup in input.aws.athena.workgroups
+ not workgroup.enforceconfiguration.value
+ res := result.new("The workgroup configuration is not enforced.", workgroup.enforceconfiguration)
+}
diff --git a/checks/cloud/aws/athena/no_encryption_override_test.rego b/checks/cloud/aws/athena/no_encryption_override_test.rego
new file mode 100644
index 00000000..55c8140d
--- /dev/null
+++ b/checks/cloud/aws/athena/no_encryption_override_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.athena.aws0007_test
+
+import rego.v1
+
+import data.builtin.aws.athena.aws0007 as check
+import data.lib.test
+
+test_allow_workgroup_enforce_configuration if {
+ inp := {"aws": {"athena": {"workgroups": [{"enforceconfiguration": {"value": true}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_workgroup_no_enforce_configuration if {
+ inp := {"aws": {"athena": {"workgroups": [{"enforceconfiguration": {"value": false}}]}}}
+ test.assert_equal_message("The workgroup configuration is not enforced.", check.deny) with input as inp
+}
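+
+# Hedged extra case (not part of the original change): with one enforced and one
+# unenforced workgroup, only the unenforced one should yield a result.
+test_disallow_mixed_workgroups if {
+	inp := {"aws": {"athena": {"workgroups": [
+		{"enforceconfiguration": {"value": true}},
+		{"enforceconfiguration": {"value": false}},
+	]}}}
+	test.assert_equal_message("The workgroup configuration is not enforced.", check.deny) with input as inp
+}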
diff --git a/checks/cloud/aws/cloudfront/cloudfront.go b/checks/cloud/aws/cloudfront/cloudfront.go
new file mode 100644
index 00000000..d8f0e7ee
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/cloudfront.go
@@ -0,0 +1 @@
+package cloudfront
diff --git a/checks/cloud/aws/cloudfront/enable_logging.rego b/checks/cloud/aws/cloudfront/enable_logging.rego
new file mode 100644
index 00000000..7c1d1f2b
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/enable_logging.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Cloudfront distribution should have Access Logging configured
+# description: |
+# You should configure CloudFront Access Logging to create log files that contain detailed information about every user request that CloudFront receives
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html
+# custom:
+# id: AVD-AWS-0010
+# avd_id: AVD-AWS-0010
+# provider: aws
+# service: cloudfront
+# severity: MEDIUM
+# short_code: enable-logging
+# recommended_action: Enable logging for CloudFront distributions
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudfront
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#logging_config
+# good_examples: checks/cloud/aws/cloudfront/enable_logging.tf.go
+# bad_examples: checks/cloud/aws/cloudfront/enable_logging.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudfront/enable_logging.cf.go
+# bad_examples: checks/cloud/aws/cloudfront/enable_logging.cf.go
+package builtin.aws.cloudfront.aws0010
+
+import rego.v1
+
+deny contains res if {
+ some dist in input.aws.cloudfront.distributions
+ dist.logging.bucket.value == ""
+ res := result.new("Distribution does not have logging enabled", dist)
+}
diff --git a/checks/cloud/aws/cloudfront/enable_logging_test.rego b/checks/cloud/aws/cloudfront/enable_logging_test.rego
new file mode 100644
index 00000000..c7dd18cc
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/enable_logging_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.cloudfront.aws0010_test
+
+import rego.v1
+
+import data.builtin.aws.cloudfront.aws0010 as check
+import data.lib.test
+
+test_allow_distribution_with_logging if {
+ inp := {"aws": {"cloudfront": {"distributions": [{"logging": {"bucket": {"value": "somebucket"}}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_distribution_without_logging if {
+ inp := {"aws": {"cloudfront": {"distributions": [{"logging": {"bucket": {"value": ""}}}]}}}
+ test.assert_not_empty(check.deny) with input as inp
+}
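+
+# Hedged extra case (not part of the original change): a distribution that omits
+# the logging block entirely is currently not reported, because the comparison
+# against an empty bucket name is undefined when the field is missing.
+test_allow_distribution_without_logging_block if {
+	inp := {"aws": {"cloudfront": {"distributions": [{}]}}}
+	test.assert_empty(check.deny) with input as inp
+}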
diff --git a/checks/cloud/aws/cloudfront/enable_waf.rego b/checks/cloud/aws/cloudfront/enable_waf.rego
new file mode 100644
index 00000000..49fb99e5
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/enable_waf.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: CloudFront distribution does not have a WAF in front.
+# description: |
+# You should configure a Web Application Firewall in front of your CloudFront distribution. This will mitigate many types of attacks on your web application.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/waf/latest/developerguide/cloudfront-features.html
+# custom:
+# id: AVD-AWS-0011
+# avd_id: AVD-AWS-0011
+# provider: aws
+# service: cloudfront
+# severity: HIGH
+# short_code: enable-waf
+# recommended_action: Enable WAF for the CloudFront distribution
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudfront
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#web_acl_id
+# good_examples: checks/cloud/aws/cloudfront/enable_waf.tf.go
+# bad_examples: checks/cloud/aws/cloudfront/enable_waf.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudfront/enable_waf.cf.go
+# bad_examples: checks/cloud/aws/cloudfront/enable_waf.cf.go
+package builtin.aws.cloudfront.aws0011
+
+import rego.v1
+
+deny contains res if {
+ some dist in input.aws.cloudfront.distributions
+ dist.wafid.value == ""
+ res := result.new("Distribution does not utilise a WAF.", dist)
+}
diff --git a/checks/cloud/aws/cloudfront/enable_waf_test.rego b/checks/cloud/aws/cloudfront/enable_waf_test.rego
new file mode 100644
index 00000000..08296f3c
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/enable_waf_test.rego
@@ -0,0 +1,14 @@
+package builtin.aws.cloudfront.aws0011_test
+
+import rego.v1
+
+import data.builtin.aws.cloudfront.aws0011 as check
+import data.lib.test
+
+test_allow_distribution_with_waf if {
+	test.assert_empty(check.deny) with input as {"aws": {"cloudfront": {"distributions": [{"wafid": {"value": "some-waf-id"}}]}}}
+}
+
+test_disallow_distribution_without_waf if {
+	test.assert_equal_message("Distribution does not utilise a WAF.", check.deny) with input as {"aws": {"cloudfront": {"distributions": [{"wafid": {"value": ""}}]}}}
+}
diff --git a/checks/cloud/aws/cloudfront/enforce_https.rego b/checks/cloud/aws/cloudfront/enforce_https.rego
new file mode 100644
index 00000000..9251dd78
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/enforce_https.rego
@@ -0,0 +1,48 @@
+# METADATA
+# title: CloudFront distribution allows unencrypted (HTTP) communications.
+# description: |
+# Plain HTTP is unencrypted and human-readable. This means that if a malicious actor was to eavesdrop on your connection, they would be able to see all of your data flowing back and forth.
+# You should use HTTPS, which is HTTP over an encrypted (TLS) connection, meaning eavesdroppers cannot read your traffic.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-cloudfront-to-s3-origin.html
+# custom:
+# id: AVD-AWS-0012
+# avd_id: AVD-AWS-0012
+# provider: aws
+# service: cloudfront
+# severity: CRITICAL
+# short_code: enforce-https
+# recommended_action: Only allow HTTPS for CloudFront distribution communication
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudfront
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#viewer_protocol_policy
+# good_examples: checks/cloud/aws/cloudfront/enforce_https.tf.go
+# bad_examples: checks/cloud/aws/cloudfront/enforce_https.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudfront/enforce_https.cf.go
+# bad_examples: checks/cloud/aws/cloudfront/enforce_https.cf.go
+package builtin.aws.cloudfront.aws0012
+
+import rego.v1
+
+viewer_protocol_policy_allow_all := "allow-all"
+
+deny contains res if {
+ some cachebehavior in cachebehaviors
+ cachebehavior.viewerprotocolpolicy.value == viewer_protocol_policy_allow_all
+
+ res := result.new("Distribution allows unencrypted communications.", cachebehavior.viewerprotocolpolicy)
+}
+
+cachebehaviors contains input.aws.cloudfront.distributions[_].defaultcachebehaviour
+
+cachebehaviors contains input.aws.cloudfront.distributions[_].orderercachebehaviours[_]
diff --git a/checks/cloud/aws/cloudfront/enforce_https_test.rego b/checks/cloud/aws/cloudfront/enforce_https_test.rego
new file mode 100644
index 00000000..93d9e77c
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/enforce_https_test.rego
@@ -0,0 +1,28 @@
+package builtin.aws.cloudfront.aws0012_test
+
+import rego.v1
+
+import data.builtin.aws.cloudfront.aws0012 as check
+import data.lib.test
+
+test_disallow_default_cache_behavior_with_allow_all if {
+ r := check.deny with input as build_input({"defaultcachebehaviour": {"viewerprotocolpolicy": {"value": "allow-all"}}})
+ test.assert_equal_message("Distribution allows unencrypted communications.", r)
+}
+
+test_disallow_ordered_cache_behaviors_with_allow_all if {
+ r := check.deny with input as build_input({"orderercachebehaviours": [{"viewerprotocolpolicy": {"value": "allow-all"}}]})
+ test.assert_equal_message("Distribution allows unencrypted communications.", r)
+}
+
+test_allow_default_cache_behavior_with_https if {
+	inp := build_input({"defaultcachebehaviour": {"viewerprotocolpolicy": {"value": "https-only"}}})
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_ordered_cache_behaviors_with_https if {
+ inp := build_input({"orderercachebehaviours": [{"viewerprotocolpolicy": {"value": "https-only"}}]})
+ test.assert_empty(check.deny) with input as inp
+}
+
+build_input(body) := {"aws": {"cloudfront": {"distributions": [body]}}}
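+
+# Hedged extra case (not part of the original change): an insecure ordered cache
+# behaviour should still be flagged even when the default behaviour is HTTPS-only,
+# since both partial rules feed the same `cachebehaviors` set.
+test_disallow_mixed_cache_behaviors if {
+	inp := build_input({
+		"defaultcachebehaviour": {"viewerprotocolpolicy": {"value": "https-only"}},
+		"orderercachebehaviours": [{"viewerprotocolpolicy": {"value": "allow-all"}}],
+	})
+	test.assert_equal_message("Distribution allows unencrypted communications.", check.deny) with input as inp
+}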
diff --git a/checks/cloud/aws/cloudfront/use_secure_tls_policy.rego b/checks/cloud/aws/cloudfront/use_secure_tls_policy.rego
new file mode 100644
index 00000000..23dec2ca
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/use_secure_tls_policy.rego
@@ -0,0 +1,49 @@
+# METADATA
+# title: CloudFront distribution uses outdated SSL/TLS protocols.
+# description: |
+# You should not use outdated/insecure TLS versions for encryption. You should be using TLS v1.2+.
+# Note that setting *minimum_protocol_version = "TLSv1.2_2021"* is only possible when *cloudfront_default_certificate* is false (i.e. you are not using the cloudfront.net domain name).
+# If *cloudfront_default_certificate* is true then the CloudFront API will only allow setting *minimum_protocol_version = "TLSv1"*, and setting it to any other value will result in a perpetual diff in your *terraform plan* output.
+# The only option when using the cloudfront.net domain name is to ignore this rule.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html
+# - https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesGeneral
+# custom:
+# id: AVD-AWS-0013
+# avd_id: AVD-AWS-0013
+# provider: aws
+# service: cloudfront
+# severity: HIGH
+# short_code: use-secure-tls-policy
+# recommended_action: Use the most modern TLS/SSL policies available
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudfront
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudfront_distribution#minimum_protocol_version
+# good_examples: checks/cloud/aws/cloudfront/use_secure_tls_policy.tf.go
+# bad_examples: checks/cloud/aws/cloudfront/use_secure_tls_policy.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudfront/use_secure_tls_policy.cf.go
+# bad_examples: checks/cloud/aws/cloudfront/use_secure_tls_policy.cf.go
+package builtin.aws.cloudfront.aws0013
+
+import rego.v1
+
+protocol_version_tls1_2_2021 := "TLSv1.2_2021"
+
+deny contains res if {
+ some dist in input.aws.cloudfront.distributions
+ viewrcert := dist.viewercertificate
+ not viewrcert.cloudfrontdefaultcertificate.value
+ viewrcert.minimumprotocolversion.value != protocol_version_tls1_2_2021
+
+ res := result.new("Distribution allows unencrypted communications.", viewrcert.minimumprotocolversion)
+}
diff --git a/checks/cloud/aws/cloudfront/use_secure_tls_policy_test.rego b/checks/cloud/aws/cloudfront/use_secure_tls_policy_test.rego
new file mode 100644
index 00000000..32cfbc51
--- /dev/null
+++ b/checks/cloud/aws/cloudfront/use_secure_tls_policy_test.rego
@@ -0,0 +1,29 @@
+package builtin.aws.cloudfront.aws0013_test
+
+import rego.v1
+
+import data.builtin.aws.cloudfront.aws0013 as check
+import data.lib.test
+
+test_disallow_distribution_using_tls_1_0 if {
+	test.assert_equal_message("Distribution allows unencrypted communications.", check.deny) with input as build_input({"viewercertificate": {
+ "cloudfrontdefaultcertificate": {"value": false},
+ "minimumprotocolversion": {"value": "TLSv1.0"},
+ }})
+}
+
+test_allow_distribution_using_tls_1_2 if {
+ test.assert_empty(check.deny) with input as build_input({"viewercertificate": {
+ "cloudfrontdefaultcertificate": {"value": false},
+ "minimumprotocolversion": {"value": check.protocol_version_tls1_2_2021},
+ }})
+}
+
+test_allow_distribution_with_default_certificate_and_tls_1_0 if {
+ test.assert_empty(check.deny) with input as build_input({"viewercertificate": {
+ "cloudfrontdefaultcertificate": {"value": true},
+ "minimumprotocolversion": {"value": "TLSv1.0"},
+ }})
+}
+
+build_input(body) := {"aws": {"cloudfront": {"distributions": [body]}}}
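+
+# Hedged extra case (not part of the original change): any minimum protocol
+# version other than TLSv1.2_2021 should be flagged when a custom certificate
+# is in use.
+test_disallow_distribution_using_tls_1_1 if {
+	inp := build_input({"viewercertificate": {
+		"cloudfrontdefaultcertificate": {"value": false},
+		"minimumprotocolversion": {"value": "TLSv1.1_2016"},
+	}})
+	test.assert_equal_message("Distribution allows unencrypted communications.", check.deny) with input as inp
+}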
diff --git a/checks/cloud/aws/cloudtrail/cloudtrail.go b/checks/cloud/aws/cloudtrail/cloudtrail.go
new file mode 100644
index 00000000..3ecf36ea
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/cloudtrail.go
@@ -0,0 +1 @@
+package cloudtrail
diff --git a/checks/cloud/aws/cloudtrail/enable_all_regions.rego b/checks/cloud/aws/cloudtrail/enable_all_regions.rego
new file mode 100644
index 00000000..574e6190
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/enable_all_regions.rego
@@ -0,0 +1,43 @@
+# METADATA
+# title: Cloudtrail should be enabled in all regions regardless of where your AWS resources are generally homed
+# description: |
+# When creating Cloudtrail in the AWS Management Console the trail is configured by default to be multi-region; this isn't the case with the Terraform resource. Cloudtrail should cover the full AWS account to ensure you can track changes in regions you are not actively operating in.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/receive-cloudtrail-log-files-from-multiple-regions.html
+# custom:
+# id: AVD-AWS-0014
+# avd_id: AVD-AWS-0014
+# provider: aws
+# service: cloudtrail
+# severity: MEDIUM
+# short_code: enable-all-regions
+# recommended_action: Enable Cloudtrail in all regions
+# frameworks:
+# cis-aws-1.2:
+# - "2.5"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudtrail
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#is_multi_region_trail
+# good_examples: checks/cloud/aws/cloudtrail/enable_all_regions.tf.go
+# bad_examples: checks/cloud/aws/cloudtrail/enable_all_regions.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudtrail/enable_all_regions.cf.go
+# bad_examples: checks/cloud/aws/cloudtrail/enable_all_regions.cf.go
+package builtin.aws.cloudtrail.aws0014
+
+import rego.v1
+
+deny contains res if {
+ some trail in input.aws.cloudtrail.trails
+ not trail.ismultiregion.value
+ res := result.new("Trail is not enabled across all regions.", trail.ismultiregion)
+}
diff --git a/checks/cloud/aws/cloudtrail/enable_all_regions_test.rego b/checks/cloud/aws/cloudtrail/enable_all_regions_test.rego
new file mode 100644
index 00000000..c004db30
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/enable_all_regions_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.cloudtrail.aws0014_test
+
+import rego.v1
+
+import data.builtin.aws.cloudtrail.aws0014 as check
+import data.lib.test
+
+test_disallow_cloudtrail_without_all_regions if {
+ r := check.deny with input as {"aws": {"cloudtrail": {"trails": [{"ismultiregion": {"value": false}}]}}}
+	test.assert_equal_message("Trail is not enabled across all regions.", r)
+}
+
+test_allow_cloudtrail_with_all_regions if {
+ r := check.deny with input as {"aws": {"cloudtrail": {"trails": [{"ismultiregion": {"value": true}}]}}}
+ test.assert_empty(r)
+}
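+
+# Hedged extra case (not part of the original change): only the trail that is
+# not multi-region should be reported when trails are mixed.
+test_mixed_trails if {
+	r := check.deny with input as {"aws": {"cloudtrail": {"trails": [
+		{"ismultiregion": {"value": true}},
+		{"ismultiregion": {"value": false}},
+	]}}}
+	test.assert_equal_message("Trail is not enabled across all regions.", r)
+}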
diff --git a/checks/cloud/aws/cloudtrail/enable_log_validation.rego b/checks/cloud/aws/cloudtrail/enable_log_validation.rego
new file mode 100644
index 00000000..b75cef8a
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/enable_log_validation.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Cloudtrail log validation should be enabled to prevent tampering of log data
+# description: |
+# Log validation should be activated on Cloudtrail logs to prevent the tampering of the underlying data in the S3 bucket. It is feasible that a rogue actor compromising an AWS account might want to modify the log data to remove trace of their actions.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-log-file-validation-intro.html
+# custom:
+# id: AVD-AWS-0016
+# avd_id: AVD-AWS-0016
+# provider: aws
+# service: cloudtrail
+# severity: HIGH
+# short_code: enable-log-validation
+# recommended_action: Turn on log validation for Cloudtrail
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudtrail
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#enable_log_file_validation
+# good_examples: checks/cloud/aws/cloudtrail/enable_log_validation.tf.go
+# bad_examples: checks/cloud/aws/cloudtrail/enable_log_validation.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudtrail/enable_log_validation.cf.go
+# bad_examples: checks/cloud/aws/cloudtrail/enable_log_validation.cf.go
+package builtin.aws.cloudtrail.aws0016
+
+import rego.v1
+
+deny contains res if {
+ some trail in input.aws.cloudtrail.trails
+ not trail.enablelogfilevalidation.value
+ res := result.new("Trail does not have log validation enabled.", trail.enablelogfilevalidation)
+}
diff --git a/checks/cloud/aws/cloudtrail/enable_log_validation_test.rego b/checks/cloud/aws/cloudtrail/enable_log_validation_test.rego
new file mode 100644
index 00000000..7436046e
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/enable_log_validation_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.cloudtrail.aws0016_test
+
+import rego.v1
+
+import data.builtin.aws.cloudtrail.aws0016 as check
+import data.lib.test
+
+test_allow_trail_with_log_validation if {
+ inp := {"aws": {"cloudtrail": {"trails": [{"enablelogfilevalidation": {"value": true}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_trail_without_log_validation if {
+ inp := {"aws": {"cloudtrail": {"trails": [{"enablelogfilevalidation": {"value": false}}]}}}
+ test.assert_equal_message("Trail does not have log validation enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/cloudtrail/encryption_customer_key.rego b/checks/cloud/aws/cloudtrail/encryption_customer_key.rego
new file mode 100644
index 00000000..e2950946
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/encryption_customer_key.rego
@@ -0,0 +1,43 @@
+# METADATA
+# title: CloudTrail should use Customer managed keys to encrypt the logs
+# description: |
+# Using Customer managed keys provides comprehensive control over cryptographic keys, enabling management of policies, permissions, and rotation, thus enhancing security and compliance measures for sensitive data and systems.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html
+# - https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-mgmt
+# custom:
+# id: AVD-AWS-0015
+# avd_id: AVD-AWS-0015
+# provider: aws
+# service: cloudtrail
+# severity: HIGH
+# short_code: encryption-customer-managed-key
+# recommended_action: Use Customer managed key
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudtrail
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#kms_key_id
+# good_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.tf.go
+# bad_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.tf.go
+# cloudformation:
+# links:
+# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudtrail-trail.html#cfn-cloudtrail-trail-kmskeyid
+# good_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.cf.go
+# bad_examples: checks/cloud/aws/cloudtrail/encryption_customer_key.cf.go
+package builtin.aws.cloudtrail.aws0015
+
+import rego.v1
+
+deny contains res if {
+ some trail in input.aws.cloudtrail.trails
+ trail.kmskeyid.value == ""
+ res := result.new("CloudTrail does not use a customer managed key to encrypt the logs.", trail.kmskeyid)
+}
diff --git a/checks/cloud/aws/cloudtrail/encryption_customer_key_test.rego b/checks/cloud/aws/cloudtrail/encryption_customer_key_test.rego
new file mode 100644
index 00000000..3005c9ba
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/encryption_customer_key_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.cloudtrail.aws0015_test
+
+import rego.v1
+
+import data.builtin.aws.cloudtrail.aws0015 as check
+import data.lib.test
+
+test_allow_trail_with_cmk if {
+ inp := {"aws": {"cloudtrail": {"trails": [{"kmskeyid": {"value": "key-id"}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_trail_without_cmk if {
+ inp := {"aws": {"cloudtrail": {"trails": [{"kmskeyid": {"value": ""}}]}}}
+ test.assert_equal_message("CloudTrail does not use a customer managed key to encrypt the logs.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego
new file mode 100644
index 00000000..1249935b
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.rego
@@ -0,0 +1,47 @@
+# METADATA
+# title: CloudTrail logs should be stored in S3 and also sent to CloudWatch Logs
+# description: |
+# CloudTrail is a web service that records AWS API calls made in a given account. The recorded information includes the identity of the API caller, the time of the API call, the source IP address of the API caller, the request parameters, and the response elements returned by the AWS service.
+# CloudTrail uses Amazon S3 for log file storage and delivery, so log files are stored durably. In addition to capturing CloudTrail logs in a specified Amazon S3 bucket for long-term analysis, you can perform real-time analysis by configuring CloudTrail to send logs to CloudWatch Logs.
+# For a trail that is enabled in all Regions in an account, CloudTrail sends log files from all those Regions to a CloudWatch Logs log group.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html#send-cloudtrail-events-to-cloudwatch-logs-console
+# custom:
+# id: AVD-AWS-0162
+# avd_id: AVD-AWS-0162
+# provider: aws
+# service: cloudtrail
+# severity: LOW
+# short_code: ensure-cloudwatch-integration
+# recommended_action: Enable logging to CloudWatch
+# frameworks:
+# cis-aws-1.2:
+# - "2.4"
+# cis-aws-1.4:
+# - "3.4"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudtrail
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail
+# good_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.tf.go
+# bad_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.cf.go
+# bad_examples: checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration.cf.go
+package builtin.aws.cloudtrail.aws0162
+
+import rego.v1
+
+deny contains res if {
+ some trail in input.aws.cloudtrail.trails
+ trail.cloudwatchlogsloggrouparn.value == ""
+ res := result.new("Trail does not have CloudWatch logging configured", trail)
+}
diff --git a/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.rego b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.rego
new file mode 100644
index 00000000..c04d79ed
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/ensure_cloudwatch_integration_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.cloudtrail.aws0162_test
+
+import rego.v1
+
+import data.builtin.aws.cloudtrail.aws0162 as check
+import data.lib.test
+
+test_allow_cloudwatch_integration if {
+ inp := {"aws": {"cloudtrail": {"trails": [{"cloudwatchlogsloggrouparn": {"value": "log-group-arn"}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_without_cloudwatch_integration if {
+ inp := {"aws": {"cloudtrail": {"trails": [{"cloudwatchlogsloggrouparn": {"value": ""}}]}}}
+	test.assert_equal_message("Trail does not have CloudWatch logging configured", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/cloudtrail/no_public_log_access.rego b/checks/cloud/aws/cloudtrail/no_public_log_access.rego
new file mode 100644
index 00000000..87aca43f
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/no_public_log_access.rego
@@ -0,0 +1,52 @@
+# METADATA
+# title: The S3 Bucket backing Cloudtrail should be private
+# description: |
+# CloudTrail logs a record of every API call made in your account. These log files are stored in an S3 bucket. CIS recommends that the S3 bucket policy, or access control list (ACL), applied to the S3 bucket that CloudTrail logs to prevents public access to the CloudTrail logs. Allowing public access to CloudTrail log content might aid an adversary in identifying weaknesses in the affected account's use or configuration.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonS3/latest/userguide/configuring-block-public-access-bucket.html
+# custom:
+# id: AVD-AWS-0161
+# avd_id: AVD-AWS-0161
+# provider: aws
+# service: cloudtrail
+# severity: CRITICAL
+# short_code: no-public-log-access
+# recommended_action: Restrict public access to the S3 bucket
+# frameworks:
+# cis-aws-1.4:
+# - "3.3"
+# cis-aws-1.2:
+# - "2.3"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudtrail
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#is_multi_region_trail
+# good_examples: checks/cloud/aws/cloudtrail/no_public_log_access.tf.go
+# bad_examples: checks/cloud/aws/cloudtrail/no_public_log_access.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudtrail/no_public_log_access.cf.go
+# bad_examples: checks/cloud/aws/cloudtrail/no_public_log_access.cf.go
+package builtin.aws.cloudtrail.aws0161
+
+import rego.v1
+
+import data.lib.s3
+
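+# `s3.bucket_has_public_access` is a shared helper from data.lib.s3; it is
+# assumed to hold for buckets whose ACL (for example "public-read" or
+# "public-read-write", as used in the accompanying test) or public access
+# block settings leave them publicly reachable.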
+deny contains res if {
+ some trail in input.aws.cloudtrail.trails
+ trail.bucketname.value != ""
+
+ some bucket in input.aws.s3.buckets
+ bucket.name.value == trail.bucketname.value
+
+ s3.bucket_has_public_access(bucket)
+ res := result.new("Trail S3 bucket is publicly exposed", bucket)
+}
diff --git a/checks/cloud/aws/cloudtrail/no_public_log_access_test.rego b/checks/cloud/aws/cloudtrail/no_public_log_access_test.rego
new file mode 100644
index 00000000..1a9e2702
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/no_public_log_access_test.rego
@@ -0,0 +1,25 @@
+package builtin.aws.cloudtrail.aws0161_test
+
+import rego.v1
+
+import data.builtin.aws.cloudtrail.aws0161 as check
+import data.lib.test
+
+test_allow_bucket_without_public_access if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{"bucketname": {"value": "bucket_name"}}]},
+ "s3": {"buckets": [{"name": {"value": "bucket_name"}, "acl": {"value": "private"}}]},
+ }}
+ test.assert_empty(check.deny) with input as inp
+}
+
+# TODO: count should be 2
+test_disallow_bucket_with_public_access if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{"bucketname": {"value": "bucket_name"}}]},
+ "s3": {"buckets": [{"name": {"value": "bucket_name"}, "acl": {"value": "public-read"}}, {"name": {"value": "bucket_name"}, "acl": {"value": "public-read-write"}}]},
+ }}
+
+ # test.assert_equal_message("Bucket has public access", check.deny) with input as inp
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego
new file mode 100644
index 00000000..47acd50a
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/require_bucket_access_logging.rego
@@ -0,0 +1,52 @@
+# METADATA
+# title: You should enable bucket access logging on the CloudTrail S3 bucket.
+# description: |
+# Amazon S3 bucket access logging generates a log that contains access records for each request made to your S3 bucket. An access log record contains details about the request, such as the request type, the resources specified in the request, and the time and date the request was processed.
+# CIS recommends that you enable bucket access logging on the CloudTrail S3 bucket.
+# By enabling S3 bucket logging on target S3 buckets, you can capture all events that might affect objects in a target bucket. Configuring logs to be placed in a separate bucket enables access to log information, which can be useful in security and incident response workflows.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
+# custom:
+# id: AVD-AWS-0163
+# avd_id: AVD-AWS-0163
+# provider: aws
+# service: cloudtrail
+# severity: LOW
+# short_code: require-bucket-access-logging
+# recommended_action: Enable access logging on the bucket
+# frameworks:
+# cis-aws-1.4:
+# - "3.6"
+# cis-aws-1.2:
+# - "2.6"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudtrail
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudtrail#is_multi_region_trail
+# good_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.tf.go
+# bad_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.cf.go
+# bad_examples: checks/cloud/aws/cloudtrail/require_bucket_access_logging.cf.go
+package builtin.aws.cloudtrail.aws0163
+
+import rego.v1
+
+deny contains res if {
+ some trail in input.aws.cloudtrail.trails
+ trail.bucketname.value != ""
+
+ some bucket in input.aws.s3.buckets
+ bucket.name.value == trail.bucketname.value
+ not bucket.logging.enabled.value
+
+ res := result.new("Trail S3 bucket does not have logging enabled", bucket)
+}
diff --git a/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.rego b/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.rego
new file mode 100644
index 00000000..5b09f1ef
--- /dev/null
+++ b/checks/cloud/aws/cloudtrail/require_bucket_access_logging_test.rego
@@ -0,0 +1,33 @@
+package builtin.aws.cloudtrail.aws0163_test
+
+import rego.v1
+
+import data.builtin.aws.cloudtrail.aws0163 as check
+import data.lib.test
+
+test_allow_bucket_with_logging_enabled if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{"bucketname": {"value": "bucket1"}}]},
+ "s3": {"buckets": [{
+ "name": {"value": "bucket1"},
+ "logging": {"enabled": {"value": true}},
+ }]},
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_bucket_with_logging_disabled if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{"bucketname": {"value": "bucket1"}}]},
+ "s3": {"buckets": [{
+ "name": {"value": "bucket1"},
+ "logging": {"enabled": {"value": false}},
+ }]},
+ }}
+
+ test.assert_equal_message(
+ "Trail S3 bucket does not have logging enabled",
+ check.deny,
+ ) with input as inp
+}
diff --git a/checks/cloud/aws/cloudwatch/cloudwatch.go b/checks/cloud/aws/cloudwatch/cloudwatch.go
new file mode 100644
index 00000000..c60f4174
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/cloudwatch.go
@@ -0,0 +1 @@
+package cloudwatch
diff --git a/checks/cloud/aws/cloudwatch/log_group_customer_key.rego b/checks/cloud/aws/cloudwatch/log_group_customer_key.rego
new file mode 100644
index 00000000..51f18347
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/log_group_customer_key.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: CloudWatch log groups should be encrypted using CMK
+# description: |
+# CloudWatch log groups are encrypted by default; however, to get the full benefit of controlling key rotation and other KMS aspects, a KMS CMK should be used.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html
+# custom:
+# id: AVD-AWS-0017
+# avd_id: AVD-AWS-0017
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: log-group-customer-key
+# recommended_action: Enable CMK encryption of CloudWatch Log Groups
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group#kms_key_id
+# good_examples: checks/cloud/aws/cloudwatch/log_group_customer_key.tf.go
+# bad_examples: checks/cloud/aws/cloudwatch/log_group_customer_key.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/cloudwatch/log_group_customer_key.cf.go
+# bad_examples: checks/cloud/aws/cloudwatch/log_group_customer_key.cf.go
+package builtin.aws.cloudwatch.aws0017
+
+import rego.v1
+
+deny contains res if {
+ some group in input.aws.cloudwatch.loggroups
+ group.kmskeyid.value == ""
+ res := result.new("Log group is not encrypted.", group)
+}
diff --git a/checks/cloud/aws/cloudwatch/log_group_customer_key_test.rego b/checks/cloud/aws/cloudwatch/log_group_customer_key_test.rego
new file mode 100644
index 00000000..0428d709
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/log_group_customer_key_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.cloudwatch.aws0017_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0017 as check
+import data.lib.test
+
+test_allow_log_group_with_cmk if {
+ inp := {"aws": {"cloudwatch": {"loggroups": [{"kmskeyid": {"value": "some-key-id"}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_log_group_without_cmk if {
+ inp := {"aws": {"cloudwatch": {"loggroups": [{"kmskeyid": {"value": ""}}]}}}
+
+ test.assert_equal_message("Log group is not encrypted.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/cloudwatch/require_cloudtrail_change_alarm.rego b/checks/cloud/aws/cloudwatch/require_cloudtrail_change_alarm.rego
new file mode 100644
index 00000000..464c1ab2
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_cloudtrail_change_alarm.rego
@@ -0,0 +1,47 @@
+# METADATA
+# title: Ensure a log metric filter and alarm exist for CloudTrail configuration changes
+# description: |
+# You can do real-time monitoring of API calls by directing CloudTrail logs to CloudWatch Logs and establishing corresponding metric filters and alarms.
+# CIS recommends that you create a metric filter and alarm for changes to CloudTrail configuration settings. Monitoring these changes helps ensure sustained visibility to activities in the account.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html
+# custom:
+# id: AVD-AWS-0151
+# avd_id: AVD-AWS-0151
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: require-cloud-trail-change-alarm
+# recommended_action: Create an alarm to alert on CloudTrail configuration changes
+# frameworks:
+# cis-aws-1.2:
+# - "3.5"
+# cis-aws-1.4:
+# - "4.5"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# cloudformation:
+package builtin.aws.cloudwatch.aws0151
+
+import data.lib.aws
+import rego.v1
+
+config_changes_filter_pattern := `{($.eventName=CreateTrail) || ($.eventName=UpdateTrail) || ($.eventName=DeleteTrail) || ($.eventName=StartLogging) || ($.eventName=StopLogging)}`
+
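+# `aws.trails_without_filter` and `aws.trails_without_alarm_for_filter` come from
+# the shared data.lib.aws helpers; they are assumed to return the multi-region,
+# logging-enabled trails whose CloudWatch log group lacks a metric filter with the
+# given pattern, or lacks an alarm wired to such a filter, respectively (the
+# accompanying tests exercise both paths).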
+deny contains res if {
+ some trail in aws.trails_without_filter(config_changes_filter_pattern)
+	res := result.new("Cloudtrail has no configuration change log filter", trail)
+}
+
+deny contains res if {
+ some trail in aws.trails_without_alarm_for_filter(config_changes_filter_pattern)
+	res := result.new("Cloudtrail has no configuration change alarm", trail)
+}
diff --git a/checks/cloud/aws/cloudwatch/require_cloudtrail_change_alarm_test.rego b/checks/cloud/aws/cloudwatch/require_cloudtrail_change_alarm_test.rego
new file mode 100644
index 00000000..bd57f083
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_cloudtrail_change_alarm_test.rego
@@ -0,0 +1,117 @@
+package builtin.aws.cloudwatch.aws0151_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0151 as check
+import data.lib.test
+
+test_allow_trail_alarms_on_configuration_change if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ "metricfilters": [{
+ "filterpattern": {"value": "{($.eventName=CreateTrail) || ($.eventName=UpdateTrail) || ($.eventName=DeleteTrail) || ($.eventName=StartLogging) || ($.eventName=StopLogging)}"},
+ "filtername": {"value": "CloudTrailConfigurationChange"},
+ }],
+ }],
+ "alarms": [{
+ "alarmname": {"value": "CloudTrailConfigurationChange"},
+ "metricname": {"value": "CloudTrailConfigurationChange"},
+ "metrics": [{"id": {"value": "CloudTrailConfigurationChange"}}],
+ }],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_trail_does_not_have_filter_for_configuration_change if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"}}],
+ "alarms": [{"alarmname": {"value": "OtherAlarm"}}],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no configuration change log filter", check.deny) with input as inp
+}
+
+test_disallow_trail_does_not_have_alarm_for_configuration_change if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ "metricfilters": [{
+ "filterpattern": {"value": "{($.eventName=CreateTrail) || ($.eventName=UpdateTrail) || ($.eventName=DeleteTrail) || ($.eventName=StartLogging) || ($.eventName=StopLogging)}"},
+ "filtername": {"value": "CloudTrailConfigurationChange"},
+ }],
+ }],
+ "alarms": [{"metricname": {"value": "OtherAlarm"}}],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no configuration change alarm", check.deny) with input as inp
+}
+
+test_allow_trail_is_not_multiregion if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": false},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"}}],
+ "alarms": [],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_trail_is_not_logging if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": false},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"}}],
+ "alarms": [],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_trail_without_loggroup if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"}}],
+ "alarms": [],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/cloudwatch/require_cmk_disabled_alarm.rego b/checks/cloud/aws/cloudwatch/require_cmk_disabled_alarm.rego
new file mode 100644
index 00000000..4388b849
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_cmk_disabled_alarm.rego
@@ -0,0 +1,48 @@
+# METADATA
+# title: Ensure a log metric filter and alarm exist for disabling or scheduled deletion of customer managed keys
+# description: |
+# You can do real-time monitoring of API calls by directing CloudTrail logs to CloudWatch Logs and establishing corresponding metric filters and alarms.
+# CIS recommends that you create a metric filter and alarm for customer managed keys that have changed state to disabled or scheduled deletion. Data encrypted with disabled or deleted keys is no longer accessible.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html
+# custom:
+# id: AVD-AWS-0153
+# avd_id: AVD-AWS-0153
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: require-cmk-disabled-alarm
+# recommended_action: Create an alarm to alert on CMKs being disabled or scheduled for deletion
+# frameworks:
+# cis-aws-1.2:
+# - "3.7"
+# cis-aws-1.4:
+# - "4.7"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# cloudformation:
+package builtin.aws.cloudwatch.aws0153
+
+import rego.v1
+
+import data.lib.aws
+
+disabled_filter_pattern := `{($.eventSource=kms.amazonaws.com) && (($.eventName=DisableKey) || ($.eventName=ScheduleKeyDeletion))}`
+
+deny contains res if {
+ some trail in aws.trails_without_filter(disabled_filter_pattern)
+ res := result.new("Cloudtrail has no CMK disabled log filter", trail)
+}
+
+deny contains res if {
+ some trail in aws.trails_without_alarm_for_filter(disabled_filter_pattern)
+	res := result.new("Cloudtrail has no CMK disabled or scheduled deletion alarm", trail)
+}
diff --git a/checks/cloud/aws/cloudwatch/require_cmk_disabled_alarm_test.rego b/checks/cloud/aws/cloudwatch/require_cmk_disabled_alarm_test.rego
new file mode 100644
index 00000000..3f5ebd4a
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_cmk_disabled_alarm_test.rego
@@ -0,0 +1,70 @@
+package builtin.aws.cloudwatch.aws0153_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0153 as check
+import data.lib.test
+
+test_allow_alarm_exists_for_disabled_cmk if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ "metricfilters": [{
+ "filterpattern": {"value": "{($.eventSource=kms.amazonaws.com) && (($.eventName=DisableKey) || ($.eventName=ScheduleKeyDeletion))}"},
+					"filtername": {"value": "CMKDisabledOrScheduledDelete"},
+ }],
+ }],
+ "alarms": [{
+				"alarmname": {"value": "CMKDisabledOrScheduledDelete"},
+				"metricname": {"value": "CMKDisabledOrScheduledDelete"},
+				"metrics": [{"id": {"value": "CMKDisabledOrScheduledDelete"}}],
+ }],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_alarm_does_not_exist_for_disabled_cmk if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ "metricfilters": [{
+ "filterpattern": {"value": `{($.eventSource=kms.amazonaws.com) && (($.eventName=DisableKey) || ($.eventName=ScheduleKeyDeletion))}`},
+					"filtername": {"value": "CMKDisabledOrScheduledDelete"},
+ }],
+ }],
+ "alarms": [],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no CMK disabled or scheduled deletion alarm", check.deny) with input as inp
+}
+
+test_disallow_filter_does_not_exist_for_disabled_cmk if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [{
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"},
+ }]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"}}],
+ "alarms": [],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no CMK disabled or scheduled deletion alarm", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/cloudwatch/require_config_configuration_change_alarm.rego b/checks/cloud/aws/cloudwatch/require_config_configuration_change_alarm.rego
new file mode 100644
index 00000000..7a7decfc
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_config_configuration_change_alarm.rego
@@ -0,0 +1,48 @@
+# METADATA
+# title: Ensure a log metric filter and alarm exist for AWS Config configuration changes
+# description: |
+# You can do real-time monitoring of API calls by directing CloudTrail logs to CloudWatch Logs and establishing corresponding metric filters and alarms.
+# CIS recommends that you create a metric filter and alarm for changes to AWS Config configuration settings. Monitoring these changes helps ensure sustained visibility of configuration items in the account.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html
+# custom:
+# id: AVD-AWS-0155
+# avd_id: AVD-AWS-0155
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: require-config-configuration-changes-alarm
+# recommended_action: Create an alarm to alert on AWS Config configuration changes
+# frameworks:
+# cis-aws-1.2:
+# - "3.9"
+# cis-aws-1.4:
+# - "4.9"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# cloudformation:
+package builtin.aws.cloudwatch.aws0155
+
+import rego.v1
+
+import data.lib.aws
+
+filter_pattern := `{($.eventSource=config.amazonaws.com) && (($.eventName=StopConfigurationRecorder) || ($.eventName=DeleteDeliveryChannel) || ($.eventName=PutDeliveryChannel) || ($.eventName=PutConfigurationRecorder))}`
+
+deny contains res if {
+ some trail in aws.trails_without_filter(filter_pattern)
+ res := result.new("Cloudtrail has no Config configuration change log filter", trail)
+}
+
+deny contains res if {
+ some trail in aws.trails_without_alarm_for_filter(filter_pattern)
+ res := result.new("Cloudtrail has no Config configuration change alarm", trail)
+}
diff --git a/checks/cloud/aws/cloudwatch/require_config_configuration_change_alarm_test.rego b/checks/cloud/aws/cloudwatch/require_config_configuration_change_alarm_test.rego
new file mode 100644
index 00000000..74fe4a14
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_config_configuration_change_alarm_test.rego
@@ -0,0 +1,64 @@
+package builtin.aws.cloudwatch.aws0155_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0155 as check
+import data.lib.test
+
+test_allow_config_configuration_change_alarm if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": log_group_arn},
+				"metricfilters": [config_change_metric_filter],
+ }],
+ "alarms": [config_changes_alarm],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_filter_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": log_group_arn}}],
+ "alarms": [config_changes_alarm],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no Config configuration change log filter", check.deny) with input as inp
+}
+
+test_disallow_alarm_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {"loggroups": [{
+ "arn": {"value": log_group_arn},
+			"metricfilters": [config_change_metric_filter],
+ }]},
+ }}
+
+ test.assert_equal_message("Cloudtrail has no Config configuration change alarm", check.deny) with input as inp
+}
+
+multiregion_trail := {
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": log_group_arn},
+}
+
+config_changes_alarm := {
+ "alarmname": {"value": "ConfigConfigurationChange"},
+ "metricname": {"value": "ConfigConfigurationChange"},
+ "metrics": [{"id": {"value": "ConfigConfigurationChange"}}],
+}
+
+config_change_metric_filter := {
+ "filterpattern": {"value": check.filter_pattern},
+ "filtername": {"value": "ConfigConfigurationChange"},
+}
+
+log_group_arn := "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"
diff --git a/checks/cloud/aws/cloudwatch/require_console_login_failure_alarm.rego b/checks/cloud/aws/cloudwatch/require_console_login_failure_alarm.rego
new file mode 100644
index 00000000..ecf3a30a
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_console_login_failure_alarm.rego
@@ -0,0 +1,48 @@
+# METADATA
+# title: Ensure a log metric filter and alarm exist for AWS Management Console authentication failures
+# description: |
+# You can do real-time monitoring of API calls by directing CloudTrail logs to CloudWatch Logs and establishing corresponding metric filters and alarms.
+# CIS recommends that you create a metric filter and alarm for failed console authentication attempts. Monitoring failed console logins might decrease lead time to detect an attempt to brute-force a credential, which might provide an indicator, such as source IP, that you can use in other event correlations.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-aws-console-sign-in-events.html
+# custom:
+# id: AVD-AWS-0152
+# avd_id: AVD-AWS-0152
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: require-console-login-failures-alarm
+# recommended_action: Create an alarm to alert on console login failures
+# frameworks:
+# cis-aws-1.4:
+# - "4.6"
+# cis-aws-1.2:
+# - "3.6"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# cloudformation:
+package builtin.aws.cloudwatch.aws0152
+
+import rego.v1
+
+import data.lib.aws
+
+filter_pattern := `{($.eventName=ConsoleLogin) && ($.errorMessage="Failed authentication")}`
+
+deny contains res if {
+ some trail in aws.trails_without_filter(filter_pattern)
+ res := result.new("Cloudtrail has no console login failure log filter", trail)
+}
+
+deny contains res if {
+ some trail in aws.trails_without_alarm_for_filter(filter_pattern)
+	res := result.new("Cloudtrail has no console login failure alarm", trail)
+}
diff --git a/checks/cloud/aws/cloudwatch/require_console_login_failure_alarm_test.rego b/checks/cloud/aws/cloudwatch/require_console_login_failure_alarm_test.rego
new file mode 100644
index 00000000..f876ccfe
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_console_login_failure_alarm_test.rego
@@ -0,0 +1,64 @@
+package builtin.aws.cloudwatch.aws0152_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0152 as check
+import data.lib.test
+
+test_allow_console_login_failure_alarm if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [console_login_failure_metric_filter],
+ }],
+ "alarms": [console_login_failure_alarm],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_filter_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": log_group_arn}}],
+ "alarms": [console_login_failure_alarm],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no console login failure log filter", check.deny) with input as inp
+}
+
+test_disallow_alarm_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {"loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [console_login_failure_metric_filter],
+ }]},
+ }}
+
+	test.assert_equal_message("Cloudtrail has no console login failure alarm", check.deny) with input as inp
+}
+
+multiregion_trail := {
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": log_group_arn},
+}
+
+console_login_failure_alarm := {
+ "alarmname": {"value": "ConsoleLoginFailure"},
+ "metricname": {"value": "ConsoleLoginFailure"},
+ "metrics": [{"id": {"value": "ConsoleLoginFailure"}}],
+}
+
+console_login_failure_metric_filter := {
+ "filterpattern": {"value": check.filter_pattern},
+ "filtername": {"value": "ConsoleLoginFailure"},
+}
+
+log_group_arn := "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"
diff --git a/checks/cloud/aws/cloudwatch/require_iam_policy_change_alarm.rego b/checks/cloud/aws/cloudwatch/require_iam_policy_change_alarm.rego
new file mode 100644
index 00000000..8f27828c
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_iam_policy_change_alarm.rego
@@ -0,0 +1,63 @@
+# METADATA
+# title: Ensure a log metric filter and alarm exist for IAM policy changes
+# description: |
+# You can do real-time monitoring of API calls by directing CloudTrail logs to CloudWatch Logs and establishing corresponding metric filters and alarms.
+# CIS recommends that you create a metric filter and alarm for changes made to IAM policies. Monitoring these changes helps ensure that authentication and authorization controls remain intact.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html
+# custom:
+# id: AVD-AWS-0150
+# avd_id: AVD-AWS-0150
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: require-iam-policy-change-alarm
+# recommended_action: Create an alarm to alert on IAM Policy changes
+# frameworks:
+# cis-aws-1.2:
+# - "3.4"
+# cis-aws-1.4:
+# - "4.4"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# cloudformation:
+package builtin.aws.cloudwatch.aws0150
+
+import rego.v1
+
+import data.lib.aws
+
+filter_pattern := `{($.eventName=DeleteGroupPolicy) ||
+($.eventName=DeleteRolePolicy) ||
+($.eventName=DeleteUserPolicy) ||
+($.eventName=PutGroupPolicy) ||
+($.eventName=PutRolePolicy) ||
+($.eventName=PutUserPolicy) ||
+($.eventName=CreatePolicy) ||
+($.eventName=DeletePolicy) ||
+($.eventName=CreatePolicyVersion) ||
+($.eventName=DeletePolicyVersion) ||
+($.eventName=AttachRolePolicy) ||
+($.eventName=DetachRolePolicy) ||
+($.eventName=AttachUserPolicy) ||
+($.eventName=DetachUserPolicy) ||
+($.eventName=AttachGroupPolicy) ||
+($.eventName=DetachGroupPolicy)}`
+
+deny contains res if {
+ some trail in aws.trails_without_filter(filter_pattern)
+ res := result.new("Cloudtrail has no IAM policy change log filter", trail)
+}
+
+deny contains res if {
+ some trail in aws.trails_without_alarm_for_filter(filter_pattern)
+ res := result.new("Cloudtrail has no IAM Policy change alarm", trail)
+}
diff --git a/checks/cloud/aws/cloudwatch/require_iam_policy_change_alarm_test.rego b/checks/cloud/aws/cloudwatch/require_iam_policy_change_alarm_test.rego
new file mode 100644
index 00000000..8eef1f92
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_iam_policy_change_alarm_test.rego
@@ -0,0 +1,64 @@
+package builtin.aws.cloudwatch.aws0150_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0150 as check
+import data.lib.test
+
+test_allow_change_alarm if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [change_metric_filter],
+ }],
+ "alarms": [change_alarm],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_filter_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": log_group_arn}}],
+ "alarms": [change_alarm],
+ },
+ }}
+
+ test.assert_equal_message("Cloudtrail has no IAM policy change log filter", check.deny) with input as inp
+}
+
+test_disallow_alarm_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {"loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [change_metric_filter],
+ }]},
+ }}
+
+	test.assert_equal_message("Cloudtrail has no IAM Policy change alarm", check.deny) with input as inp
+}
+
+multiregion_trail := {
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": log_group_arn},
+}
+
+change_alarm := {
+ "alarmname": {"value": "ConsoleLoginFailure"},
+ "metricname": {"value": "ConsoleLoginFailure"},
+ "metrics": [{"id": {"value": "ConsoleLoginFailure"}}],
+}
+
+change_metric_filter := {
+ "filterpattern": {"value": check.filter_pattern},
+ "filtername": {"value": "ConsoleLoginFailure"},
+}
+
+log_group_arn := "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"
diff --git a/checks/cloud/aws/cloudwatch/require_nacl_change_alarm.rego b/checks/cloud/aws/cloudwatch/require_nacl_change_alarm.rego
new file mode 100644
index 00000000..8340b271
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_nacl_change_alarm.rego
@@ -0,0 +1,52 @@
+# METADATA
+# title: Ensure a log metric filter and alarm exist for changes to Network Access Control Lists (NACL)
+# description: |
+# You can do real-time monitoring of API calls by directing CloudTrail logs to CloudWatch Logs and establishing corresponding metric filters and alarms.
+# NACLs are used as a stateless packet filter to control ingress and egress traffic for subnets in a VPC.
+# CIS recommends that you create a metric filter and alarm for changes to NACLs. Monitoring these changes helps ensure that AWS resources and services aren't unintentionally exposed.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html
+# custom:
+# id: AVD-AWS-0157
+# avd_id: AVD-AWS-0157
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: require-nacl-changes-alarm
+# recommended_action: Create an alarm to alert on network acl changes
+# frameworks:
+# cis-aws-1.4:
+# - "4.11"
+# cis-aws-1.2:
+# - "3.11"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# cloudformation:
+package builtin.aws.cloudwatch.aws0157
+
+import rego.v1
+
+import data.lib.aws
+
+filter_pattern := `{($.eventName=CreateNetworkAcl) ||
+ ($.eventName=CreateNetworkAclEntry) || ($.eventName=DeleteNetworkAcl) ||
+ ($.eventName=DeleteNetworkAclEntry) || ($.eventName=ReplaceNetworkAclEntry) ||
+ ($.eventName=ReplaceNetworkAclAssociation)}`
+
+deny contains res if {
+ some trail in aws.trails_without_filter(filter_pattern)
+ res := result.new("Cloudtrail has no network ACL change log filter", trail)
+}
+
+deny contains res if {
+ some trail in aws.trails_without_alarm_for_filter(filter_pattern)
+ res := result.new("Cloudtrail has no network ACL change alarm", trail)
+}
diff --git a/checks/cloud/aws/cloudwatch/require_nacl_change_alarm_test.rego b/checks/cloud/aws/cloudwatch/require_nacl_change_alarm_test.rego
new file mode 100644
index 00000000..b3a77259
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_nacl_change_alarm_test.rego
@@ -0,0 +1,64 @@
+package builtin.aws.cloudwatch.aws0157_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0157 as check
+import data.lib.test
+
+test_allow_change_alarm if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [change_metric_filter],
+ }],
+ "alarms": [change_alarm],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_filter_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": log_group_arn}}],
+ "alarms": [change_alarm],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no network ACL change log filter", check.deny) with input as inp
+}
+
+test_disallow_alarm_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {"loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [change_metric_filter],
+ }]},
+ }}
+
+	test.assert_equal_message("Cloudtrail has no network ACL change alarm", check.deny) with input as inp
+}
+
+multiregion_trail := {
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": log_group_arn},
+}
+
+change_alarm := {
+ "alarmname": {"value": "ConsoleLoginFailure"},
+ "metricname": {"value": "ConsoleLoginFailure"},
+ "metrics": [{"id": {"value": "ConsoleLoginFailure"}}],
+}
+
+change_metric_filter := {
+ "filterpattern": {"value": check.filter_pattern},
+ "filtername": {"value": "ConsoleLoginFailure"},
+}
+
+log_group_arn := "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"
diff --git a/checks/cloud/aws/cloudwatch/require_network_gateway_change_alarm.rego b/checks/cloud/aws/cloudwatch/require_network_gateway_change_alarm.rego
new file mode 100644
index 00000000..97af260a
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_network_gateway_change_alarm.rego
@@ -0,0 +1,52 @@
+# METADATA
+# title: Ensure a log metric filter and alarm exist for changes to network gateways
+# description: |
+# You can do real-time monitoring of API calls by directing CloudTrail logs to CloudWatch Logs and establishing corresponding metric filters and alarms.
+# Network gateways are required to send and receive traffic to a destination outside a VPC.
+# CIS recommends that you create a metric filter and alarm for changes to network gateways. Monitoring these changes helps ensure that all ingress and egress traffic traverses the VPC border via a controlled path.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudwatch-alarms-for-cloudtrail.html
+# custom:
+# id: AVD-AWS-0158
+# avd_id: AVD-AWS-0158
+# provider: aws
+# service: cloudwatch
+# severity: LOW
+# short_code: require-network-gateway-changes-alarm
+# recommended_action: Create an alarm to alert on network gateway changes
+# frameworks:
+# cis-aws-1.2:
+# - "3.12"
+# cis-aws-1.4:
+# - "4.12"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: cloudwatch
+# provider: aws
+# terraform:
+# cloudformation:
+package builtin.aws.cloudwatch.aws0158
+
+import rego.v1
+
+import data.lib.aws
+
+filter_pattern := `{($.eventName=CreateCustomerGateway) ||
+ ($.eventName=DeleteCustomerGateway) || ($.eventName=AttachInternetGateway) ||
+ ($.eventName=CreateInternetGateway) || ($.eventName=DeleteInternetGateway) ||
+ ($.eventName=DetachInternetGateway)}`
+
+deny contains res if {
+ some trail in aws.trails_without_filter(filter_pattern)
+ res := result.new("Cloudtrail has no network gateway change log filter", trail)
+}
+
+deny contains res if {
+ some trail in aws.trails_without_alarm_for_filter(filter_pattern)
+ res := result.new("Cloudtrail has no network gateway change alarm", trail)
+}
diff --git a/checks/cloud/aws/cloudwatch/require_network_gateway_change_alarm_test.rego b/checks/cloud/aws/cloudwatch/require_network_gateway_change_alarm_test.rego
new file mode 100644
index 00000000..c5377ca2
--- /dev/null
+++ b/checks/cloud/aws/cloudwatch/require_network_gateway_change_alarm_test.rego
@@ -0,0 +1,64 @@
+package builtin.aws.cloudwatch.aws0158_test
+
+import rego.v1
+
+import data.builtin.aws.cloudwatch.aws0158 as check
+import data.lib.test
+
+test_allow_change_alarm if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [change_metric_filter],
+ }],
+ "alarms": [change_alarm],
+ },
+ }}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_filter_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {
+ "loggroups": [{"arn": {"value": log_group_arn}}],
+ "alarms": [change_alarm],
+ },
+ }}
+
+	test.assert_equal_message("Cloudtrail has no network gateway change log filter", check.deny) with input as inp
+}
+
+test_disallow_alarm_does_not_exist if {
+ inp := {"aws": {
+ "cloudtrail": {"trails": [multiregion_trail]},
+ "cloudwatch": {"loggroups": [{
+ "arn": {"value": log_group_arn},
+ "metricfilters": [change_metric_filter],
+ }]},
+ }}
+
+	test.assert_equal_message("Cloudtrail has no network gateway change alarm", check.deny) with input as inp
+}
+
+multiregion_trail := {
+ "ismultiregion": {"value": true},
+ "islogging": {"value": true},
+ "cloudwatchlogsloggrouparn": {"value": log_group_arn},
+}
+
+change_alarm := {
+ "alarmname": {"value": "ConsoleLoginFailure"},
+ "metricname": {"value": "ConsoleLoginFailure"},
+ "metrics": [{"id": {"value": "ConsoleLoginFailure"}}],
+}
+
+change_metric_filter := {
+ "filterpattern": {"value": check.filter_pattern},
+ "filtername": {"value": "ConsoleLoginFailure"},
+}
+
+log_group_arn := "arn:aws:cloudwatch:us-east-1:123456789012:log-group:cloudtrail-logging"
diff --git a/checks/cloud/aws/codebuild/codebuild.go b/checks/cloud/aws/codebuild/codebuild.go
new file mode 100644
index 00000000..5ead5bb5
--- /dev/null
+++ b/checks/cloud/aws/codebuild/codebuild.go
@@ -0,0 +1 @@
+package codebuild
diff --git a/checks/cloud/aws/codebuild/enable_encryption.go b/checks/cloud/aws/codebuild/enable_encryption.go
deleted file mode 100755
index bb0fca17..00000000
--- a/checks/cloud/aws/codebuild/enable_encryption.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package codebuild
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0018",
- Provider: providers.AWSProvider,
- Service: "codebuild",
- ShortCode: "enable-encryption",
- Summary: "CodeBuild Project artifacts encryption should not be disabled",
- Impact: "CodeBuild project artifacts are unencrypted",
- Resolution: "Enable encryption for CodeBuild project artifacts",
- Explanation: `All artifacts produced by your CodeBuild project pipeline should always be encrypted`,
- Links: []string{
- "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-artifacts.html",
- "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codebuild-project.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableEncryptionGoodExamples,
- BadExamples: terraformEnableEncryptionBadExamples,
- Links: terraformEnableEncryptionLinks,
- RemediationMarkdown: terraformEnableEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableEncryptionGoodExamples,
- BadExamples: cloudFormationEnableEncryptionBadExamples,
- Links: cloudFormationEnableEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, project := range s.AWS.CodeBuild.Projects {
- if project.ArtifactSettings.EncryptionEnabled.IsFalse() {
- results.Add(
- "Encryption is not enabled for project artifacts.",
- project.ArtifactSettings.EncryptionEnabled,
- )
- } else {
- results.AddPassed(&project)
- }
-
- for _, setting := range project.SecondaryArtifactSettings {
- if setting.EncryptionEnabled.IsFalse() {
- results.Add(
- "Encryption is not enabled for secondary project artifacts.",
- setting.EncryptionEnabled,
- )
- } else {
- results.AddPassed(&setting)
- }
- }
-
- }
- return
- },
-)
diff --git a/checks/cloud/aws/codebuild/enable_encryption.rego b/checks/cloud/aws/codebuild/enable_encryption.rego
new file mode 100644
index 00000000..8c569f2e
--- /dev/null
+++ b/checks/cloud/aws/codebuild/enable_encryption.rego
@@ -0,0 +1,49 @@
+# METADATA
+# title: CodeBuild Project artifacts encryption should not be disabled
+# description: |
+# All artifacts produced by your CodeBuild project pipeline should always be encrypted
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-artifacts.html
+# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codebuild-project.html
+# custom:
+# id: AVD-AWS-0018
+# avd_id: AVD-AWS-0018
+# provider: aws
+# service: codebuild
+# severity: HIGH
+# short_code: enable-encryption
+# recommended_action: Enable encryption for CodeBuild project artifacts
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: codebuild
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/codebuild_project#encryption_disabled
+# good_examples: checks/cloud/aws/codebuild/enable_encryption.tf.go
+# bad_examples: checks/cloud/aws/codebuild/enable_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/codebuild/enable_encryption.cf.go
+# bad_examples: checks/cloud/aws/codebuild/enable_encryption.cf.go
+package builtin.aws.codebuild.aws0018
+
+import rego.v1
+
+deny contains res if {
+ some project in input.aws.codebuild.projects
+ encryptionenabled := project.artifactsettings.encryptionenabled
+ not encryptionenabled.value
+ res := result.new("Encryption is not enabled for project artifacts.", encryptionenabled)
+}
+
+deny contains res if {
+ some project in input.aws.codebuild.projects
+ some setting in project.secondaryartifactsettings
+ not setting.encryptionenabled.value
+ res := result.new("Encryption is not enabled for secondary project artifacts.", setting.encryptionenabled)
+}
diff --git a/checks/cloud/aws/codebuild/enable_encryption_test.go b/checks/cloud/aws/codebuild/enable_encryption_test.go
deleted file mode 100644
index 15493589..00000000
--- a/checks/cloud/aws/codebuild/enable_encryption_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package codebuild
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/codebuild"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableEncryption(t *testing.T) {
- tests := []struct {
- name string
- input codebuild.CodeBuild
- expected bool
- }{
- {
- name: "AWS Codebuild project with unencrypted artifact",
- input: codebuild.CodeBuild{
- Projects: []codebuild.Project{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ArtifactSettings: codebuild.ArtifactSettings{
- Metadata: trivyTypes.NewTestMetadata(),
- EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS Codebuild project with unencrypted secondary artifact",
- input: codebuild.CodeBuild{
- Projects: []codebuild.Project{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ArtifactSettings: codebuild.ArtifactSettings{
- Metadata: trivyTypes.NewTestMetadata(),
- EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- SecondaryArtifactSettings: []codebuild.ArtifactSettings{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- EncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS Codebuild with encrypted artifacts",
- input: codebuild.CodeBuild{
- Projects: []codebuild.Project{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ArtifactSettings: codebuild.ArtifactSettings{
- Metadata: trivyTypes.NewTestMetadata(),
- EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- SecondaryArtifactSettings: []codebuild.ArtifactSettings{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- EncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.CodeBuild = test.input
- results := CheckEnableEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/codebuild/enable_encryption_test.rego b/checks/cloud/aws/codebuild/enable_encryption_test.rego
new file mode 100644
index 00000000..a5ea58b1
--- /dev/null
+++ b/checks/cloud/aws/codebuild/enable_encryption_test.rego
@@ -0,0 +1,24 @@
+package builtin.aws.codebuild.aws0018_test
+
+import rego.v1
+
+import data.builtin.aws.codebuild.aws0018 as check
+import data.lib.test
+
+test_allow_artifact_settings_with_encryption if {
+ test.assert_empty(check.deny) with input as build_input({"artifactsettings": {"encryptionenabled": {"value": true}}})
+}
+
+test_allow_secondary_artifact_settings_with_encryption if {
+ test.assert_empty(check.deny) with input as build_input({"secondaryartifactsettings": [{"encryptionenabled": {"value": true}}]})
+}
+
+test_disallow_artifact_settings_without_encryption if {
+ test.assert_equal_message("Encryption is not enabled for project artifacts.", check.deny) with input as build_input({"artifactsettings": {"encryptionenabled": {"value": false}}})
+}
+
+test_disallow_secondary_artifact_settings_without_encryption if {
+ test.assert_equal_message("Encryption is not enabled for secondary project artifacts.", check.deny) with input as build_input({"secondaryartifactsettings": [{"encryptionenabled": {"value": false}}]})
+}
+
+build_input(project) := {"aws": {"codebuild": {"projects": [project]}}}
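A hypothetical extra case, not part of this change, showing that the two deny rules report independently when only the secondary artifacts are unencrypted; the package and test names below are illustrative.

package builtin.aws.codebuild.aws0018_extra_test

import rego.v1

import data.builtin.aws.codebuild.aws0018 as check

# Encrypted primary artifacts plus one unencrypted secondary artifact should
# yield exactly one finding, from the secondary-artifacts rule.
test_disallow_only_secondary_unencrypted if {
	count(check.deny) == 1 with input as {"aws": {"codebuild": {"projects": [{
		"artifactsettings": {"encryptionenabled": {"value": true}},
		"secondaryartifactsettings": [{"encryptionenabled": {"value": false}}],
	}]}}}
}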
diff --git a/checks/cloud/aws/config/aggregate_all_regions.go b/checks/cloud/aws/config/aggregate_all_regions.go
deleted file mode 100755
index c534b942..00000000
--- a/checks/cloud/aws/config/aggregate_all_regions.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package config
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckAggregateAllRegions = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0019",
- Provider: providers.AWSProvider,
- Service: "config",
- ShortCode: "aggregate-all-regions",
- Summary: "Config configuration aggregator should be using all regions for source",
- Impact: "Sources that aren't covered by the aggregator are not include in the configuration",
- Resolution: "Set the aggregator to cover all regions",
- Explanation: `The configuration aggregator should be configured with all_regions for the source.
-
-This will help limit the risk of any unmonitored configuration in regions that are thought to be unused.`,
- Links: []string{
- "https://docs.aws.amazon.com/config/latest/developerguide/aggregate-data.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformAggregateAllRegionsGoodExamples,
- BadExamples: terraformAggregateAllRegionsBadExamples,
- Links: terraformAggregateAllRegionsLinks,
- RemediationMarkdown: terraformAggregateAllRegionsRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationAggregateAllRegionsGoodExamples,
- BadExamples: cloudFormationAggregateAllRegionsBadExamples,
- Links: cloudFormationAggregateAllRegionsLinks,
- RemediationMarkdown: cloudFormationAggregateAllRegionsRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- if s.AWS.Config.ConfigurationAggregrator.Metadata.IsUnmanaged() {
- return
- }
- if s.AWS.Config.ConfigurationAggregrator.SourceAllRegions.IsFalse() {
- results.Add(
- "Configuration aggregation is not set to source from all regions.",
- s.AWS.Config.ConfigurationAggregrator.SourceAllRegions,
- )
- } else {
- results.AddPassed(s.AWS.Config.ConfigurationAggregrator.SourceAllRegions)
- }
- return
- },
-)
diff --git a/checks/cloud/aws/config/aggregate_all_regions.rego b/checks/cloud/aws/config/aggregate_all_regions.rego
new file mode 100644
index 00000000..19ebab8b
--- /dev/null
+++ b/checks/cloud/aws/config/aggregate_all_regions.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: Config configuration aggregator should be using all regions for source
+# description: |
+# The configuration aggregator should be configured with all_regions for the source.
+# This will help limit the risk of any unmonitored configuration in regions that are thought to be unused.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/config/latest/developerguide/aggregate-data.html
+# custom:
+# id: AVD-AWS-0019
+# avd_id: AVD-AWS-0019
+# provider: aws
+# service: config
+# severity: HIGH
+# short_code: aggregate-all-regions
+# recommended_action: Set the aggregator to cover all regions
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: config
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/config_configuration_aggregator#all_regions
+# good_examples: checks/cloud/aws/config/aggregate_all_regions.tf.go
+# bad_examples: checks/cloud/aws/config/aggregate_all_regions.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/config/aggregate_all_regions.cf.go
+# bad_examples: checks/cloud/aws/config/aggregate_all_regions.cf.go
+package builtin.aws.config.aws0019
+
+import rego.v1
+
+deny contains res if {
+ cfg_aggregator := input.aws.config.configurationaggregrator
+ cfg_aggregator.__defsec_metadata.managed
+ not cfg_aggregator.sourceallregions.value
+ res := result.new("Configuration aggregation is not set to source from all regions.", cfg_aggregator.sourceallregions)
+}
diff --git a/checks/cloud/aws/config/aggregate_all_regions_test.go b/checks/cloud/aws/config/aggregate_all_regions_test.go
deleted file mode 100644
index af2b6d0e..00000000
--- a/checks/cloud/aws/config/aggregate_all_regions_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package config
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/config"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckAggregateAllRegions(t *testing.T) {
- tests := []struct {
- name string
- input config.Config
- expected bool
- }{
- {
- name: "AWS Config aggregator source with all regions set to false",
- input: config.Config{
- ConfigurationAggregrator: config.ConfigurationAggregrator{
- Metadata: trivyTypes.NewTestMetadata(),
- SourceAllRegions: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- expected: true,
- },
- {
- name: "AWS Config aggregator source with all regions set to true",
- input: config.Config{
- ConfigurationAggregrator: config.ConfigurationAggregrator{
- Metadata: trivyTypes.NewTestMetadata(),
- SourceAllRegions: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Config = test.input
- results := CheckAggregateAllRegions.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckAggregateAllRegions.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/config/aggregate_all_regions_test.rego b/checks/cloud/aws/config/aggregate_all_regions_test.rego
new file mode 100644
index 00000000..854ec618
--- /dev/null
+++ b/checks/cloud/aws/config/aggregate_all_regions_test.rego
@@ -0,0 +1,20 @@
+package builtin.aws.config.aws0019_test
+
+import rego.v1
+
+import data.builtin.aws.config.aws0019 as check
+import data.lib.test
+
+test_allow_all_regions if {
+ test.assert_empty(check.deny) with input as {"aws": {"config": {"configurationaggregrator": {
+ "__defsec_metadata": {"managed": true},
+ "sourceallregions": {"value": true},
+ }}}}
+}
+
+test_disallow_all_regions if {
+ test.assert_equal_message("Configuration aggregation is not set to source from all regions.", check.deny) with input as {"aws": {"config": {"configurationaggregrator": {
+ "__defsec_metadata": {"managed": true},
+ "sourceallregions": {"value": false},
+ }}}}
+}
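The tests above only exercise managed aggregators. A hypothetical extra case, not part of this change, showing that the __defsec_metadata.managed guard skips unmanaged aggregators, mirroring the IsUnmanaged early return in the removed Go check; the package and test names are illustrative.

package builtin.aws.config.aws0019_extra_test

import rego.v1

import data.builtin.aws.config.aws0019 as check

# An aggregator that is not managed by the scanned configuration produces no findings.
test_ignore_unmanaged_aggregator if {
	count(check.deny) == 0 with input as {"aws": {"config": {"configurationaggregrator": {
		"__defsec_metadata": {"managed": false},
		"sourceallregions": {"value": false},
	}}}}
}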
diff --git a/checks/cloud/aws/config/config.go b/checks/cloud/aws/config/config.go
new file mode 100644
index 00000000..d912156b
--- /dev/null
+++ b/checks/cloud/aws/config/config.go
@@ -0,0 +1 @@
+package config
diff --git a/checks/cloud/aws/documentdb/documentdb.go b/checks/cloud/aws/documentdb/documentdb.go
new file mode 100644
index 00000000..ed583021
--- /dev/null
+++ b/checks/cloud/aws/documentdb/documentdb.go
@@ -0,0 +1 @@
+package documentdb
diff --git a/checks/cloud/aws/documentdb/enable_log_export.go b/checks/cloud/aws/documentdb/enable_log_export.go
deleted file mode 100755
index 47d41f38..00000000
--- a/checks/cloud/aws/documentdb/enable_log_export.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package documentdb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableLogExport = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0020",
- Provider: providers.AWSProvider,
- Service: "documentdb",
- ShortCode: "enable-log-export",
- Summary: "DocumentDB logs export should be enabled",
- Impact: "Limited visibility of audit trail for changes to the DocumentDB",
- Resolution: "Enable export logs",
- Explanation: `Document DB does not have auditing by default. To ensure that you are able to accurately audit the usage of your DocumentDB cluster you should enable export logs.`,
- Links: []string{
- "https://docs.aws.amazon.com/documentdb/latest/developerguide/event-auditing.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableLogExportGoodExamples,
- BadExamples: terraformEnableLogExportBadExamples,
- Links: terraformEnableLogExportLinks,
- RemediationMarkdown: terraformEnableLogExportRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableLogExportGoodExamples,
- BadExamples: cloudFormationEnableLogExportBadExamples,
- Links: cloudFormationEnableLogExportLinks,
- RemediationMarkdown: cloudFormationEnableLogExportRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.DocumentDB.Clusters {
- var hasAudit bool
- var hasProfiler bool
-
- for _, log := range cluster.EnabledLogExports {
- if log.EqualTo(documentdb.LogExportAudit) {
- hasAudit = true
- }
- if log.EqualTo(documentdb.LogExportProfiler) {
- hasProfiler = true
- }
- }
- if !hasAudit && !hasProfiler {
- results.Add(
- "Neither CloudWatch audit nor profiler log exports are enabled.",
- &cluster,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/documentdb/enable_log_export.rego b/checks/cloud/aws/documentdb/enable_log_export.rego
new file mode 100644
index 00000000..ca93f2b3
--- /dev/null
+++ b/checks/cloud/aws/documentdb/enable_log_export.rego
@@ -0,0 +1,49 @@
+# METADATA
+# title: DocumentDB logs export should be enabled
+# description: |
+# DocumentDB does not have auditing by default. To ensure that you are able to accurately audit the usage of your DocumentDB cluster, you should enable export logs.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/documentdb/latest/developerguide/event-auditing.html
+# custom:
+# id: AVD-AWS-0020
+# avd_id: AVD-AWS-0020
+# provider: aws
+# service: documentdb
+# severity: MEDIUM
+# short_code: enable-log-export
+# recommended_action: Enable export logs
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: documentdb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#enabled_cloudwatch_logs_exports
+# good_examples: checks/cloud/aws/documentdb/enable_log_export.tf.go
+# bad_examples: checks/cloud/aws/documentdb/enable_log_export.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/documentdb/enable_log_export.cf.go
+# bad_examples: checks/cloud/aws/documentdb/enable_log_export.cf.go
+package builtin.aws.documentdb.aws0020
+
+import rego.v1
+
+log_export_audit := "audit"
+
+log_export_profiler := "profiler"
+
+deny contains res if {
+ some cluster in input.aws.documentdb.clusters
+ not export_audit_or_profiler(cluster)
+ res := result.new("Neither CloudWatch audit nor profiler log exports are enabled.", cluster)
+}
+
+export_audit_or_profiler(cluster) if {
+ some log in cluster.enabledlogexports
+ log.value in [log_export_audit, log_export_profiler]
+}
diff --git a/checks/cloud/aws/documentdb/enable_log_export_test.go b/checks/cloud/aws/documentdb/enable_log_export_test.go
deleted file mode 100644
index 9fd21b5a..00000000
--- a/checks/cloud/aws/documentdb/enable_log_export_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package documentdb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableLogExport(t *testing.T) {
- tests := []struct {
- name string
- input documentdb.DocumentDB
- expected bool
- }{
- {
- name: "DocDB Cluster not exporting logs",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- EnabledLogExports: []trivyTypes.StringValue{
- trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "DocDB Cluster exporting audit logs",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- EnabledLogExports: []trivyTypes.StringValue{
- trivyTypes.String(documentdb.LogExportAudit, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "DocDB Cluster exporting profiler logs",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- EnabledLogExports: []trivyTypes.StringValue{
- trivyTypes.String(documentdb.LogExportProfiler, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.DocumentDB = test.input
- results := CheckEnableLogExport.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableLogExport.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/documentdb/enable_log_export_test.rego b/checks/cloud/aws/documentdb/enable_log_export_test.rego
new file mode 100644
index 00000000..95cb64a1
--- /dev/null
+++ b/checks/cloud/aws/documentdb/enable_log_export_test.rego
@@ -0,0 +1,26 @@
+package builtin.aws.documentdb.aws0020_test
+
+import rego.v1
+
+import data.builtin.aws.documentdb.aws0020 as check
+import data.lib.test
+
+test_disallow_no_export_log if {
+ inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": []}]}}}
+ test.assert_equal_message("Neither CloudWatch audit nor profiler log exports are enabled.", check.deny) with input as inp
+}
+
+test_allow_export_audit if {
+ inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "audit"}]}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_export_profiler if {
+ inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "profiler"}]}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_export_mixed if {
+ inp := {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "audit"}, {"value": "profiler"}]}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
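A hypothetical extra case, not part of this change, showing that a cluster exporting only some other log type is still reported, since export_audit_or_profiler accepts only the audit and profiler values; "general" is an assumed example value and the package and test names are illustrative.

package builtin.aws.documentdb.aws0020_extra_test

import rego.v1

import data.builtin.aws.documentdb.aws0020 as check

# Exporting only a non-audit, non-profiler log type still fails the check.
test_disallow_other_export_types_only if {
	count(check.deny) == 1 with input as {"aws": {"documentdb": {"clusters": [{"enabledlogexports": [{"value": "general"}]}]}}}
}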
diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption.go b/checks/cloud/aws/documentdb/enable_storage_encryption.go
deleted file mode 100755
index ba8eb653..00000000
--- a/checks/cloud/aws/documentdb/enable_storage_encryption.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package documentdb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableStorageEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0021",
- Provider: providers.AWSProvider,
- Service: "documentdb",
- ShortCode: "enable-storage-encryption",
- Summary: "DocumentDB storage must be encrypted",
- Impact: "Unencrypted sensitive data is vulnerable to compromise.",
- Resolution: "Enable storage encryption",
- Explanation: `Encryption of the underlying storage used by DocumentDB ensures that if their is compromise of the disks, the data is still protected.`,
- Links: []string{"https://docs.aws.amazon.com/documentdb/latest/developerguide/encryption-at-rest.html"},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableStorageEncryptionGoodExamples,
- BadExamples: terraformEnableStorageEncryptionBadExamples,
- Links: terraformEnableStorageEncryptionLinks,
- RemediationMarkdown: terraformEnableStorageEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableStorageEncryptionGoodExamples,
- BadExamples: cloudFormationEnableStorageEncryptionBadExamples,
- Links: cloudFormationEnableStorageEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableStorageEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.DocumentDB.Clusters {
- if cluster.StorageEncrypted.IsFalse() {
- results.Add(
- "Cluster storage does not have encryption enabled.",
- cluster.StorageEncrypted,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption.rego b/checks/cloud/aws/documentdb/enable_storage_encryption.rego
new file mode 100644
index 00000000..8467738d
--- /dev/null
+++ b/checks/cloud/aws/documentdb/enable_storage_encryption.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: DocumentDB storage must be encrypted
+# description: |
+# Encryption of the underlying storage used by DocumentDB ensures that if there is a compromise of the disks, the data is still protected.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/documentdb/latest/developerguide/encryption-at-rest.html
+# custom:
+# id: AVD-AWS-0021
+# avd_id: AVD-AWS-0021
+# provider: aws
+# service: documentdb
+# severity: HIGH
+# short_code: enable-storage-encryption
+# recommended_action: Enable storage encryption
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: documentdb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#storage_encrypted
+# good_examples: checks/cloud/aws/documentdb/enable_storage_encryption.tf.go
+# bad_examples: checks/cloud/aws/documentdb/enable_storage_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/documentdb/enable_storage_encryption.cf.go
+# bad_examples: checks/cloud/aws/documentdb/enable_storage_encryption.cf.go
+package builtin.aws.documentdb.aws0021
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.documentdb.clusters
+ not cluster.storageencrypted.value
+ res := result.new("Cluster storage does not have encryption enabled.", cluster.storageencrypted)
+}
diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption_test.go b/checks/cloud/aws/documentdb/enable_storage_encryption_test.go
deleted file mode 100644
index 7b289cd7..00000000
--- a/checks/cloud/aws/documentdb/enable_storage_encryption_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package documentdb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableStorageEncryption(t *testing.T) {
- tests := []struct {
- name string
- input documentdb.DocumentDB
- expected bool
- }{
- {
- name: "DocDB unencrypted storage",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- StorageEncrypted: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "DocDB encrypted storage",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- StorageEncrypted: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.DocumentDB = test.input
- results := CheckEnableStorageEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableStorageEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego b/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego
new file mode 100644
index 00000000..399a2796
--- /dev/null
+++ b/checks/cloud/aws/documentdb/enable_storage_encryption_test.rego
@@ -0,0 +1,17 @@
+package builtin.aws.documentdb.aws0021_test
+
+import rego.v1
+
+import data.builtin.aws.documentdb.aws0021 as check
+import data.lib.test
+
+test_allow_with_encryption if {
+ inp := {"aws": {"documentdb": {"clusters": [{"storageencrypted": {"value": true}}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_without_encryption if {
+ inp := {"aws": {"documentdb": {"clusters": [{"storageencrypted": {"value": false}}]}}}
+
+	test.assert_equal_message("Cluster storage does not have encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/documentdb/encryption_customer_key.go b/checks/cloud/aws/documentdb/encryption_customer_key.go
deleted file mode 100755
index 4ba0ebd5..00000000
--- a/checks/cloud/aws/documentdb/encryption_customer_key.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package documentdb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEncryptionCustomerKey = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0022",
- Provider: providers.AWSProvider,
- Service: "documentdb",
- ShortCode: "encryption-customer-key",
- Summary: "DocumentDB encryption should use Customer Managed Keys",
- Impact: "Using AWS managed keys does not allow for fine grained control",
- Resolution: "Enable encryption using customer managed keys",
- Explanation: `Encryption using AWS keys provides protection for your DocumentDB underlying storage. To increase control of the encryption and manage factors like rotation use customer managed keys.`,
- Links: []string{"https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.public-key.html"},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEncryptionCustomerKeyGoodExamples,
- BadExamples: terraformEncryptionCustomerKeyBadExamples,
- Links: terraformEncryptionCustomerKeyLinks,
- RemediationMarkdown: terraformEncryptionCustomerKeyRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEncryptionCustomerKeyGoodExamples,
- BadExamples: cloudFormationEncryptionCustomerKeyBadExamples,
- Links: cloudFormationEncryptionCustomerKeyLinks,
- RemediationMarkdown: cloudFormationEncryptionCustomerKeyRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.DocumentDB.Clusters {
- if cluster.Metadata.IsManaged() && cluster.KMSKeyID.IsEmpty() {
- results.Add(
- "Cluster encryption does not use a customer-managed KMS key.",
- cluster.KMSKeyID,
- )
- } else {
- results.AddPassed(&cluster)
- }
- for _, instance := range cluster.Instances {
- if instance.Metadata.IsUnmanaged() {
- continue
- }
- if instance.KMSKeyID.IsEmpty() {
- results.Add(
- "Instance encryption does not use a customer-managed KMS key.",
- instance.KMSKeyID,
- )
- } else {
- results.AddPassed(&cluster)
- }
-
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/documentdb/encryption_customer_key.rego b/checks/cloud/aws/documentdb/encryption_customer_key.rego
new file mode 100644
index 00000000..efe8af9e
--- /dev/null
+++ b/checks/cloud/aws/documentdb/encryption_customer_key.rego
@@ -0,0 +1,49 @@
+# METADATA
+# title: DocumentDB encryption should use Customer Managed Keys
+# description: |
+# Encryption using AWS keys provides protection for your DocumentDB underlying storage. To increase control of the encryption and manage factors like rotation, use customer managed keys.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.public-key.html
+# custom:
+# id: AVD-AWS-0022
+# avd_id: AVD-AWS-0022
+# provider: aws
+# service: documentdb
+# severity: LOW
+# short_code: encryption-customer-key
+# recommended_action: Enable encryption using customer managed keys
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: documentdb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/docdb_cluster#kms_key_id
+# good_examples: checks/cloud/aws/documentdb/encryption_customer_key.tf.go
+# bad_examples: checks/cloud/aws/documentdb/encryption_customer_key.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/documentdb/encryption_customer_key.cf.go
+# bad_examples: checks/cloud/aws/documentdb/encryption_customer_key.cf.go
+package builtin.aws.documentdb.aws0022
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.documentdb.clusters
+ cluster.kmskeyid.value == ""
+
+ res := result.new("Cluster encryption does not use a customer-managed KMS key.", cluster)
+}
+
+deny contains res if {
+ some cluster in input.aws.documentdb.clusters
+ some instance in cluster.instances
+ instance.kmskeyid.value == ""
+
+	res := result.new("Instance encryption does not use a customer-managed KMS key.", instance)
+}
diff --git a/checks/cloud/aws/documentdb/encryption_customer_key_test.go b/checks/cloud/aws/documentdb/encryption_customer_key_test.go
deleted file mode 100644
index 86f1c1f2..00000000
--- a/checks/cloud/aws/documentdb/encryption_customer_key_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package documentdb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/documentdb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEncryptionCustomerKey(t *testing.T) {
- tests := []struct {
- name string
- input documentdb.DocumentDB
- expected bool
- }{
- {
- name: "DocDB Cluster encryption missing KMS key",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "DocDB Instance encryption missing KMS key",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()),
- Instances: []documentdb.Instance{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "DocDB Cluster and Instance encrypted with proper KMS keys",
- input: documentdb.DocumentDB{
- Clusters: []documentdb.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()),
- Instances: []documentdb.Instance{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("kms-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.DocumentDB = test.input
- results := CheckEncryptionCustomerKey.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEncryptionCustomerKey.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/documentdb/encryption_customer_key_test.rego b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego
new file mode 100644
index 00000000..107a4113
--- /dev/null
+++ b/checks/cloud/aws/documentdb/encryption_customer_key_test.rego
@@ -0,0 +1,30 @@
+package builtin.aws.documentdb.aws0022_test
+
+import rego.v1
+
+import data.builtin.aws.documentdb.aws0022 as check
+import data.lib.test
+
+test_allow_cluster_with_kms_key if {
+ inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": "test"}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_instance_with_kms_key if {
+ inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": "test"}}]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_cluster_without_kms_key if {
+ inp := {"aws": {"documentdb": {"clusters": [{"kmskeyid": {"value": ""}}]}}}
+
+ test.assert_equal_message("Cluster encryption does not use a customer-managed KMS key.", check.deny) with input as inp
+}
+
+test_disallow_instance_without_kms_key if {
+ inp := {"aws": {"documentdb": {"clusters": [{"instances": [{"kmskeyid": {"value": ""}}]}]}}}
+
+ test.assert_equal_message("Instance encryption does not use a customer-managed KMS key.", check.deny) with input as inp
+}
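Both deny rules compare kmskeyid.value against the empty string, so a cluster or instance whose kmskeyid attribute is absent from the input altogether is not reported; the removed Go check called IsEmpty on an adapted value, which may behave differently for missing attributes. A hypothetical case illustrating this, not part of the change; the package and test names are illustrative.

package builtin.aws.documentdb.aws0022_extra_test

import rego.v1

import data.builtin.aws.documentdb.aws0022 as check

# A cluster with no kmskeyid attribute at all yields no finding, because
# cluster.kmskeyid.value == "" is undefined for it.
test_missing_kms_key_field_not_reported if {
	count(check.deny) == 0 with input as {"aws": {"documentdb": {"clusters": [{"storageencrypted": {"value": true}}]}}}
}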
diff --git a/checks/cloud/aws/dynamodb/dynamodb.go b/checks/cloud/aws/dynamodb/dynamodb.go
new file mode 100644
index 00000000..eeacc196
--- /dev/null
+++ b/checks/cloud/aws/dynamodb/dynamodb.go
@@ -0,0 +1 @@
+package dynamodb
diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go
deleted file mode 100755
index b0cac3be..00000000
--- a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package dynamodb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableAtRestEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0023",
- Provider: providers.AWSProvider,
- Service: "dynamodb",
- ShortCode: "enable-at-rest-encryption",
- Summary: "DAX Cluster should always encrypt data at rest",
- Impact: "Data can be freely read if compromised",
- Resolution: "Enable encryption at rest for DAX Cluster",
- Explanation: `Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage.`,
- Links: []string{
- "https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DAXEncryptionAtRest.html",
- "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableAtRestEncryptionGoodExamples,
- BadExamples: terraformEnableAtRestEncryptionBadExamples,
- Links: terraformEnableAtRestEncryptionLinks,
- RemediationMarkdown: terraformEnableAtRestEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableAtRestEncryptionGoodExamples,
- BadExamples: cloudFormationEnableAtRestEncryptionBadExamples,
- Links: cloudFormationEnableAtRestEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableAtRestEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.DynamoDB.DAXClusters {
- if cluster.Metadata.IsUnmanaged() {
- continue
- }
- if cluster.ServerSideEncryption.Enabled.IsFalse() {
- results.Add(
- "DAX encryption is not enabled.",
- cluster.ServerSideEncryption.Enabled,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego
new file mode 100644
index 00000000..0bcd3628
--- /dev/null
+++ b/checks/cloud/aws/dynamodb/enable_at_rest_encryption.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: DAX Cluster should always encrypt data at rest
+# description: |
+# Amazon DynamoDB Accelerator (DAX) encryption at rest provides an additional layer of data protection by helping secure your data from unauthorized access to the underlying storage.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DAXEncryptionAtRest.html
+# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dax-cluster.html
+# custom:
+# id: AVD-AWS-0023
+# avd_id: AVD-AWS-0023
+# provider: aws
+# service: dynamodb
+# severity: HIGH
+# short_code: enable-at-rest-encryption
+# recommended_action: Enable encryption at rest for DAX Cluster
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: dynamodb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dax_cluster#server_side_encryption
+# good_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.tf.go
+# bad_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.cf.go
+# bad_examples: checks/cloud/aws/dynamodb/enable_at_rest_encryption.cf.go
+package builtin.aws.dynamodb.aws0023
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.dynamodb.daxclusters
+ cluster.serversideencryption.enabled.value == false
+ res := result.new("DAX encryption is not enabled.", cluster.serversideencryption.enabled)
+}
diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go
deleted file mode 100644
index 66c02a1b..00000000
--- a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package dynamodb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableAtRestEncryption(t *testing.T) {
- tests := []struct {
- name string
- input dynamodb.DynamoDB
- expected bool
- }{
- {
- name: "Cluster with SSE disabled",
- input: dynamodb.DynamoDB{
- DAXClusters: []dynamodb.DAXCluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ServerSideEncryption: dynamodb.ServerSideEncryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Cluster with SSE enabled",
- input: dynamodb.DynamoDB{
- DAXClusters: []dynamodb.DAXCluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ServerSideEncryption: dynamodb.ServerSideEncryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.DynamoDB = test.input
- results := CheckEnableAtRestEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAtRestEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego
new file mode 100644
index 00000000..237e2f20
--- /dev/null
+++ b/checks/cloud/aws/dynamodb/enable_at_rest_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.dynamodb.aws0023_test
+
+import rego.v1
+
+import data.builtin.aws.dynamodb.aws0023 as check
+import data.lib.test
+
+test_allow_with_encryption if {
+ inp := {"aws": {"dynamodb": {"daxclusters": [{"serversideencryption": {"enabled": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_without_encryption if {
+ inp := {"aws": {"dynamodb": {"daxclusters": [{"serversideencryption": {"enabled": {"value": false}}}]}}}
+
+ test.assert_equal_message("DAX encryption is not enabled.", check.deny) with input as inp
+}
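Because the deny rule compares enabled.value to false explicitly, a DAX cluster with no serversideencryption block in the input produces no finding rather than a failure. A hypothetical case illustrating this behaviour, not part of the change; the package and test names are illustrative.

package builtin.aws.dynamodb.aws0023_extra_test

import rego.v1

import data.builtin.aws.dynamodb.aws0023 as check

# A cluster without any server-side encryption settings yields no finding.
test_missing_sse_block_not_reported if {
	count(check.deny) == 0 with input as {"aws": {"dynamodb": {"daxclusters": [{}]}}}
}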
diff --git a/checks/cloud/aws/dynamodb/enable_recovery.go b/checks/cloud/aws/dynamodb/enable_recovery.go
deleted file mode 100755
index 8fa5e687..00000000
--- a/checks/cloud/aws/dynamodb/enable_recovery.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package dynamodb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableRecovery = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0024",
- Provider: providers.AWSProvider,
- Service: "dynamodb",
- ShortCode: "enable-recovery",
- Summary: "Point in time recovery should be enabled to protect DynamoDB table",
- Impact: "Accidental or malicious writes and deletes can't be rolled back",
- Resolution: "Enable point in time recovery",
- Explanation: `DynamoDB tables should be protected against accidentally or malicious write/delete actions by ensuring that there is adequate protection.
-
-By enabling point-in-time-recovery you can restore to a known point in the event of loss of data.`,
- Links: []string{
- "https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableRecoveryGoodExamples,
- BadExamples: terraformEnableRecoveryBadExamples,
- Links: terraformEnableRecoveryLinks,
- RemediationMarkdown: terraformEnableRecoveryRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.DynamoDB.DAXClusters {
- if cluster.Metadata.IsUnmanaged() {
- continue
- }
- if cluster.PointInTimeRecovery.IsFalse() {
- results.Add(
- "Point-in-time recovery is not enabled.",
- cluster.PointInTimeRecovery,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- for _, table := range s.AWS.DynamoDB.Tables {
- if table.Metadata.IsUnmanaged() {
- continue
- }
- if table.PointInTimeRecovery.IsFalse() {
- results.Add(
- "Point-in-time recovery is not enabled.",
- table.PointInTimeRecovery,
- )
- } else {
- results.AddPassed(&table)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/dynamodb/enable_recovery.rego b/checks/cloud/aws/dynamodb/enable_recovery.rego
new file mode 100644
index 00000000..12d89903
--- /dev/null
+++ b/checks/cloud/aws/dynamodb/enable_recovery.rego
@@ -0,0 +1,46 @@
+# METADATA
+# title: Point in time recovery should be enabled to protect DynamoDB table
+# description: |
+# DynamoDB tables should be protected against accidental or malicious write/delete actions by ensuring that there is adequate protection.
+# By enabling point-in-time-recovery you can restore to a known point in the event of loss of data.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/PointInTimeRecovery.html
+# custom:
+# id: AVD-AWS-0024
+# avd_id: AVD-AWS-0024
+# provider: aws
+# service: dynamodb
+# severity: MEDIUM
+# short_code: enable-recovery
+# recommended_action: Enable point in time recovery
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: dynamodb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table#point_in_time_recovery
+# good_examples: checks/cloud/aws/dynamodb/enable_recovery.tf.go
+# bad_examples: checks/cloud/aws/dynamodb/enable_recovery.tf.go
+package builtin.aws.dynamodb.aws0024
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.dynamodb.daxclusters
+ cluster.pointintimerecovery.value == false
+
+ res := result.new("Point-in-time recovery is not enabled.", cluster.pointintimerecovery)
+}
+
+deny contains res if {
+ some table in input.aws.dynamodb.tables
+ table.pointintimerecovery.value == false
+
+ res := result.new("Point-in-time recovery is not enabled.", table.pointintimerecovery)
+}
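
The recovery check keeps the Go version's two code paths as separate deny rules, one over daxclusters and one over tables, so a single evaluation can report both resource kinds. A hypothetical companion test, not part of this change, sketches that combined case using the same helpers as the other *_test.rego files.

```rego
package example.aws0024_combined_test

import rego.v1

import data.builtin.aws.dynamodb.aws0024 as check
import data.lib.test

# Hypothetical combined case, not part of this change: a DAX cluster and a
# table both missing point-in-time recovery should yield findings from both
# deny rules.
test_deny_cluster_and_table_without_recovery if {
	inp := {"aws": {"dynamodb": {
		"daxclusters": [{"pointintimerecovery": {"value": false}}],
		"tables": [{"pointintimerecovery": {"value": false}}],
	}}}

	test.assert_not_empty(check.deny) with input as inp
}
```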
diff --git a/checks/cloud/aws/dynamodb/enable_recovery_test.go b/checks/cloud/aws/dynamodb/enable_recovery_test.go
deleted file mode 100644
index 9df6d104..00000000
--- a/checks/cloud/aws/dynamodb/enable_recovery_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package dynamodb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableRecovery(t *testing.T) {
- tests := []struct {
- name string
- input dynamodb.DynamoDB
- expected bool
- }{
- {
- name: "Cluster with point in time recovery disabled",
- input: dynamodb.DynamoDB{
- DAXClusters: []dynamodb.DAXCluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- PointInTimeRecovery: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "Cluster with point in time recovery enabled",
- input: dynamodb.DynamoDB{
- DAXClusters: []dynamodb.DAXCluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- PointInTimeRecovery: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.DynamoDB = test.input
- results := CheckEnableRecovery.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableRecovery.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/dynamodb/enable_recovery_test.rego b/checks/cloud/aws/dynamodb/enable_recovery_test.rego
new file mode 100644
index 00000000..e73a8d09
--- /dev/null
+++ b/checks/cloud/aws/dynamodb/enable_recovery_test.rego
@@ -0,0 +1,30 @@
+package builtin.aws.dynamodb.aws0024_test
+
+import rego.v1
+
+import data.builtin.aws.dynamodb.aws0024 as check
+import data.lib.test
+
+test_allow_cluster_with_recovery if {
+ inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": true}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_cluster_without_recovery if {
+ inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": false}}]}}}
+
+ test.assert_equal_message("Point-in-time recovery is not enabled.", check.deny) with input as inp
+}
+
+test_allow_table_with_recovery if {
+ inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": true}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_table_without_recovery if {
+ inp := {"aws": {"dynamodb": {"tables": [{"pointintimerecovery": {"value": false}}]}}}
+
+ test.assert_equal_message("Point-in-time recovery is not enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/dynamodb/table_customer_key.go b/checks/cloud/aws/dynamodb/table_customer_key.go
deleted file mode 100755
index 643e3bdd..00000000
--- a/checks/cloud/aws/dynamodb/table_customer_key.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package dynamodb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckTableCustomerKey = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0025",
- Provider: providers.AWSProvider,
- Service: "dynamodb",
- ShortCode: "table-customer-key",
- Summary: "DynamoDB tables should use at rest encryption with a Customer Managed Key",
- Impact: "Using AWS managed keys does not allow for fine grained control",
- Resolution: "Enable server side encryption with a customer managed key",
- Explanation: `DynamoDB tables are encrypted by default using AWS managed encryption keys. To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key.`,
- Links: []string{
- "https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/EncryptionAtRest.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformTableCustomerKeyGoodExamples,
- BadExamples: terraformTableCustomerKeyBadExamples,
- Links: terraformTableCustomerKeyLinks,
- RemediationMarkdown: terraformTableCustomerKeyRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, table := range s.AWS.DynamoDB.Tables {
- if table.Metadata.IsUnmanaged() {
- continue
- }
- if table.ServerSideEncryption.Enabled.IsFalse() {
- results.Add(
- "Table encryption does not use a customer-managed KMS key.",
- table.ServerSideEncryption.KMSKeyID,
- )
- } else if table.ServerSideEncryption.KMSKeyID.IsEmpty() ||
- table.ServerSideEncryption.KMSKeyID.EqualTo(dynamodb.DefaultKMSKeyID) {
- results.Add(
- "Table encryption explicitly uses the default KMS key.",
- table.ServerSideEncryption.KMSKeyID,
- )
- } else {
- results.AddPassed(&table)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/dynamodb/table_customer_key.rego b/checks/cloud/aws/dynamodb/table_customer_key.rego
new file mode 100644
index 00000000..69b84c33
--- /dev/null
+++ b/checks/cloud/aws/dynamodb/table_customer_key.rego
@@ -0,0 +1,44 @@
+# METADATA
+# title: DynamoDB tables should use at rest encryption with a Customer Managed Key
+# description: |
+# DynamoDB tables are encrypted by default using AWS managed encryption keys. To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/EncryptionAtRest.html
+# custom:
+# id: AVD-AWS-0025
+# avd_id: AVD-AWS-0025
+# provider: aws
+# service: dynamodb
+# severity: LOW
+# short_code: table-customer-key
+# recommended_action: Enable server side encryption with a customer managed key
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: dynamodb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/dynamodb_table#server_side_encryption
+# good_examples: checks/cloud/aws/dynamodb/table_customer_key.tf.go
+# bad_examples: checks/cloud/aws/dynamodb/table_customer_key.tf.go
+package builtin.aws.dynamodb.aws0025
+
+import rego.v1
+
+deny contains res if {
+ some table in input.aws.dynamodb.tables
+ table.serversideencryption.enabled.value == false
+ res := result.new("Table encryption does not use a customer-managed KMS key.", table.serversideencryption.enabled)
+}
+
+deny contains res if {
+ some table in input.aws.dynamodb.tables
+ table.serversideencryption.enabled.value
+ table.serversideencryption.kmskeyid.value == ""
+ res := result.new("Table encryption explicitly uses the default KMS key.", table.serversideencryption.kmskeyid)
+}
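
The second rule only fires when kmskeyid is explicitly empty; the deleted Go check above additionally compared the key against dynamodb.DefaultKMSKeyID. If that behaviour is meant to carry over, a third rule along the lines of the sketch below could sit alongside the two above. The alias value is an assumption here, since the constant's value is not shown in this diff.

```rego
# Hedged sketch, not part of this change: it would sit in the same package as
# the two rules above. The alias is assumed to be the value of the
# dynamodb.DefaultKMSKeyID constant referenced by the deleted Go check.
deny contains res if {
	some table in input.aws.dynamodb.tables
	table.serversideencryption.enabled.value
	table.serversideencryption.kmskeyid.value == "alias/aws/dynamodb"

	res := result.new("Table encryption explicitly uses the default KMS key.", table.serversideencryption.kmskeyid)
}
```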
diff --git a/checks/cloud/aws/dynamodb/table_customer_key_test.go b/checks/cloud/aws/dynamodb/table_customer_key_test.go
deleted file mode 100644
index 56daa731..00000000
--- a/checks/cloud/aws/dynamodb/table_customer_key_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package dynamodb
-
-import (
- "testing"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/dynamodb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/state"
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckTableCustomerKey(t *testing.T) {
- tests := []struct {
- name string
- input dynamodb.DynamoDB
- expected bool
- }{
- {
- name: "Cluster encryption missing KMS key",
- input: dynamodb.DynamoDB{
- Tables: []dynamodb.Table{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ServerSideEncryption: dynamodb.ServerSideEncryption{
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Cluster encryption using default KMS key",
- input: dynamodb.DynamoDB{
- Tables: []dynamodb.Table{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ServerSideEncryption: dynamodb.ServerSideEncryption{
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String(dynamodb.DefaultKMSKeyID, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Cluster encryption using proper KMS key",
- input: dynamodb.DynamoDB{
- Tables: []dynamodb.Table{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ServerSideEncryption: dynamodb.ServerSideEncryption{
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "KMS key exist, but SSE is not enabled",
- input: dynamodb.DynamoDB{
- Tables: []dynamodb.Table{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ServerSideEncryption: dynamodb.ServerSideEncryption{
- Enabled: trivyTypes.BoolDefault(false, trivyTypes.NewTestMetadata()),
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.DynamoDB = test.input
- results := CheckTableCustomerKey.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckTableCustomerKey.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/dynamodb/table_customer_key_test.rego b/checks/cloud/aws/dynamodb/table_customer_key_test.rego
new file mode 100644
index 00000000..74b24f7c
--- /dev/null
+++ b/checks/cloud/aws/dynamodb/table_customer_key_test.rego
@@ -0,0 +1,42 @@
+package builtin.aws.dynamodb.aws0025_test
+
+import rego.v1
+
+import data.builtin.aws.dynamodb.aws0025 as check
+import data.lib.test
+
+test_allow_table_with_cmk if {
+ inp := {"aws": {"dynamodb": {"tables": [{
+ "name": "test",
+ "serversideencryption": {
+ "enabled": {"value": true},
+ "kmskeyid": {"value": "alias/test"},
+ },
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_table_without_cmk if {
+ inp := {"aws": {"dynamodb": {"tables": [{
+ "name": "test",
+ "serversideencryption": {
+ "enabled": {"value": true},
+ "kmskeyid": {"value": ""},
+ },
+ }]}}}
+
+ test.assert_equal_message("Table encryption explicitly uses the default KMS key.", check.deny) with input as inp
+}
+
+test_deny_table_sse_disabled if {
+ inp := {"aws": {"dynamodb": {"tables": [{
+ "name": "test",
+ "serversideencryption": {
+ "enabled": {"value": false},
+ "kmskeyid": {"value": ""},
+ },
+ }]}}}
+
+ test.assert_equal_message("Table encryption explicitly uses the default KMS key.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ecr/ecr.go b/checks/cloud/aws/ecr/ecr.go
new file mode 100644
index 00000000..b7dc0603
--- /dev/null
+++ b/checks/cloud/aws/ecr/ecr.go
@@ -0,0 +1 @@
+package ecr
diff --git a/checks/cloud/aws/ecr/enable_image_scans.go b/checks/cloud/aws/ecr/enable_image_scans.go
deleted file mode 100755
index 2cfb3e11..00000000
--- a/checks/cloud/aws/ecr/enable_image_scans.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package ecr
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableImageScans = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0030",
- Provider: providers.AWSProvider,
- Service: "ecr",
- ShortCode: "enable-image-scans",
- Summary: "ECR repository has image scans disabled.",
- Impact: "The ability to scan images is not being used and vulnerabilities will not be highlighted",
- Resolution: "Enable ECR image scanning",
- Explanation: `Repository image scans should be enabled to ensure vulnerable software can be discovered and remediated as soon as possible.`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableImageScansGoodExamples,
- BadExamples: terraformEnableImageScansBadExamples,
- Links: terraformEnableImageScansLinks,
- RemediationMarkdown: terraformEnableImageScansRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableImageScansGoodExamples,
- BadExamples: cloudFormationEnableImageScansBadExamples,
- Links: cloudFormationEnableImageScansLinks,
- RemediationMarkdown: cloudFormationEnableImageScansRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, repo := range s.AWS.ECR.Repositories {
- if repo.ImageScanning.ScanOnPush.IsFalse() {
- results.Add(
- "Image scanning is not enabled.",
- repo.ImageScanning.ScanOnPush,
- )
- } else {
- results.AddPassed(&repo)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/ecr/enable_image_scans.rego b/checks/cloud/aws/ecr/enable_image_scans.rego
new file mode 100644
index 00000000..0f906bb0
--- /dev/null
+++ b/checks/cloud/aws/ecr/enable_image_scans.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: ECR repository has image scans disabled.
+# description: |
+# Repository image scans should be enabled to ensure vulnerable software can be discovered and remediated as soon as possible.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html
+# custom:
+# id: AVD-AWS-0030
+# avd_id: AVD-AWS-0030
+# provider: aws
+# service: ecr
+# severity: HIGH
+# short_code: enable-image-scans
+# recommended_action: Enable ECR image scanning
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ecr
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository#image_scanning_configuration
+# good_examples: checks/cloud/aws/ecr/enable_image_scans.tf.go
+# bad_examples: checks/cloud/aws/ecr/enable_image_scans.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ecr/enable_image_scans.cf.go
+# bad_examples: checks/cloud/aws/ecr/enable_image_scans.cf.go
+package builtin.aws.ecr.aws0030
+
+import rego.v1
+
+deny contains res if {
+ some repo in input.aws.ecr.repositories
+ repo.imagescanning.scanonpush.value == false
+
+ res := result.new("Image scanning is not enabled", repo.imagescanning.scanonpush)
+}
diff --git a/checks/cloud/aws/ecr/enable_image_scans_test.go b/checks/cloud/aws/ecr/enable_image_scans_test.go
deleted file mode 100644
index 0cf5df66..00000000
--- a/checks/cloud/aws/ecr/enable_image_scans_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package ecr
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/ecr"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableImageScans(t *testing.T) {
- tests := []struct {
- name string
- input ecr.ECR
- expected bool
- }{
- {
- name: "ECR repository with image scans disabled",
- input: ecr.ECR{
- Repositories: []ecr.Repository{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ImageScanning: ecr.ImageScanning{
- Metadata: trivyTypes.NewTestMetadata(),
- ScanOnPush: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "ECR repository with image scans enabled",
- input: ecr.ECR{
- Repositories: []ecr.Repository{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ImageScanning: ecr.ImageScanning{
- Metadata: trivyTypes.NewTestMetadata(),
- ScanOnPush: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ECR = test.input
- results := CheckEnableImageScans.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableImageScans.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/ecr/enable_image_scans_test.rego b/checks/cloud/aws/ecr/enable_image_scans_test.rego
new file mode 100644
index 00000000..ae0c46de
--- /dev/null
+++ b/checks/cloud/aws/ecr/enable_image_scans_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.ecr.aws0030_test
+
+import rego.v1
+
+import data.builtin.aws.ecr.aws0030 as check
+import data.lib.test
+
+test_allow_image_scanning_enabled if {
+ inp := {"aws": {"ecr": {"repositories": [{"imagescanning": {"scanonpush": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_image_scanning_disabled if {
+ inp := {"aws": {"ecr": {"repositories": [{"imagescanning": {"scanonpush": {"value": false}}}]}}}
+
+ test.assert_equal_message("Image scanning is not enabled", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ecr/enforce_immutable_repository.go b/checks/cloud/aws/ecr/enforce_immutable_repository.go
deleted file mode 100755
index 6ebcfadc..00000000
--- a/checks/cloud/aws/ecr/enforce_immutable_repository.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package ecr
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnforceImmutableRepository = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0031",
- Provider: providers.AWSProvider,
- Service: "ecr",
- ShortCode: "enforce-immutable-repository",
- Summary: "ECR images tags shouldn't be mutable.",
- Impact: "Image tags could be overwritten with compromised images",
- Resolution: "Only use immutable images in ECR",
- Explanation: `ECR images should be set to IMMUTABLE to prevent code injection through image mutation.
-
-This can be done by setting image_tab_mutability to IMMUTABLE
-`,
- Links: []string{
- "https://sysdig.com/blog/toctou-tag-mutability/",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnforceImmutableRepositoryGoodExamples,
- BadExamples: terraformEnforceImmutableRepositoryBadExamples,
- Links: terraformEnforceImmutableRepositoryLinks,
- RemediationMarkdown: terraformEnforceImmutableRepositoryRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnforceImmutableRepositoryGoodExamples,
- BadExamples: cloudFormationEnforceImmutableRepositoryBadExamples,
- Links: cloudFormationEnforceImmutableRepositoryLinks,
- RemediationMarkdown: cloudFormationEnforceImmutableRepositoryRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, repo := range s.AWS.ECR.Repositories {
- if repo.ImageTagsImmutable.IsFalse() {
- results.Add(
- "Repository tags are mutable.",
- repo.ImageTagsImmutable,
- )
- } else {
- results.AddPassed(&repo)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/ecr/enforce_immutable_repository.rego b/checks/cloud/aws/ecr/enforce_immutable_repository.rego
new file mode 100644
index 00000000..96e8e0ef
--- /dev/null
+++ b/checks/cloud/aws/ecr/enforce_immutable_repository.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: ECR images tags shouldn't be mutable.
+# description: |
+# ECR images should be set to IMMUTABLE to prevent code injection through image mutation.
+# This can be done by setting image_tag_mutability to IMMUTABLE
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://sysdig.com/blog/toctou-tag-mutability/
+# custom:
+# id: AVD-AWS-0031
+# avd_id: AVD-AWS-0031
+# provider: aws
+# service: ecr
+# severity: HIGH
+# short_code: enforce-immutable-repository
+# recommended_action: Only use immutable images in ECR
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ecr
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository
+# good_examples: checks/cloud/aws/ecr/enforce_immutable_repository.tf.go
+# bad_examples: checks/cloud/aws/ecr/enforce_immutable_repository.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ecr/enforce_immutable_repository.cf.go
+# bad_examples: checks/cloud/aws/ecr/enforce_immutable_repository.cf.go
+package builtin.aws.ecr.aws0031
+
+import rego.v1
+
+deny contains res if {
+ some repo in input.aws.ecr.repositories
+ repo.imagetagsimmutable.value == false
+
+ res := result.new("Repository tags are mutable.", repo.imagetagsimmutable)
+}
diff --git a/checks/cloud/aws/ecr/enforce_immutable_repository_test.go b/checks/cloud/aws/ecr/enforce_immutable_repository_test.go
deleted file mode 100644
index 8ced0b65..00000000
--- a/checks/cloud/aws/ecr/enforce_immutable_repository_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package ecr
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/ecr"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnforceImmutableRepository(t *testing.T) {
- tests := []struct {
- name string
- input ecr.ECR
- expected bool
- }{
- {
- name: "ECR mutable image tags",
- input: ecr.ECR{
- Repositories: []ecr.Repository{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ImageTagsImmutable: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "ECR immutable image tags",
- input: ecr.ECR{
- Repositories: []ecr.Repository{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- ImageTagsImmutable: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ECR = test.input
- results := CheckEnforceImmutableRepository.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnforceImmutableRepository.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/ecr/enforce_immutable_repository_test.rego b/checks/cloud/aws/ecr/enforce_immutable_repository_test.rego
new file mode 100644
index 00000000..7068fa3d
--- /dev/null
+++ b/checks/cloud/aws/ecr/enforce_immutable_repository_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.ecr.aws0031_test
+
+import rego.v1
+
+import data.builtin.aws.ecr.aws0031 as check
+import data.lib.test
+
+test_allow_immutable_repository if {
+ inp := {"aws": {"ecr": {"repositories": [{"imagetagsimmutable": {"value": true}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_mutable_repository if {
+ inp := {"aws": {"ecr": {"repositories": [{"imagetagsimmutable": {"value": false}}]}}}
+
+ test.assert_equal_message("Repository tags are mutable.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ecr/no_public_access.go b/checks/cloud/aws/ecr/no_public_access.go
deleted file mode 100755
index dbefbaa0..00000000
--- a/checks/cloud/aws/ecr/no_public_access.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package ecr
-
-import (
- "strings"
-
- "github.com/aquasecurity/trivy/pkg/iac/severity"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/aquasecurity/trivy-checks/pkg/rules"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers"
-)
-
-var CheckNoPublicAccess = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0032",
- Provider: providers.AWSProvider,
- Service: "ecr",
- ShortCode: "no-public-access",
- Summary: "ECR repository policy must block public access",
- Impact: "Risk of potential data leakage of sensitive artifacts",
- Resolution: "Do not allow public access in the policy",
- Explanation: `Allowing public access to the ECR repository risks leaking sensitive of abusable information`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonECR/latest/public/public-repository-policies.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformNoPublicAccessGoodExamples,
- BadExamples: terraformNoPublicAccessBadExamples,
- Links: terraformNoPublicAccessLinks,
- RemediationMarkdown: terraformNoPublicAccessRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationNoPublicAccessGoodExamples,
- BadExamples: cloudFormationNoPublicAccessBadExamples,
- Links: cloudFormationNoPublicAccessLinks,
- RemediationMarkdown: cloudFormationNoPublicAccessRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, repo := range s.AWS.ECR.Repositories {
- if repo.Metadata.IsUnmanaged() {
- continue
- }
- for _, policyDocument := range repo.Policies {
- policy := policyDocument.Document.Parsed
- statements, _ := policy.Statements()
- for _, statement := range statements {
- var hasECRAction bool
- actions, _ := statement.Actions()
- for _, action := range actions {
- if strings.HasPrefix(action, "ecr:") {
- hasECRAction = true
- break
- }
- }
- if !hasECRAction {
- continue
- }
- var foundIssue bool
- principals, _ := statement.Principals()
- if all, r := principals.All(); all {
- foundIssue = true
- results.Add(
- "Policy provides public access to the ECR repository.",
- policyDocument.Document.MetadataFromIamGo(statement.Range(), r),
- )
- } else {
- accounts, r := principals.AWS()
- for _, account := range accounts {
- if account == "*" {
- foundIssue = true
- results.Add(
- "Policy provides public access to the ECR repository.",
- policyDocument.Document.MetadataFromIamGo(statement.Range(), r),
- )
- }
- continue
- }
- }
- if foundIssue {
- results.AddPassed(&repo)
- }
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/ecr/no_public_access.rego b/checks/cloud/aws/ecr/no_public_access.rego
new file mode 100644
index 00000000..c883524c
--- /dev/null
+++ b/checks/cloud/aws/ecr/no_public_access.rego
@@ -0,0 +1,57 @@
+# METADATA
+# title: ECR repository policy must block public access
+# description: |
+# Allowing public access to the ECR repository risks leaking sensitive or abusable information
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonECR/latest/public/public-repository-policies.html
+# custom:
+# id: AVD-AWS-0032
+# avd_id: AVD-AWS-0032
+# provider: aws
+# service: ecr
+# severity: HIGH
+# short_code: no-public-access
+# recommended_action: Do not allow public access in the policy
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ecr
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository_policy#policy
+# good_examples: checks/cloud/aws/ecr/no_public_access.tf.go
+# bad_examples: checks/cloud/aws/ecr/no_public_access.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ecr/no_public_access.cf.go
+# bad_examples: checks/cloud/aws/ecr/no_public_access.cf.go
+package builtin.aws.ecr.aws0032
+
+import rego.v1
+
+deny contains res if {
+ some repo in input.aws.ecr.repositories
+ some policy in repo.policies
+ value := json.unmarshal(policy.document.value)
+ some statement in value.Statement
+ has_ecr_action(statement)
+ has_public_access(statement)
+ res := result.new("Policy provides public access to the ECR repository.", policy.document)
+}
+
+has_ecr_action(statement) if {
+ some action in statement.Action
+ startswith(action, "ecr:")
+}
+
+has_public_access(statement) if {
+ statement.Principal.All
+}
+
+has_public_access(statement) if {
+ "*" in statement.Principal.AWS
+}
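
Unlike the deleted Go check, which worked on the iamgo-parsed policy, the Rego port unmarshals the raw document and iterates statement.Action with `some ... in`, which only covers the array form. IAM also allows Action (and Principal.AWS) to be a single string; if that form matters, an extra has_ecr_action definition could cover it. A hedged sketch, meant to live in the same package as the rules above:

```rego
# Hedged sketch, not part of this change: an additional has_ecr_action
# definition covering policies that declare "Action" as a single string
# rather than an array.
has_ecr_action(statement) if {
	is_string(statement.Action)
	startswith(statement.Action, "ecr:")
}
```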
diff --git a/checks/cloud/aws/ecr/no_public_access_test.go b/checks/cloud/aws/ecr/no_public_access_test.go
deleted file mode 100644
index f114083e..00000000
--- a/checks/cloud/aws/ecr/no_public_access_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package ecr
-
-import (
- "testing"
-
- "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/ecr"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/iam"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/liamg/iamgo"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckNoPublicAccess(t *testing.T) {
- tests := []struct {
- name string
- input ecr.ECR
- expected bool
- }{
- {
- name: "ECR repository policy with wildcard principal",
- input: ecr.ECR{
- Repositories: []ecr.Repository{
- {
- Metadata: types.NewTestMetadata(),
- Policies: func() []iam.Policy {
-
- sb := iamgo.NewStatementBuilder()
- sb.WithSid("new policy")
- sb.WithEffect("Allow")
- sb.WithAllPrincipals(true)
- sb.WithActions([]string{
- "ecr:GetDownloadUrlForLayer",
- "ecr:BatchGetImage",
- "ecr:BatchCheckLayerAvailability",
- "ecr:PutImage",
- "ecr:InitiateLayerUpload",
- "ecr:UploadLayerPart",
- "ecr:CompleteLayerUpload",
- "ecr:DescribeRepositories",
- "ecr:GetRepositoryPolicy",
- "ecr:ListImages",
- "ecr:DeleteRepository",
- "ecr:BatchDeleteImage",
- "ecr:SetRepositoryPolicy",
- "ecr:DeleteRepositoryPolicy",
- })
-
- builder := iamgo.NewPolicyBuilder()
- builder.WithVersion("2021-10-07")
- builder.WithStatement(sb.Build())
-
- return []iam.Policy{
- {
- Document: iam.Document{
- Metadata: types.NewTestMetadata(),
- Parsed: builder.Build(),
- },
- },
- }
- }(),
- },
- },
- },
- expected: true,
- },
- {
- name: "ECR repository policy with specific principal",
- input: ecr.ECR{
- Repositories: []ecr.Repository{
- {
- Metadata: types.NewTestMetadata(),
- Policies: func() []iam.Policy {
-
- sb := iamgo.NewStatementBuilder()
- sb.WithSid("new policy")
- sb.WithEffect("Allow")
- sb.WithAWSPrincipals([]string{"arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"})
- sb.WithActions([]string{
- "ecr:GetDownloadUrlForLayer",
- "ecr:BatchGetImage",
- "ecr:BatchCheckLayerAvailability",
- "ecr:PutImage",
- "ecr:InitiateLayerUpload",
- "ecr:UploadLayerPart",
- "ecr:CompleteLayerUpload",
- "ecr:DescribeRepositories",
- "ecr:GetRepositoryPolicy",
- "ecr:ListImages",
- "ecr:DeleteRepository",
- "ecr:BatchDeleteImage",
- "ecr:SetRepositoryPolicy",
- "ecr:DeleteRepositoryPolicy",
- })
-
- builder := iamgo.NewPolicyBuilder()
- builder.WithVersion("2021-10-07")
- builder.WithStatement(sb.Build())
-
- return []iam.Policy{
- {
- Document: iam.Document{
- Metadata: types.NewTestMetadata(),
- Parsed: builder.Build(),
- },
- },
- }
- }(),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ECR = test.input
- results := CheckNoPublicAccess.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckNoPublicAccess.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/ecr/no_public_access_test.rego b/checks/cloud/aws/ecr/no_public_access_test.rego
new file mode 100644
index 00000000..475943e6
--- /dev/null
+++ b/checks/cloud/aws/ecr/no_public_access_test.rego
@@ -0,0 +1,35 @@
+package builtin.aws.ecr.aws0032_test
+
+import rego.v1
+
+import data.builtin.aws.ecr.aws0032 as check
+import data.lib.test
+
+test_allow_without_public_access if {
+ inp := {"aws": {"ecr": {"repositories": [{"policies": [{"document": {"value": json.marshal({"Statement": [{
+ "Action": ["ecr:*"],
+ "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
+ "Effect": "Allow",
+ }]})}}]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_with_public_access_all if {
+ inp := {"aws": {"ecr": {"repositories": [{"policies": [{"document": {"value": json.marshal({"Statement": [{
+ "Action": ["ecr:*"],
+ "Principal": {"All": true},
+ }]})}}]}]}}}
+
+ test.assert_equal_message("Policy provides public access to the ECR repository", check.deny) with input as inp
+}
+
+test_deny_with_public_access_any if {
+ inp := {"aws": {"ecr": {"repositories": [{"policies": [{"document": {"value": json.marshal({"Statement": [{
+ "Action": ["ecr:*"],
+ "Principal": {"AWS": ["*"]},
+ "Effect": "Allow",
+ }]})}}]}]}}}
+
+ test.assert_equal_message("Policy provides public access to the ECR repository", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ecr/repository_customer_key.rego b/checks/cloud/aws/ecr/repository_customer_key.rego
new file mode 100644
index 00000000..ecc30620
--- /dev/null
+++ b/checks/cloud/aws/ecr/repository_customer_key.rego
@@ -0,0 +1,49 @@
+# METADATA
+# title: ECR Repository should use customer managed keys to allow more control
+# description: |
+# Images in the ECR repository are encrypted by default using AWS managed encryption keys. To increase control of the encryption and control the management of factors like key rotation, use a Customer Managed Key.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html
+# custom:
+# id: AVD-AWS-0033
+# avd_id: AVD-AWS-0033
+# provider: aws
+# service: ecr
+# severity: LOW
+# short_code: repository-customer-key
+# recommended_action: Use customer managed keys
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ecr
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecr_repository#encryption_configuration
+# good_examples: checks/cloud/aws/ecr/repository_customer_key.tf.go
+# bad_examples: checks/cloud/aws/ecr/repository_customer_key.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ecr/repository_customer_key.cf.go
+# bad_examples: checks/cloud/aws/ecr/repository_customer_key.cf.go
+package builtin.aws.ecr.aws0033
+
+import rego.v1
+
+deny contains res if {
+ some repo in input.aws.ecr.repositories
+ not is_encyption_type_kms(repo.encryption.type)
+ res := result.new("Repository is not encrypted using KMS.", repo.encryption.type)
+}
+
+deny contains res if {
+ some repo in input.aws.ecr.repositories
+ is_encyption_type_kms(repo.encryption.type)
+ repo.encryption.kmskeyid.value == ""
+ res := result.new("Repository encryption does not use a customer managed KMS key.", repo.encryption.kmskeyid)
+}
+
+is_encyption_type_kms(typ) if typ.value == "KMS"
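
Because references to missing fields are undefined, a repository that omits the encryption block entirely is not reported by either rule above (the result.new argument itself is undefined). If that case should also be flagged, the first rule could instead be written with object.get and a default; the sketch below is a possible replacement for that rule rather than an addition, to avoid double reporting.

```rego
# Hedged sketch, not part of this change: a variant of the first rule that
# also catches repositories with no encryption block at all, by defaulting
# the missing path to "" and attaching the finding to the repository itself.
deny contains res if {
	some repo in input.aws.ecr.repositories
	object.get(repo, ["encryption", "type", "value"], "") != "KMS"

	res := result.new("Repository is not encrypted using KMS.", repo)
}
```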
diff --git a/checks/cloud/aws/ecr/repository_customer_key_test.rego b/checks/cloud/aws/ecr/repository_customer_key_test.rego
new file mode 100644
index 00000000..8c91a517
--- /dev/null
+++ b/checks/cloud/aws/ecr/repository_customer_key_test.rego
@@ -0,0 +1,30 @@
+package builtin.aws.ecr.aws0033_test
+
+import rego.v1
+
+import data.builtin.aws.ecr.aws0033 as check
+import data.lib.test
+
+test_allow_repo_with_kms if {
+ inp := {"aws": {"ecr": {"repositories": [{"encryption": {
+ "type": {"value": "KMS"},
+ "kmskeyid": {"value": "key"},
+ }}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_repo_without_kms_encryption if {
+ inp := {"aws": {"ecr": {"repositories": [{"encryption": {"type": {"value": "AES256"}}}]}}}
+
+ test.assert_equal_message("Repository is not encrypted using KMS.", check.deny) with input as inp
+}
+
+test_deny_repo_with_kms_encryption_without_key if {
+ inp := {"aws": {"ecr": {"repositories": [{"encryption": {
+ "type": {"value": "KMS"},
+ "kmskeyid": {"value": ""},
+ }}]}}}
+
+ test.assert_equal_message("Repository encryption does not use a customer managed KMS key.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ecs/ecs.go b/checks/cloud/aws/ecs/ecs.go
new file mode 100644
index 00000000..7f32d0d2
--- /dev/null
+++ b/checks/cloud/aws/ecs/ecs.go
@@ -0,0 +1 @@
+package ecs
diff --git a/checks/cloud/aws/ecs/enable_container_insight.go b/checks/cloud/aws/ecs/enable_container_insight.go
deleted file mode 100755
index 5cff03f2..00000000
--- a/checks/cloud/aws/ecs/enable_container_insight.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package ecs
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableContainerInsight = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0034",
- Provider: providers.AWSProvider,
- Service: "ecs",
- ShortCode: "enable-container-insight",
- Summary: "ECS clusters should have container insights enabled",
- Impact: "Not all metrics and logs may be gathered for containers when Container Insights isn't enabled",
- Resolution: "Enable Container Insights",
- Explanation: `Cloudwatch Container Insights provide more metrics and logs for container based applications and micro services.`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableContainerInsightGoodExamples,
- BadExamples: terraformEnableContainerInsightBadExamples,
- Links: terraformEnableContainerInsightLinks,
- RemediationMarkdown: terraformEnableContainerInsightRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableContainerInsightGoodExamples,
- BadExamples: cloudFormationEnableContainerInsightBadExamples,
- Links: cloudFormationEnableContainerInsightLinks,
- RemediationMarkdown: cloudFormationEnableContainerInsightRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.ECS.Clusters {
- if cluster.Settings.ContainerInsightsEnabled.IsFalse() {
- results.Add(
- "Cluster does not have container insights enabled.",
- cluster.Settings.ContainerInsightsEnabled,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/ecs/enable_container_insight.rego b/checks/cloud/aws/ecs/enable_container_insight.rego
new file mode 100644
index 00000000..7b2032cb
--- /dev/null
+++ b/checks/cloud/aws/ecs/enable_container_insight.rego
@@ -0,0 +1,43 @@
+# METADATA
+# title: ECS clusters should have container insights enabled
+# description: |
+# CloudWatch Container Insights provides more metrics and logs for container-based applications and microservices.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights.html
+# custom:
+# id: AVD-AWS-0034
+# avd_id: AVD-AWS-0034
+# provider: aws
+# service: ecs
+# severity: LOW
+# short_code: enable-container-insight
+# recommended_action: Enable Container Insights
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ecs
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_cluster#setting
+# good_examples: checks/cloud/aws/ecs/enable_container_insight.tf.go
+# bad_examples: checks/cloud/aws/ecs/enable_container_insight.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ecs/enable_container_insight.cf.go
+# bad_examples: checks/cloud/aws/ecs/enable_container_insight.cf.go
+package builtin.aws.ecs.aws0034
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.ecs.clusters
+ cluster.settings.containerinsightsenabled.value == false
+ res := result.new(
+ "Cluster does not have container insights enabled.",
+ cluster.settings.containerinsightsenabled,
+ )
+}
diff --git a/checks/cloud/aws/ecs/enable_container_insight_test.go b/checks/cloud/aws/ecs/enable_container_insight_test.go
deleted file mode 100644
index 9b22491e..00000000
--- a/checks/cloud/aws/ecs/enable_container_insight_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package ecs
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/ecs"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableContainerInsight(t *testing.T) {
- tests := []struct {
- name string
- input ecs.ECS
- expected bool
- }{
- {
- name: "Cluster with disabled container insights",
- input: ecs.ECS{
- Clusters: []ecs.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Settings: ecs.ClusterSettings{
- Metadata: trivyTypes.NewTestMetadata(),
- ContainerInsightsEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Cluster with enabled container insights",
- input: ecs.ECS{
- Clusters: []ecs.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Settings: ecs.ClusterSettings{
- Metadata: trivyTypes.NewTestMetadata(),
- ContainerInsightsEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ECS = test.input
- results := CheckEnableContainerInsight.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableContainerInsight.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/ecs/enable_container_insight_test.rego b/checks/cloud/aws/ecs/enable_container_insight_test.rego
new file mode 100644
index 00000000..98434ea9
--- /dev/null
+++ b/checks/cloud/aws/ecs/enable_container_insight_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.ecs.aws0034_test
+
+import rego.v1
+
+import data.builtin.aws.ecs.aws0034 as check
+import data.lib.test
+
+test_allow_cluster_with_container_insights if {
+ inp := {"aws": {"ecs": {"clusters": [{"settings": {"containerinsightsenabled": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_cluster_without_container_insights if {
+ inp := {"aws": {"ecs": {"clusters": [{"settings": {"containerinsightsenabled": {"value": false}}}]}}}
+
+ test.assert_equal_message("Cluster does not have container insights enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ecs/enable_in_transit_encryption.go b/checks/cloud/aws/ecs/enable_in_transit_encryption.go
deleted file mode 100755
index f1d84860..00000000
--- a/checks/cloud/aws/ecs/enable_in_transit_encryption.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package ecs
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableInTransitEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0035",
- Provider: providers.AWSProvider,
- Service: "ecs",
- ShortCode: "enable-in-transit-encryption",
- Summary: "ECS Task Definitions with EFS volumes should use in-transit encryption",
- Impact: "Intercepted traffic to and from EFS may lead to data loss",
- Resolution: "Enable in transit encryption when using efs",
- Explanation: `ECS task definitions that have volumes using EFS configuration should explicitly enable in transit encryption to prevent the risk of data loss due to interception.`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonECS/latest/userguide/efs-volumes.html",
- "https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableInTransitEncryptionGoodExamples,
- BadExamples: terraformEnableInTransitEncryptionBadExamples,
- Links: terraformEnableInTransitEncryptionLinks,
- RemediationMarkdown: terraformEnableInTransitEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableInTransitEncryptionGoodExamples,
- BadExamples: cloudFormationEnableInTransitEncryptionBadExamples,
- Links: cloudFormationEnableInTransitEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableInTransitEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, definition := range s.AWS.ECS.TaskDefinitions {
- for _, volume := range definition.Volumes {
- if volume.EFSVolumeConfiguration.TransitEncryptionEnabled.IsFalse() {
- results.Add(
- "Task definition includes a volume which does not have in-transit-encryption enabled.",
- volume.EFSVolumeConfiguration.TransitEncryptionEnabled,
- )
- } else {
- results.AddPassed(&volume)
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/ecs/enable_in_transit_encryption.rego b/checks/cloud/aws/ecs/enable_in_transit_encryption.rego
new file mode 100644
index 00000000..e01ece51
--- /dev/null
+++ b/checks/cloud/aws/ecs/enable_in_transit_encryption.rego
@@ -0,0 +1,45 @@
+# METADATA
+# title: ECS Task Definitions with EFS volumes should use in-transit encryption
+# description: |
+# ECS task definitions that have volumes using EFS configuration should explicitly enable in transit encryption to prevent the risk of data loss due to interception.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonECS/latest/userguide/efs-volumes.html
+# - https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html
+# custom:
+# id: AVD-AWS-0035
+# avd_id: AVD-AWS-0035
+# provider: aws
+# service: ecs
+# severity: HIGH
+# short_code: enable-in-transit-encryption
+# recommended_action: Enable in transit encryption when using efs
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ecs
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_task_definition#transit_encryption
+# good_examples: checks/cloud/aws/ecs/enable_in_transit_encryption.tf.go
+# bad_examples: checks/cloud/aws/ecs/enable_in_transit_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ecs/enable_in_transit_encryption.cf.go
+# bad_examples: checks/cloud/aws/ecs/enable_in_transit_encryption.cf.go
+package builtin.aws.ecs.aws0035
+
+import rego.v1
+
+deny contains res if {
+ some task_definition in input.aws.ecs.taskdefinitions
+ some volume in task_definition.volumes
+ volume.efsvolumeconfiguration.transitencryptionenabled.value == false
+ res := result.new(
+ "Task definition includes a volume which does not have in-transit-encryption enabled.",
+ volume.efsvolumeconfiguration.transitencryptionenabled,
+ )
+}
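
The rule walks every volume of every task definition, so a task definition that declares no EFS volumes is trivially compliant. A hypothetical extra test, not part of this change, pins that behaviour with the same helpers used elsewhere in this PR.

```rego
package example.aws0035_no_volumes_test

import rego.v1

import data.builtin.aws.ecs.aws0035 as check
import data.lib.test

# Hypothetical extra case, not part of this change: no volumes means nothing
# to iterate, so no findings.
test_allow_task_definition_without_volumes if {
	inp := {"aws": {"ecs": {"taskdefinitions": [{"volumes": []}]}}}

	test.assert_empty(check.deny) with input as inp
}
```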
diff --git a/checks/cloud/aws/ecs/enable_in_transit_encryption_test.go b/checks/cloud/aws/ecs/enable_in_transit_encryption_test.go
deleted file mode 100644
index 97a29f9c..00000000
--- a/checks/cloud/aws/ecs/enable_in_transit_encryption_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package ecs
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/ecs"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableInTransitEncryption(t *testing.T) {
- tests := []struct {
- name string
- input ecs.ECS
- expected bool
- }{
- {
- name: "ECS task definition unencrypted volume",
- input: ecs.ECS{
- TaskDefinitions: []ecs.TaskDefinition{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Volumes: []ecs.Volume{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- EFSVolumeConfiguration: ecs.EFSVolumeConfiguration{
- Metadata: trivyTypes.NewTestMetadata(),
- TransitEncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "ECS task definition encrypted volume",
- input: ecs.ECS{
- TaskDefinitions: []ecs.TaskDefinition{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Volumes: []ecs.Volume{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- EFSVolumeConfiguration: ecs.EFSVolumeConfiguration{
- Metadata: trivyTypes.NewTestMetadata(),
- TransitEncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ECS = test.input
- results := CheckEnableInTransitEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableInTransitEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/ecs/enable_in_transit_encryption_test.rego b/checks/cloud/aws/ecs/enable_in_transit_encryption_test.rego
new file mode 100644
index 00000000..d4541609
--- /dev/null
+++ b/checks/cloud/aws/ecs/enable_in_transit_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.ecs.aws0035_test
+
+import rego.v1
+
+import data.builtin.aws.ecs.aws0035 as check
+import data.lib.test
+
+test_allow_in_transit_encryption_enabled if {
+ inp := {"aws": {"ecs": {"taskdefinitions": [{"volumes": [{"efsvolumeconfiguration": {"transitencryptionenabled": {"value": true}}}]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_in_transit_encryption_disabled if {
+ inp := {"aws": {"ecs": {"taskdefinitions": [{"volumes": [{"efsvolumeconfiguration": {"transitencryptionenabled": {"value": false}}}]}]}}}
+
+ test.assert_equal_message("Task definition includes a volume which does not have in-transit-encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ecs/no_plaintext_secrets.rego b/checks/cloud/aws/ecs/no_plaintext_secrets.rego
new file mode 100644
index 00000000..8f5ce5ca
--- /dev/null
+++ b/checks/cloud/aws/ecs/no_plaintext_secrets.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Task definition defines sensitive environment variable(s).
+# description: |
+# You should not make secrets available to a user in plaintext in any scenario. Secrets can instead be pulled from a secure secret storage system by the service requiring them.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/systems-manager/latest/userguide/integration-ps-secretsmanager.html
+# - https://www.vaultproject.io/
+# custom:
+# id: AVD-AWS-0036
+# avd_id: AVD-AWS-0036
+# provider: aws
+# service: ecs
+# severity: CRITICAL
+# short_code: no-plaintext-secrets
+# recommended_action: Use secrets for the task definition
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ecs
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ecs_task_definition
+# good_examples: checks/cloud/aws/ecs/no_plaintext_secrets.tf.go
+# bad_examples: checks/cloud/aws/ecs/no_plaintext_secrets.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ecs/no_plaintext_secrets.cf.go
+# bad_examples: checks/cloud/aws/ecs/no_plaintext_secrets.cf.go
+package builtin.aws.ecs.aws0036
+
+import rego.v1
+
+# TODO: need to support "github.com/owenrumney/squealer/pkg/squealer"
+# deny contains res if {
+# some env in input.aws.ecs.taskdefinitions[_].containerdefinitions[_].environment
+# }
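
Until the squealer-style secret scanning referenced in the TODO is available from Rego, a simplified stand-in for AVD-AWS-0036 could flag environment variables whose names look like credentials. This is only a sketch of the intended shape: the name pattern and message are illustrative, and it assumes environment entries expose a plain name field under the lowercased property names used by the other migrated checks.

    deny contains res if {
        some task in input.aws.ecs.taskdefinitions
        some container in task.containerdefinitions
        some env in container.environment
        # crude stand-in for proper secret detection
        regex.match(`(?i)(secret|password|token|api_?key)`, env.name)
        res := result.new(
            sprintf("Container definition includes a potentially sensitive environment variable %q.", [env.name]),
            container,
        )
    }
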
diff --git a/checks/cloud/aws/ecs/no_plaintext_secrets_test.rego b/checks/cloud/aws/ecs/no_plaintext_secrets_test.rego
new file mode 100644
index 00000000..d0247752
--- /dev/null
+++ b/checks/cloud/aws/ecs/no_plaintext_secrets_test.rego
@@ -0,0 +1,6 @@
+package builtin.aws.ecs.aws0036_test
+
+import rego.v1
+
+import data.builtin.aws.ecs.aws0036 as check
+import data.lib.test
diff --git a/checks/cloud/aws/efs/efs.go b/checks/cloud/aws/efs/efs.go
new file mode 100644
index 00000000..e7882819
--- /dev/null
+++ b/checks/cloud/aws/efs/efs.go
@@ -0,0 +1 @@
+package efs
diff --git a/checks/cloud/aws/efs/enable_at_rest_encryption.rego b/checks/cloud/aws/efs/enable_at_rest_encryption.rego
new file mode 100644
index 00000000..08e4568a
--- /dev/null
+++ b/checks/cloud/aws/efs/enable_at_rest_encryption.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: EFS Encryption has not been enabled
+# description: |
+# If your organization is subject to corporate or regulatory policies that require encryption of data and metadata at rest, we recommend creating a file system that is encrypted at rest, and mounting your file system using encryption of data in transit.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/efs/latest/ug/encryption.html
+# custom:
+# id: AVD-AWS-0037
+# avd_id: AVD-AWS-0037
+# provider: aws
+# service: efs
+# severity: HIGH
+# short_code: enable-at-rest-encryption
+# recommended_action: Enable encryption for EFS
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: efs
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/efs_file_system
+# good_examples: checks/cloud/aws/efs/enable_at_rest_encryption.tf.go
+# bad_examples: checks/cloud/aws/efs/enable_at_rest_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/efs/enable_at_rest_encryption.cf.go
+# bad_examples: checks/cloud/aws/efs/enable_at_rest_encryption.cf.go
+package builtin.aws.efs.aws0037
+
+import rego.v1
+
+deny contains res if {
+ some fs in input.aws.efs.filesystems
+ fs.encrypted.value == false
+ res := result.new("File system is not encrypted.", fs.encrypted)
+}
diff --git a/checks/cloud/aws/efs/enable_at_rest_encryption_test.rego b/checks/cloud/aws/efs/enable_at_rest_encryption_test.rego
new file mode 100644
index 00000000..81930d38
--- /dev/null
+++ b/checks/cloud/aws/efs/enable_at_rest_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.efs.aws0037_test
+
+import rego.v1
+
+import data.builtin.aws.efs.aws0037 as check
+import data.lib.test
+
+test_allow_fs_encrypted if {
+ inp := {"aws": {"efs": {"filesystems": [{"encrypted": {"value": true}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_fs_unencrypted if {
+ inp := {"aws": {"efs": {"filesystems": [{"encrypted": {"value": false}}]}}}
+
+ test.assert_not_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/eks/enable_control_plane_logging.go b/checks/cloud/aws/eks/enable_control_plane_logging.go
deleted file mode 100755
index 407f92bf..00000000
--- a/checks/cloud/aws/eks/enable_control_plane_logging.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package eks
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableControlPlaneLogging = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0038",
- Provider: providers.AWSProvider,
- Service: "eks",
- ShortCode: "enable-control-plane-logging",
- Summary: "EKS Clusters should have cluster control plane logging turned on",
- Impact: "Logging provides valuable information about access and usage",
- Resolution: "Enable logging for the EKS control plane",
- Explanation: `By default cluster control plane logging is not turned on. Logging is available for audit, api, authenticator, controllerManager and scheduler. All logging should be turned on for cluster control plane.`,
- Links: []string{
- "https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableControlPlaneLoggingGoodExamples,
- BadExamples: terraformEnableControlPlaneLoggingBadExamples,
- Links: terraformEnableControlPlaneLoggingLinks,
- RemediationMarkdown: terraformEnableControlPlaneLoggingRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.EKS.Clusters {
- if cluster.Logging.API.IsFalse() {
- results.Add(
- "Control plane API logging is not enabled.",
- cluster.Logging.API,
- )
- } else {
- results.AddPassed(&cluster, "Cluster plane API logging enabled")
- }
-
- if cluster.Logging.Audit.IsFalse() {
- results.Add(
- "Control plane audit logging is not enabled.",
- cluster.Logging.Audit,
- )
- } else {
- results.AddPassed(&cluster, "Cluster plane audit logging enabled")
- }
-
- if cluster.Logging.Authenticator.IsFalse() {
- results.Add(
- "Control plane authenticator logging is not enabled.",
- cluster.Logging.Authenticator,
- )
- } else {
- results.AddPassed(&cluster, "Cluster plane authenticator logging enabled")
- }
-
- if cluster.Logging.ControllerManager.IsFalse() {
- results.Add(
- "Control plane controller manager logging is not enabled.",
- cluster.Logging.ControllerManager,
- )
- } else {
- results.AddPassed(&cluster, "Cluster plane manager logging enabled")
- }
-
- if cluster.Logging.Scheduler.IsFalse() {
- results.Add(
- "Control plane scheduler logging is not enabled.",
- cluster.Logging.Scheduler,
- )
- } else {
- results.AddPassed(&cluster, "Cluster plane scheduler logging enabled")
- }
-
- }
- return
- },
-)
diff --git a/checks/cloud/aws/eks/enable_control_plane_logging.rego b/checks/cloud/aws/eks/enable_control_plane_logging.rego
new file mode 100644
index 00000000..8276bb16
--- /dev/null
+++ b/checks/cloud/aws/eks/enable_control_plane_logging.rego
@@ -0,0 +1,61 @@
+# METADATA
+# title: EKS Clusters should have cluster control plane logging turned on
+# description: |
+# By default cluster control plane logging is not turned on. Logging is available for audit, api, authenticator, controllerManager and scheduler. All logging should be turned on for cluster control plane.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+# custom:
+# id: AVD-AWS-0038
+# avd_id: AVD-AWS-0038
+# provider: aws
+# service: eks
+# severity: MEDIUM
+# short_code: enable-control-plane-logging
+# recommended_action: Enable logging for the EKS control plane
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: eks
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster#enabled_cluster_log_types
+# good_examples: checks/cloud/aws/eks/enable_control_plane_logging.tf.go
+# bad_examples: checks/cloud/aws/eks/enable_control_plane_logging.tf.go
+package builtin.aws.eks.aws0038
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.logging.api.value == false
+ res := result.new("Control plane API logging is not enabled.", cluster.logging.api)
+}
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.logging.audit.value == false
+ res := result.new("Control plane audit logging is not enabled.", cluster.logging.audit)
+}
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.logging.authenticator.value == false
+ res := result.new("Control plane authenticator logging is not enabled.", cluster.logging.authenticator)
+}
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.logging.controllermanager.value == false
+ res := result.new("Control plane controller manager logging is not enabled.", cluster.logging.controllermanager)
+}
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.logging.scheduler.value == false
+ res := result.new("Control plane scheduler logging is not enabled.", cluster.logging.scheduler)
+}
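
The five deny rules above differ only in which log type they inspect, so they could also be written as a single data-driven rule. A sketch of that alternative is below; the message wording is simplified, whereas the per-type rules above keep the original messages exactly.

    log_types := {"api", "audit", "authenticator", "controllermanager", "scheduler"}

    deny contains res if {
        some cluster in input.aws.eks.clusters
        some log_type in log_types
        cluster.logging[log_type].value == false
        res := result.new(
            sprintf("Control plane %s logging is not enabled.", [log_type]),
            cluster.logging[log_type],
        )
    }
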
diff --git a/checks/cloud/aws/eks/enable_control_plane_logging_test.go b/checks/cloud/aws/eks/enable_control_plane_logging_test.go
deleted file mode 100644
index 324735fc..00000000
--- a/checks/cloud/aws/eks/enable_control_plane_logging_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package eks
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/eks"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableControlPlaneLogging(t *testing.T) {
- tests := []struct {
- name string
- input eks.EKS
- expected bool
- }{
- {
- name: "EKS cluster with all cluster logging disabled",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Logging: eks.Logging{
- API: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- Audit: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- Authenticator: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- ControllerManager: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- Scheduler: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "EKS cluster with only some cluster logging enabled",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Logging: eks.Logging{
- API: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- Audit: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Authenticator: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- ControllerManager: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Scheduler: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "EKS cluster with all cluster logging enabled",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Logging: eks.Logging{
- API: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Audit: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Authenticator: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- ControllerManager: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- Scheduler: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.EKS = test.input
- results := CheckEnableControlPlaneLogging.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableControlPlaneLogging.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/eks/enable_control_plane_logging_test.rego b/checks/cloud/aws/eks/enable_control_plane_logging_test.rego
new file mode 100644
index 00000000..1b8ff2b9
--- /dev/null
+++ b/checks/cloud/aws/eks/enable_control_plane_logging_test.rego
@@ -0,0 +1,39 @@
+package builtin.aws.eks.aws0038_test
+
+import rego.v1
+
+import data.builtin.aws.eks.aws0038 as check
+import data.lib.test
+
+test_allow_all_logging_enabled if {
+	inp := {"aws": {"eks": {"clusters": [{"logging": {
+		"api": {"value": true},
+		"audit": {"value": true},
+		"authenticator": {"value": true},
+		"controllermanager": {"value": true},
+		"scheduler": {"value": true},
+	}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_all_logging_disabled if {
+	inp := {"aws": {"eks": {"clusters": [{"logging": {
+		"api": {"value": false},
+		"audit": {"value": false},
+		"authenticator": {"value": false},
+		"controllermanager": {"value": false},
+		"scheduler": {"value": false},
+	}}]}}}
+
+ test.assert_not_empty(check.deny) with input as inp
+}
+
+test_deny_one_logging_disabled if {
+ inp := {"aws": {"eks": {"clusters": [{"logging": {
+ "audit": {"value": false},
+ "authenticator": {"value": true},
+ "controllermanager": {"value": true},
+ "scheduler": {"value": true},
+ }}]}}}
+
+ test.assert_equal_message("Control plane audit logging is not enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/eks/encrypt_secrets.go b/checks/cloud/aws/eks/encrypt_secrets.go
deleted file mode 100755
index 809090bd..00000000
--- a/checks/cloud/aws/eks/encrypt_secrets.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package eks
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEncryptSecrets = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0039",
- Provider: providers.AWSProvider,
- Service: "eks",
- ShortCode: "encrypt-secrets",
- Summary: "EKS should have the encryption of secrets enabled",
- Impact: "EKS secrets could be read if compromised",
- Resolution: "Enable encryption of EKS secrets",
- Explanation: `EKS cluster resources should have the encryption_config block set with protection of the secrets resource.`,
- Links: []string{
- "https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEncryptSecretsGoodExamples,
- BadExamples: terraformEncryptSecretsBadExamples,
- Links: terraformEncryptSecretsLinks,
- RemediationMarkdown: terraformEncryptSecretsRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEncryptSecretsGoodExamples,
- BadExamples: cloudFormationEncryptSecretsBadExamples,
- Links: cloudFormationEncryptSecretsLinks,
- RemediationMarkdown: cloudFormationEncryptSecretsRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.EKS.Clusters {
- if cluster.Encryption.Secrets.IsFalse() {
- results.Add(
- "Cluster does not have secret encryption enabled.",
- cluster.Encryption.Secrets,
- )
- } else if cluster.Encryption.KMSKeyID.IsEmpty() {
- results.Add(
- "Cluster encryption requires a KMS key ID, which is missing",
- cluster.Encryption.KMSKeyID,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/eks/encrypt_secrets.rego b/checks/cloud/aws/eks/encrypt_secrets.rego
new file mode 100644
index 00000000..7a8c6aa0
--- /dev/null
+++ b/checks/cloud/aws/eks/encrypt_secrets.rego
@@ -0,0 +1,47 @@
+# METADATA
+# title: EKS should have the encryption of secrets enabled
+# description: |
+# EKS cluster resources should have the encryption_config block set with protection of the secrets resource.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://aws.amazon.com/about-aws/whats-new/2020/03/amazon-eks-adds-envelope-encryption-for-secrets-with-aws-kms/
+# custom:
+# id: AVD-AWS-0039
+# avd_id: AVD-AWS-0039
+# provider: aws
+# service: eks
+# severity: HIGH
+# short_code: encrypt-secrets
+# recommended_action: Enable encryption of EKS secrets
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: eks
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster#encryption_config
+# good_examples: checks/cloud/aws/eks/encrypt_secrets.tf.go
+# bad_examples: checks/cloud/aws/eks/encrypt_secrets.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/eks/encrypt_secrets.cf.go
+# bad_examples: checks/cloud/aws/eks/encrypt_secrets.cf.go
+package builtin.aws.eks.aws0039
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.encryption.secrets.value == false
+ res := result.new("Cluster does not have secret encryption enabled.", cluster.encryption.secrets)
+}
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.encryption.secrets.value == true
+ cluster.encryption.kmskeyid.value == ""
+ res := result.new("Cluster encryption requires a KMS key ID, which is missing", cluster.encryption.kmskeyid)
+}
diff --git a/checks/cloud/aws/eks/encrypt_secrets_test.go b/checks/cloud/aws/eks/encrypt_secrets_test.go
deleted file mode 100644
index 38534f85..00000000
--- a/checks/cloud/aws/eks/encrypt_secrets_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package eks
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/eks"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEncryptSecrets(t *testing.T) {
- tests := []struct {
- name string
- input eks.EKS
- expected bool
- }{
- {
- name: "EKS Cluster with no secrets in the resources attribute",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: eks.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Secrets: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "EKS Cluster with secrets in the resources attribute but no KMS key",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: eks.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Secrets: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "EKS Cluster with secrets in the resources attribute and a KMS key",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: eks.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Secrets: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("some-arn", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.EKS = test.input
- results := CheckEncryptSecrets.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEncryptSecrets.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/eks/encrypt_secrets_test.rego b/checks/cloud/aws/eks/encrypt_secrets_test.rego
new file mode 100644
index 00000000..53b8bc74
--- /dev/null
+++ b/checks/cloud/aws/eks/encrypt_secrets_test.rego
@@ -0,0 +1,33 @@
+package builtin.aws.eks.aws0039_test
+
+import rego.v1
+
+import data.builtin.aws.eks.aws0039 as check
+import data.lib.test
+
+test_deny_without_secrets_and_kms if {
+ inp := {"aws": {"eks": {"clusters": [{"encryption": {
+ "kmskeyid": {"value": ""},
+ "secrets": {"value": false},
+ }}]}}}
+
+ test.assert_equal_message("Cluster does not have secret encryption enabled.", check.deny) with input as inp
+}
+
+test_deny_with_secrets_but_no_kms if {
+ inp := {"aws": {"eks": {"clusters": [{"encryption": {
+ "kmskeyid": {"value": ""},
+ "secrets": {"value": true},
+ }}]}}}
+
+ test.assert_equal_message("Cluster encryption requires a KMS key ID, which is missing", check.deny) with input as inp
+}
+
+test_allow_with_secrets_and_kms if {
+ inp := {"aws": {"eks": {"clusters": [{"encryption": {
+ "kmskeyid": {"value": "test"},
+ "secrets": {"value": true},
+ }}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/eks/no_public_cluster_access.go b/checks/cloud/aws/eks/no_public_cluster_access.go
deleted file mode 100755
index 6c406503..00000000
--- a/checks/cloud/aws/eks/no_public_cluster_access.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package eks
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckNoPublicClusterAccess = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0040",
- Provider: providers.AWSProvider,
- Service: "eks",
- ShortCode: "no-public-cluster-access",
- Summary: "EKS Clusters should have the public access disabled",
- Impact: "EKS can be access from the internet",
- Resolution: "Don't enable public access to EKS Clusters",
- Explanation: `EKS clusters are available publicly by default, this should be explicitly disabled in the vpc_config of the EKS cluster resource.`,
- Links: []string{
- "https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformNoPublicClusterAccessGoodExamples,
- BadExamples: terraformNoPublicClusterAccessBadExamples,
- Links: terraformNoPublicClusterAccessLinks,
- RemediationMarkdown: terraformNoPublicClusterAccessRemediationMarkdown,
- },
- Severity: severity.Critical,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.EKS.Clusters {
- if cluster.PublicAccessEnabled.IsTrue() {
- results.Add(
- "Public cluster access is enabled.",
- cluster.PublicAccessEnabled,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/eks/no_public_cluster_access.rego b/checks/cloud/aws/eks/no_public_cluster_access.rego
new file mode 100644
index 00000000..d2b30708
--- /dev/null
+++ b/checks/cloud/aws/eks/no_public_cluster_access.rego
@@ -0,0 +1,37 @@
+# METADATA
+# title: EKS Clusters should have the public access disabled
+# description: |
+# EKS clusters are available publicly by default, this should be explicitly disabled in the vpc_config of the EKS cluster resource.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html
+# custom:
+# id: AVD-AWS-0040
+# avd_id: AVD-AWS-0040
+# provider: aws
+# service: eks
+# severity: CRITICAL
+# short_code: no-public-cluster-access
+# recommended_action: Don't enable public access to EKS Clusters
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: eks
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster#endpoint_public_access
+# good_examples: checks/cloud/aws/eks/no_public_cluster_access.tf.go
+# bad_examples: checks/cloud/aws/eks/no_public_cluster_access.tf.go
+package builtin.aws.eks.aws0040
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.eks.clusters
+ cluster.publicaccessenabled.value == true
+ res := result.new("Public cluster access is enabled.", cluster.publicaccessenabled)
+}
diff --git a/checks/cloud/aws/eks/no_public_cluster_access_test.go b/checks/cloud/aws/eks/no_public_cluster_access_test.go
deleted file mode 100644
index 5d5c8f9d..00000000
--- a/checks/cloud/aws/eks/no_public_cluster_access_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package eks
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/eks"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckNoPublicClusterAccess(t *testing.T) {
- tests := []struct {
- name string
- input eks.EKS
- expected bool
- }{
- {
- name: "EKS Cluster with public access enabled",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- PublicAccessEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "EKS Cluster with public access disabled",
- input: eks.EKS{
- Clusters: []eks.Cluster{
- {
- PublicAccessEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.EKS = test.input
- results := CheckNoPublicClusterAccess.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckNoPublicClusterAccess.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/eks/no_public_cluster_access_test.rego b/checks/cloud/aws/eks/no_public_cluster_access_test.rego
new file mode 100644
index 00000000..5b910e62
--- /dev/null
+++ b/checks/cloud/aws/eks/no_public_cluster_access_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.eks.aws0040_test
+
+import rego.v1
+
+import data.builtin.aws.eks.aws0040 as check
+import data.lib.test
+
+test_deny_public_access_enabled if {
+ inp := {"aws": {"eks": {"clusters": [{"publicaccessenabled": {"value": true}}]}}}
+
+	test.assert_equal_message("Public cluster access is enabled.", check.deny) with input as inp
+}
+
+test_allow_public_access_disabled if {
+ inp := {"aws": {"eks": {"clusters": [{"publicaccessenabled": {"value": false}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/eks/no_public_cluster_access_to_cidr.rego b/checks/cloud/aws/eks/no_public_cluster_access_to_cidr.rego
new file mode 100644
index 00000000..161f3a16
--- /dev/null
+++ b/checks/cloud/aws/eks/no_public_cluster_access_to_cidr.rego
@@ -0,0 +1,38 @@
+# METADATA
+# title: EKS cluster should not have open CIDR range for public access
+# description: |
+#   EKS Clusters have public access CIDRs set to 0.0.0.0/0 by default, which is wide open to the internet. This should be explicitly set to a more specific private CIDR range.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/eks/latest/userguide/create-public-private-vpc.html
+# custom:
+# id: AVD-AWS-0041
+# avd_id: AVD-AWS-0041
+# provider: aws
+# service: eks
+# severity: CRITICAL
+# short_code: no-public-cluster-access-to-cidr
+# recommended_action: Don't enable public access to EKS Clusters
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: eks
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_cluster#vpc_config
+# good_examples: checks/cloud/aws/eks/no_public_cluster_access_to_cidr.tf.go
+# bad_examples: checks/cloud/aws/eks/no_public_cluster_access_to_cidr.tf.go
+package builtin.aws.eks.aws0041
+
+import rego.v1
+
+# TODO: add support for "github.com/aquasecurity/trivy-checks/internal/cidr"
+# deny contains res if {
+# some cluster in input.aws.eks.clusters
+#   cluster.publicaccessenabled.value == true
+#   some cidr in cluster.publicaccesscidrs
+# }
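
Even before the cidr helper mentioned in the TODO is exposed to Rego, an interim rule could at least flag the default wide-open range. This is a sketch only, using the lowercased field names assumed elsewhere in this migration; the full port of AVD-AWS-0041 still needs proper CIDR comparison to catch broader-than-necessary ranges.

    deny contains res if {
        some cluster in input.aws.eks.clusters
        cluster.publicaccessenabled.value == true
        some cidr in cluster.publicaccesscidrs
        # only the literal default is caught here; real CIDR maths is still needed
        cidr.value == "0.0.0.0/0"
        res := result.new("Cluster allows access from a public CIDR: 0.0.0.0/0.", cidr)
    }
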
diff --git a/checks/cloud/aws/eks/no_public_cluster_access_to_cidr_test.rego b/checks/cloud/aws/eks/no_public_cluster_access_to_cidr_test.rego
new file mode 100644
index 00000000..8d1d5388
--- /dev/null
+++ b/checks/cloud/aws/eks/no_public_cluster_access_to_cidr_test.rego
@@ -0,0 +1,6 @@
+package builtin.aws.eks.aws0041_test
+
+import rego.v1
+
+import data.builtin.aws.eks.aws0041 as check
+import data.lib.test
diff --git a/checks/cloud/aws/elasticache/add_description_for_security_group.go b/checks/cloud/aws/elasticache/add_description_for_security_group.go
deleted file mode 100755
index afb000e1..00000000
--- a/checks/cloud/aws/elasticache/add_description_for_security_group.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package elasticache
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckAddDescriptionForSecurityGroup = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0049",
- Provider: providers.AWSProvider,
- Service: "elasticache",
- ShortCode: "add-description-for-security-group",
- Summary: "Missing description for security group/security group rule.",
- Impact: "Descriptions provide context for the firewall rule reasons",
- Resolution: "Add descriptions for all security groups and rules",
- Explanation: `Security groups and security group rules should include a description for auditing purposes.
-
-Simplifies auditing, debugging, and managing security groups.`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/SecurityGroups.Creating.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformAddDescriptionForSecurityGroupGoodExamples,
- BadExamples: terraformAddDescriptionForSecurityGroupBadExamples,
- Links: terraformAddDescriptionForSecurityGroupLinks,
- RemediationMarkdown: terraformAddDescriptionForSecurityGroupRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationAddDescriptionForSecurityGroupGoodExamples,
- BadExamples: cloudFormationAddDescriptionForSecurityGroupBadExamples,
- Links: cloudFormationAddDescriptionForSecurityGroupLinks,
- RemediationMarkdown: cloudFormationAddDescriptionForSecurityGroupRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, sg := range s.AWS.ElastiCache.SecurityGroups {
- if sg.Description.IsEmpty() {
- results.Add(
- "Security group does not have a description.",
- sg.Description,
- )
- } else {
- results.AddPassed(&sg)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticache/add_description_for_security_group.rego b/checks/cloud/aws/elasticache/add_description_for_security_group.rego
new file mode 100644
index 00000000..e6b9847e
--- /dev/null
+++ b/checks/cloud/aws/elasticache/add_description_for_security_group.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: Missing description for security group/security group rule.
+# description: |
+# Security groups and security group rules should include a description for auditing purposes.
+#   This simplifies auditing, debugging, and managing security groups.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/SecurityGroups.Creating.html
+# custom:
+# id: AVD-AWS-0049
+# avd_id: AVD-AWS-0049
+# provider: aws
+# service: elasticache
+# severity: LOW
+# short_code: add-description-for-security-group
+# recommended_action: Add descriptions for all security groups and rules
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elasticache
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_security_group#description
+# good_examples: checks/cloud/aws/elasticache/add_description_for_security_group.tf.go
+# bad_examples: checks/cloud/aws/elasticache/add_description_for_security_group.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticache/add_description_for_security_group.cf.go
+# bad_examples: checks/cloud/aws/elasticache/add_description_for_security_group.cf.go
+package builtin.aws.elasticache.aws0049
+
+import rego.v1
+
+deny contains res if {
+ some secgroup in input.aws.elasticache.securitygroups
+ secgroup.description.value == ""
+ res := result.new("Security group does not have a description.", secgroup.description)
+}
diff --git a/checks/cloud/aws/elasticache/add_description_for_security_group_test.go b/checks/cloud/aws/elasticache/add_description_for_security_group_test.go
deleted file mode 100644
index 8fb3a81f..00000000
--- a/checks/cloud/aws/elasticache/add_description_for_security_group_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package elasticache
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticache"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckAddDescriptionForSecurityGroup(t *testing.T) {
- tests := []struct {
- name string
- input elasticache.ElastiCache
- expected bool
- }{
- {
- name: "ElastiCache security group with no description provided",
- input: elasticache.ElastiCache{
- SecurityGroups: []elasticache.SecurityGroup{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Description: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "ElastiCache security group with description",
- input: elasticache.ElastiCache{
- SecurityGroups: []elasticache.SecurityGroup{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Description: trivyTypes.String("some decent description", trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ElastiCache = test.input
- results := CheckAddDescriptionForSecurityGroup.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckAddDescriptionForSecurityGroup.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticache/add_description_for_security_group_test.rego b/checks/cloud/aws/elasticache/add_description_for_security_group_test.rego
new file mode 100644
index 00000000..cd6f7c54
--- /dev/null
+++ b/checks/cloud/aws/elasticache/add_description_for_security_group_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticache.aws0049_test
+
+import rego.v1
+
+import data.builtin.aws.elasticache.aws0049 as check
+import data.lib.test
+
+test_allow_sg_with_description if {
+ inp := {"aws": {"elasticache": {"securitygroups": [{"description": {"value": "sg description"}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_sg_without_description if {
+ inp := {"aws": {"elasticache": {"securitygroups": [{"description": {"value": ""}}]}}}
+
+ test.assert_equal_message("Security group does not have a description.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticache/elasticache.go b/checks/cloud/aws/elasticache/elasticache.go
new file mode 100644
index 00000000..10fb0f2a
--- /dev/null
+++ b/checks/cloud/aws/elasticache/elasticache.go
@@ -0,0 +1 @@
+package elasticache
diff --git a/checks/cloud/aws/elasticache/enable_at_rest_encryption.go b/checks/cloud/aws/elasticache/enable_at_rest_encryption.go
deleted file mode 100755
index cbd01634..00000000
--- a/checks/cloud/aws/elasticache/enable_at_rest_encryption.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package elasticache
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableAtRestEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0045",
- Provider: providers.AWSProvider,
- Service: "elasticache",
- ShortCode: "enable-at-rest-encryption",
- Summary: "Elasticache Replication Group stores unencrypted data at-rest.",
- Impact: "At-rest data in the Replication Group could be compromised if accessed.",
- Resolution: "Enable at-rest encryption for replication group",
- Explanation: `Data stored within an Elasticache replication node should be encrypted to ensure sensitive data is kept private.`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/at-rest-encryption.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableAtRestEncryptionGoodExamples,
- BadExamples: terraformEnableAtRestEncryptionBadExamples,
- Links: terraformEnableAtRestEncryptionLinks,
- RemediationMarkdown: terraformEnableAtRestEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, group := range s.AWS.ElastiCache.ReplicationGroups {
- if group.AtRestEncryptionEnabled.IsFalse() {
- results.Add(
- "Replication group does not have at-rest encryption enabled.",
- group.AtRestEncryptionEnabled,
- )
- } else {
- results.AddPassed(&group)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticache/enable_at_rest_encryption.rego b/checks/cloud/aws/elasticache/enable_at_rest_encryption.rego
new file mode 100644
index 00000000..01b3bca5
--- /dev/null
+++ b/checks/cloud/aws/elasticache/enable_at_rest_encryption.rego
@@ -0,0 +1,37 @@
+# METADATA
+# title: Elasticache Replication Group stores unencrypted data at-rest.
+# description: |
+# Data stored within an Elasticache replication node should be encrypted to ensure sensitive data is kept private.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/at-rest-encryption.html
+# custom:
+# id: AVD-AWS-0045
+# avd_id: AVD-AWS-0045
+# provider: aws
+# service: elasticache
+# severity: HIGH
+# short_code: enable-at-rest-encryption
+# recommended_action: Enable at-rest encryption for replication group
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elasticache
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#at_rest_encryption_enabled
+# good_examples: checks/cloud/aws/elasticache/enable_at_rest_encryption.tf.go
+# bad_examples: checks/cloud/aws/elasticache/enable_at_rest_encryption.tf.go
+package builtin.aws.elasticache.aws0045
+
+import rego.v1
+
+deny contains res if {
+ some group in input.aws.elasticache.replicationgroups
+ group.atrestencryptionenabled.value == false
+ res := result.new("Replication group does not have at-rest encryption enabled.", group.atrestencryptionenabled)
+}
diff --git a/checks/cloud/aws/elasticache/enable_at_rest_encryption_test.go b/checks/cloud/aws/elasticache/enable_at_rest_encryption_test.go
deleted file mode 100644
index 41d5204b..00000000
--- a/checks/cloud/aws/elasticache/enable_at_rest_encryption_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package elasticache
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticache"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableAtRestEncryption(t *testing.T) {
- tests := []struct {
- name string
- input elasticache.ElastiCache
- expected bool
- }{
- {
- name: "ElastiCache replication group with at-rest encryption disabled",
- input: elasticache.ElastiCache{
- ReplicationGroups: []elasticache.ReplicationGroup{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- AtRestEncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "ElastiCache replication group with at-rest encryption enabled",
- input: elasticache.ElastiCache{
- ReplicationGroups: []elasticache.ReplicationGroup{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- AtRestEncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ElastiCache = test.input
- results := CheckEnableAtRestEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAtRestEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticache/enable_at_rest_encryption_test.rego b/checks/cloud/aws/elasticache/enable_at_rest_encryption_test.rego
new file mode 100644
index 00000000..be5e83df
--- /dev/null
+++ b/checks/cloud/aws/elasticache/enable_at_rest_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticache.aws0045_test
+
+import rego.v1
+
+import data.builtin.aws.elasticache.aws0045 as check
+import data.lib.test
+
+test_allow_with_encryption_enabled if {
+ inp := {"aws": {"elasticache": {"replicationgroups": [{"atrestencryptionenabled": {"value": true}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_with_encryption_disabled if {
+ inp := {"aws": {"elasticache": {"replicationgroups": [{"atrestencryptionenabled": {"value": false}}]}}}
+
+ test.assert_equal_message("Replication group does not have at-rest encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticache/enable_backup_retention.go b/checks/cloud/aws/elasticache/enable_backup_retention.go
deleted file mode 100755
index 01de6fa2..00000000
--- a/checks/cloud/aws/elasticache/enable_backup_retention.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package elasticache
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableBackupRetention = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0050",
- Provider: providers.AWSProvider,
- Service: "elasticache",
- ShortCode: "enable-backup-retention",
- Summary: "Redis cluster should have backup retention turned on",
- Impact: "Without backups of the redis cluster recovery is made difficult",
- Resolution: "Configure snapshot retention for redis cluster",
- Explanation: `Redis clusters should have a snapshot retention time to ensure that they are backed up and can be restored if required.`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-automatic.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableBackupRetentionGoodExamples,
- BadExamples: terraformEnableBackupRetentionBadExamples,
- Links: terraformEnableBackupRetentionLinks,
- RemediationMarkdown: terraformEnableBackupRetentionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableBackupRetentionGoodExamples,
- BadExamples: cloudFormationEnableBackupRetentionBadExamples,
- Links: cloudFormationEnableBackupRetentionLinks,
- RemediationMarkdown: cloudFormationEnableBackupRetentionRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, cluster := range s.AWS.ElastiCache.Clusters {
- if !cluster.Engine.EqualTo("redis") {
- continue
- }
-
- if cluster.NodeType.EqualTo("cache.t1.micro") {
- continue
- }
-
- if cluster.SnapshotRetentionLimit.EqualTo(0) {
- results.Add(
- "Cluster snapshot retention is not enabled.",
- cluster.SnapshotRetentionLimit,
- )
- } else {
- results.AddPassed(&cluster)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticache/enable_backup_retention.rego b/checks/cloud/aws/elasticache/enable_backup_retention.rego
new file mode 100644
index 00000000..2acb230c
--- /dev/null
+++ b/checks/cloud/aws/elasticache/enable_backup_retention.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: Redis cluster should have backup retention turned on
+# description: |
+# Redis clusters should have a snapshot retention time to ensure that they are backed up and can be restored if required.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-automatic.html
+# custom:
+# id: AVD-AWS-0050
+# avd_id: AVD-AWS-0050
+# provider: aws
+# service: elasticache
+# severity: MEDIUM
+# short_code: enable-backup-retention
+# recommended_action: Configure snapshot retention for redis cluster
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elasticache
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_cluster#snapshot_retention_limit
+# good_examples: checks/cloud/aws/elasticache/enable_backup_retention.tf.go
+# bad_examples: checks/cloud/aws/elasticache/enable_backup_retention.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticache/enable_backup_retention.cf.go
+# bad_examples: checks/cloud/aws/elasticache/enable_backup_retention.cf.go
+package builtin.aws.elasticache.aws0050
+
+import rego.v1
+
+deny contains res if {
+ some cluster in input.aws.elasticache.clusters
+ cluster.engine.value == "redis"
+ cluster.nodetype.value != "cache.t1.micro"
+ cluster.snapshotretentionlimit.value == 0
+ res := result.new("Cluster snapshot retention is not enabled.", cluster.snapshotretentionlimit)
+}
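
The two "continue" guards from the deleted Go implementation (non-Redis engines, and cache.t1.micro nodes, which do not support snapshots) become plain conditions in the rule body above. If the rule grows, those guards could be factored into a helper; a sketch, not part of this change:

    snapshots_supported(cluster) if {
        cluster.engine.value == "redis"
        cluster.nodetype.value != "cache.t1.micro"
    }

    deny contains res if {
        some cluster in input.aws.elasticache.clusters
        snapshots_supported(cluster)
        cluster.snapshotretentionlimit.value == 0
        res := result.new("Cluster snapshot retention is not enabled.", cluster.snapshotretentionlimit)
    }
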
diff --git a/checks/cloud/aws/elasticache/enable_backup_retention_test.go b/checks/cloud/aws/elasticache/enable_backup_retention_test.go
deleted file mode 100644
index e1c486ea..00000000
--- a/checks/cloud/aws/elasticache/enable_backup_retention_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package elasticache
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticache"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableBackupRetention(t *testing.T) {
- tests := []struct {
- name string
- input elasticache.ElastiCache
- expected bool
- }{
- {
- name: "Cluster snapshot retention days set to 0",
- input: elasticache.ElastiCache{
- Clusters: []elasticache.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Engine: trivyTypes.String("redis", trivyTypes.NewTestMetadata()),
- NodeType: trivyTypes.String("cache.m4.large", trivyTypes.NewTestMetadata()),
- SnapshotRetentionLimit: trivyTypes.Int(0, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "Cluster snapshot retention days set to 5",
- input: elasticache.ElastiCache{
- Clusters: []elasticache.Cluster{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Engine: trivyTypes.String("redis", trivyTypes.NewTestMetadata()),
- NodeType: trivyTypes.String("cache.m4.large", trivyTypes.NewTestMetadata()),
- SnapshotRetentionLimit: trivyTypes.Int(5, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ElastiCache = test.input
- results := CheckEnableBackupRetention.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableBackupRetention.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticache/enable_backup_retention_test.rego b/checks/cloud/aws/elasticache/enable_backup_retention_test.rego
new file mode 100644
index 00000000..6ab67ab9
--- /dev/null
+++ b/checks/cloud/aws/elasticache/enable_backup_retention_test.rego
@@ -0,0 +1,46 @@
+package builtin.aws.elasticache.aws0050_test
+
+import rego.v1
+
+import data.builtin.aws.elasticache.aws0050 as check
+import data.lib.test
+
+test_allow_retention_limit_greater_than_zero if {
+ inp := {"aws": {"elasticache": {"clusters": [{
+ "engine": {"value": "redis"},
+ "nodetype": {"value": "cache.t3.micro"},
+ "snapshotretentionlimit": {"value": 1},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_retention_limit_zero_but_engine_is_not_redis if {
+ inp := {"aws": {"elasticache": {"clusters": [{
+ "engine": {"value": "memcached"},
+ "nodetype": {"value": "cache.t3.micro"},
+ "snapshotretentionlimit": {"value": 0},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_retention_limit_zero_but_nodetype_is_t1micro if {
+ inp := {"aws": {"elasticache": {"clusters": [{
+ "engine": {"value": "redis"},
+ "nodetype": {"value": "cache.t1.micro"},
+ "snapshotretentionlimit": {"value": 0},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_retention_limit_zero if {
+ inp := {"aws": {"elasticache": {"clusters": [{
+ "engine": {"value": "redis"},
+ "nodetype": {"value": "cache.t3.micro"},
+ "snapshotretentionlimit": {"value": 0},
+ }]}}}
+
+ test.assert_equal_message("Cluster snapshot retention is not enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticache/enable_in_transit_encryption.go b/checks/cloud/aws/elasticache/enable_in_transit_encryption.go
deleted file mode 100755
index b5be9908..00000000
--- a/checks/cloud/aws/elasticache/enable_in_transit_encryption.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package elasticache
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableInTransitEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0051",
- Provider: providers.AWSProvider,
- Service: "elasticache",
- ShortCode: "enable-in-transit-encryption",
- Summary: "Elasticache Replication Group uses unencrypted traffic.",
- Impact: "In transit data in the Replication Group could be read if intercepted",
- Resolution: "Enable in transit encryption for replication group",
- Explanation: `Traffic flowing between Elasticache replication nodes should be encrypted to ensure sensitive data is kept private.`,
- Links: []string{
- "https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/in-transit-encryption.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableInTransitEncryptionGoodExamples,
- BadExamples: terraformEnableInTransitEncryptionBadExamples,
- Links: terraformEnableInTransitEncryptionLinks,
- RemediationMarkdown: terraformEnableInTransitEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableInTransitEncryptionGoodExamples,
- BadExamples: cloudFormationEnableInTransitEncryptionBadExamples,
- Links: cloudFormationEnableInTransitEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableInTransitEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, group := range s.AWS.ElastiCache.ReplicationGroups {
- if group.TransitEncryptionEnabled.IsFalse() {
- results.Add(
- "Replication group does not have transit encryption enabled.",
- group.TransitEncryptionEnabled,
- )
- } else {
- results.AddPassed(&group)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticache/enable_in_transit_encryption.rego b/checks/cloud/aws/elasticache/enable_in_transit_encryption.rego
new file mode 100644
index 00000000..dbb88093
--- /dev/null
+++ b/checks/cloud/aws/elasticache/enable_in_transit_encryption.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Elasticache Replication Group uses unencrypted traffic.
+# description: |
+# Traffic flowing between Elasticache replication nodes should be encrypted to ensure sensitive data is kept private.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/in-transit-encryption.html
+# custom:
+# id: AVD-AWS-0051
+# avd_id: AVD-AWS-0051
+# provider: aws
+# service: elasticache
+# severity: HIGH
+# short_code: enable-in-transit-encryption
+# recommended_action: Enable in transit encryption for replication group
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elasticache
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticache_replication_group#transit_encryption_enabled
+# good_examples: checks/cloud/aws/elasticache/enable_in_transit_encryption.tf.go
+# bad_examples: checks/cloud/aws/elasticache/enable_in_transit_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticache/enable_in_transit_encryption.cf.go
+# bad_examples: checks/cloud/aws/elasticache/enable_in_transit_encryption.cf.go
+package builtin.aws.elasticache.aws0051
+
+import rego.v1
+
+deny contains res if {
+ some group in input.aws.elasticache.replicationgroups
+ group.transitencryptionenabled.value == false
+ res := result.new("Replication group does not have transit encryption enabled.", group.transitencryptionenabled)
+}
diff --git a/checks/cloud/aws/elasticache/enable_in_transit_encryption_test.go b/checks/cloud/aws/elasticache/enable_in_transit_encryption_test.go
deleted file mode 100644
index 1a8ae0e9..00000000
--- a/checks/cloud/aws/elasticache/enable_in_transit_encryption_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package elasticache
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticache"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableInTransitEncryption(t *testing.T) {
- tests := []struct {
- name string
- input elasticache.ElastiCache
- expected bool
- }{
- {
- name: "ElastiCache replication group with in-transit encryption disabled",
- input: elasticache.ElastiCache{
- ReplicationGroups: []elasticache.ReplicationGroup{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- TransitEncryptionEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "ElastiCache replication group with in-transit encryption enabled",
- input: elasticache.ElastiCache{
- ReplicationGroups: []elasticache.ReplicationGroup{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- TransitEncryptionEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ElastiCache = test.input
- results := CheckEnableInTransitEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableInTransitEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticache/enable_in_transit_encryption_test.rego b/checks/cloud/aws/elasticache/enable_in_transit_encryption_test.rego
new file mode 100644
index 00000000..045471c6
--- /dev/null
+++ b/checks/cloud/aws/elasticache/enable_in_transit_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticache.aws0051_test
+
+import rego.v1
+
+import data.builtin.aws.elasticache.aws0051 as check
+import data.lib.test
+
+test_allow_encryption_enabled if {
+ inp := {"aws": {"elasticache": {"replicationgroups": [{"transitencryptionenabled": {"value": true}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_encryption_disabled if {
+ inp := {"aws": {"elasticache": {"replicationgroups": [{"transitencryptionenabled": {"value": false}}]}}}
+
+ test.assert_equal_message("Replication group does not have transit encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticsearch/elasticsearch.go b/checks/cloud/aws/elasticsearch/elasticsearch.go
new file mode 100644
index 00000000..c2206bc3
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/elasticsearch.go
@@ -0,0 +1 @@
+package elasticsearch
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_encryption.go b/checks/cloud/aws/elasticsearch/enable_domain_encryption.go
deleted file mode 100755
index 434d31e1..00000000
--- a/checks/cloud/aws/elasticsearch/enable_domain_encryption.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package elasticsearch
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableDomainEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0048",
- Provider: providers.AWSProvider,
- Service: "elastic-search",
- ShortCode: "enable-domain-encryption",
- Summary: "Elasticsearch domain isn't encrypted at rest.",
- Impact: "Data will be readable if compromised",
- Resolution: "Enable ElasticSearch domain encryption",
- Explanation: `You should ensure your Elasticsearch data is encrypted at rest to help prevent sensitive information from being read by unauthorised users.`,
- Links: []string{
- "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/encryption-at-rest.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableDomainEncryptionGoodExamples,
- BadExamples: terraformEnableDomainEncryptionBadExamples,
- Links: terraformEnableDomainEncryptionLinks,
- RemediationMarkdown: terraformEnableDomainEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableDomainEncryptionGoodExamples,
- BadExamples: cloudFormationEnableDomainEncryptionBadExamples,
- Links: cloudFormationEnableDomainEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableDomainEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, domain := range s.AWS.Elasticsearch.Domains {
- if domain.AtRestEncryption.Enabled.IsFalse() {
- results.Add(
- "Domain does not have at-rest encryption enabled.",
- domain.AtRestEncryption.Enabled,
- )
- } else {
- results.AddPassed(&domain)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_encryption.rego b/checks/cloud/aws/elasticsearch/enable_domain_encryption.rego
new file mode 100644
index 00000000..804626ff
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enable_domain_encryption.rego
@@ -0,0 +1,43 @@
+# METADATA
+# title: Elasticsearch domain isn't encrypted at rest.
+# description: |
+# You should ensure your Elasticsearch data is encrypted at rest to help prevent sensitive information from being read by unauthorised users.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/encryption-at-rest.html
+# custom:
+# id: AVD-AWS-0048
+# avd_id: AVD-AWS-0048
+# provider: aws
+# service: elastic-search
+# severity: HIGH
+# short_code: enable-domain-encryption
+# recommended_action: Enable ElasticSearch domain encryption
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elastic-search
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticsearch_domain#encrypt_at_rest
+# good_examples: checks/cloud/aws/elasticsearch/enable_domain_encryption.tf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enable_domain_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticsearch/enable_domain_encryption.cf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enable_domain_encryption.cf.go
+package builtin.aws.elasticsearch.aws0048
+
+import rego.v1
+
+deny contains res if {
+ some domain in input.aws.elasticsearch.domains
+ domain.atrestencryption.enabled.value == false
+ res := result.new(
+ "Domain does not have at-rest encryption enabled.",
+ domain.atrestencryption.enabled,
+ )
+}
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_encryption_test.go b/checks/cloud/aws/elasticsearch/enable_domain_encryption_test.go
deleted file mode 100644
index 6a0fd35f..00000000
--- a/checks/cloud/aws/elasticsearch/enable_domain_encryption_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package elasticsearch
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticsearch"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableDomainEncryption(t *testing.T) {
- tests := []struct {
- name string
- input elasticsearch.Elasticsearch
- expected bool
- }{
- {
- name: "Elasticsearch domain with at-rest encryption disabled",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- AtRestEncryption: elasticsearch.AtRestEncryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Elasticsearch domain with at-rest encryption enabled",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- AtRestEncryption: elasticsearch.AtRestEncryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Elasticsearch = test.input
- results := CheckEnableDomainEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableDomainEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_encryption_test.rego b/checks/cloud/aws/elasticsearch/enable_domain_encryption_test.rego
new file mode 100644
index 00000000..c1fe6733
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enable_domain_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticsearch.aws0048_test
+
+import rego.v1
+
+import data.builtin.aws.elasticsearch.aws0048 as check
+import data.lib.test
+
+test_allow_encryption_enabled if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"atrestencryption": {"enabled": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_encryption_disabled if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"atrestencryption": {"enabled": {"value": false}}}]}}}
+
+ test.assert_equal_message("Domain does not have at-rest encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_logging.go b/checks/cloud/aws/elasticsearch/enable_domain_logging.go
deleted file mode 100755
index 8b561ecf..00000000
--- a/checks/cloud/aws/elasticsearch/enable_domain_logging.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package elasticsearch
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableDomainLogging = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0042",
- Provider: providers.AWSProvider,
- Service: "elastic-search",
- ShortCode: "enable-domain-logging",
- Summary: "Domain logging should be enabled for Elastic Search domains",
- Impact: "Logging provides vital information about access and usage",
- Resolution: "Enable logging for ElasticSearch domains",
- Explanation: `Amazon ES exposes four Elasticsearch logs through Amazon CloudWatch Logs: error logs, search slow logs, index slow logs, and audit logs.
-
-Search slow logs, index slow logs, and error logs are useful for troubleshooting performance and stability issues.
-
-Audit logs track user activity for compliance purposes.
-
-All the logs are disabled by default.`,
- Links: []string{
- "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createdomain-configure-slow-logs.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableDomainLoggingGoodExamples,
- BadExamples: terraformEnableDomainLoggingBadExamples,
- Links: terraformEnableDomainLoggingLinks,
- RemediationMarkdown: terraformEnableDomainLoggingRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableDomainLoggingGoodExamples,
- BadExamples: cloudFormationEnableDomainLoggingBadExamples,
- Links: cloudFormationEnableDomainLoggingLinks,
- RemediationMarkdown: cloudFormationEnableDomainLoggingRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, domain := range s.AWS.Elasticsearch.Domains {
- if domain.LogPublishing.AuditEnabled.IsFalse() {
- results.Add(
- "Domain audit logging is not enabled.",
- domain.LogPublishing.AuditEnabled,
- )
- } else {
- results.AddPassed(&domain)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_logging.rego b/checks/cloud/aws/elasticsearch/enable_domain_logging.rego
new file mode 100644
index 00000000..3227afcf
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enable_domain_logging.rego
@@ -0,0 +1,43 @@
+# METADATA
+# title: Domain logging should be enabled for Elastic Search domains
+# description: |
+# Amazon ES exposes four Elasticsearch logs through Amazon CloudWatch Logs: error logs, search slow logs, index slow logs, and audit logs.
+# Search slow logs, index slow logs, and error logs are useful for troubleshooting performance and stability issues.
+# Audit logs track user activity for compliance purposes.
+# All the logs are disabled by default.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createdomain-configure-slow-logs.html
+# custom:
+# id: AVD-AWS-0042
+# avd_id: AVD-AWS-0042
+# provider: aws
+# service: elastic-search
+# severity: MEDIUM
+# short_code: enable-domain-logging
+# recommended_action: Enable logging for ElasticSearch domains
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elastic-search
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticsearch_domain#log_type
+# good_examples: checks/cloud/aws/elasticsearch/enable_domain_logging.tf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enable_domain_logging.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticsearch/enable_domain_logging.cf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enable_domain_logging.cf.go
+package builtin.aws.elasticsearch.aws0042
+
+import rego.v1
+
+deny contains res if {
+ some domain in input.aws.elasticsearch.domains
+ domain.logpublishing.auditenabled.value == false
+ res := result.new("Domain audit logging is not enabled.", domain.logpublishing.auditenabled)
+}
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_logging_test.go b/checks/cloud/aws/elasticsearch/enable_domain_logging_test.go
deleted file mode 100644
index 680fb8e6..00000000
--- a/checks/cloud/aws/elasticsearch/enable_domain_logging_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package elasticsearch
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticsearch"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableDomainLogging(t *testing.T) {
- tests := []struct {
- name string
- input elasticsearch.Elasticsearch
- expected bool
- }{
- {
- name: "Elasticsearch domain with audit logging disabled",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- LogPublishing: elasticsearch.LogPublishing{
- Metadata: trivyTypes.NewTestMetadata(),
- AuditEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Elasticsearch domain with audit logging enabled",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- LogPublishing: elasticsearch.LogPublishing{
- Metadata: trivyTypes.NewTestMetadata(),
- AuditEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Elasticsearch = test.input
- results := CheckEnableDomainLogging.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableDomainLogging.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticsearch/enable_domain_logging_test.rego b/checks/cloud/aws/elasticsearch/enable_domain_logging_test.rego
new file mode 100644
index 00000000..471a874a
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enable_domain_logging_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticsearch.aws0042_test
+
+import rego.v1
+
+import data.builtin.aws.elasticsearch.aws0042 as check
+import data.lib.test
+
+test_allow_logging_enabled if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"logpublishing": {"auditenabled": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_logging_disabled if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"logpublishing": {"auditenabled": {"value": false}}}]}}}
+
+ test.assert_equal_message("Domain audit logging is not enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.go b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.go
deleted file mode 100755
index 88a2618f..00000000
--- a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package elasticsearch
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableInTransitEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0043",
- Provider: providers.AWSProvider,
- Service: "elastic-search",
- ShortCode: "enable-in-transit-encryption",
- Summary: "Elasticsearch domain uses plaintext traffic for node to node communication.",
- Impact: "In transit data between nodes could be read if intercepted",
- Resolution: "Enable encrypted node to node communication",
- Explanation: `Traffic flowing between Elasticsearch nodes should be encrypted to ensure sensitive data is kept private.`,
- Links: []string{
- "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/ntn.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableInTransitEncryptionGoodExamples,
- BadExamples: terraformEnableInTransitEncryptionBadExamples,
- Links: terraformEnableInTransitEncryptionLinks,
- RemediationMarkdown: terraformEnableInTransitEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableInTransitEncryptionGoodExamples,
- BadExamples: cloudFormationEnableInTransitEncryptionBadExamples,
- Links: cloudFormationEnableInTransitEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableInTransitEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, domain := range s.AWS.Elasticsearch.Domains {
- if domain.TransitEncryption.Enabled.IsFalse() {
- results.Add(
- "Domain does not have in-transit encryption enabled.",
- domain.TransitEncryption.Enabled,
- )
- } else {
- results.AddPassed(&domain)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.rego b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.rego
new file mode 100644
index 00000000..6c0999bd
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Elasticsearch domain uses plaintext traffic for node to node communication.
+# description: |
+# Traffic flowing between Elasticsearch nodes should be encrypted to ensure sensitive data is kept private.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/ntn.html
+# custom:
+# id: AVD-AWS-0043
+# avd_id: AVD-AWS-0043
+# provider: aws
+# service: elastic-search
+# severity: HIGH
+# short_code: enable-in-transit-encryption
+# recommended_action: Enable encrypted node to node communication
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elastic-search
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticsearch_domain#node_to_node_encryption
+# good_examples: checks/cloud/aws/elasticsearch/enable_in_transit_encryption.tf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enable_in_transit_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticsearch/enable_in_transit_encryption.cf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enable_in_transit_encryption.cf.go
+package builtin.aws.elasticsearch.aws0043
+
+import rego.v1
+
+deny contains res if {
+ some domain in input.aws.elasticsearch.domains
+ domain.transitencryption.enabled.value == false
+ res := result.new("Domain does not have in-transit encryption enabled.", domain.transitencryption.enabled)
+}
diff --git a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption_test.go b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption_test.go
deleted file mode 100644
index ecbf72c9..00000000
--- a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package elasticsearch
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticsearch"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableInTransitEncryption(t *testing.T) {
- tests := []struct {
- name string
- input elasticsearch.Elasticsearch
- expected bool
- }{
- {
- name: "Elasticsearch domain without in-transit encryption",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- TransitEncryption: elasticsearch.TransitEncryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Elasticsearch domain with in-transit encryption",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- TransitEncryption: elasticsearch.TransitEncryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Elasticsearch = test.input
- results := CheckEnableInTransitEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableInTransitEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticsearch/enable_in_transit_encryption_test.rego b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption_test.rego
new file mode 100644
index 00000000..c58c3f07
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enable_in_transit_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticsearch.aws0043_test
+
+import rego.v1
+
+import data.builtin.aws.elasticsearch.aws0043 as check
+import data.lib.test
+
+test_allow_encryption_enabled if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"transitencryption": {"enabled": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_encryption_disabled if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"transitencryption": {"enabled": {"value": false}}}]}}}
+
+ test.assert_equal_message("Domain does not have in-transit encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticsearch/enforce_https.go b/checks/cloud/aws/elasticsearch/enforce_https.go
deleted file mode 100755
index 8bc714ee..00000000
--- a/checks/cloud/aws/elasticsearch/enforce_https.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package elasticsearch
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnforceHttps = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0046",
- Provider: providers.AWSProvider,
- Service: "elastic-search",
- ShortCode: "enforce-https",
- Summary: "Elasticsearch doesn't enforce HTTPS traffic.",
- Impact: "HTTP traffic can be intercepted and the contents read",
- Resolution: "Enforce the use of HTTPS for ElasticSearch",
- Explanation: `Plain HTTP is unencrypted and human-readable. This means that if a malicious actor was to eavesdrop on your connection, they would be able to see all of your data flowing back and forth.
-
-You should use HTTPS, which is HTTP over an encrypted (TLS) connection, meaning eavesdroppers cannot read your traffic.`,
- Links: []string{
- "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-data-protection.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnforceHttpsGoodExamples,
- BadExamples: terraformEnforceHttpsBadExamples,
- Links: terraformEnforceHttpsLinks,
- RemediationMarkdown: terraformEnforceHttpsRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnforceHttpsGoodExamples,
- BadExamples: cloudFormationEnforceHttpsBadExamples,
- Links: cloudFormationEnforceHttpsLinks,
- RemediationMarkdown: cloudFormationEnforceHttpsRemediationMarkdown,
- },
- Severity: severity.Critical,
- },
- func(s *state.State) (results scan.Results) {
- for _, domain := range s.AWS.Elasticsearch.Domains {
- if domain.Endpoint.EnforceHTTPS.IsFalse() {
- results.Add(
- "Domain does not enforce HTTPS.",
- domain.Endpoint.EnforceHTTPS,
- )
- } else {
- results.AddPassed(&domain)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticsearch/enforce_https.rego b/checks/cloud/aws/elasticsearch/enforce_https.rego
new file mode 100644
index 00000000..52142126
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enforce_https.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: Elasticsearch doesn't enforce HTTPS traffic.
+# description: |
+# Plain HTTP is unencrypted and human-readable. This means that if a malicious actor were to eavesdrop on your connection, they would be able to see all of your data flowing back and forth.
+# You should use HTTPS, which is HTTP over an encrypted (TLS) connection, meaning eavesdroppers cannot read your traffic.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-data-protection.html
+# custom:
+# id: AVD-AWS-0046
+# avd_id: AVD-AWS-0046
+# provider: aws
+# service: elastic-search
+# severity: CRITICAL
+# short_code: enforce-https
+# recommended_action: Enforce the use of HTTPS for ElasticSearch
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elastic-search
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticsearch_domain#enforce_https
+# good_examples: checks/cloud/aws/elasticsearch/enforce_https.tf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enforce_https.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticsearch/enforce_https.cf.go
+# bad_examples: checks/cloud/aws/elasticsearch/enforce_https.cf.go
+package builtin.aws.elasticsearch.aws0046
+
+import rego.v1
+
+deny contains res if {
+ some domain in input.aws.elasticsearch.domains
+ domain.endpoint.enforcehttps.value == false
+ res := result.new("Domain does not enforce HTTPS.", domain.endpoint.enforcehttps)
+}
diff --git a/checks/cloud/aws/elasticsearch/enforce_https_test.go b/checks/cloud/aws/elasticsearch/enforce_https_test.go
deleted file mode 100644
index 06b6a91a..00000000
--- a/checks/cloud/aws/elasticsearch/enforce_https_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package elasticsearch
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticsearch"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnforceHttps(t *testing.T) {
- tests := []struct {
- name string
- input elasticsearch.Elasticsearch
- expected bool
- }{
- {
- name: "Elasticsearch domain with enforce HTTPS disabled",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Endpoint: elasticsearch.Endpoint{
- Metadata: trivyTypes.NewTestMetadata(),
- EnforceHTTPS: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Elasticsearch domain with enforce HTTPS enabled",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Endpoint: elasticsearch.Endpoint{
- Metadata: trivyTypes.NewTestMetadata(),
- EnforceHTTPS: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Elasticsearch = test.input
- results := CheckEnforceHttps.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnforceHttps.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticsearch/enforce_https_test.rego b/checks/cloud/aws/elasticsearch/enforce_https_test.rego
new file mode 100644
index 00000000..7246a860
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/enforce_https_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticsearch.aws0046_test
+
+import rego.v1
+
+import data.builtin.aws.elasticsearch.aws0046 as check
+import data.lib.test
+
+test_allow_enforce_https if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"endpoint": {"enforcehttps": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_does_not_enforce_https if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"endpoint": {"enforcehttps": {"value": false}}}]}}}
+
+ test.assert_equal_message("Domain does not enforce HTTPS.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elasticsearch/use_secure_tls_policy.go b/checks/cloud/aws/elasticsearch/use_secure_tls_policy.go
deleted file mode 100755
index eccef1d5..00000000
--- a/checks/cloud/aws/elasticsearch/use_secure_tls_policy.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package elasticsearch
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckUseSecureTlsPolicy = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0126",
- Provider: providers.AWSProvider,
- Service: "elastic-search",
- ShortCode: "use-secure-tls-policy",
- Summary: "Elasticsearch domain endpoint is using outdated TLS policy.",
- Impact: "Outdated SSL policies increase exposure to known vulnerabilities",
- Resolution: "Use the most modern TLS/SSL policies available",
- Explanation: `You should not use outdated/insecure TLS versions for encryption. You should be using TLS v1.2+.`,
- Links: []string{
- "https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-data-protection.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformUseSecureTlsPolicyGoodExamples,
- BadExamples: terraformUseSecureTlsPolicyBadExamples,
- Links: terraformUseSecureTlsPolicyLinks,
- RemediationMarkdown: terraformUseSecureTlsPolicyRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationUseSecureTlsPolicyGoodExamples,
- BadExamples: cloudFormationUseSecureTlsPolicyBadExamples,
- Links: cloudFormationUseSecureTlsPolicyLinks,
- RemediationMarkdown: cloudFormationUseSecureTlsPolicyRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, domain := range s.AWS.Elasticsearch.Domains {
- if domain.Endpoint.TLSPolicy.NotEqualTo("Policy-Min-TLS-1-2-2019-07") {
- results.Add(
- "Domain does not have a secure TLS policy.",
- domain.Endpoint.TLSPolicy,
- )
- } else {
- results.AddPassed(&domain)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elasticsearch/use_secure_tls_policy.rego b/checks/cloud/aws/elasticsearch/use_secure_tls_policy.rego
new file mode 100644
index 00000000..0a442973
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/use_secure_tls_policy.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Elasticsearch domain endpoint is using outdated TLS policy.
+# description: |
+# You should not use outdated/insecure TLS versions for encryption. You should be using TLS v1.2+.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-data-protection.html
+# custom:
+# id: AVD-AWS-0126
+# avd_id: AVD-AWS-0126
+# provider: aws
+# service: elastic-search
+# severity: HIGH
+# short_code: use-secure-tls-policy
+# recommended_action: Use the most modern TLS/SSL policies available
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elastic-search
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/elasticsearch_domain#tls_security_policy
+# good_examples: checks/cloud/aws/elasticsearch/use_secure_tls_policy.tf.go
+# bad_examples: checks/cloud/aws/elasticsearch/use_secure_tls_policy.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/elasticsearch/use_secure_tls_policy.cf.go
+# bad_examples: checks/cloud/aws/elasticsearch/use_secure_tls_policy.cf.go
+package builtin.aws.elasticsearch.aws0126
+
+import rego.v1
+
+deny contains res if {
+ some domain in input.aws.elasticsearch.domains
+ domain.endpoint.tlspolicy.value != "Policy-Min-TLS-1-2-2019-07"
+ res := result.new("Domain does not have a secure TLS policy.", domain.endpoint.tlspolicy)
+}
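The check accepts exactly one policy name, Policy-Min-TLS-1-2-2019-07, mirroring the Go implementation it replaces. Where more than one policy is acceptable, a set plus a small helper rule keeps the deny body readable. A sketch under the assumption that the listed policy names are the ones you want to allow (the second name is an assumed example; verify it against the AWS documentation, and the package name is hypothetical):

package example.aws.elasticsearch.tls_policy_allowlist

import rego.v1

# Assumed allow-list of minimum-TLS-1.2 endpoint policies; adjust to your baseline.
secure_tls_policies := {
	"Policy-Min-TLS-1-2-2019-07",
	"Policy-Min-TLS-1-2-PFS-2023-10",
}

deny contains msg if {
	some domain in input.aws.elasticsearch.domains
	not is_secure(domain.endpoint.tlspolicy.value)
	msg := "Domain does not have a secure TLS policy."
}

is_secure(policy) if policy in secure_tls_policies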
diff --git a/checks/cloud/aws/elasticsearch/use_secure_tls_policy_test.go b/checks/cloud/aws/elasticsearch/use_secure_tls_policy_test.go
deleted file mode 100644
index b1af6726..00000000
--- a/checks/cloud/aws/elasticsearch/use_secure_tls_policy_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package elasticsearch
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elasticsearch"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckUseSecureTlsPolicy(t *testing.T) {
- tests := []struct {
- name string
- input elasticsearch.Elasticsearch
- expected bool
- }{
- {
- name: "Elasticsearch domain with TLS v1.0",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Endpoint: elasticsearch.Endpoint{
- Metadata: trivyTypes.NewTestMetadata(),
- TLSPolicy: trivyTypes.String("Policy-Min-TLS-1-0-2019-07", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Elasticsearch domain with TLS v1.2",
- input: elasticsearch.Elasticsearch{
- Domains: []elasticsearch.Domain{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Endpoint: elasticsearch.Endpoint{
- Metadata: trivyTypes.NewTestMetadata(),
- TLSPolicy: trivyTypes.String("Policy-Min-TLS-1-2-2019-07", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Elasticsearch = test.input
- results := CheckUseSecureTlsPolicy.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckUseSecureTlsPolicy.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elasticsearch/use_secure_tls_policy_test.rego b/checks/cloud/aws/elasticsearch/use_secure_tls_policy_test.rego
new file mode 100644
index 00000000..ff44dc96
--- /dev/null
+++ b/checks/cloud/aws/elasticsearch/use_secure_tls_policy_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elasticsearch.aws0126_test
+
+import rego.v1
+
+import data.builtin.aws.elasticsearch.aws0126 as check
+import data.lib.test
+
+test_allow_use_secure_tls_policy if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"endpoint": {"tlspolicy": {"value": "Policy-Min-TLS-1-2-2019-07"}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_does_not_use_secure_tls_policy if {
+ inp := {"aws": {"elasticsearch": {"domains": [{"endpoint": {"tlspolicy": {"value": "Policy-Min-TLS-1-0-2019-07"}}}]}}}
+
+ test.assert_equal_message("Domain does not have a secure TLS policy.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elb/alb_not_public.go b/checks/cloud/aws/elb/alb_not_public.go
deleted file mode 100755
index ee137ffc..00000000
--- a/checks/cloud/aws/elb/alb_not_public.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package elb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckAlbNotPublic = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0053",
- Provider: providers.AWSProvider,
- Service: "elb",
- ShortCode: "alb-not-public",
- Summary: "Load balancer is exposed to the internet.",
- Impact: "The load balancer is exposed on the internet",
- Resolution: "Switch to an internal load balancer or add a tfsec ignore",
- Explanation: `There are many scenarios in which you would want to expose a load balancer to the wider internet, but this check exists as a warning to prevent accidental exposure of internal assets. You should ensure that this resource should be exposed publicly.`,
- Links: []string{},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformAlbNotPublicGoodExamples,
- BadExamples: terraformAlbNotPublicBadExamples,
- Links: terraformAlbNotPublicLinks,
- RemediationMarkdown: terraformAlbNotPublicRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, lb := range s.AWS.ELB.LoadBalancers {
- if lb.Metadata.IsUnmanaged() || lb.Type.EqualTo(elb.TypeGateway) {
- continue
- }
- if lb.Internal.IsFalse() {
- results.Add(
- "Load balancer is exposed publicly.",
- lb.Internal,
- )
- } else {
- results.AddPassed(&lb)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elb/alb_not_public.rego b/checks/cloud/aws/elb/alb_not_public.rego
new file mode 100644
index 00000000..18a63ba7
--- /dev/null
+++ b/checks/cloud/aws/elb/alb_not_public.rego
@@ -0,0 +1,37 @@
+# METADATA
+# title: Load balancer is exposed to the internet.
+# description: |
+# There are many scenarios in which you would want to expose a load balancer to the wider internet, but this check exists as a warning to prevent accidental exposure of internal assets. You should ensure that this resource is intended to be exposed publicly.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# custom:
+# id: AVD-AWS-0053
+# avd_id: AVD-AWS-0053
+# provider: aws
+# service: elb
+# severity: HIGH
+# short_code: alb-not-public
+# recommended_action: Switch to an internal load balancer or add a tfsec ignore
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb
+# good_examples: checks/cloud/aws/elb/alb_not_public.tf.go
+# bad_examples: checks/cloud/aws/elb/alb_not_public.tf.go
+package builtin.aws.elb.aws0053
+
+import rego.v1
+
+deny contains res if {
+ some lb in input.aws.elb.loadbalancers
+ lb.type.value != "gateway"
+ lb.internal.value == false
+
+ res := result.new("Load balancer is exposed publicly.", lb.internal)
+}
diff --git a/checks/cloud/aws/elb/alb_not_public_test.go b/checks/cloud/aws/elb/alb_not_public_test.go
deleted file mode 100644
index 0915e1cf..00000000
--- a/checks/cloud/aws/elb/alb_not_public_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package elb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckAlbNotPublic(t *testing.T) {
- tests := []struct {
- name string
- input elb.ELB
- expected bool
- }{
- {
- name: "Load balancer publicly accessible",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- Internal: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "Load balancer internally accessible",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- Internal: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ELB = test.input
- results := CheckAlbNotPublic.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckAlbNotPublic.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elb/alb_not_public_test.rego b/checks/cloud/aws/elb/alb_not_public_test.rego
new file mode 100644
index 00000000..9b7490f6
--- /dev/null
+++ b/checks/cloud/aws/elb/alb_not_public_test.rego
@@ -0,0 +1,33 @@
+package builtin.aws.elb.aws0053_test
+
+import rego.v1
+
+import data.builtin.aws.elb.aws0053 as check
+import data.lib.test
+
+test_deny_public_alb if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "internal": {"value": false},
+ }]}}}
+
+ test.assert_equal_message("Load balancer is exposed publicly.", check.deny) with input as inp
+}
+
+test_allow_public_but_gateway if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "gateway"},
+ "internal": {"value": false},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_internal if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "internal": {"value": true},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elb/drop_invalid_headers.go b/checks/cloud/aws/elb/drop_invalid_headers.go
deleted file mode 100755
index a06e0053..00000000
--- a/checks/cloud/aws/elb/drop_invalid_headers.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package elb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckDropInvalidHeaders = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0052",
- Provider: providers.AWSProvider,
- Service: "elb",
- ShortCode: "drop-invalid-headers",
- Summary: "Load balancers should drop invalid headers",
- Impact: "Invalid headers being passed through to the target of the load balance may exploit vulnerabilities",
- Resolution: "Set drop_invalid_header_fields to true",
- Explanation: `Passing unknown or invalid headers through to the target poses a potential risk of compromise.
-
-By setting drop_invalid_header_fields to true, anything that doe not conform to well known, defined headers will be removed by the load balancer.`,
- Links: []string{
- "https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformDropInvalidHeadersGoodExamples,
- BadExamples: terraformDropInvalidHeadersBadExamples,
- Links: terraformDropInvalidHeadersLinks,
- RemediationMarkdown: terraformDropInvalidHeadersRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, lb := range s.AWS.ELB.LoadBalancers {
- if lb.Metadata.IsUnmanaged() || !lb.Type.EqualTo(elb.TypeApplication) || lb.Metadata.IsUnmanaged() {
- continue
- }
- if lb.DropInvalidHeaderFields.IsFalse() {
- results.Add(
- "Application load balancer is not set to drop invalid headers.",
- lb.DropInvalidHeaderFields,
- )
- } else {
- results.AddPassed(&lb)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elb/drop_invalid_headers.rego b/checks/cloud/aws/elb/drop_invalid_headers.rego
new file mode 100644
index 00000000..b0cf7f2b
--- /dev/null
+++ b/checks/cloud/aws/elb/drop_invalid_headers.rego
@@ -0,0 +1,39 @@
+# METADATA
+# title: Load balancers should drop invalid headers
+# description: |
+# Passing unknown or invalid headers through to the target poses a potential risk of compromise.
+# By setting drop_invalid_header_fields to true, anything that does not conform to well-known, defined headers will be removed by the load balancer.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html
+# custom:
+# id: AVD-AWS-0052
+# avd_id: AVD-AWS-0052
+# provider: aws
+# service: elb
+# severity: HIGH
+# short_code: drop-invalid-headers
+# recommended_action: Set drop_invalid_header_fields to true
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb#drop_invalid_header_fields
+# good_examples: checks/cloud/aws/elb/drop_invalid_headers.tf.go
+# bad_examples: checks/cloud/aws/elb/drop_invalid_headers.tf.go
+package builtin.aws.elb.aws0052
+
+import rego.v1
+
+deny contains res if {
+ some lb in input.aws.elb.loadbalancers
+ lb.type.value == "application"
+ lb.dropinvalidheaderfields.value == false
+ res := result.new("Application load balancer is not set to drop invalid headers.", lb.dropinvalidheaderfields)
+}
diff --git a/checks/cloud/aws/elb/drop_invalid_headers_test.go b/checks/cloud/aws/elb/drop_invalid_headers_test.go
deleted file mode 100644
index f57e1ef6..00000000
--- a/checks/cloud/aws/elb/drop_invalid_headers_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package elb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckDropInvalidHeaders(t *testing.T) {
- tests := []struct {
- name string
- input elb.ELB
- expected bool
- }{
- {
- name: "Load balancer drop invalid headers disabled",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- DropInvalidHeaderFields: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "Load balancer drop invalid headers enabled",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- DropInvalidHeaderFields: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- }, {
- name: "Classic load balanace doesn't fail when no drop headers",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeClassic, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ELB = test.input
- results := CheckDropInvalidHeaders.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckDropInvalidHeaders.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elb/drop_invalid_headers_test.rego b/checks/cloud/aws/elb/drop_invalid_headers_test.rego
new file mode 100644
index 00000000..0f21b322
--- /dev/null
+++ b/checks/cloud/aws/elb/drop_invalid_headers_test.rego
@@ -0,0 +1,33 @@
+package builtin.aws.elb.aws0052_test
+
+import rego.v1
+
+import data.builtin.aws.elb.aws0052 as check
+import data.lib.test
+
+test_allow_drop_invalid_headers if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "dropinvalidheaderfields": {"value": true},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_without_drop_invalid_headers_but_no_application if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "gateway"},
+ "dropinvalidheaderfields": {"value": false},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_drop_invalid_headers_and_application if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "dropinvalidheaderfields": {"value": false},
+ }]}}}
+
+ test.assert_equal_message("Application load balancer is not set to drop invalid headers.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/elb/elb.go b/checks/cloud/aws/elb/elb.go
new file mode 100644
index 00000000..89606879
--- /dev/null
+++ b/checks/cloud/aws/elb/elb.go
@@ -0,0 +1 @@
+package elb
diff --git a/checks/cloud/aws/elb/http_not_used.go b/checks/cloud/aws/elb/http_not_used.go
deleted file mode 100755
index aa345644..00000000
--- a/checks/cloud/aws/elb/http_not_used.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package elb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckHttpNotUsed = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0054",
- Provider: providers.AWSProvider,
- Service: "elb",
- ShortCode: "http-not-used",
- Summary: "Use of plain HTTP.",
- Impact: "Your traffic is not protected",
- Resolution: "Switch to HTTPS to benefit from TLS security features",
- Explanation: `Plain HTTP is unencrypted and human-readable. This means that if a malicious actor was to eavesdrop on your connection, they would be able to see all of your data flowing back and forth.
-
-You should use HTTPS, which is HTTP over an encrypted (TLS) connection, meaning eavesdroppers cannot read your traffic.`,
- Links: []string{
- "https://www.cloudflare.com/en-gb/learning/ssl/why-is-http-not-secure/",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformHttpNotUsedGoodExamples,
- BadExamples: terraformHttpNotUsedBadExamples,
- Links: terraformHttpNotUsedLinks,
- RemediationMarkdown: terraformHttpNotUsedRemediationMarkdown,
- },
- Severity: severity.Critical,
- },
- func(s *state.State) (results scan.Results) {
- for _, lb := range s.AWS.ELB.LoadBalancers {
- if !lb.Type.EqualTo(elb.TypeApplication) {
- continue
- }
- for _, listener := range lb.Listeners {
- if !listener.Protocol.EqualTo("HTTP") {
- results.AddPassed(&listener)
- continue
- }
-
- var hasRedirect bool
- for _, action := range listener.DefaultActions {
- if action.Type.EqualTo("redirect") {
- hasRedirect = true
- break
- }
- }
- if hasRedirect {
- results.AddPassed(&listener)
- break
- }
-
- results.Add(
- "Listener for application load balancer does not use HTTPS.",
- listener.Protocol,
- )
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elb/http_not_used.rego b/checks/cloud/aws/elb/http_not_used.rego
new file mode 100644
index 00000000..b361f208
--- /dev/null
+++ b/checks/cloud/aws/elb/http_not_used.rego
@@ -0,0 +1,51 @@
+# METADATA
+# title: Use of plain HTTP.
+# description: |
+# Plain HTTP is unencrypted and human-readable. This means that if a malicious actor were to eavesdrop on your connection, they would be able to see all of your data flowing back and forth.
+# You should use HTTPS, which is HTTP over an encrypted (TLS) connection, meaning eavesdroppers cannot read your traffic.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://www.cloudflare.com/en-gb/learning/ssl/why-is-http-not-secure/
+# custom:
+# id: AVD-AWS-0054
+# avd_id: AVD-AWS-0054
+# provider: aws
+# service: elb
+# severity: CRITICAL
+# short_code: http-not-used
+# recommended_action: Switch to HTTPS to benefit from TLS security features
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener
+# good_examples: checks/cloud/aws/elb/http_not_used.tf.go
+# bad_examples: checks/cloud/aws/elb/http_not_used.tf.go
+package builtin.aws.elb.aws0054
+
+import rego.v1
+
+deny contains res if {
+ some lb in input.aws.elb.loadbalancers
+ lb.type.value == "application"
+
+ some listener in lb.listeners
+ use_http(listener)
+ res := result.new("Listener for application load balancer does not use HTTPS.", listener)
+}
+
+use_http(listener) if {
+ listener.protocol.value == "HTTP"
+ not has_redirect(listener)
+}
+
+has_redirect(listener) if {
+ some action in listener.defaultactions
+ action.type.value == "redirect"
+}
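Because use_http and has_redirect are ordinary rules in the same package, they can also be exercised on their own, which keeps the deny tests focused on end-to-end behaviour. A sketch of direct helper tests, assuming the same listener document shape used by the check (the test package name is hypothetical):

package example.aws.elb.http_not_used_helpers_test

import rego.v1

import data.builtin.aws.elb.aws0054 as check

# HTTP listener that only forwards traffic: use_http should hold.
test_use_http_without_redirect if {
	check.use_http({
		"protocol": {"value": "HTTP"},
		"defaultactions": [{"type": {"value": "forward"}}],
	})
}

# HTTP listener whose default action redirects: use_http should not hold.
test_use_http_with_redirect if {
	not check.use_http({
		"protocol": {"value": "HTTP"},
		"defaultactions": [{"type": {"value": "redirect"}}],
	})
}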
diff --git a/checks/cloud/aws/elb/http_not_used_test.go b/checks/cloud/aws/elb/http_not_used_test.go
deleted file mode 100644
index f301ecfe..00000000
--- a/checks/cloud/aws/elb/http_not_used_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package elb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckHttpNotUsed(t *testing.T) {
- tests := []struct {
- name string
- input elb.ELB
- expected bool
- }{
- {
- name: "Load balancer listener with HTTP protocol",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- Listeners: []elb.Listener{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Protocol: trivyTypes.String("HTTP", trivyTypes.NewTestMetadata()),
- DefaultActions: []elb.Action{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String("forward", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Load balancer listener with HTTP protocol but redirect default action",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- Listeners: []elb.Listener{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Protocol: trivyTypes.String("HTTP", trivyTypes.NewTestMetadata()),
- DefaultActions: []elb.Action{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String("redirect", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "Load balancer listener with HTTP protocol but redirect among multiple default actions",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- Listeners: []elb.Listener{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Protocol: trivyTypes.String("HTTP", trivyTypes.NewTestMetadata()),
- DefaultActions: []elb.Action{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String("forward", trivyTypes.NewTestMetadata()),
- },
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String("redirect", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "Load balancer listener with HTTPS protocol",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(elb.TypeApplication, trivyTypes.NewTestMetadata()),
- Listeners: []elb.Listener{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Protocol: trivyTypes.String("HTTPS", trivyTypes.NewTestMetadata()),
- DefaultActions: []elb.Action{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String("forward", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ELB = test.input
- results := CheckHttpNotUsed.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckHttpNotUsed.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elb/http_not_used_test.rego b/checks/cloud/aws/elb/http_not_used_test.rego
new file mode 100644
index 00000000..953e76da
--- /dev/null
+++ b/checks/cloud/aws/elb/http_not_used_test.rego
@@ -0,0 +1,63 @@
+package builtin.aws.elb.aws0054_test
+
+import rego.v1
+
+import data.builtin.aws.elb.aws0054 as check
+import data.lib.test
+
+test_allow_https if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "listeners": [{"protocol": {"value": "HTTPS"}}],
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_http_with_redirect if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "listeners": [{
+ "protocol": {"value": "HTTP"},
+ "defaultactions": [{"type": {"value": "redirect"}}],
+ }],
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_http_mixed_actions if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "listeners": [{
+ "protocol": {"value": "HTTP"},
+ "defaultactions": [
+ {"type": {"value": "redirect"}},
+ {"type": {"value": "forward"}},
+ ],
+ }],
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_http_but_not_application if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "network"},
+ "listeners": [{"protocol": {"value": "HTTP"}}],
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_http_without_redirect if {
+ inp := {"aws": {"elb": {"loadbalancers": [{
+ "type": {"value": "application"},
+ "listeners": [{
+ "protocol": {"value": "HTTP"},
+ "defaultactions": [{"type": {"value": "forward"}}],
+ }],
+ }]}}}
+
+ test.assert_equal_message("Listener for application load balancer does not use HTTPS.", check.deny) with input as inp
+}
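Note: the Rego tests added throughout this change assert through helpers from `data.lib.test`, which is not part of this diff. A minimal sketch of what those helpers might look like, assuming each result exposes its message as `res.msg`; the library actually shipped in the repository may differ:

```rego
# Illustrative sketch only; the real helpers live in data.lib.test.
package lib.test_sketch

import rego.v1

# Passes when a deny rule produced no results.
assert_empty(results) if {
	count(results) == 0
}

# Passes when at least one result carries the expected message; the real
# helper may also assert on result counts or metadata.
assert_equal_message(expected, results) if {
	some res in results
	res.msg == expected
}
```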
diff --git a/checks/cloud/aws/elb/use_secure_tls_policy.go b/checks/cloud/aws/elb/use_secure_tls_policy.go
deleted file mode 100755
index 33a5341e..00000000
--- a/checks/cloud/aws/elb/use_secure_tls_policy.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package elb
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var outdatedSSLPolicies = []string{
- "ELBSecurityPolicy-2015-05",
- "ELBSecurityPolicy-2016-08",
- "ELBSecurityPolicy-FS-2018-06",
- "ELBSecurityPolicy-FS-1-1-2019-08",
- "ELBSecurityPolicy-TLS-1-0-2015-04",
- "ELBSecurityPolicy-TLS-1-1-2017-01",
- "ELBSecurityPolicy-TLS13-1-0-2021-06",
- "ELBSecurityPolicy-TLS13-1-1-2021-06",
- "ELBSecurityPolicy-TLS13-1-2-Ext1-2021-06",
- "ELBSecurityPolicy-TLS13-1-2-Ext2-2021-06",
-}
-
-var CheckUseSecureTlsPolicy = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0047",
- Provider: providers.AWSProvider,
- Service: "elb",
- ShortCode: "use-secure-tls-policy",
- Summary: "An outdated SSL policy is in use by a load balancer.",
- Impact: "The SSL policy is outdated and has known vulnerabilities",
- Resolution: "Use a more recent TLS/SSL policy for the load balancer",
- Explanation: `You should not use outdated/insecure TLS versions for encryption. You should be using TLS v1.2+.`,
- Links: []string{},
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformUseSecureTlsPolicyGoodExamples,
- BadExamples: terraformUseSecureTlsPolicyBadExamples,
- Links: terraformUseSecureTlsPolicyLinks,
- RemediationMarkdown: terraformUseSecureTlsPolicyRemediationMarkdown,
- },
- Severity: severity.Critical,
- },
- func(s *state.State) (results scan.Results) {
- for _, lb := range s.AWS.ELB.LoadBalancers {
- for _, listener := range lb.Listeners {
- for _, outdated := range outdatedSSLPolicies {
- if listener.TLSPolicy.EqualTo(outdated) {
- results.Add(
- "Listener uses an outdated TLS policy.",
- listener.TLSPolicy,
- )
- } else {
- results.AddPassed(&listener)
- }
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/elb/use_secure_tls_policy.rego b/checks/cloud/aws/elb/use_secure_tls_policy.rego
new file mode 100644
index 00000000..57fdae03
--- /dev/null
+++ b/checks/cloud/aws/elb/use_secure_tls_policy.rego
@@ -0,0 +1,53 @@
+# METADATA
+# title: An outdated SSL policy is in use by a load balancer.
+# description: |
+# You should not use outdated/insecure TLS versions for encryption. You should be using TLS v1.2+.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# custom:
+# id: AVD-AWS-0047
+# avd_id: AVD-AWS-0047
+# provider: aws
+# service: elb
+# severity: CRITICAL
+# short_code: use-secure-tls-policy
+# recommended_action: Use a more recent TLS/SSL policy for the load balancer
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: elb
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lb_listener
+# good_examples: checks/cloud/aws/elb/use_secure_tls_policy.tf.go
+# bad_examples: checks/cloud/aws/elb/use_secure_tls_policy.tf.go
+package builtin.aws.elb.aws0047
+
+import rego.v1
+
+outdated_ssl_policies := {
+ "ELBSecurityPolicy-2015-05",
+ "ELBSecurityPolicy-2016-08",
+ "ELBSecurityPolicy-FS-2018-06",
+ "ELBSecurityPolicy-FS-1-1-2019-08",
+ "ELBSecurityPolicy-TLS-1-0-2015-04",
+ "ELBSecurityPolicy-TLS-1-1-2017-01",
+ "ELBSecurityPolicy-TLS13-1-0-2021-06",
+ "ELBSecurityPolicy-TLS13-1-1-2021-06",
+ "ELBSecurityPolicy-TLS13-1-2-Ext1-2021-06",
+ "ELBSecurityPolicy-TLS13-1-2-Ext2-2021-06",
+}
+
+deny contains res if {
+ some lb in input.aws.elb.loadbalancers
+ some listener in lb.listeners
+ has_outdated_policy(listener)
+ res := result.new("Listener uses an outdated TLS policy.", listener.tlspolicy)
+}
+
+has_outdated_policy(listener) if {
+ listener.tlspolicy.value in outdated_ssl_policies
+}
diff --git a/checks/cloud/aws/elb/use_secure_tls_policy_test.go b/checks/cloud/aws/elb/use_secure_tls_policy_test.go
deleted file mode 100644
index 4f46b2c6..00000000
--- a/checks/cloud/aws/elb/use_secure_tls_policy_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package elb
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/elb"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckUseSecureTlsPolicy(t *testing.T) {
- tests := []struct {
- name string
- input elb.ELB
- expected bool
- }{
- {
- name: "Load balancer listener using TLS v1.0",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Listeners: []elb.Listener{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- TLSPolicy: trivyTypes.String("ELBSecurityPolicy-TLS-1-0-2015-04", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Load balancer listener using TLS v1.2",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Listeners: []elb.Listener{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- TLSPolicy: trivyTypes.String("ELBSecurityPolicy-TLS-1-2-2017-01", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "Load balancer listener using TLS v1.3",
- input: elb.ELB{
- LoadBalancers: []elb.LoadBalancer{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Listeners: []elb.Listener{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- TLSPolicy: trivyTypes.String("ELBSecurityPolicy-TLS13-1-2-2021-06", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.ELB = test.input
- results := CheckUseSecureTlsPolicy.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckUseSecureTlsPolicy.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/elb/use_secure_tls_policy_test.rego b/checks/cloud/aws/elb/use_secure_tls_policy_test.rego
new file mode 100644
index 00000000..220fbda8
--- /dev/null
+++ b/checks/cloud/aws/elb/use_secure_tls_policy_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.elb.aws0047_test
+
+import rego.v1
+
+import data.builtin.aws.elb.aws0047 as check
+import data.lib.test
+
+test_deny_with_outdated_tls_policy if {
+ inp := {"aws": {"elb": {"loadbalancers": [{"listeners": [{"tlspolicy": {"value": "ELBSecurityPolicy-TLS-1-0-2015-04"}}]}]}}}
+
+	test.assert_equal_message("Listener uses an outdated TLS policy.", check.deny) with input as inp
+}
+
+test_allow_with_actual_tls_policy if {
+ inp := {"aws": {"elb": {"loadbalancers": [{"listeners": [{"tlspolicy": {"value": "ELBSecurityPolicy-TLS-1-2-2017-01"}}]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/emr/emr.go b/checks/cloud/aws/emr/emr.go
new file mode 100644
index 00000000..e42cbd99
--- /dev/null
+++ b/checks/cloud/aws/emr/emr.go
@@ -0,0 +1 @@
+package emr
diff --git a/checks/cloud/aws/emr/enable_at_rest_encryption.go b/checks/cloud/aws/emr/enable_at_rest_encryption.go
deleted file mode 100644
index f15115dd..00000000
--- a/checks/cloud/aws/emr/enable_at_rest_encryption.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package emr
-
-import (
- "encoding/json"
-
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableAtRestEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0137",
- Provider: providers.AWSProvider,
- Service: "emr",
- ShortCode: "enable-at-rest-encryption",
- Summary: "Enable at-rest encryption for EMR clusters.",
- Impact: "At-rest data in the EMR cluster could be compromised if accessed.",
- Resolution: "Enable at-rest encryption for EMR cluster",
- Explanation: `Data stored within an EMR cluster should be encrypted to ensure sensitive data is kept private.`,
- Links: []string{
- "https://docs.aws.amazon.com/config/latest/developerguide/operational-best-practices-for-nist_800-171.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableAtRestEncryptionGoodExamples,
- BadExamples: terraformEnableAtRestEncryptionBadExamples,
- Links: terraformEnableAtRestEncryptionLinks,
- RemediationMarkdown: terraformEnableAtRestEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, conf := range s.AWS.EMR.SecurityConfiguration {
- vars, err := readVarsFromConfigurationAtRest(conf.Configuration.Value())
- if err != nil {
- continue
- }
-
- if !vars.EncryptionConfiguration.EnableAtRestEncryption {
- results.Add(
- "EMR cluster does not have at-rest encryption enabled.",
- conf.Configuration,
- )
- } else {
- results.AddPassed(&conf)
- }
-
- }
- return
- },
-)
-
-type conf struct {
- EncryptionConfiguration struct {
- AtRestEncryptionConfiguration struct {
- S3EncryptionConfiguration struct {
- EncryptionMode string `json:"EncryptionMode"`
- } `json:"S3EncryptionConfiguration"`
- LocalDiskEncryptionConfiguration struct {
- EncryptionKeyProviderType string `json:"EncryptionKeyProviderType"`
- AwsKmsKey string `json:"AwsKmsKey"`
- } `json:"LocalDiskEncryptionConfiguration"`
- } `json:"AtRestEncryptionConfiguration"`
- EnableInTransitEncryption bool `json:"EnableInTransitEncryption"`
- EnableAtRestEncryption bool `json:"EnableAtRestEncryption"`
- } `json:"EncryptionConfiguration"`
-}
-
-func readVarsFromConfigurationAtRest(raw string) (*conf, error) {
- var testConf conf
- if err := json.Unmarshal([]byte(raw), &testConf); err != nil {
- return nil, err
- }
-
- return &testConf, nil
-}
diff --git a/checks/cloud/aws/emr/enable_at_rest_encryption.rego b/checks/cloud/aws/emr/enable_at_rest_encryption.rego
new file mode 100644
index 00000000..9f35bda0
--- /dev/null
+++ b/checks/cloud/aws/emr/enable_at_rest_encryption.rego
@@ -0,0 +1,38 @@
+# METADATA
+# title: Enable at-rest encryption for EMR clusters.
+# description: |
+# Data stored within an EMR cluster should be encrypted to ensure sensitive data is kept private.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/config/latest/developerguide/operational-best-practices-for-nist_800-171.html
+# custom:
+# id: AVD-AWS-0137
+# avd_id: AVD-AWS-0137
+# provider: aws
+# service: emr
+# severity: HIGH
+# short_code: enable-at-rest-encryption
+# recommended_action: Enable at-rest encryption for EMR cluster
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: emr
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_security_configuration
+# good_examples: checks/cloud/aws/emr/enable_at_rest_encryption.tf.go
+# bad_examples: checks/cloud/aws/emr/enable_at_rest_encryption.tf.go
+package builtin.aws.emr.aws0137
+
+import rego.v1
+
+deny contains res if {
+ some sec_conf in input.aws.emr.securityconfiguration
+ vars := json.unmarshal(sec_conf.configuration.value)
+ vars.EncryptionConfiguration.EnableAtRestEncryption == false
+ res := result.new("EMR cluster does not have at-rest encryption enabled.", sec_conf.configuration)
+}
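For context, the deleted Go implementation decoded the embedded security configuration into a struct; the Rego ports here and below call `json.unmarshal` on `configuration.value` at evaluation time. A minimal sketch of that JSON shape, taken from the Go test fixtures removed in this change (the ARN is a fixture value, not a real key):

```rego
# Illustrative module only; not part of the checks.
package emr_config_example

import rego.v1

# Mirrors the EncryptionConfiguration JSON from the removed Go test fixtures.
example_configuration := {"EncryptionConfiguration": {
	"AtRestEncryptionConfiguration": {
		"S3EncryptionConfiguration": {"EncryptionMode": "SSE-S3"},
		"LocalDiskEncryptionConfiguration": {
			"EncryptionKeyProviderType": "AwsKms",
			"AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
		}
	},
	"EnableInTransitEncryption": true,
	"EnableAtRestEncryption": true
}}

# The three EMR checks receive this object JSON-encoded in configuration.value,
# unmarshal it, and inspect the flags shown above and below.
at_rest_enabled if {
	raw := json.marshal(example_configuration)
	vars := json.unmarshal(raw)
	vars.EncryptionConfiguration.EnableAtRestEncryption == true
}
```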
diff --git a/checks/cloud/aws/emr/enable_at_rest_encryption_test.go b/checks/cloud/aws/emr/enable_at_rest_encryption_test.go
deleted file mode 100644
index 50dfd8da..00000000
--- a/checks/cloud/aws/emr/enable_at_rest_encryption_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package emr
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/emr"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/state"
- "github.com/stretchr/testify/assert"
-)
-
-func TestEnableAtRestEncryption(t *testing.T) {
- tests := []struct {
- name string
- input emr.EMR
- expected bool
- }{
- {
- name: "EMR cluster with at-rest encryption disabled",
- input: emr.EMR{
- SecurityConfiguration: []emr.SecurityConfiguration{
- {
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Configuration: trivyTypes.String(`{
- "EncryptionConfiguration": {
- "AtRestEncryptionConfiguration": {
- "S3EncryptionConfiguration": {
- "EncryptionMode": "SSE-S3"
- },
- "LocalDiskEncryptionConfiguration": {
- "EncryptionKeyProviderType": "AwsKms",
- "AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
- }
- },
- "EnableAtRestEncryption": false
- }
- }`, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "EMR cluster with at-rest encryption enabled",
- input: emr.EMR{
- SecurityConfiguration: []emr.SecurityConfiguration{
- {
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Configuration: trivyTypes.String(`{
- "EncryptionConfiguration": {
- "AtRestEncryptionConfiguration": {
- "S3EncryptionConfiguration": {
- "EncryptionMode": "SSE-S3"
- },
- "LocalDiskEncryptionConfiguration": {
- "EncryptionKeyProviderType": "AwsKms",
- "AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
- }
- },
- "EnableAtRestEncryption": true
- }
- }`, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.EMR = test.input
- results := CheckEnableAtRestEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAtRestEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/emr/enable_at_rest_encryption_test.rego b/checks/cloud/aws/emr/enable_at_rest_encryption_test.rego
new file mode 100644
index 00000000..aa69e27b
--- /dev/null
+++ b/checks/cloud/aws/emr/enable_at_rest_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.emr.aws0137_test
+
+import rego.v1
+
+import data.builtin.aws.emr.aws0137 as check
+import data.lib.test
+
+test_allow_with_encryption if {
+ inp := {"aws": {"emr": {"securityconfiguration": [{"configuration": {"value": json.marshal({"EncryptionConfiguration": {"EnableAtRestEncryption": true}})}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_encryption if {
+ inp := {"aws": {"emr": {"securityconfiguration": [{"configuration": {"value": json.marshal({"EncryptionConfiguration": {"EnableAtRestEncryption": false}})}}]}}}
+
+ test.assert_equal_message("EMR cluster does not have at-rest encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/emr/enable_in_transit_encryption.go b/checks/cloud/aws/emr/enable_in_transit_encryption.go
deleted file mode 100644
index f96ed56a..00000000
--- a/checks/cloud/aws/emr/enable_in_transit_encryption.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package emr
-
-import (
- "encoding/json"
-
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableInTransitEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0138",
- Provider: providers.AWSProvider,
- Service: "emr",
- ShortCode: "enable-in-transit-encryption",
- Summary: "Enable in-transit encryption for EMR clusters.",
- Impact: "In-transit data in the EMR cluster could be compromised if accessed.",
- Resolution: "Enable in-transit encryption for EMR cluster",
- Explanation: `Data stored within an EMR cluster should be encrypted to ensure sensitive data is kept private.`,
- Links: []string{
- "https://docs.aws.amazon.com/config/latest/developerguide/operational-best-practices-for-nist_800-171.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableInTransitEncryptionGoodExamples,
- BadExamples: terraformEnableInTransitEncryptionBadExamples,
- Links: terraformEnableInTransitEncryptionLinks,
- RemediationMarkdown: terraformEnableInTransitEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, conf := range s.AWS.EMR.SecurityConfiguration {
- vars, err := readVarsFromConfigurationInTransit(conf.Configuration.Value())
- if err != nil {
- continue
- }
-
- if !vars.EncryptionConfiguration.EnableInTransitEncryption {
- results.Add(
- "EMR cluster does not have in-transit encryption enabled.",
- conf.Configuration,
- )
- } else {
- results.AddPassed(&conf)
- }
-
- }
- return
- },
-)
-
-func readVarsFromConfigurationInTransit(raw string) (*conf, error) {
- var testConf conf
- if err := json.Unmarshal([]byte(raw), &testConf); err != nil {
- return nil, err
- }
-
- return &testConf, nil
-}
diff --git a/checks/cloud/aws/emr/enable_in_transit_encryption.rego b/checks/cloud/aws/emr/enable_in_transit_encryption.rego
new file mode 100644
index 00000000..37a792ec
--- /dev/null
+++ b/checks/cloud/aws/emr/enable_in_transit_encryption.rego
@@ -0,0 +1,38 @@
+# METADATA
+# title: Enable in-transit encryption for EMR clusters.
+# description: |
+#   Data in transit within an EMR cluster should be encrypted to ensure sensitive data is kept private.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/config/latest/developerguide/operational-best-practices-for-nist_800-171.html
+# custom:
+# id: AVD-AWS-0138
+# avd_id: AVD-AWS-0138
+# provider: aws
+# service: emr
+# severity: HIGH
+# short_code: enable-in-transit-encryption
+# recommended_action: Enable in-transit encryption for EMR cluster
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: emr
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_security_configuration
+# good_examples: checks/cloud/aws/emr/enable_in_transit_encryption.tf.go
+# bad_examples: checks/cloud/aws/emr/enable_in_transit_encryption.tf.go
+package builtin.aws.emr.aws0138
+
+import rego.v1
+
+deny contains res if {
+ some sec_conf in input.aws.emr.securityconfiguration
+ vars := json.unmarshal(sec_conf.configuration.value)
+ vars.EncryptionConfiguration.EnableInTransitEncryption == false
+ res := result.new("EMR cluster does not have in-transit encryption enabled.", sec_conf.configuration)
+}
diff --git a/checks/cloud/aws/emr/enable_in_transit_encryption_test.go b/checks/cloud/aws/emr/enable_in_transit_encryption_test.go
deleted file mode 100644
index ac6dec5b..00000000
--- a/checks/cloud/aws/emr/enable_in_transit_encryption_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package emr
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/emr"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/state"
- "github.com/stretchr/testify/assert"
-)
-
-func TestEnableInTransitEncryption(t *testing.T) {
- tests := []struct {
- name string
- input emr.EMR
- expected bool
- }{
- {
- name: "EMR cluster with in-transit encryption disabled",
- input: emr.EMR{
- SecurityConfiguration: []emr.SecurityConfiguration{
- {
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Configuration: trivyTypes.String(`{
- "EncryptionConfiguration": {
- "AtRestEncryptionConfiguration": {
- "S3EncryptionConfiguration": {
- "EncryptionMode": "SSE-S3"
- },
- "LocalDiskEncryptionConfiguration": {
- "EncryptionKeyProviderType": "AwsKms",
- "AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
- }
- },
- "EnableInTransitEncryption": false,
- "EnableAtRestEncryption": false
- }
- }`, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "EMR cluster with in-transit encryption enabled",
- input: emr.EMR{
- SecurityConfiguration: []emr.SecurityConfiguration{
- {
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Configuration: trivyTypes.String(`{
- "EncryptionConfiguration": {
- "AtRestEncryptionConfiguration": {
- "S3EncryptionConfiguration": {
- "EncryptionMode": "SSE-S3"
- },
- "LocalDiskEncryptionConfiguration": {
- "EncryptionKeyProviderType": "AwsKms",
- "AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
- }
- },
- "EnableInTransitEncryption": true,
- "EnableAtRestEncryption": true
- }
- }`, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.EMR = test.input
- results := CheckEnableInTransitEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableInTransitEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/emr/enable_in_transit_encryption_test.rego b/checks/cloud/aws/emr/enable_in_transit_encryption_test.rego
new file mode 100644
index 00000000..e2ba637f
--- /dev/null
+++ b/checks/cloud/aws/emr/enable_in_transit_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.emr.aws0138_test
+
+import rego.v1
+
+import data.builtin.aws.emr.aws0138 as check
+import data.lib.test
+
+test_allow_with_encryption if {
+ inp := {"aws": {"emr": {"securityconfiguration": [{"configuration": {"value": json.marshal({"EncryptionConfiguration": {"EnableInTransitEncryption": true}})}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_encryption if {
+ inp := {"aws": {"emr": {"securityconfiguration": [{"configuration": {"value": json.marshal({"EncryptionConfiguration": {"EnableInTransitEncryption": false}})}}]}}}
+
+ test.assert_equal_message("EMR cluster does not have in-transit encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/emr/enable_local_disk_encryption.go b/checks/cloud/aws/emr/enable_local_disk_encryption.go
deleted file mode 100644
index 59c14a47..00000000
--- a/checks/cloud/aws/emr/enable_local_disk_encryption.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package emr
-
-import (
- "encoding/json"
-
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableLocalDiskEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0139",
- Provider: providers.AWSProvider,
- Service: "emr",
- ShortCode: "enable-local-disk-encryption",
- Summary: "Enable local-disk encryption for EMR clusters.",
- Impact: "Local-disk data in the EMR cluster could be compromised if accessed.",
- Resolution: "Enable local-disk encryption for EMR cluster",
- Explanation: `Data stored within an EMR instances should be encrypted to ensure sensitive data is kept private.`,
- Links: []string{
- "https://docs.aws.amazon.com/config/latest/developerguide/operational-best-practices-for-nist_800-171.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableLocalDiskEncryptionGoodExamples,
- BadExamples: terraformEnableLocalDiskEncryptionBadExamples,
- Links: terraformEnableLocalDiskEncryptionLinks,
- RemediationMarkdown: terraformEnableLocalDiskEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, conf := range s.AWS.EMR.SecurityConfiguration {
- vars, err := readVarsFromConfigurationLocalDisk(conf.Configuration.Value())
- if err != nil {
- continue
- }
-
- if vars.EncryptionConfiguration.AtRestEncryptionConfiguration.LocalDiskEncryptionConfiguration.EncryptionKeyProviderType == "" {
- results.Add(
- "EMR cluster does not have local-disk encryption enabled.",
- conf.Configuration,
- )
- } else {
- results.AddPassed(&conf)
- }
-
- }
- return
- },
-)
-
-func readVarsFromConfigurationLocalDisk(raw string) (*conf, error) {
- var testConf conf
- if err := json.Unmarshal([]byte(raw), &testConf); err != nil {
- return nil, err
- }
-
- return &testConf, nil
-}
diff --git a/checks/cloud/aws/emr/enable_local_disk_encryption.rego b/checks/cloud/aws/emr/enable_local_disk_encryption.rego
new file mode 100644
index 00000000..3b2e82c3
--- /dev/null
+++ b/checks/cloud/aws/emr/enable_local_disk_encryption.rego
@@ -0,0 +1,38 @@
+# METADATA
+# title: Enable local-disk encryption for EMR clusters.
+# description: |
+#   Data stored within EMR instances should be encrypted to ensure sensitive data is kept private.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/config/latest/developerguide/operational-best-practices-for-nist_800-171.html
+# custom:
+# id: AVD-AWS-0139
+# avd_id: AVD-AWS-0139
+# provider: aws
+# service: emr
+# severity: HIGH
+# short_code: enable-local-disk-encryption
+# recommended_action: Enable local-disk encryption for EMR cluster
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: emr
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/emr_security_configuration
+# good_examples: checks/cloud/aws/emr/enable_local_disk_encryption.tf.go
+# bad_examples: checks/cloud/aws/emr/enable_local_disk_encryption.tf.go
+package builtin.aws.emr.aws0139
+
+import rego.v1
+
+deny contains res if {
+ some sec_conf in input.aws.emr.securityconfiguration
+ vars := json.unmarshal(sec_conf.configuration.value)
+ vars.EncryptionConfiguration.AtRestEncryptionConfiguration.LocalDiskEncryptionConfiguration.EncryptionKeyProviderType == ""
+ res := result.new("EMR cluster does not have local-disk encryption enabled.", sec_conf.configuration)
+}
diff --git a/checks/cloud/aws/emr/enable_local_disk_encryption_test.go b/checks/cloud/aws/emr/enable_local_disk_encryption_test.go
deleted file mode 100644
index b5e03494..00000000
--- a/checks/cloud/aws/emr/enable_local_disk_encryption_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package emr
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/emr"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/state"
- "github.com/stretchr/testify/assert"
-)
-
-func TestEnableLocalDiskEncryption(t *testing.T) {
- tests := []struct {
- name string
- input emr.EMR
- expected bool
- }{
- {
- name: "EMR cluster with local-disk encryption disabled",
- input: emr.EMR{
- SecurityConfiguration: []emr.SecurityConfiguration{
- {
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Configuration: trivyTypes.String(`{
- "EncryptionConfiguration": {
- "AtRestEncryptionConfiguration": {
- "S3EncryptionConfiguration": {
- "EncryptionMode": "SSE-S3"
- },
- "LocalDiskEncryptionConfiguration": {
- "EncryptionKeyProviderType": "",
- "AwsKmsKey": ""
- }
- },
- "EnableInTransitEncryption": true,
- "EnableAtRestEncryption": true
- }
- }`, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "EMR cluster with local-disk encryption enabled",
- input: emr.EMR{
- SecurityConfiguration: []emr.SecurityConfiguration{
- {
- Name: trivyTypes.String("test", trivyTypes.NewTestMetadata()),
- Configuration: trivyTypes.String(`{
- "EncryptionConfiguration": {
- "AtRestEncryptionConfiguration": {
- "S3EncryptionConfiguration": {
- "EncryptionMode": "SSE-S3"
- },
- "LocalDiskEncryptionConfiguration": {
- "EncryptionKeyProviderType": "AwsKms",
- "AwsKmsKey": "arn:aws:kms:us-west-2:187416307283:alias/tf_emr_test_key"
- }
- },
- "EnableInTransitEncryption": true,
- "EnableAtRestEncryption": true
- }
- }`, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.EMR = test.input
- results := CheckEnableLocalDiskEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableLocalDiskEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/emr/enable_local_disk_encryption_test.rego b/checks/cloud/aws/emr/enable_local_disk_encryption_test.rego
new file mode 100644
index 00000000..9af8b3bb
--- /dev/null
+++ b/checks/cloud/aws/emr/enable_local_disk_encryption_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.emr.aws0139_test
+
+import rego.v1
+
+import data.builtin.aws.emr.aws0139 as check
+import data.lib.test
+
+test_allow_with_encryption if {
+ inp := {"aws": {"emr": {"securityconfiguration": [{"configuration": {"value": json.marshal({"EncryptionConfiguration": {"AtRestEncryptionConfiguration": {"LocalDiskEncryptionConfiguration": {"EncryptionKeyProviderType": "AwsKms"}}}})}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_encryption if {
+ inp := {"aws": {"emr": {"securityconfiguration": [{"configuration": {"value": json.marshal({"EncryptionConfiguration": {"AtRestEncryptionConfiguration": {"LocalDiskEncryptionConfiguration": {"EncryptionKeyProviderType": ""}}}})}}]}}}
+
+	test.assert_equal_message("EMR cluster does not have local-disk encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/disable_unused_credentials.rego b/checks/cloud/aws/iam/disable_unused_credentials.rego
new file mode 100644
index 00000000..23556fc5
--- /dev/null
+++ b/checks/cloud/aws/iam/disable_unused_credentials.rego
@@ -0,0 +1,47 @@
+# METADATA
+# title: Credentials which are no longer used should be disabled.
+# description: |
+# CIS recommends that you remove or deactivate all credentials that have been unused in 90 days or more. Disabling or removing unnecessary credentials reduces the window of opportunity for credentials associated with a compromised or abandoned account to be used.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://console.aws.amazon.com/iam/
+# custom:
+# id: AVD-AWS-0144
+# avd_id: AVD-AWS-0144
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: disable-unused-credentials
+# recommended_action: Disable credentials which are no longer used.
+# frameworks:
+# cis-aws-1.2:
+# - "1.3"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0144
+
+import rego.v1
+
+import data.lib.iam
+
+days_to_check = 90
+
+deny contains res if {
+ some user in input.aws.iam.users
+ iam.user_is_inactive(user, days_to_check)
+ res := result.new("User has not logged in for >90 days.", user)
+}
+
+deny contains res if {
+ some user in input.aws.iam.users
+ not iam.user_is_inactive(user, days_to_check)
+ some key in user.accesskeys
+ iam.key_is_unused(key, days_to_check)
+ res := result.new(sprintf("User access key %q has not been used in >90 days", [key.accesskeyid.value]), user)
+}
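Both deny rules above call into `data.lib.iam`, which is outside this diff. A minimal sketch of the inactivity helpers they use, assuming `lastaccess.value` is an RFC3339 timestamp (as the tests construct with `time.format(time.now_ns())`) and that a zero-time sentinel marks users who never logged in; the shipped library may differ:

```rego
# Illustrative sketch only; the real helpers live in data.lib.iam.
package lib.iam_sketch

import rego.v1

# 24 hours in nanoseconds.
ns_per_day := 86400000000000

# Assumed sentinel for accounts that never logged in (Go's zero time as RFC3339).
zero_time := "0001-01-01T00:00:00Z"

# A user is inactive when their last login is older than `days` days.
user_is_inactive(user, days) if {
	user.lastaccess.value != zero_time
	last := time.parse_rfc3339_ns(user.lastaccess.value)
	age := time.now_ns() - last
	limit := days * ns_per_day
	age > limit
}

# An access key counts as unused when it is active but was last used more
# than `days` days ago.
key_is_unused(key, days) if {
	key.active.value
	last := time.parse_rfc3339_ns(key.lastaccess.value)
	age := time.now_ns() - last
	limit := days * ns_per_day
	age > limit
}
```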
diff --git a/checks/cloud/aws/iam/disable_unused_credentials_45.go b/checks/cloud/aws/iam/disable_unused_credentials_45.go
index 4683b644..0d517e6f 100644
--- a/checks/cloud/aws/iam/disable_unused_credentials_45.go
+++ b/checks/cloud/aws/iam/disable_unused_credentials_45.go
@@ -26,11 +26,11 @@ var CheckUnusedCredentialsDisabled45Days = rules.Register(
},
Service: "iam",
ShortCode: "disable-unused-credentials-45-days",
- Summary: "AWS IAM users can access AWS resources using different types of credentials, such as\npasswords or access keys. It is recommended that all credentials that have been unused in\n45 or greater days be deactivated or removed.",
+ Summary: "Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.",
Impact: "Leaving unused credentials active widens the scope for compromise.",
Resolution: "Disable credentials which are no longer used.",
Explanation: `
-Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.
+AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused for 45 days or more be deactivated or removed.
`,
Links: []string{
"https://console.aws.amazon.com/iam/",
diff --git a/checks/cloud/aws/iam/disable_unused_credentials_45.rego b/checks/cloud/aws/iam/disable_unused_credentials_45.rego
new file mode 100644
index 00000000..dce27cb5
--- /dev/null
+++ b/checks/cloud/aws/iam/disable_unused_credentials_45.rego
@@ -0,0 +1,46 @@
+# METADATA
+# title: Disabling or removing unnecessary credentials will reduce the window of opportunity for credentials associated with a compromised or abandoned account to be used.
+# description: |
+#   AWS IAM users can access AWS resources using different types of credentials, such as passwords or access keys. It is recommended that all credentials that have been unused for 45 days or more be deactivated or removed.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://console.aws.amazon.com/iam/
+# custom:
+# id: AVD-AWS-0166
+# avd_id: AVD-AWS-0166
+# provider: aws
+# service: iam
+# severity: LOW
+# short_code: disable-unused-credentials-45-days
+# recommended_action: Disable credentials which are no longer used.
+# frameworks:
+# cis-aws-1.4:
+# - "1.12"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0166
+
+import data.lib.iam
+import rego.v1
+
+days_to_check = 45
+
+deny contains res if {
+ some user in input.aws.iam.users
+ iam.user_is_inactive(user, days_to_check)
+ res := result.new("User has not logged in for >45 days.", user)
+}
+
+deny contains res if {
+ some user in input.aws.iam.users
+ not iam.user_is_inactive(user, days_to_check)
+ some key in user.accesskeys
+ iam.key_is_unused(key, days_to_check)
+ res := result.new(sprintf("User access key %q has not been used in >45 days", [key.accesskeyid.value]), user)
+}
diff --git a/checks/cloud/aws/iam/disable_unused_credentials_45_test.rego b/checks/cloud/aws/iam/disable_unused_credentials_45_test.rego
new file mode 100644
index 00000000..b56c2483
--- /dev/null
+++ b/checks/cloud/aws/iam/disable_unused_credentials_45_test.rego
@@ -0,0 +1,66 @@
+package builtin.aws.iam.aws0166_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0166 as check
+import data.lib.datetime
+import data.lib.test
+
+test_allow_user_logged_in_today if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ })
+}
+
+test_allow_user_never_logged_in if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": datetime.zero_time_string},
+ })
+}
+
+test_disallow_user_logged_in_100_days_ago if {
+ test.assert_equal_message("User has not logged in for >45 days.", check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ })
+}
+
+test_disallow_user_access_key_not_used_100_days if {
+ test.assert_equal_message(`User access key "AKIACKCEVSQ6C2EXAMPLE" has not been used in >45 days`, check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "accesskeys": [{
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": true},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ }],
+ })
+}
+
+test_allow_nonactive_user_access_key_not_used_100_days if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "accesskeys": [{
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": false},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ }],
+ })
+}
+
+test_allow_user_access_key_used_today if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "accesskeys": [{
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": true},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ }],
+ })
+}
+
+build_input(body) = {"aws": {"iam": {"users": [body]}}}
diff --git a/checks/cloud/aws/iam/disable_unused_credentials_test.rego b/checks/cloud/aws/iam/disable_unused_credentials_test.rego
new file mode 100644
index 00000000..18e55a84
--- /dev/null
+++ b/checks/cloud/aws/iam/disable_unused_credentials_test.rego
@@ -0,0 +1,85 @@
+package builtin.aws.iam.aws0144_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0144 as check
+import data.lib.datetime
+import data.lib.test
+
+test_allow_user_logged_in_today if {
+ test.assert_empty(check.deny) with input as build_input({
+		"name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ })
+}
+
+test_allow_user_never_logged_in if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": datetime.zero_time_string},
+ })
+}
+
+test_disallow_user_logged_in_100_days_ago if {
+ test.assert_equal_message("User has not logged in for >90 days.", check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ })
+}
+
+test_disallow_user_access_key_not_used_100_days if {
+ test.assert_equal_message(`User access key "AKIACKCEVSQ6C2EXAMPLE" has not been used in >90 days`, check.deny) with input as build_input({
+ "name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "accesskeys": [{
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": true},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ }],
+ })
+}
+
+test_allow_nonactive_user_access_key_not_used_100_days if {
+ test.assert_empty(check.deny) with input as build_input({
+		"name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "accesskeys": [{
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": false},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ }],
+ })
+}
+
+test_allow_user_access_key_used_today if {
+ test.assert_empty(check.deny) with input as build_input({
+		"name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "accesskeys": [{
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": true},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ }],
+ })
+}
+
+test_disallow_one_of_the_user_access_key_used_100_days if {
+ test.assert_equal_message(`User access key "AKIACKCEVSQ6C2EXAMPLE" has not been used in >90 days`, check.deny) with input as build_input({
+		"name": {"value": "test"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "accesskeys": [
+ {
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": true},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ },
+ {
+ "accesskeyid": {"value": "AKIACKCEVSQ6C2EXAMPLE"},
+ "active": {"value": true},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ },
+ ],
+ })
+}
+
+build_input(body) = {"aws": {"iam": {"users": [body]}}}
diff --git a/checks/cloud/aws/iam/enforce_group_mfa.rego b/checks/cloud/aws/iam/enforce_group_mfa.rego
new file mode 100644
index 00000000..8d730955
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_group_mfa.rego
@@ -0,0 +1,46 @@
+# METADATA
+# title: IAM groups should have MFA enforcement activated.
+# description: |
+#   IAM groups should be protected with multi-factor authentication to add safeguards against password compromise.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0123
+# avd_id: AVD-AWS-0123
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: enforce-group-mfa
+# recommended_action: Use terraform-module/enforce-mfa/aws to ensure that MFA is enforced
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/modules/terraform-module/enforce-mfa/aws/latest
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# good_examples: checks/cloud/aws/iam/enforce_group_mfa.tf.go
+# bad_examples: checks/cloud/aws/iam/enforce_group_mfa.tf.go
+package builtin.aws.iam.aws0123
+
+import rego.v1
+
+deny contains res if {
+ some group in input.aws.iam.groups
+ not is_group_mfa_enforced(group)
+ res := result.new("Multi-Factor authentication is not enforced for group", group)
+}
+
+is_group_mfa_enforced(group) if {
+ some policy in group.policies
+ value := json.unmarshal(policy.document.value)
+ some condition in value.Statement[_].Condition
+ some key, _ in condition
+ key == "aws:MultiFactorAuthPresent"
+}
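`is_group_mfa_enforced` unmarshals each attached policy document and scans every statement's Condition block for the `aws:MultiFactorAuthPresent` key. A sketch of a parsed document it would accept, with the fields other than Condition assumed for illustration (the unit test below only populates the Condition block):

```rego
# Illustrative module only.
package enforce_group_mfa_example

import rego.v1

# Parsed form of a policy document the check would accept. On the group it is
# stored as a JSON string under policies[_].document.value.
example_document := {
	"Version": "2012-10-17",
	"Statement": [{
		"Effect": "Allow",
		"Action": "*",
		"Resource": "*",
		"Condition": {"Bool": {"aws:MultiFactorAuthPresent": "true"}}
	}]
}

# Mirrors the walk performed by is_group_mfa_enforced.
mfa_condition_present if {
	some statement in example_document.Statement
	some condition in statement.Condition
	some key, _ in condition
	key == "aws:MultiFactorAuthPresent"
}
```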
diff --git a/checks/cloud/aws/iam/enforce_group_mfa_test.rego b/checks/cloud/aws/iam/enforce_group_mfa_test.rego
new file mode 100644
index 00000000..1e905463
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_group_mfa_test.rego
@@ -0,0 +1,19 @@
+package builtin.aws.iam.aws0123_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0123 as check
+import data.lib.test
+
+test_allow_group_with_mfa if {
+ test.assert_empty(check.deny) with input as build_condition({
+ "StringLike": {"kms:ViaService": "timestream.*.amazonaws.com"},
+ "Bool": {"aws:MultiFactorAuthPresent": "true"},
+ })
+}
+
+test_disallow_group_without_mfa if {
+ test.assert_equal_message("Multi-Factor authentication is not enforced for group", check.deny) with input as build_condition({})
+}
+
+build_condition(body) = {"aws": {"iam": {"groups": [{"policies": [{"document": {"value": json.marshal({"Statement": [{"Condition": body}]})}}]}]}}}
diff --git a/checks/cloud/aws/iam/enforce_root_hardware_mfa.rego b/checks/cloud/aws/iam/enforce_root_hardware_mfa.rego
new file mode 100644
index 00000000..03ec1643
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_root_hardware_mfa.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: The "root" account has unrestricted access to all resources in the AWS account. It is highly recommended that this account have hardware MFA enabled.
+# description: |
+# Hardware MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they're prompted for their user name and password and for an authentication code from their AWS MFA device.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html
+# custom:
+# id: AVD-AWS-0165
+# avd_id: AVD-AWS-0165
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: enforce-root-hardware-mfa
+# recommended_action: Enable hardware MFA on the root user account.
+# frameworks:
+# cis-aws-1.4:
+# - "1.6"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0165
+
+import rego.v1
+
+deny contains res if {
+ some user in input.aws.iam.users
+ user.name.value == "root"
+ not is_user_have_hardware_mfa(user)
+ res := result.new("Root user does not have a hardware MFA device", user)
+}
+
+is_user_have_hardware_mfa(user) if {
+ some device in user.mfadevices
+ device.isvirtual.value == false
+}
diff --git a/checks/cloud/aws/iam/enforce_root_hardware_mfa_test.rego b/checks/cloud/aws/iam/enforce_root_hardware_mfa_test.rego
new file mode 100644
index 00000000..111256f0
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_root_hardware_mfa_test.rego
@@ -0,0 +1,44 @@
+package builtin.aws.iam.aws0165_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0165 as check
+import data.lib.test
+
+test_disallow_root_user_without_mfa if {
+ test.assert_equal_message("Root user does not have a hardware MFA device", check.deny) with input as build_input({"name": {"value": "root"}})
+}
+
+test_disallow_root_user_with_virtual_mfa if {
+ test.assert_equal_message("Root user does not have a hardware MFA device", check.deny) with input as build_input({
+ "name": {"value": "root"},
+ "mfadevices": [{"isvirtual": {"value": true}}],
+ })
+}
+
+test_allow_non_root_user_without_mfa if {
+ test.assert_empty(check.deny) with input as build_input({"name": {"value": "other"}})
+}
+
+test_allow_root_user_with_hardware_mfa if {
+ test.assert_empty(check.deny) with input as build_input({
+		"name": {"value": "root"},
+ "mfadevices": [{"isvirtual": {"value": false}}],
+ })
+}
+
+test_allow_root_user_with_different_mfa if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "root"},
+ "mfadevices": [
+ {"isvirtual": {"value": true}},
+ {"isvirtual": {"value": false}},
+ ],
+ })
+}
+
+test_allow_without_user if {
+ test.assert_empty(check.deny) with input as build_input({})
+}
+
+build_input(body) = {"aws": {"iam": {"users": [body]}}}
diff --git a/checks/cloud/aws/iam/enforce_root_mfa.rego b/checks/cloud/aws/iam/enforce_root_mfa.rego
new file mode 100644
index 00000000..7013421d
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_root_mfa.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: The "root" account has unrestricted access to all resources in the AWS account. It is highly recommended that this account have MFA enabled.
+# description: |
+# MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they're prompted for their user name and password and for an authentication code from their AWS MFA device.
+# When you use virtual MFA for the root user, CIS recommends that the device used is not a personal device. Instead, use a dedicated mobile device (tablet or phone) that you manage to keep charged and secured independent of any individual personal devices. This lessens the risks of losing access to the MFA due to device loss, device trade-in, or if the individual owning the device is no longer employed at the company.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-cis-controls.html#securityhub-cis-controls-1.14
+# custom:
+# id: AVD-AWS-0142
+# avd_id: AVD-AWS-0142
+# provider: aws
+# service: iam
+# severity: CRITICAL
+# short_code: enforce-root-mfa
+# recommended_action: Enable MFA on the root user account.
+# frameworks:
+# cis-aws-1.2:
+# - "1.13"
+# cis-aws-1.4:
+# - "1.5"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0142
+
+import rego.v1
+
+import data.lib.iam
+
+deny contains res if {
+ some user in input.aws.iam.users
+ iam.is_root_user(user)
+ not iam.user_has_mfa_devices(user)
+ res := result.new("Root user does not have an MFA device", user)
+}
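This check delegates the root-user and MFA-device predicates to `data.lib.iam`, which is not part of this diff. A minimal sketch of those two helpers, modelled on the open-coded logic in enforce_root_hardware_mfa.rego above; the shipped library may differ:

```rego
# Illustrative sketch only; the real helpers live in data.lib.iam.
package lib.iam_mfa_sketch

import rego.v1

# Mirrors user.name.value == "root" from enforce_root_hardware_mfa.rego.
is_root_user(user) if user.name.value == "root"

# A user has MFA when at least one device (hardware or virtual) is attached.
user_has_mfa_devices(user) if count(user.mfadevices) > 0
```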
diff --git a/checks/cloud/aws/iam/enforce_root_mfa_test.rego b/checks/cloud/aws/iam/enforce_root_mfa_test.rego
new file mode 100644
index 00000000..07dd6bda
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_root_mfa_test.rego
@@ -0,0 +1,26 @@
+package builtin.aws.iam.aws0142_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0142 as check
+import data.lib.test
+
+test_disallow_root_user_without_mfa if {
+ test.assert_equal_message("Root user does not have an MFA device", check.deny) with input as build_input({"name": {"value": "root"}})
+}
+
+test_allow_non_root_user_without_mfa if {
+ test.assert_empty(check.deny) with input as build_input({"name": {"value": "other"}})
+}
+
+test_allow_root_user_with_mfa if {
+ test.assert_empty(check.deny) with input as build_input({
+		"name": {"value": "root"},
+ "mfadevices": [
+ {"isvirtual": {"value": false}},
+ {"isvirtual": {"value": true}},
+ ],
+ })
+}
+
+build_input(body) = {"aws": {"iam": {"users": [body]}}}
diff --git a/checks/cloud/aws/iam/enforce_user_mfa.rego b/checks/cloud/aws/iam/enforce_user_mfa.rego
new file mode 100644
index 00000000..9560a557
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_user_mfa.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: IAM Users should have MFA enforcement activated.
+# description: |
+#   IAM user accounts should be protected with multi-factor authentication to add safeguards against password compromise.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://console.aws.amazon.com/iam/
+# custom:
+# id: AVD-AWS-0145
+# avd_id: AVD-AWS-0145
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: enforce-user-mfa
+# recommended_action: Enable MFA for the user account
+# frameworks:
+# cis-aws-1.2:
+# - "1.2"
+# cis-aws-1.4:
+# - "1.4"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0145
+
+import rego.v1
+
+import data.lib.iam
+
+deny contains res if {
+ some user in input.aws.iam.users
+ not iam.user_has_mfa_devices(user)
+ iam.is_user_logged_in(user)
+ res := result.new("User account does not have MFA", user)
+}
diff --git a/checks/cloud/aws/iam/enforce_user_mfa_test.rego b/checks/cloud/aws/iam/enforce_user_mfa_test.rego
new file mode 100644
index 00000000..7db90abe
--- /dev/null
+++ b/checks/cloud/aws/iam/enforce_user_mfa_test.rego
@@ -0,0 +1,31 @@
+package builtin.aws.iam.aws0145_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0145 as check
+import data.lib.datetime
+import data.lib.test
+
+test_disallow_user_logged_in_without_mfa if {
+ test.assert_equal_message("User account does not have MFA", check.deny) with input as build_input({
+ "name": {"value": "other"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ })
+}
+
+test_allow_user_never_logged_in_with_mfa if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "other"},
+ "lastaccess": {"value": datetime.zero_time_string},
+ })
+}
+
+test_allow_user_logged_in_with_mfa if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "other"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ "mfadevices": [{"isvirtual": {"value": false}}],
+ })
+}
+
+build_input(body) = {"aws": {"iam": {"users": [body]}}}
diff --git a/checks/cloud/aws/iam/limit_root_account_usage.rego b/checks/cloud/aws/iam/limit_root_account_usage.rego
new file mode 100644
index 00000000..a5776f1b
--- /dev/null
+++ b/checks/cloud/aws/iam/limit_root_account_usage.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: The "root" account has unrestricted access to all resources in the AWS account. It is highly recommended that the use of this account be avoided.
+# description: |
+# The root user has unrestricted access to all services and resources in an AWS account. We highly recommend that you avoid using the root user for daily tasks. Minimizing the use of the root user and adopting the principle of least privilege for access management reduce the risk of accidental changes and unintended disclosure of highly privileged credentials.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html
+# custom:
+# id: AVD-AWS-0140
+# avd_id: AVD-AWS-0140
+# provider: aws
+# service: iam
+# severity: LOW
+# short_code: limit-root-account-usage
+# recommended_action: Use lower privileged accounts instead, so only required privileges are available.
+# frameworks:
+# cis-aws-1.2:
+# - "1.1"
+# cis-aws-1.4:
+# - "1.7"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0140
+
+import data.lib.datetime
+import data.lib.iam
+import rego.v1
+
+deny contains res if {
+ some user in input.aws.iam.users
+ iam.is_root_user(user)
+ datetime.time_diff_lt_days(user.lastaccess.value, 1)
+ res := result.new("The root user logged in within the last 24 hours", user)
+}
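`data.lib.datetime` is also outside this diff. A minimal sketch of the helpers used by this check and by the tests in this change (`time_diff_lt_days`, `days_to_ns`), assuming RFC3339 timestamps; the shipped library may differ and presumably special-cases the zero-time sentinel used for users who never logged in:

```rego
# Illustrative sketch only; the real helpers live in data.lib.datetime.
package lib.datetime_sketch

import rego.v1

# 24 hours in nanoseconds.
nanoseconds_per_day := 86400000000000

days_to_ns(days) := ns if {
	ns := days * nanoseconds_per_day
}

# True when the RFC3339 timestamp lies less than `days` days in the past.
time_diff_lt_days(timestamp, days) if {
	parsed := time.parse_rfc3339_ns(timestamp)
	diff := time.now_ns() - parsed
	diff < days_to_ns(days)
}
```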
diff --git a/checks/cloud/aws/iam/limit_root_account_usage_test.rego b/checks/cloud/aws/iam/limit_root_account_usage_test.rego
new file mode 100644
index 00000000..120d3d89
--- /dev/null
+++ b/checks/cloud/aws/iam/limit_root_account_usage_test.rego
@@ -0,0 +1,37 @@
+package builtin.aws.iam.aws0140_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0140 as check
+import data.lib.datetime
+import data.lib.test
+
+test_allow_root_user_never_logged_in if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "root"},
+ "lastaccess": {"value": datetime.zero_time_string},
+ })
+}
+
+test_allow_root_user_logged_in_over_24_hours if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "root"},
+ "lastaccess": {"value": time.format(time.now_ns() - datetime.days_to_ns(7))},
+ })
+}
+
+test_disallow_root_user_logged_in_within_24_hours if {
+ test.assert_equal_message("The root user logged in within the last 24 hours", check.deny) with input as build_input({
+ "name": {"value": "root"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ })
+}
+
+test_allow_nonroot_user_logged_in_within_24_hours if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "other"},
+ "lastaccess": {"value": time.format(time.now_ns())},
+ })
+}
+
+build_input(body) = {"aws": {"iam": {"users": [body]}}}
diff --git a/checks/cloud/aws/iam/limit_user_access_keys.rego b/checks/cloud/aws/iam/limit_user_access_keys.rego
new file mode 100644
index 00000000..152d2eb9
--- /dev/null
+++ b/checks/cloud/aws/iam/limit_user_access_keys.rego
@@ -0,0 +1,35 @@
+# METADATA
+# title: No user should have more than one active access key.
+# description: |
+#   Multiple active access keys widen the scope for compromise.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://console.aws.amazon.com/iam/
+# custom:
+# id: AVD-AWS-0167
+# avd_id: AVD-AWS-0167
+# provider: aws
+# service: iam
+# severity: LOW
+# short_code: limit-user-access-keys
+# recommended_action: Limit the number of active access keys to one key per user.
+# frameworks:
+# cis-aws-1.4:
+# - "1.13"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0167
+
+import rego.v1
+
+deny contains res if {
+ some user in input.aws.iam.users
+ count([key | some key in user.accesskeys; key.active.value]) > 1
+ res := result.new("User has more than one active access key", user)
+}
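The comprehension above counts only the keys whose active.value is true. For readers less familiar with Rego comprehensions, an equivalent formulation using a helper function would look like the following (illustrative only, same behaviour as the rule above):

    active_keys(user) := [key | some key in user.accesskeys; key.active.value]

    deny contains res if {
        some user in input.aws.iam.users
        count(active_keys(user)) > 1
        res := result.new("User has more than one active access key", user)
    }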
diff --git a/checks/cloud/aws/iam/limit_user_access_keys_test.rego b/checks/cloud/aws/iam/limit_user_access_keys_test.rego
new file mode 100644
index 00000000..bead151c
--- /dev/null
+++ b/checks/cloud/aws/iam/limit_user_access_keys_test.rego
@@ -0,0 +1,29 @@
+package builtin.aws.iam.aws0167._test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0167 as check
+import data.lib.test
+
+test_allow_one_key_is_active if {
+ test.assert_empty(check.deny) with input as build_input([{"active": {"value": true}}])
+}
+
+test_allow_two_keys_but_one_non_active if {
+ test.assert_empty(check.deny) with input as build_input([
+ {"active": {"value": false}},
+ {"active": {"value": true}},
+ ])
+}
+
+test_disallow_two_active_keys if {
+ test.assert_equal_message("User has more than one active access key", check.deny) with input as build_input([
+ {"active": {"value": true}},
+ {"active": {"value": true}},
+ ])
+}
+
+build_input(keys) := {"aws": {"iam": {"users": [{
+ "name": {"value": "test"},
+ "accesskeys": keys,
+}]}}}
diff --git a/checks/cloud/aws/iam/no_password_reuse.rego b/checks/cloud/aws/iam/no_password_reuse.rego
new file mode 100644
index 00000000..e0080ce5
--- /dev/null
+++ b/checks/cloud/aws/iam/no_password_reuse.rego
@@ -0,0 +1,43 @@
+# METADATA
+# title: IAM Password policy should prevent password reuse.
+# description: |
+# IAM account password policies should prevent the reuse of passwords.
+# The account password policy should be set to prevent using any of the last five used passwords.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0056
+# avd_id: AVD-AWS-0056
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: no-password-reuse
+# recommended_action: Prevent password reuse in the policy
+# frameworks:
+# cis-aws-1.2:
+# - "1.10"
+# cis-aws-1.4:
+# - "1.9"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_account_password_policy
+# good_examples: checks/cloud/aws/iam/no_password_reuse.tf.go
+# bad_examples: checks/cloud/aws/iam/no_password_reuse.tf.go
+package builtin.aws.iam.aws0056
+
+import rego.v1
+
+deny contains res if {
+ policy := input.aws.iam.passwordpolicy
+ policy.reusepreventioncount.value < 5
+ res := result.new("Password policy allows reuse of recent passwords.", policy)
+}
diff --git a/checks/cloud/aws/iam/no_password_reuse_test.rego b/checks/cloud/aws/iam/no_password_reuse_test.rego
new file mode 100644
index 00000000..3f1c90ca
--- /dev/null
+++ b/checks/cloud/aws/iam/no_password_reuse_test.rego
@@ -0,0 +1,15 @@
+package builtin.aws.iam.aws0056._test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0056 as check
+import data.lib.test
+
+test_disallow_policy_with_less_than_5_password_reuse if {
+	inp := {"aws": {"iam": {"passwordpolicy": {"reusepreventioncount": {"value": 1}}}}}
+ test.assert_equal_message("Password policy allows reuse of recent passwords.", check.deny) with input as inp
+}
+
+test_allow_policy_with_5_password_reuse if {
+ test.assert_empty(check.deny) with input as {"aws": {"iam": {"passwordpolicy": {"reusepreventioncount": {"value": 5}}}}}
+}
diff --git a/checks/cloud/aws/iam/no_root_access_keys.rego b/checks/cloud/aws/iam/no_root_access_keys.rego
new file mode 100644
index 00000000..3b607fc2
--- /dev/null
+++ b/checks/cloud/aws/iam/no_root_access_keys.rego
@@ -0,0 +1,47 @@
+# METADATA
+# title: The root user has complete access to all services and resources in an AWS account. AWS Access Keys provide programmatic access to a given account.
+# description: |
+#   CIS recommends that all access keys associated with the root user be removed. Removing access keys associated with the root user limits the vectors by which the account can be compromised. Removing the root user access keys also encourages the creation and use of role-based accounts that are least privileged.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html
+# custom:
+# id: AVD-AWS-0141
+# avd_id: AVD-AWS-0141
+# provider: aws
+# service: iam
+# severity: CRITICAL
+# short_code: no-root-access-keys
+# recommended_action: Use lower privileged accounts instead, so only required privileges are available.
+# frameworks:
+# cis-aws-1.2:
+# - "1.12"
+# cis-aws-1.4:
+# - "1.4"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_access_key
+# good_examples: checks/cloud/aws/iam/no_root_access_keys.tf.go
+# bad_examples: checks/cloud/aws/iam/no_root_access_keys.tf.go
+package builtin.aws.iam.aws0141
+
+import data.lib.iam
+import rego.v1
+
+deny contains res if {
+ some user in input.aws.iam.users
+ iam.is_root_user(user)
+
+ some key in user.accesskeys
+ key.active.value
+
+ res := result.new("Access key exists for root user", key)
+}
diff --git a/checks/cloud/aws/iam/no_root_access_keys_test.rego b/checks/cloud/aws/iam/no_root_access_keys_test.rego
new file mode 100644
index 00000000..c4b3175d
--- /dev/null
+++ b/checks/cloud/aws/iam/no_root_access_keys_test.rego
@@ -0,0 +1,40 @@
+package builtin.aws.iam.aws0141._test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0141 as check
+import data.lib.test
+
+test_allow_root_user_without_access_keys if {
+ test.assert_empty(check.deny) with input as build_input({"name": {"value": "root"}})
+}
+
+test_allow_non_root_user_without_access_keys if {
+ test.assert_empty(check.deny) with input as build_input({"name": {"value": "user"}})
+}
+
+test_allow_non_root_user_with_access_keys if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "user"},
+ "accesskeys": [{"active": {"value": true}}],
+ })
+}
+
+test_allow_root_user_with_inactive_access_keys if {
+ test.assert_empty(check.deny) with input as build_input({
+ "name": {"value": "root"},
+ "accesskeys": [{"active": {"value": false}}],
+ })
+}
+
+test_disallow_root_user_with_active_access_keys if {
+ test.assert_equal_message("Access key exists for root user", check.deny) with input as build_input({
+ "name": {"value": "root"},
+ "accesskeys": [
+ {"active": {"value": false}},
+ {"active": {"value": true}},
+ ],
+ })
+}
+
+build_input(body) := {"aws": {"iam": {"users": [body]}}}
diff --git a/checks/cloud/aws/iam/no_user_attached_policies.rego b/checks/cloud/aws/iam/no_user_attached_policies.rego
new file mode 100644
index 00000000..f690828c
--- /dev/null
+++ b/checks/cloud/aws/iam/no_user_attached_policies.rego
@@ -0,0 +1,43 @@
+# METADATA
+# title: IAM policies should not be granted directly to users.
+# description: |
+#   CIS recommends that you apply IAM policies directly to groups and roles but not users. Assigning privileges at the group or role level reduces the complexity of access management as the number of users grows. Reducing access management complexity might in turn reduce the opportunity for a principal to inadvertently receive or retain excessive privileges.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://console.aws.amazon.com/iam/
+# custom:
+# id: AVD-AWS-0143
+# avd_id: AVD-AWS-0143
+# provider: aws
+# service: iam
+# severity: LOW
+# short_code: no-user-attached-policies
+# recommended_action: Grant policies at the group level instead.
+# frameworks:
+# cis-aws-1.2:
+# - "1.16"
+# cis-aws-1.4:
+# - "1.15"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_user
+# good_examples: checks/cloud/aws/iam/no_user_attached_policies.tf.go
+# bad_examples: checks/cloud/aws/iam/no_user_attached_policies.tf.go
+package builtin.aws.iam.aws0143
+
+import rego.v1
+
+deny contains res if {
+ some user in input.aws.iam.users
+ count(user.policies) > 0
+
+ res := result.new("One or more policies are attached directly to a user", user)
+}
diff --git a/checks/cloud/aws/iam/no_user_attached_policies_test.rego b/checks/cloud/aws/iam/no_user_attached_policies_test.rego
new file mode 100644
index 00000000..026e0587
--- /dev/null
+++ b/checks/cloud/aws/iam/no_user_attached_policies_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.iam.aws0143._test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0143 as check
+import data.lib.test
+
+test_allow_user_without_attached_policies if {
+ inp := {"aws": {"iam": {"users": [{"policies": []}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_user_with_attached_policies if {
+ inp := {"aws": {"iam": {"users": [{"policies": [{"name": {"value": "policy_name"}}]}]}}}
+
+ test.assert_equal_message("One or more policies are attached directly to a user", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/remove_expired_certificates.rego b/checks/cloud/aws/iam/remove_expired_certificates.rego
new file mode 100644
index 00000000..dac946be
--- /dev/null
+++ b/checks/cloud/aws/iam/remove_expired_certificates.rego
@@ -0,0 +1,39 @@
+# METADATA
+# title: Delete expired TLS certificates
+# description: |
+# Removing expired SSL/TLS certificates eliminates the risk that an invalid certificate will be
+# deployed accidentally to a resource such as AWS Elastic Load Balancer (ELB), which can
+# damage the credibility of the application/website behind the ELB. As a best practice, it is
+# recommended to delete expired certificates.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://console.aws.amazon.com/iam/
+# custom:
+# id: AVD-AWS-0168
+# avd_id: AVD-AWS-0168
+# provider: aws
+# service: iam
+# severity: LOW
+# short_code: remove-expired-certificates
+# recommended_action: Remove expired certificates
+# frameworks:
+# cis-aws-1.4:
+# - "1.19"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0168
+
+import rego.v1
+
+deny contains res if {
+ some certificate in input.aws.iam.servercertificates
+ time.parse_rfc3339_ns(certificate.expiration.value) < time.now_ns()
+
+ res := result.new("Certificate has expired", certificate)
+}
diff --git a/checks/cloud/aws/iam/remove_expired_certificates_test.rego b/checks/cloud/aws/iam/remove_expired_certificates_test.rego
new file mode 100644
index 00000000..71a42e84
--- /dev/null
+++ b/checks/cloud/aws/iam/remove_expired_certificates_test.rego
@@ -0,0 +1,19 @@
+package builtin.aws.iam.aws0168._test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0168 as check
+import data.lib.datetime
+import data.lib.test
+
+test_disallow_expired_certificate if {
+ inp := {"aws": {"iam": {"servercertificates": [{"expiration": {"value": time.format(time.now_ns() - datetime.days_to_ns(10))}}]}}}
+
+ test.assert_equal_message("Certificate has expired", check.deny) with input as inp
+}
+
+test_allow_non_expired_certificate if {
+ inp := {"aws": {"iam": {"servercertificates": [{"expiration": {"value": time.format(time.now_ns() + datetime.days_to_ns(10))}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/require_lowercase_in_passwords.rego b/checks/cloud/aws/iam/require_lowercase_in_passwords.rego
new file mode 100644
index 00000000..eddbe4ba
--- /dev/null
+++ b/checks/cloud/aws/iam/require_lowercase_in_passwords.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: IAM Password policy should have requirement for at least one lowercase character.
+# description: |
+#   IAM account password policies should ensure that password content includes at least one lowercase character.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0058
+# avd_id: AVD-AWS-0058
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: require-lowercase-in-passwords
+# recommended_action: Enforce longer, more complex passwords in the policy
+# frameworks:
+# cis-aws-1.2:
+# - "1.6"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_account_password_policy
+# good_examples: checks/cloud/aws/iam/require_lowercase_in_passwords.tf.go
+# bad_examples: checks/cloud/aws/iam/require_lowercase_in_passwords.tf.go
+package builtin.aws.iam.aws0058
+
+import rego.v1
+
+deny contains res if {
+ policy := input.aws.iam.passwordpolicy
+ not policy.requirelowercase.value
+
+ res := result.new("Password policy does not require lowercase characters", policy.requirelowercase)
+}
diff --git a/checks/cloud/aws/iam/require_lowercase_in_passwords_test.rego b/checks/cloud/aws/iam/require_lowercase_in_passwords_test.rego
new file mode 100644
index 00000000..aa2f9bd7
--- /dev/null
+++ b/checks/cloud/aws/iam/require_lowercase_in_passwords_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.iam.aws0058._test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0058 as check
+import data.lib.test
+
+test_allow_policy_require_lowercase_in_passwords if {
+ inp := {"aws": {"iam": {"passwordpolicy": {"requirelowercase": {"value": true}}}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_policy_no_require_lowercase_in_passwords if {
+ inp := {"aws": {"iam": {"passwordpolicy": {"requirelowercase": {"value": false}}}}}
+
+ test.assert_equal_message("Password policy does not require lowercase characters", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/require_numbers_in_passwords.rego b/checks/cloud/aws/iam/require_numbers_in_passwords.rego
new file mode 100644
index 00000000..57d17b9b
--- /dev/null
+++ b/checks/cloud/aws/iam/require_numbers_in_passwords.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: IAM Password policy should have requirement for at least one number in the password.
+# description: |
+#   IAM account password policies should ensure that password content includes at least one number.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0059
+# avd_id: AVD-AWS-0059
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: require-numbers-in-passwords
+# recommended_action: Enforce longer, more complex passwords in the policy
+# frameworks:
+# cis-aws-1.2:
+# - "1.8"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_account_password_policy
+# good_examples: checks/cloud/aws/iam/require_numbers_in_passwords.tf.go
+# bad_examples: checks/cloud/aws/iam/require_numbers_in_passwords.tf.go
+package builtin.aws.iam.aws0059
+
+import rego.v1
+
+deny contains res if {
+ policy := input.aws.iam.passwordpolicy
+ not policy.requirenumbers.value
+
+ res := result.new("Password policy does not require numbers.", policy.requirenumbers)
+}
diff --git a/checks/cloud/aws/iam/require_numbers_in_passwords_test.rego b/checks/cloud/aws/iam/require_numbers_in_passwords_test.rego
new file mode 100644
index 00000000..1127d05b
--- /dev/null
+++ b/checks/cloud/aws/iam/require_numbers_in_passwords_test.rego
@@ -0,0 +1,15 @@
+package builtin.aws.iam.aws0059._test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0059 as check
+import data.lib.test
+
+test_allow_policy_require_numbers_in_passwords if {
+ test.assert_empty(check.deny) with input.aws.iam.passwordpolicy.requirenumbers.value as true
+}
+
+test_disallow_policy_no_require_numbers_in_passwords if {
+ inp := {"aws": {"iam": {"passwordpolicy": {"requirenumbers": {"value": false}}}}}
+ test.assert_equal_message("Password policy does not require numbers.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/require_support_role.rego b/checks/cloud/aws/iam/require_support_role.rego
new file mode 100644
index 00000000..9cc8c444
--- /dev/null
+++ b/checks/cloud/aws/iam/require_support_role.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: Missing IAM Role to allow authorized users to manage incidents with AWS Support.
+# description: |
+# By implementing least privilege for access control, an IAM Role will require an appropriate
+# IAM Policy to allow Support Center Access in order to manage Incidents with AWS Support.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://console.aws.amazon.com/iam/
+# custom:
+# id: AVD-AWS-0169
+# avd_id: AVD-AWS-0169
+# provider: aws
+# service: iam
+# severity: LOW
+# short_code: require-support-role
+# recommended_action: Create an IAM role with the necessary permissions to manage incidents with AWS Support.
+# frameworks:
+# cis-aws-1.4:
+# - "1.17"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0169
+
+import rego.v1
+
+deny contains res if {
+ some role in input.aws.iam.roles
+ not has_iam_support_role(role)
+ res := result.new("Missing IAM support role.", role)
+}
+
+has_iam_support_role(role) if {
+ some policy in role.policies
+ policy.builtin.value
+ policy.name.value == "AWSSupportAccess"
+}
diff --git a/checks/cloud/aws/iam/require_support_role_test.rego b/checks/cloud/aws/iam/require_support_role_test.rego
new file mode 100644
index 00000000..c0c1281e
--- /dev/null
+++ b/checks/cloud/aws/iam/require_support_role_test.rego
@@ -0,0 +1,39 @@
+package builtin.aws.iam.aws0169_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0169 as check
+import data.lib.test
+
+test_disallow_no_support_role if {
+ inp := {"aws": {"iam": {"roles": [{"policies": [{
+ "name": {"value": "roleName"},
+ "builtin": {"value": true},
+ }]}]}}}
+
+ test.assert_equal_message("Missing IAM support role.", check.deny) with input as inp
+}
+
+test_disallow_non_built_in_support_role if {
+ inp := {"aws": {"iam": {"roles": [{"policies": [{
+ "name": {"value": "AWSSupportAccess"},
+ "builtin": {"value": false},
+ }]}]}}}
+
+ test.assert_equal_message("Missing IAM support role.", check.deny) with input as inp
+}
+
+test_allow_has_support_role if {
+ inp := {"aws": {"iam": {"roles": [{"policies": [
+ {
+ "name": {"value": "AWSSupplyChainFederationAdminAccess"},
+ "builtin": {"value": true},
+ },
+ {
+ "name": {"value": "AWSSupportAccess"},
+ "builtin": {"value": true},
+ },
+ ]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/require_symbols_in_passwords.rego b/checks/cloud/aws/iam/require_symbols_in_passwords.rego
new file mode 100644
index 00000000..e1f75218
--- /dev/null
+++ b/checks/cloud/aws/iam/require_symbols_in_passwords.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: IAM Password policy should have requirement for at least one symbol in the password.
+# description: |
+#   IAM account password policies should ensure that password content includes at least one symbol.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0060
+# avd_id: AVD-AWS-0060
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: require-symbols-in-passwords
+# recommended_action: Enforce longer, more complex passwords in the policy
+# frameworks:
+# cis-aws-1.2:
+# - "1.7"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_account_password_policy
+# good_examples: checks/cloud/aws/iam/require_symbols_in_passwords.tf.go
+# bad_examples: checks/cloud/aws/iam/require_symbols_in_passwords.tf.go
+package builtin.aws.iam.aws0060
+
+import rego.v1
+
+deny contains res if {
+ policy := input.aws.iam.passwordpolicy
+ not policy.requiresymbols.value
+
+ res := result.new("Password policy does not require symbols.", policy.requiresymbols)
+}
diff --git a/checks/cloud/aws/iam/require_symbols_in_passwords_test.rego b/checks/cloud/aws/iam/require_symbols_in_passwords_test.rego
new file mode 100644
index 00000000..0ed8c646
--- /dev/null
+++ b/checks/cloud/aws/iam/require_symbols_in_passwords_test.rego
@@ -0,0 +1,15 @@
+package builtin.aws.iam.aws0060_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0060 as check
+import data.lib.test
+
+test_allow_policy_require_symbols_in_passwords if {
+ test.assert_empty(check.deny) with input.aws.iam.passwordpolicy.requiresymbols.value as true
+}
+
+test_disallow_policy_no_require_symbols_in_passwords if {
+ inp := {"aws": {"iam": {"passwordpolicy": {"requiresymbols": {"value": false}}}}}
+ test.assert_equal_message("Password policy does not require symbols.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/require_uppercase_in_passwords.rego b/checks/cloud/aws/iam/require_uppercase_in_passwords.rego
new file mode 100644
index 00000000..dffd6c5b
--- /dev/null
+++ b/checks/cloud/aws/iam/require_uppercase_in_passwords.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: IAM Password policy should have requirement for at least one uppercase character.
+# description: |
+#   IAM account password policies should ensure that password content includes
+#   at least one uppercase character.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0061
+# avd_id: AVD-AWS-0061
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: require-uppercase-in-passwords
+# recommended_action: Enforce longer, more complex passwords in the policy
+# frameworks:
+# cis-aws-1.2:
+# - "1.5"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_account_password_policy
+# good_examples: checks/cloud/aws/iam/require_uppercase_in_passwords.tf.go
+# bad_examples: checks/cloud/aws/iam/require_uppercase_in_passwords.tf.go
+package builtin.aws.iam.aws0061
+
+import rego.v1
+
+deny contains res if {
+ policy := input.aws.iam.passwordpolicy
+ not policy.requireuppercase.value
+
+ res := result.new("Password policy does not require uppercase characters.", policy.requireuppercase)
+}
diff --git a/checks/cloud/aws/iam/require_uppercase_in_passwords_test.rego b/checks/cloud/aws/iam/require_uppercase_in_passwords_test.rego
new file mode 100644
index 00000000..cab23561
--- /dev/null
+++ b/checks/cloud/aws/iam/require_uppercase_in_passwords_test.rego
@@ -0,0 +1,15 @@
+package builtin.aws.iam.aws0061_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0061 as check
+import data.lib.test
+
+test_allow_policy_require_uppercase_in_passwords if {
+ test.assert_empty(check.deny) with input.aws.iam.passwordpolicy.requireuppercase.value as true
+}
+
+test_disallow_policy_no_require_uppercase_in_passwords if {
+ inp := {"aws": {"iam": {"passwordpolicy": {"requireuppercase": {"value": false}}}}}
+ test.assert_equal_message("Password policy does not require uppercase characters.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/rotate_access_keys.rego b/checks/cloud/aws/iam/rotate_access_keys.rego
new file mode 100644
index 00000000..f424cf8d
--- /dev/null
+++ b/checks/cloud/aws/iam/rotate_access_keys.rego
@@ -0,0 +1,47 @@
+# METADATA
+# title: Access keys should be rotated at least every 90 days
+# description: |
+# Regularly rotating your IAM credentials helps prevent a compromised set of IAM access keys from accessing components in your AWS account.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/automatically-rotate-iam-user-access-keys-at-scale-with-aws-organizations-and-aws-secrets-manager.html
+# custom:
+# id: AVD-AWS-0146
+# avd_id: AVD-AWS-0146
+# provider: aws
+# service: iam
+# severity: LOW
+# short_code: rotate-access-keys
+# recommended_action: Rotate keys every 90 days or less
+# frameworks:
+# cis-aws-1.2:
+# - "1.4"
+# cis-aws-1.4:
+# - "1.14"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+package builtin.aws.iam.aws0146
+
+import data.lib.datetime
+import rego.v1
+
+deny contains res if {
+ some user in input.aws.iam.users
+
+ some key in user.accesskeys
+ key.active.value
+
+ ns := time.parse_rfc3339_ns(key.creationdate.value)
+ diff := time.now_ns() - ns
+ diff > datetime.days_to_ns(90)
+ days := ceil((diff - datetime.days_to_ns(90)) / datetime.ns_in_day)
+
+ msg := sprintf("User access key %q should have been rotated %d day(s) ago", [key.accesskeyid.value, days])
+ res := result.new(msg, user)
+}
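To make the overdue-days arithmetic concrete: for a key created 100 days ago, diff is 100 days in nanoseconds, so the rule computes ceil((100d - 90d) / 1d) = 10 and reports that the key should have been rotated 10 day(s) ago, which is what the test file below asserts. As a throwaway sanity check (assuming days_to_ns and ns_in_day keep their obvious meanings in data.lib.datetime):

    overdue := ceil((datetime.days_to_ns(100) - datetime.days_to_ns(90)) / datetime.ns_in_day) # = 10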
diff --git a/checks/cloud/aws/iam/rotate_access_keys_test.rego b/checks/cloud/aws/iam/rotate_access_keys_test.rego
new file mode 100644
index 00000000..53136a9b
--- /dev/null
+++ b/checks/cloud/aws/iam/rotate_access_keys_test.rego
@@ -0,0 +1,25 @@
+package builtin.aws.iam.aws0146_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0146 as check
+import data.lib.datetime
+import data.lib.test
+
+test_allow_access_key_created_within_90_days if {
+ inp := {"aws": {"iam": {"users": [{"accesskeys": [{
+ "creationdate": {"value": time.format(time.now_ns() - datetime.days_to_ns(10))},
+ "accesskeyid": {"value": "keyid"},
+ "active": {"value": true},
+ }]}]}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_access_key_created_more_than_90_days_ago if {
+ inp := {"aws": {"iam": {"users": [{"accesskeys": [{
+ "creationdate": {"value": time.format(time.now_ns() - datetime.days_to_ns(100))},
+ "accesskeyid": {"value": "keyid"},
+ "active": {"value": true},
+ }]}]}}}
+ test.assert_equal_message(`User access key "keyid" should have been rotated 10 day(s) ago`, check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/set_max_password_age.rego b/checks/cloud/aws/iam/set_max_password_age.rego
new file mode 100644
index 00000000..e6829f2c
--- /dev/null
+++ b/checks/cloud/aws/iam/set_max_password_age.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: IAM Password policy should have expiry less than or equal to 90 days.
+# description: |
+# IAM account password policies should have a maximum age specified.
+# The account password policy should be set to expire passwords after 90 days or less.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0062
+# avd_id: AVD-AWS-0062
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: set-max-password-age
+# recommended_action: Limit the password duration with an expiry in the policy
+# frameworks:
+# cis-aws-1.2:
+# - "1.11"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_account_password_policy
+# good_examples: checks/cloud/aws/iam/set_max_password_age.tf.go
+# bad_examples: checks/cloud/aws/iam/set_max_password_age.tf.go
+package builtin.aws.iam.aws0062
+
+import rego.v1
+
+deny contains res if {
+ policy := input.aws.iam.passwordpolicy
+	policy.maxagedays.value > 90
+ res := result.new("Password policy allows a maximum password age of greater than 90 days.", policy.maxagedays)
+}
diff --git a/checks/cloud/aws/iam/set_max_password_age_test.rego b/checks/cloud/aws/iam/set_max_password_age_test.rego
new file mode 100644
index 00000000..96c334e7
--- /dev/null
+++ b/checks/cloud/aws/iam/set_max_password_age_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.iam.aws0062_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0062 as check
+import data.lib.test
+
+test_allow_password_with_max_age_days_within_90 if {
+	inp := {"aws": {"iam": {"passwordpolicy": {"maxagedays": {"value": 60}}}}}
+	test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_password_with_max_age_days_over_90 if {
+	inp := {"aws": {"iam": {"passwordpolicy": {"maxagedays": {"value": 91}}}}}
+	test.assert_equal_message("Password policy allows a maximum password age of greater than 90 days.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/iam/set_minimum_password_length.rego b/checks/cloud/aws/iam/set_minimum_password_length.rego
new file mode 100644
index 00000000..b13036d8
--- /dev/null
+++ b/checks/cloud/aws/iam/set_minimum_password_length.rego
@@ -0,0 +1,45 @@
+# METADATA
+# title: IAM Password policy should have minimum password length of 14 or more characters.
+# description: |
+# IAM account password policies should ensure that passwords have a minimum length.
+# The account password policy should be set to enforce minimum password length of at least 14 characters.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html#password-policy-details
+# custom:
+# id: AVD-AWS-0063
+# avd_id: AVD-AWS-0063
+# provider: aws
+# service: iam
+# severity: MEDIUM
+# short_code: set-minimum-password-length
+# recommended_action: Enforce longer, more complex passwords in the policy
+# frameworks:
+# cis-aws-1.2:
+# - "1.9"
+# cis-aws-1.4:
+# - "1.8"
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: iam
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_account_password_policy
+# good_examples: checks/cloud/aws/iam/set_minimum_password_length.tf.go
+# bad_examples: checks/cloud/aws/iam/set_minimum_password_length.tf.go
+package builtin.aws.iam.aws0063
+
+import rego.v1
+
+msg := "Password policy allows a minimum password length of less than 14 characters."
+
+deny contains res if {
+ policy := input.aws.iam.passwordpolicy
+ policy.minimumlength.value < 14
+	res := result.new(msg, policy.minimumlength)
+}
diff --git a/checks/cloud/aws/iam/set_minimum_password_length_test.rego b/checks/cloud/aws/iam/set_minimum_password_length_test.rego
new file mode 100644
index 00000000..053ca4e5
--- /dev/null
+++ b/checks/cloud/aws/iam/set_minimum_password_length_test.rego
@@ -0,0 +1,16 @@
+package builtin.aws.iam.aws0063_test
+
+import rego.v1
+
+import data.builtin.aws.iam.aws0063 as check
+import data.lib.test
+
+test_allow_password_length_over_14 if {
+ inp := {"aws": {"iam": {"passwordpolicy": {"minimumlength": {"value": 15}}}}}
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_disallow_password_length_under_14 if {
+ inp := {"aws": {"iam": {"passwordpolicy": {"minimumlength": {"value": 13}}}}}
+	test.assert_equal_message("Password policy allows a minimum password length of less than 14 characters.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/kinesis/enable_in_transit_encryption.go b/checks/cloud/aws/kinesis/enable_in_transit_encryption.go
deleted file mode 100755
index d03bda84..00000000
--- a/checks/cloud/aws/kinesis/enable_in_transit_encryption.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package kinesis
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/kinesis"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableInTransitEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0064",
- Provider: providers.AWSProvider,
- Service: "kinesis",
- ShortCode: "enable-in-transit-encryption",
- Summary: "Kinesis stream is unencrypted.",
- Impact: "Intercepted data can be read in transit",
- Resolution: "Enable in transit encryption",
- Explanation: `Kinesis streams should be encrypted to ensure sensitive data is kept private. Additionally, non-default KMS keys should be used so granularity of access control can be ensured.`,
- Links: []string{
- "https://docs.aws.amazon.com/streams/latest/dev/server-side-encryption.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableInTransitEncryptionGoodExamples,
- BadExamples: terraformEnableInTransitEncryptionBadExamples,
- Links: terraformEnableInTransitEncryptionLinks,
- RemediationMarkdown: terraformEnableInTransitEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableInTransitEncryptionGoodExamples,
- BadExamples: cloudFormationEnableInTransitEncryptionBadExamples,
- Links: cloudFormationEnableInTransitEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableInTransitEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, stream := range s.AWS.Kinesis.Streams {
- if stream.Encryption.Type.NotEqualTo(kinesis.EncryptionTypeKMS) {
- results.Add(
- "Stream does not use KMS encryption.",
- stream.Encryption.Type,
- )
- } else if stream.Encryption.KMSKeyID.IsEmpty() {
- results.Add(
- "Stream does not use a custom-managed KMS key.",
- stream.Encryption.KMSKeyID,
- )
- } else {
- results.AddPassed(&stream)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/kinesis/enable_in_transit_encryption.rego b/checks/cloud/aws/kinesis/enable_in_transit_encryption.rego
new file mode 100644
index 00000000..54e7d4d0
--- /dev/null
+++ b/checks/cloud/aws/kinesis/enable_in_transit_encryption.rego
@@ -0,0 +1,47 @@
+# METADATA
+# title: Kinesis stream is unencrypted.
+# description: |
+# Kinesis streams should be encrypted to ensure sensitive data is kept private. Additionally, non-default KMS keys should be used so granularity of access control can be ensured.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/streams/latest/dev/server-side-encryption.html
+# custom:
+# id: AVD-AWS-0064
+# avd_id: AVD-AWS-0064
+# provider: aws
+# service: kinesis
+# severity: HIGH
+# short_code: enable-in-transit-encryption
+# recommended_action: Enable in transit encryption
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: kinesis
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kinesis_stream#encryption_type
+# good_examples: checks/cloud/aws/kinesis/enable_in_transit_encryption.tf.go
+# bad_examples: checks/cloud/aws/kinesis/enable_in_transit_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/kinesis/enable_in_transit_encryption.cf.go
+# bad_examples: checks/cloud/aws/kinesis/enable_in_transit_encryption.cf.go
+package builtin.aws.kinesis.aws0064
+
+import rego.v1
+
+deny contains res if {
+ some stream in input.aws.kinesis.streams
+ stream.encryption.type.value != "KMS"
+ res := result.new("Stream does not use KMS encryption.", stream.encryption.type)
+}
+
+deny contains res if {
+ some stream in input.aws.kinesis.streams
+ stream.encryption.type.value == "KMS"
+ stream.encryption.kmskeyid.value == ""
+ res := result.new("Stream does not use a custom-managed KMS key.", stream.encryption.kmskeyid)
+}
diff --git a/checks/cloud/aws/kinesis/enable_in_transit_encryption_test.go b/checks/cloud/aws/kinesis/enable_in_transit_encryption_test.go
deleted file mode 100644
index bfc5cdcc..00000000
--- a/checks/cloud/aws/kinesis/enable_in_transit_encryption_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package kinesis
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/kinesis"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableInTransitEncryption(t *testing.T) {
- tests := []struct {
- name string
- input kinesis.Kinesis
- expected bool
- }{
- {
- name: "AWS Kinesis Stream with no encryption",
- input: kinesis.Kinesis{
- Streams: []kinesis.Stream{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: kinesis.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String("NONE", trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("some-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS Kinesis Stream with KMS encryption but no key",
- input: kinesis.Kinesis{
- Streams: []kinesis.Stream{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: kinesis.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(kinesis.EncryptionTypeKMS, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS Kinesis Stream with KMS encryption and key",
- input: kinesis.Kinesis{
- Streams: []kinesis.Stream{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: kinesis.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Type: trivyTypes.String(kinesis.EncryptionTypeKMS, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("some-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Kinesis = test.input
- results := CheckEnableInTransitEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableInTransitEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/kinesis/enable_in_transit_encryption_test.rego b/checks/cloud/aws/kinesis/enable_in_transit_encryption_test.rego
new file mode 100644
index 00000000..b971c65b
--- /dev/null
+++ b/checks/cloud/aws/kinesis/enable_in_transit_encryption_test.rego
@@ -0,0 +1,42 @@
+package builtin.aws.kinesis.aws0064_test
+
+import rego.v1
+
+import data.builtin.aws.kinesis.aws0064 as check
+import data.lib.test
+
+test_deny_unencrypted if {
+ inp := {"aws": {"kinesis": {"streams": [{
+ "name": "test",
+ "encryption": {
+ "type": {"value": ""},
+ "kmskeyid": {"value": ""},
+ },
+ }]}}}
+
+ test.assert_equal_message("Stream does not use KMS encryption.", check.deny) with input as inp
+}
+
+test_deny_with_kms_but_without_key if {
+ inp := {"aws": {"kinesis": {"streams": [{
+ "name": "test",
+ "encryption": {
+ "type": {"value": "KMS"},
+ "kmskeyid": {"value": ""},
+ },
+ }]}}}
+
+ test.assert_equal_message("Stream does not use a custom-managed KMS key.", check.deny) with input as inp
+}
+
+test_allow_encrypted if {
+ inp := {"aws": {"kinesis": {"streams": [{
+ "name": "test",
+ "encryption": {
+ "type": {"value": "KMS"},
+ "kmskeyid": {"value": "test"},
+ },
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/kinesis/kinesis.go b/checks/cloud/aws/kinesis/kinesis.go
new file mode 100644
index 00000000..1dbca1da
--- /dev/null
+++ b/checks/cloud/aws/kinesis/kinesis.go
@@ -0,0 +1 @@
+package kinesis
diff --git a/checks/cloud/aws/kms/auto_rotate_keys.go b/checks/cloud/aws/kms/auto_rotate_keys.go
deleted file mode 100755
index aab0a496..00000000
--- a/checks/cloud/aws/kms/auto_rotate_keys.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package kms
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/kms"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckAutoRotateKeys = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0065",
- Provider: providers.AWSProvider,
- Service: "kms",
- ShortCode: "auto-rotate-keys",
- Summary: "A KMS key is not configured to auto-rotate.",
- Impact: "Long life KMS keys increase the attack surface when compromised",
- Resolution: "Configure KMS key to auto rotate",
- Explanation: `You should configure your KMS keys to auto rotate to maintain security and defend against compromise.`,
- Links: []string{
- "https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformAutoRotateKeysGoodExamples,
- BadExamples: terraformAutoRotateKeysBadExamples,
- Links: terraformAutoRotateKeysLinks,
- RemediationMarkdown: terraformAutoRotateKeysRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, key := range s.AWS.KMS.Keys {
- if key.Usage.EqualTo(kms.KeyUsageSignAndVerify) {
- continue
- }
- if key.RotationEnabled.IsFalse() {
- results.Add(
- "Key does not have rotation enabled.",
- key.RotationEnabled,
- )
- } else {
- results.AddPassed(&key)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/kms/auto_rotate_keys.rego b/checks/cloud/aws/kms/auto_rotate_keys.rego
new file mode 100644
index 00000000..59bd62df
--- /dev/null
+++ b/checks/cloud/aws/kms/auto_rotate_keys.rego
@@ -0,0 +1,38 @@
+# METADATA
+# title: A KMS key is not configured to auto-rotate.
+# description: |
+# You should configure your KMS keys to auto rotate to maintain security and defend against compromise.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html
+# custom:
+# id: AVD-AWS-0065
+# avd_id: AVD-AWS-0065
+# provider: aws
+# service: kms
+# severity: MEDIUM
+# short_code: auto-rotate-keys
+# recommended_action: Configure KMS key to auto rotate
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: kms
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key#enable_key_rotation
+# good_examples: checks/cloud/aws/kms/auto_rotate_keys.tf.go
+# bad_examples: checks/cloud/aws/kms/auto_rotate_keys.tf.go
+package builtin.aws.kms.aws0065
+
+import rego.v1
+
+deny contains res if {
+ some key in input.aws.kms.keys
+ key.usage.value != "SIGN_VERIFY"
+ key.rotationenabled.value == false
+ res := result.new("Key does not have rotation enabled.", key.rotationenabled)
+}
diff --git a/checks/cloud/aws/kms/auto_rotate_keys_test.go b/checks/cloud/aws/kms/auto_rotate_keys_test.go
deleted file mode 100644
index 35ccaf40..00000000
--- a/checks/cloud/aws/kms/auto_rotate_keys_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package kms
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/kms"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckAutoRotateKeys(t *testing.T) {
- tests := []struct {
- name string
- input kms.KMS
- expected bool
- }{
- {
- name: "ENCRYPT_DECRYPT KMS Key with auto-rotation disabled",
- input: kms.KMS{
- Keys: []kms.Key{
- {
- Usage: trivyTypes.String("ENCRYPT_DECRYPT", trivyTypes.NewTestMetadata()),
- RotationEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "ENCRYPT_DECRYPT KMS Key with auto-rotation enabled",
- input: kms.KMS{
- Keys: []kms.Key{
- {
- Usage: trivyTypes.String("ENCRYPT_DECRYPT", trivyTypes.NewTestMetadata()),
- RotationEnabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- {
- name: "SIGN_VERIFY KMS Key with auto-rotation disabled",
- input: kms.KMS{
- Keys: []kms.Key{
- {
- Usage: trivyTypes.String(kms.KeyUsageSignAndVerify, trivyTypes.NewTestMetadata()),
- RotationEnabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.KMS = test.input
- results := CheckAutoRotateKeys.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckAutoRotateKeys.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/kms/auto_rotate_keys_test.rego b/checks/cloud/aws/kms/auto_rotate_keys_test.rego
new file mode 100644
index 00000000..def58bfa
--- /dev/null
+++ b/checks/cloud/aws/kms/auto_rotate_keys_test.rego
@@ -0,0 +1,33 @@
+package builtin.aws.kms.aws0065_test
+
+import rego.v1
+
+import data.builtin.aws.kms.aws0065 as check
+import data.lib.test
+
+test_allow_sign_verify_key_without_autorotate if {
+ inp := {"aws": {"kms": {"keys": [{
+ "usage": {"value": "SIGN_VERIFY"},
+ "rotationenabled": {"value": false},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_encrypt_decrypt_key_with_autorotate if {
+ inp := {"aws": {"kms": {"keys": [{
+ "usage": {"value": "ENCRYPT_DECRYPT"},
+ "rotationenabled": {"value": true},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_encrypt_decrypt_key_without_autorotate if {
+ inp := {"aws": {"kms": {"keys": [{
+ "usage": {"value": "ENCRYPT_DECRYPT"},
+ "rotationenabled": {"value": false},
+ }]}}}
+
+ test.assert_equal_message("Key does not have rotation enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/kms/kms.go b/checks/cloud/aws/kms/kms.go
new file mode 100644
index 00000000..cfe5e2b4
--- /dev/null
+++ b/checks/cloud/aws/kms/kms.go
@@ -0,0 +1 @@
+package kms
diff --git a/checks/cloud/aws/lambda/enable_tracing.go b/checks/cloud/aws/lambda/enable_tracing.go
deleted file mode 100755
index ab1bd754..00000000
--- a/checks/cloud/aws/lambda/enable_tracing.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package lambda
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/lambda"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableTracing = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0066",
- Provider: providers.AWSProvider,
- Service: "lambda",
- ShortCode: "enable-tracing",
- Summary: "Lambda functions should have X-Ray tracing enabled",
- Impact: "Without full tracing enabled it is difficult to trace the flow of logs",
- Resolution: "Enable tracing",
- Explanation: `X-Ray tracing enables end-to-end debugging and analysis of all function activity. This will allow for identifying bottlenecks, slow downs and timeouts.`,
- Links: []string{
- "https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableTracingGoodExamples,
- BadExamples: terraformEnableTracingBadExamples,
- Links: terraformEnableTracingLinks,
- RemediationMarkdown: terraformEnableTracingRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableTracingGoodExamples,
- BadExamples: cloudFormationEnableTracingBadExamples,
- Links: cloudFormationEnableTracingLinks,
- RemediationMarkdown: cloudFormationEnableTracingRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, function := range s.AWS.Lambda.Functions {
- if function.Metadata.IsUnmanaged() {
- continue
- }
- if function.Tracing.Mode.NotEqualTo(lambda.TracingModeActive) {
- results.Add(
- "Function does not have tracing enabled.",
- function.Tracing.Mode,
- )
- } else {
- results.AddPassed(&function)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/lambda/enable_tracing.rego b/checks/cloud/aws/lambda/enable_tracing.rego
new file mode 100644
index 00000000..64c23080
--- /dev/null
+++ b/checks/cloud/aws/lambda/enable_tracing.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: Lambda functions should have X-Ray tracing enabled
+# description: |
+#   X-Ray tracing enables end-to-end debugging and analysis of all function activity. This will allow for identifying bottlenecks, slowdowns and timeouts.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html
+# custom:
+# id: AVD-AWS-0066
+# avd_id: AVD-AWS-0066
+# provider: aws
+# service: lambda
+# severity: LOW
+# short_code: enable-tracing
+# recommended_action: Enable tracing
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: lambda
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function#mode
+# good_examples: checks/cloud/aws/lambda/enable_tracing.tf.go
+# bad_examples: checks/cloud/aws/lambda/enable_tracing.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/lambda/enable_tracing.cf.go
+# bad_examples: checks/cloud/aws/lambda/enable_tracing.cf.go
+package builtin.aws.lambda.aws0066
+
+import rego.v1
+
+deny contains res if {
+ some func in input.aws.lambda.functions
+ func.tracing.mode.value != "Active"
+ res := result.new("Function does not have tracing enabled.", func.tracing.mode)
+}
diff --git a/checks/cloud/aws/lambda/enable_tracing_test.go b/checks/cloud/aws/lambda/enable_tracing_test.go
deleted file mode 100644
index e9a504a7..00000000
--- a/checks/cloud/aws/lambda/enable_tracing_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package lambda
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/lambda"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableTracing(t *testing.T) {
- tests := []struct {
- name string
- input lambda.Lambda
- expected bool
- }{
- {
- name: "Lambda function with no tracing mode specified",
- input: lambda.Lambda{
- Functions: []lambda.Function{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Tracing: lambda.Tracing{
- Metadata: trivyTypes.NewTestMetadata(),
- Mode: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Lambda function with active tracing mode",
- input: lambda.Lambda{
- Functions: []lambda.Function{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Tracing: lambda.Tracing{
- Metadata: trivyTypes.NewTestMetadata(),
- Mode: trivyTypes.String(lambda.TracingModeActive, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Lambda = test.input
- results := CheckEnableTracing.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableTracing.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/lambda/enable_tracing_test.rego b/checks/cloud/aws/lambda/enable_tracing_test.rego
new file mode 100644
index 00000000..86035729
--- /dev/null
+++ b/checks/cloud/aws/lambda/enable_tracing_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.lambda.aws0066_test
+
+import rego.v1
+
+import data.builtin.aws.lambda.aws0066 as check
+import data.lib.test
+
+test_allow_with_active_tracing_mode if {
+ inp := {"aws": {"lambda": {"functions": [{"tracing": {"mode": {"value": "Active"}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_active_tracing_mode if {
+ inp := {"aws": {"lambda": {"functions": [{"tracing": {"mode": {"value": "PassThrough"}}}]}}}
+
+ test.assert_equal_message("Function does not have tracing enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/lambda/restrict_source_arn.go b/checks/cloud/aws/lambda/restrict_source_arn.go
deleted file mode 100755
index 10bb8e0f..00000000
--- a/checks/cloud/aws/lambda/restrict_source_arn.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package lambda
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckRestrictSourceArn = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0067",
- Provider: providers.AWSProvider,
- Service: "lambda",
- ShortCode: "restrict-source-arn",
- Summary: "Ensure that lambda function permission has a source arn specified",
- Impact: "Not providing the source ARN allows any resource from principal, even from other accounts",
- Resolution: "Always provide a source arn for Lambda permissions",
- Explanation: `When the principal is an AWS service, the ARN of the specific resource within that service to grant permission to.
-
-Without this, any resource from principal will be granted permission – even if that resource is from another account.
-
-For S3, this should be the ARN of the S3 Bucket. For CloudWatch Events, this should be the ARN of the CloudWatch Events Rule. For API Gateway, this should be the ARN of the API`,
- Links: []string{
- "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-permission.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformRestrictSourceArnGoodExamples,
- BadExamples: terraformRestrictSourceArnBadExamples,
- Links: terraformRestrictSourceArnLinks,
- RemediationMarkdown: terraformRestrictSourceArnRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationRestrictSourceArnGoodExamples,
- BadExamples: cloudFormationRestrictSourceArnBadExamples,
- Links: cloudFormationRestrictSourceArnLinks,
- RemediationMarkdown: cloudFormationRestrictSourceArnRemediationMarkdown,
- },
- Severity: severity.Critical,
- },
- func(s *state.State) (results scan.Results) {
- for _, function := range s.AWS.Lambda.Functions {
- for _, permission := range function.Permissions {
- if !permission.Principal.EndsWith(".amazonaws.com") {
- continue
- }
- if permission.SourceARN.IsEmpty() {
- results.Add(
- "Lambda permission lacks source ARN for *.amazonaws.com principal.",
- permission.SourceARN,
- )
- } else {
- results.AddPassed(&function)
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/lambda/restrict_source_arn.rego b/checks/cloud/aws/lambda/restrict_source_arn.rego
new file mode 100644
index 00000000..13f383dc
--- /dev/null
+++ b/checks/cloud/aws/lambda/restrict_source_arn.rego
@@ -0,0 +1,44 @@
+# METADATA
+# title: Ensure that lambda function permission has a source arn specified
+# description: |
+#   When the principal is an AWS service, specify the ARN of the specific resource within that service to grant permission to.
+#   Without a source ARN, any resource belonging to the principal is granted permission, even a resource from another account.
+#   For S3, this should be the ARN of the S3 bucket. For CloudWatch Events, this should be the ARN of the CloudWatch Events rule. For API Gateway, this should be the ARN of the API.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-permission.html
+# custom:
+# id: AVD-AWS-0067
+# avd_id: AVD-AWS-0067
+# provider: aws
+# service: lambda
+# severity: CRITICAL
+# short_code: restrict-source-arn
+# recommended_action: Always provide a source arn for Lambda permissions
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: lambda
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission
+# good_examples: checks/cloud/aws/lambda/restrict_source_arn.tf.go
+# bad_examples: checks/cloud/aws/lambda/restrict_source_arn.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/lambda/restrict_source_arn.cf.go
+# bad_examples: checks/cloud/aws/lambda/restrict_source_arn.cf.go
+package builtin.aws.lambda.aws0067
+
+import rego.v1
+
+deny contains res if {
+ some func in input.aws.lambda.functions
+ some permission in func.permissions
+ endswith(permission.principal.value, ".amazonaws.com")
+ permission.sourcearn.value == ""
+ res := result.new("Lambda permission lacks source ARN for *.amazonaws.com principal.", permission.sourcearn)
+}
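Note: the deny rule above only fires when sourcearn is present but empty. A hedged sketch of a variant that would also catch a permission missing the attribute entirely, assuming it sits in the same package as the check above (which already imports rego.v1); the result is attached to the permission itself because sourcearn may be absent in that case:

deny contains res if {
	some func in input.aws.lambda.functions
	some permission in func.permissions
	endswith(permission.principal.value, ".amazonaws.com")
	object.get(permission, ["sourcearn", "value"], "") == ""
	res := result.new("Lambda permission lacks source ARN for *.amazonaws.com principal.", permission)
}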
diff --git a/checks/cloud/aws/lambda/restrict_source_arn_test.go b/checks/cloud/aws/lambda/restrict_source_arn_test.go
deleted file mode 100644
index cd18eeb0..00000000
--- a/checks/cloud/aws/lambda/restrict_source_arn_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package lambda
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/lambda"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckRestrictSourceArn(t *testing.T) {
- tests := []struct {
- name string
- input lambda.Lambda
- expected bool
- }{
- {
- name: "Lambda function permission missing source ARN",
- input: lambda.Lambda{
- Functions: []lambda.Function{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Permissions: []lambda.Permission{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Principal: trivyTypes.String("sns.amazonaws.com", trivyTypes.NewTestMetadata()),
- SourceARN: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "Lambda function permission with source ARN",
- input: lambda.Lambda{
- Functions: []lambda.Function{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Permissions: []lambda.Permission{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Principal: trivyTypes.String("sns.amazonaws.com", trivyTypes.NewTestMetadata()),
- SourceARN: trivyTypes.String("source-arn", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.Lambda = test.input
- results := CheckRestrictSourceArn.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckRestrictSourceArn.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/lambda/restrict_source_arn_test.rego b/checks/cloud/aws/lambda/restrict_source_arn_test.rego
new file mode 100644
index 00000000..932841af
--- /dev/null
+++ b/checks/cloud/aws/lambda/restrict_source_arn_test.rego
@@ -0,0 +1,24 @@
+package builtin.aws.lambda.aws0067_test
+
+import rego.v1
+
+import data.builtin.aws.lambda.aws0067 as check
+import data.lib.test
+
+test_allow_with_arn if {
+ inp := {"aws": {"lambda": {"functions": [{"permissions": [{
+ "principal": {"value": "sns.amazonaws.com"},
+ "sourcearn": {"value": "arn"},
+ }]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_arn if {
+ inp := {"aws": {"lambda": {"functions": [{"permissions": [{
+ "principal": {"value": "sns.amazonaws.com"},
+ "sourcearn": {"value": ""},
+ }]}]}}}
+
+ test.assert_equal_message("Lambda permission lacks source ARN for *.amazonaws.com principal.", check.deny) with input as inp
+}
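Note: the two tests above both use a service principal. A hedged sketch of one more case exercising the principal filter, assuming the same test package and helpers; the account-ID principal value is made up:

test_allow_non_service_principal if {
	inp := {"aws": {"lambda": {"functions": [{"permissions": [{
		"principal": {"value": "123456789012"},
		"sourcearn": {"value": ""},
	}]}]}}}

	test.assert_empty(check.deny) with input as inp
}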
diff --git a/checks/cloud/aws/mq/enable_audit_logging.go b/checks/cloud/aws/mq/enable_audit_logging.go
deleted file mode 100755
index 8ee0420d..00000000
--- a/checks/cloud/aws/mq/enable_audit_logging.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package mq
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableAuditLogging = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0070",
- Provider: providers.AWSProvider,
- Service: "mq",
- ShortCode: "enable-audit-logging",
- Summary: "MQ Broker should have audit logging enabled",
- Impact: "Without audit logging it is difficult to trace activity in the MQ broker",
- Resolution: "Enable audit logging",
- Explanation: `Logging should be enabled to allow tracing of issues and activity to be investigated more fully. Logs provide additional information and context which is often invalauble during investigation`,
- Links: []string{
- "https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/configure-logging-monitoring-activemq.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableAuditLoggingGoodExamples,
- BadExamples: terraformEnableAuditLoggingBadExamples,
- Links: terraformEnableAuditLoggingLinks,
- RemediationMarkdown: terraformEnableAuditLoggingRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableAuditLoggingGoodExamples,
- BadExamples: cloudFormationEnableAuditLoggingBadExamples,
- Links: cloudFormationEnableAuditLoggingLinks,
- RemediationMarkdown: cloudFormationEnableAuditLoggingRemediationMarkdown,
- },
- Severity: severity.Medium,
- },
- func(s *state.State) (results scan.Results) {
- for _, broker := range s.AWS.MQ.Brokers {
- if broker.Logging.Audit.IsFalse() {
- results.Add(
- "Broker does not have audit logging enabled.",
- broker.Logging.Audit,
- )
- } else {
- results.AddPassed(&broker)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/mq/enable_audit_logging.rego b/checks/cloud/aws/mq/enable_audit_logging.rego
new file mode 100644
index 00000000..2d4a5da6
--- /dev/null
+++ b/checks/cloud/aws/mq/enable_audit_logging.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: MQ Broker should have audit logging enabled
+# description: |
+#   Logging should be enabled to allow tracing of issues and activity to be investigated more fully. Logs provide additional information and context which is often invaluable during an investigation.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/configure-logging-monitoring-activemq.html
+# custom:
+# id: AVD-AWS-0070
+# avd_id: AVD-AWS-0070
+# provider: aws
+# service: mq
+# severity: MEDIUM
+# short_code: enable-audit-logging
+# recommended_action: Enable audit logging
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: mq
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/mq_broker#audit
+# good_examples: checks/cloud/aws/mq/enable_audit_logging.tf.go
+# bad_examples: checks/cloud/aws/mq/enable_audit_logging.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/mq/enable_audit_logging.cf.go
+# bad_examples: checks/cloud/aws/mq/enable_audit_logging.cf.go
+package builtin.aws.mq.aws0070
+
+import rego.v1
+
+deny contains res if {
+ some broker in input.aws.mq.brokers
+ broker.logging.audit.value == false
+
+ res := result.new("Broker does not have audit logging enabled.", broker.logging.audit)
+}
diff --git a/checks/cloud/aws/mq/enable_audit_logging_test.go b/checks/cloud/aws/mq/enable_audit_logging_test.go
deleted file mode 100644
index 2dfd1033..00000000
--- a/checks/cloud/aws/mq/enable_audit_logging_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package mq
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/mq"
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableAuditLogging(t *testing.T) {
- tests := []struct {
- name string
- input mq.MQ
- expected bool
- }{
- {
- name: "AWS MQ Broker without audit logging",
- input: mq.MQ{
- Brokers: []mq.Broker{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Logging: mq.Logging{
- Metadata: trivyTypes.NewTestMetadata(),
- Audit: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS MQ Broker with audit logging",
- input: mq.MQ{
- Brokers: []mq.Broker{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Logging: mq.Logging{
- Metadata: trivyTypes.NewTestMetadata(),
- Audit: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.MQ = test.input
- results := CheckEnableAuditLogging.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableAuditLogging.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/mq/enable_audit_logging_test.rego b/checks/cloud/aws/mq/enable_audit_logging_test.rego
new file mode 100644
index 00000000..e300098f
--- /dev/null
+++ b/checks/cloud/aws/mq/enable_audit_logging_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.mq.aws0070_test
+
+import rego.v1
+
+import data.builtin.aws.mq.aws0070 as check
+import data.lib.test
+
+test_allow_with_audit_logging if {
+ inp := {"aws": {"mq": {"brokers": [{"logging": {"audit": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_audit_logging if {
+ inp := {"aws": {"mq": {"brokers": [{"logging": {"audit": {"value": false}}}]}}}
+
+ test.assert_equal_message("Broker does not have audit logging enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/mq/enable_general_logging.go b/checks/cloud/aws/mq/enable_general_logging.go
deleted file mode 100755
index 3d1ee852..00000000
--- a/checks/cloud/aws/mq/enable_general_logging.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package mq
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableGeneralLogging = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0071",
- Provider: providers.AWSProvider,
- Service: "mq",
- ShortCode: "enable-general-logging",
- Summary: "MQ Broker should have general logging enabled",
- Impact: "Without logging it is difficult to trace issues",
- Resolution: "Enable general logging",
- Explanation: `Logging should be enabled to allow tracing of issues and activity to be investigated more fully. Logs provide additional information and context which is often invalauble during investigation`,
- Links: []string{
- "https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/configure-logging-monitoring-activemq.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableGeneralLoggingGoodExamples,
- BadExamples: terraformEnableGeneralLoggingBadExamples,
- Links: terraformEnableGeneralLoggingLinks,
- RemediationMarkdown: terraformEnableGeneralLoggingRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableGeneralLoggingGoodExamples,
- BadExamples: cloudFormationEnableGeneralLoggingBadExamples,
- Links: cloudFormationEnableGeneralLoggingLinks,
- RemediationMarkdown: cloudFormationEnableGeneralLoggingRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, broker := range s.AWS.MQ.Brokers {
- if broker.Logging.General.IsFalse() {
- results.Add(
- "Broker does not have general logging enabled.",
- broker.Logging.General,
- )
- } else {
- results.AddPassed(&broker)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/mq/enable_general_logging.rego b/checks/cloud/aws/mq/enable_general_logging.rego
new file mode 100644
index 00000000..564dc999
--- /dev/null
+++ b/checks/cloud/aws/mq/enable_general_logging.rego
@@ -0,0 +1,40 @@
+# METADATA
+# title: MQ Broker should have general logging enabled
+# description: |
+#   Logging should be enabled to allow tracing of issues and activity to be investigated more fully. Logs provide additional information and context which is often invaluable during an investigation.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/configure-logging-monitoring-activemq.html
+# custom:
+# id: AVD-AWS-0071
+# avd_id: AVD-AWS-0071
+# provider: aws
+# service: mq
+# severity: LOW
+# short_code: enable-general-logging
+# recommended_action: Enable general logging
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: mq
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/mq_broker#general
+# good_examples: checks/cloud/aws/mq/enable_general_logging.tf.go
+# bad_examples: checks/cloud/aws/mq/enable_general_logging.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/mq/enable_general_logging.cf.go
+# bad_examples: checks/cloud/aws/mq/enable_general_logging.cf.go
+package builtin.aws.mq.aws0071
+
+import rego.v1
+
+deny contains res if {
+ some broker in input.aws.mq.brokers
+ broker.logging.general.value == false
+ res := result.new("Broker does not have general logging enabled.", broker.logging.general)
+}
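Note: both MQ logging checks (AVD-AWS-0070 and AVD-AWS-0071) only fire when the flag is explicitly false, so an input that omits the attribute would pass silently; whether that can happen depends on how the adapter populates the input. A hedged sketch of a stricter variant for the general-logging case, assuming the same package and rego.v1 import as above, with the result attached to the broker itself so it still resolves when the logging block is absent:

deny contains res if {
	some broker in input.aws.mq.brokers
	not broker.logging.general.value
	res := result.new("Broker does not have general logging enabled.", broker)
}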
diff --git a/checks/cloud/aws/mq/enable_general_logging_test.go b/checks/cloud/aws/mq/enable_general_logging_test.go
deleted file mode 100644
index 04418ff2..00000000
--- a/checks/cloud/aws/mq/enable_general_logging_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package mq
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/mq"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableGeneralLogging(t *testing.T) {
- tests := []struct {
- name string
- input mq.MQ
- expected bool
- }{
- {
- name: "AWS MQ Broker without general logging",
- input: mq.MQ{
- Brokers: []mq.Broker{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Logging: mq.Logging{
- Metadata: trivyTypes.NewTestMetadata(),
- General: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS MQ Broker with general logging",
- input: mq.MQ{
- Brokers: []mq.Broker{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Logging: mq.Logging{
- Metadata: trivyTypes.NewTestMetadata(),
- General: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.MQ = test.input
- results := CheckEnableGeneralLogging.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableGeneralLogging.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/mq/enable_general_logging_test.rego b/checks/cloud/aws/mq/enable_general_logging_test.rego
new file mode 100644
index 00000000..7490e83d
--- /dev/null
+++ b/checks/cloud/aws/mq/enable_general_logging_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.mq.aws0071_test
+
+import rego.v1
+
+import data.builtin.aws.mq.aws0071 as check
+import data.lib.test
+
+test_allow_with_logging if {
+ inp := {"aws": {"mq": {"brokers": [{"logging": {"general": {"value": true}}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_without_logging if {
+ inp := {"aws": {"mq": {"brokers": [{"logging": {"general": {"value": false}}}]}}}
+
+ test.assert_equal_message("Broker does not have general logging enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/mq/mq.go b/checks/cloud/aws/mq/mq.go
new file mode 100644
index 00000000..71893fd0
--- /dev/null
+++ b/checks/cloud/aws/mq/mq.go
@@ -0,0 +1 @@
+package mq
diff --git a/checks/cloud/aws/mq/no_public_access.go b/checks/cloud/aws/mq/no_public_access.go
deleted file mode 100755
index 631753ef..00000000
--- a/checks/cloud/aws/mq/no_public_access.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package mq
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckNoPublicAccess = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0072",
- Provider: providers.AWSProvider,
- Service: "mq",
- ShortCode: "no-public-access",
- Summary: "Ensure MQ Broker is not publicly exposed",
- Impact: "Publicly accessible MQ Broker may be vulnerable to compromise",
- Resolution: "Disable public access when not required",
- Explanation: `Public access of the MQ broker should be disabled and only allow routes to applications that require access.`,
- Links: []string{
- "https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/using-amazon-mq-securely.html#prefer-brokers-without-public-accessibility",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformNoPublicAccessGoodExamples,
- BadExamples: terraformNoPublicAccessBadExamples,
- Links: terraformNoPublicAccessLinks,
- RemediationMarkdown: terraformNoPublicAccessRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationNoPublicAccessGoodExamples,
- BadExamples: cloudFormationNoPublicAccessBadExamples,
- Links: cloudFormationNoPublicAccessLinks,
- RemediationMarkdown: cloudFormationNoPublicAccessRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, broker := range s.AWS.MQ.Brokers {
- if broker.PublicAccess.IsTrue() {
- results.Add(
- "Broker has public access enabled.",
- broker.PublicAccess,
- )
- } else {
- results.AddPassed(&broker)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/mq/no_public_access.rego b/checks/cloud/aws/mq/no_public_access.rego
new file mode 100644
index 00000000..2f33763f
--- /dev/null
+++ b/checks/cloud/aws/mq/no_public_access.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: Ensure MQ Broker is not publicly exposed
+# description: |
+#   Public access to the MQ broker should be disabled; only allow routes to applications that require access.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/using-amazon-mq-securely.html#prefer-brokers-without-public-accessibility
+# custom:
+# id: AVD-AWS-0072
+# avd_id: AVD-AWS-0072
+# provider: aws
+# service: mq
+# severity: HIGH
+# short_code: no-public-access
+# recommended_action: Disable public access when not required
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: mq
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/mq_broker#publicly_accessible
+# good_examples: checks/cloud/aws/mq/no_public_access.tf.go
+# bad_examples: checks/cloud/aws/mq/no_public_access.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/mq/no_public_access.cf.go
+# bad_examples: checks/cloud/aws/mq/no_public_access.cf.go
+package builtin.aws.mq.aws0072
+
+import rego.v1
+
+deny contains res if {
+ some broker in input.aws.mq.brokers
+ broker.publicaccess.value == true
+
+ res := result.new("Broker has public access enabled.", broker.publicaccess)
+}
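Note: publicaccess.value is a boolean, so the explicit comparison with true could lean on plain truthiness instead; a minor stylistic sketch, assuming the same package and imports as the check above:

deny contains res if {
	some broker in input.aws.mq.brokers
	broker.publicaccess.value
	res := result.new("Broker has public access enabled.", broker.publicaccess)
}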
diff --git a/checks/cloud/aws/mq/no_public_access_test.go b/checks/cloud/aws/mq/no_public_access_test.go
deleted file mode 100644
index f2241c5a..00000000
--- a/checks/cloud/aws/mq/no_public_access_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package mq
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/mq"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckNoPublicAccess(t *testing.T) {
- tests := []struct {
- name string
- input mq.MQ
- expected bool
- }{
- {
- name: "AWS MQ Broker with public access enabled",
- input: mq.MQ{
- Brokers: []mq.Broker{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- PublicAccess: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS MQ Broker with public access disabled",
- input: mq.MQ{
- Brokers: []mq.Broker{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- PublicAccess: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.MQ = test.input
- results := CheckNoPublicAccess.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckNoPublicAccess.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/mq/no_public_access_test.rego b/checks/cloud/aws/mq/no_public_access_test.rego
new file mode 100644
index 00000000..f2f08117
--- /dev/null
+++ b/checks/cloud/aws/mq/no_public_access_test.rego
@@ -0,0 +1,18 @@
+package builtin.aws.mq.aws0072_test
+
+import rego.v1
+
+import data.builtin.aws.mq.aws0072 as check
+import data.lib.test
+
+test_allow_without_public_access if {
+ inp := {"aws": {"mq": {"brokers": [{"publicaccess": {"value": false}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_with_public_access if {
+ inp := {"aws": {"mq": {"brokers": [{"publicaccess": {"value": true}}]}}}
+
+ test.assert_equal_message("Broker has public access enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/sqs/enable_queue_encryption.go b/checks/cloud/aws/sqs/enable_queue_encryption.go
deleted file mode 100755
index b7ca3db3..00000000
--- a/checks/cloud/aws/sqs/enable_queue_encryption.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package sqs
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableQueueEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0096",
- Provider: providers.AWSProvider,
- Service: "sqs",
- ShortCode: "enable-queue-encryption",
- Summary: "Unencrypted SQS queue.",
- Impact: "The SQS queue messages could be read if compromised",
- Resolution: "Turn on SQS Queue encryption",
- Explanation: `Queues should be encrypted to protect queue contents.`,
- Links: []string{
- "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableQueueEncryptionGoodExamples,
- BadExamples: terraformEnableQueueEncryptionBadExamples,
- Links: terraformEnableQueueEncryptionLinks,
- RemediationMarkdown: terraformEnableQueueEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableQueueEncryptionGoodExamples,
- BadExamples: cloudFormationEnableQueueEncryptionBadExamples,
- Links: cloudFormationEnableQueueEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableQueueEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, queue := range s.AWS.SQS.Queues {
- if queue.Metadata.IsUnmanaged() {
- continue
- }
- if queue.Encryption.KMSKeyID.IsEmpty() && queue.Encryption.ManagedEncryption.IsFalse() {
- results.Add(
- "Queue is not encrypted",
- queue.Encryption,
- )
- } else {
- results.AddPassed(&queue)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/sqs/enable_queue_encryption.rego b/checks/cloud/aws/sqs/enable_queue_encryption.rego
new file mode 100644
index 00000000..5b04fb01
--- /dev/null
+++ b/checks/cloud/aws/sqs/enable_queue_encryption.rego
@@ -0,0 +1,42 @@
+# METADATA
+# title: Unencrypted SQS queue.
+# description: |
+# Queues should be encrypted to protect queue contents.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html
+# custom:
+# id: AVD-AWS-0096
+# avd_id: AVD-AWS-0096
+# provider: aws
+# service: sqs
+# severity: HIGH
+# short_code: enable-queue-encryption
+# recommended_action: Turn on SQS Queue encryption
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: sqs
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue#server-side-encryption-sse
+# good_examples: checks/cloud/aws/sqs/enable_queue_encryption.tf.go
+# bad_examples: checks/cloud/aws/sqs/enable_queue_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/sqs/enable_queue_encryption.cf.go
+# bad_examples: checks/cloud/aws/sqs/enable_queue_encryption.cf.go
+package builtin.aws.sqs.aws0096
+
+import rego.v1
+
+deny contains res if {
+ some queue in input.aws.sqs.queues
+ queue.__defsec_metadata.managed
+ queue.encryption.kmskeyid.value == ""
+ queue.encryption.managedencryption.value == false
+ res := result.new("Queue is not encrypted", queue.encryption)
+}
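Note: this check and queue-encryption-use-cmk below both filter on queue.__defsec_metadata.managed. A hedged sketch of naming that filter with a helper, assuming the same package and imports; is_managed is a hypothetical name, not part of this diff:

# Hypothetical helper, equivalent to the inline __defsec_metadata.managed check.
is_managed(resource) if resource.__defsec_metadata.managed

deny contains res if {
	some queue in input.aws.sqs.queues
	is_managed(queue)
	queue.encryption.kmskeyid.value == ""
	queue.encryption.managedencryption.value == false
	res := result.new("Queue is not encrypted", queue.encryption)
}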
diff --git a/checks/cloud/aws/sqs/enable_queue_encryption_test.go b/checks/cloud/aws/sqs/enable_queue_encryption_test.go
deleted file mode 100644
index 4d086407..00000000
--- a/checks/cloud/aws/sqs/enable_queue_encryption_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package sqs
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/sqs"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableQueueEncryption(t *testing.T) {
- tests := []struct {
- name string
- input sqs.SQS
- expected bool
- }{
- {
- name: "SQS Queue unencrypted",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: sqs.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- ManagedEncryption: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "SQS Queue encrypted with default key",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: sqs.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- ManagedEncryption: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("alias/aws/sqs", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "SQS Queue encrypted with proper key",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: sqs.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- ManagedEncryption: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "SQS Queue encrypted with proper key",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: sqs.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- ManagedEncryption: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.SQS = test.input
- results := CheckEnableQueueEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableQueueEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/sqs/enable_queue_encryption_test.rego b/checks/cloud/aws/sqs/enable_queue_encryption_test.rego
new file mode 100644
index 00000000..dc674d80
--- /dev/null
+++ b/checks/cloud/aws/sqs/enable_queue_encryption_test.rego
@@ -0,0 +1,42 @@
+package builtin.aws.sqs.aws0096_test
+
+import rego.v1
+
+import data.builtin.aws.sqs.aws0096 as check
+import data.lib.test
+
+test_allow_encrypted if {
+ inp := {"aws": {"sqs": {"queues": [{
+ "__defsec_metadata": {"managed": true},
+ "encryption": {
+ "kmskeyid": {"value": "alias/key"},
+ "managedencryption": {"value": true},
+ },
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_without_key_but_managed if {
+ inp := {"aws": {"sqs": {"queues": [{
+ "__defsec_metadata": {"managed": true},
+ "encryption": {
+ "kmskeyid": {"value": ""},
+ "managedencryption": {"value": true},
+ },
+ }]}}}
+
+	test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_unencrypted if {
+ inp := {"aws": {"sqs": {"queues": [{
+ "__defsec_metadata": {"managed": true},
+ "encryption": {
+ "kmskeyid": {"value": ""},
+ "managedencryption": {"value": false},
+ },
+ }]}}}
+
+ test.assert_equal_message("Queue is not encrypted", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/sqs/no_wildcards_in_policy_documents.go b/checks/cloud/aws/sqs/no_wildcards_in_policy_documents.go
deleted file mode 100755
index 6055b861..00000000
--- a/checks/cloud/aws/sqs/no_wildcards_in_policy_documents.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package sqs
-
-import (
- "strings"
-
- "github.com/aquasecurity/trivy/pkg/iac/severity"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/aquasecurity/trivy-checks/pkg/rules"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers"
-
- "github.com/liamg/iamgo"
-)
-
-var CheckNoWildcardsInPolicyDocuments = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0097",
- Provider: providers.AWSProvider,
- Service: "sqs",
- ShortCode: "no-wildcards-in-policy-documents",
- Summary: "AWS SQS policy document has wildcard action statement.",
- Impact: "SQS policies with wildcard actions allow more that is required",
- Resolution: "Keep policy scope to the minimum that is required to be effective",
- Explanation: `SQS Policy actions should always be restricted to a specific set.
-
-This ensures that the queue itself cannot be modified or deleted, and prevents possible future additions to queue actions to be implicitly allowed.`,
- Links: []string{
- "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-security-best-practices.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformNoWildcardsInPolicyDocumentsGoodExamples,
- BadExamples: terraformNoWildcardsInPolicyDocumentsBadExamples,
- Links: terraformNoWildcardsInPolicyDocumentsLinks,
- RemediationMarkdown: terraformNoWildcardsInPolicyDocumentsRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationNoWildcardsInPolicyDocumentsGoodExamples,
- BadExamples: cloudFormationNoWildcardsInPolicyDocumentsBadExamples,
- Links: cloudFormationNoWildcardsInPolicyDocumentsLinks,
- RemediationMarkdown: cloudFormationNoWildcardsInPolicyDocumentsRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, queue := range s.AWS.SQS.Queues {
- for _, policyDoc := range queue.Policies {
- var fail bool
- policy := policyDoc.Document.Parsed
- statements, _ := policy.Statements()
- for _, statement := range statements {
- effect, _ := statement.Effect()
- if effect != iamgo.EffectAllow {
- continue
- }
- actions, r := statement.Actions()
- for _, action := range actions {
- action = strings.ToLower(action)
- if action == "*" || action == "sqs:*" {
- fail = true
- results.Add(
- "Queue policy does not restrict actions to a known set.",
- policyDoc.Document.MetadataFromIamGo(statement.Range(), r),
- )
- break
- }
- }
- }
- if !fail {
- results.AddPassed(&queue)
- }
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/sqs/no_wildcards_in_policy_documents.rego b/checks/cloud/aws/sqs/no_wildcards_in_policy_documents.rego
new file mode 100644
index 00000000..069fbf24
--- /dev/null
+++ b/checks/cloud/aws/sqs/no_wildcards_in_policy_documents.rego
@@ -0,0 +1,46 @@
+# METADATA
+# title: AWS SQS policy document has wildcard action statement.
+# description: |
+# SQS Policy actions should always be restricted to a specific set.
+#   This ensures that the queue itself cannot be modified or deleted, and prevents possible future additions to queue actions from being implicitly allowed.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-security-best-practices.html
+# custom:
+# id: AVD-AWS-0097
+# avd_id: AVD-AWS-0097
+# provider: aws
+# service: sqs
+# severity: HIGH
+# short_code: no-wildcards-in-policy-documents
+# recommended_action: Keep policy scope to the minimum that is required to be effective
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: sqs
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy
+# good_examples: checks/cloud/aws/sqs/no_wildcards_in_policy_documents.tf.go
+# bad_examples: checks/cloud/aws/sqs/no_wildcards_in_policy_documents.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/sqs/no_wildcards_in_policy_documents.cf.go
+# bad_examples: checks/cloud/aws/sqs/no_wildcards_in_policy_documents.cf.go
+package builtin.aws.sqs.aws0097
+
+import rego.v1
+
+deny contains res if {
+ some queue in input.aws.sqs.queues
+ some policyDoc in queue.policies
+ doc := json.unmarshal(policyDoc.document.value)
+ some statement in doc.Statement
+ statement.Effect == "Allow"
+ some action in statement.Action
+ action in ["*", "sqs:*"]
+ res := result.new("Queue policy does not restrict actions to a known set.", policyDoc.document)
+}
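Note: the deleted Go check lower-cased each action before comparing, and IAM policy documents may express Action as a single string rather than an array, while the rule above only iterates an array. A hedged sketch of a normalising variant, assuming the same package and imports; wildcard_actions is a hypothetical helper name:

# Collect wildcard actions whether Action is an array or a single string.
wildcard_actions(statement) := {a | some a in statement.Action; lower(a) in {"*", "sqs:*"}} if is_array(statement.Action)

wildcard_actions(statement) := {statement.Action} if {
	is_string(statement.Action)
	lower(statement.Action) in {"*", "sqs:*"}
}

deny contains res if {
	some queue in input.aws.sqs.queues
	some policyDoc in queue.policies
	doc := json.unmarshal(policyDoc.document.value)
	some statement in doc.Statement
	statement.Effect == "Allow"
	count(wildcard_actions(statement)) > 0
	res := result.new("Queue policy does not restrict actions to a known set.", policyDoc.document)
}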
diff --git a/checks/cloud/aws/sqs/no_wildcards_in_policy_documents_test.go b/checks/cloud/aws/sqs/no_wildcards_in_policy_documents_test.go
deleted file mode 100644
index 649c5c7b..00000000
--- a/checks/cloud/aws/sqs/no_wildcards_in_policy_documents_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sqs
-
-import (
- "testing"
-
- "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/iam"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/sqs"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/liamg/iamgo"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckNoWildcardsInPolicyDocuments(t *testing.T) {
- tests := []struct {
- name string
- input sqs.SQS
- expected bool
- }{
- {
- name: "AWS SQS policy document with wildcard action statement",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: types.NewTestMetadata(),
- Policies: func() []iam.Policy {
-
- sb := iamgo.NewStatementBuilder()
- sb.WithSid("new policy")
- sb.WithEffect("Allow")
- sb.WithActions([]string{
- "sqs:*",
- })
- sb.WithResources([]string{"arn:aws:sqs:::my-queue"})
-
- builder := iamgo.NewPolicyBuilder()
- builder.WithVersion("2012-10-17")
- builder.WithStatement(sb.Build())
-
- return []iam.Policy{
- {
- Document: iam.Document{
- Metadata: types.NewTestMetadata(),
- Parsed: builder.Build(),
- },
- },
- }
- }(),
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS SQS policy document with action statement list",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: types.NewTestMetadata(),
- Policies: func() []iam.Policy {
-
- sb := iamgo.NewStatementBuilder()
- sb.WithSid("new policy")
- sb.WithEffect("Allow")
- sb.WithActions([]string{
- "sqs:SendMessage",
- "sqs:ReceiveMessage",
- })
- sb.WithResources([]string{"arn:aws:sqs:::my-queue"})
- sb.WithAWSPrincipals([]string{"*"})
-
- builder := iamgo.NewPolicyBuilder()
- builder.WithVersion("2012-10-17")
- builder.WithStatement(sb.Build())
-
- return []iam.Policy{
- {
- Document: iam.Document{
- Metadata: types.NewTestMetadata(),
- Parsed: builder.Build(),
- },
- },
- }
- }(),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.SQS = test.input
- results := CheckNoWildcardsInPolicyDocuments.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckNoWildcardsInPolicyDocuments.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/sqs/no_wildcards_in_policy_documents_test.rego b/checks/cloud/aws/sqs/no_wildcards_in_policy_documents_test.rego
new file mode 100644
index 00000000..52c2d968
--- /dev/null
+++ b/checks/cloud/aws/sqs/no_wildcards_in_policy_documents_test.rego
@@ -0,0 +1,42 @@
+package builtin.aws.sqs.aws0097_test
+
+import rego.v1
+
+import data.builtin.aws.sqs.aws0097 as check
+import data.lib.test
+
+test_allow_without_wildcards if {
+ inp := {"aws": {"sqs": {"queues": [{"policies": [{"document": {"value": json.marshal({
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Allow",
+ "Action": ["sqs:CreateQueue"],
+ }],
+ })}}]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_allow_with_wildcards_but_not_allowed if {
+ inp := {"aws": {"sqs": {"queues": [{"policies": [{"document": {"value": json.marshal({
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Deny",
+ "Action": ["sqs:*"],
+ }],
+ })}}]}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_with_wildcards if {
+ inp := {"aws": {"sqs": {"queues": [{"policies": [{"document": {"value": json.marshal({
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Allow",
+ "Action": ["*"],
+ }],
+ })}}]}]}}}
+
+ test.assert_equal_message("Queue policy does not restrict actions to a known set.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/sqs/queue_encryption_with_cmk.go b/checks/cloud/aws/sqs/queue_encryption_with_cmk.go
deleted file mode 100755
index 908f80dc..00000000
--- a/checks/cloud/aws/sqs/queue_encryption_with_cmk.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package sqs
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckQueueEncryptionUsesCMK = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0135",
- Provider: providers.AWSProvider,
- Service: "sqs",
- ShortCode: "queue-encryption-use-cmk",
- Summary: "SQS queue should be encrypted with a CMK.",
- Impact: "The SQS queue messages could be read if compromised. Key management is very limited when using default keys.",
- Resolution: "Encrypt SQS Queue with a customer-managed key",
- Explanation: `Queues should be encrypted with customer managed KMS keys and not default AWS managed keys, in order to allow granular control over access to specific queues.`,
- Links: []string{
- "https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformQueueEncryptionUsesCMKGoodExamples,
- BadExamples: terraformQueueEncryptionUsesCMKBadExamples,
- Links: terraformQueueEncryptionUsesCMKLinks,
- RemediationMarkdown: terraformQueueEncryptionUsesCMKRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationQueueEncryptionUsesCMKGoodExamples,
- BadExamples: cloudFormationQueueEncryptionUsesCMKBadExamples,
- Links: cloudFormationQueueEncryptionUsesCMKLinks,
- RemediationMarkdown: cloudFormationQueueEncryptionUsesCMKRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, queue := range s.AWS.SQS.Queues {
- if queue.Metadata.IsUnmanaged() {
- continue
- }
- if queue.Encryption.KMSKeyID.EqualTo("alias/aws/sqs") {
- results.Add(
- "Queue is not encrypted with a customer managed key.",
- queue.Encryption.KMSKeyID,
- )
- } else {
- results.AddPassed(&queue)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/sqs/queue_encryption_with_cmk.rego b/checks/cloud/aws/sqs/queue_encryption_with_cmk.rego
new file mode 100644
index 00000000..2ba5db5a
--- /dev/null
+++ b/checks/cloud/aws/sqs/queue_encryption_with_cmk.rego
@@ -0,0 +1,41 @@
+# METADATA
+# title: SQS queue should be encrypted with a CMK.
+# description: |
+# Queues should be encrypted with customer managed KMS keys and not default AWS managed keys, in order to allow granular control over access to specific queues.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html
+# custom:
+# id: AVD-AWS-0135
+# avd_id: AVD-AWS-0135
+# provider: aws
+# service: sqs
+# severity: HIGH
+# short_code: queue-encryption-use-cmk
+# recommended_action: Encrypt SQS Queue with a customer-managed key
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: sqs
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue#server-side-encryption-sse
+# good_examples: checks/cloud/aws/sqs/queue_encryption_with_cmk.tf.go
+# bad_examples: checks/cloud/aws/sqs/queue_encryption_with_cmk.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/sqs/queue_encryption_with_cmk.cf.go
+# bad_examples: checks/cloud/aws/sqs/queue_encryption_with_cmk.cf.go
+package builtin.aws.sqs.aws0135
+
+import rego.v1
+
+deny contains res if {
+ some queue in input.aws.sqs.queues
+ queue.__defsec_metadata.managed
+ queue.encryption.kmskeyid.value == "alias/aws/sqs"
+ res := result.new("Queue is not encrypted with a customer managed key.", queue.encryption.kmskeyid)
+}
diff --git a/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.go b/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.go
deleted file mode 100644
index 23f7670c..00000000
--- a/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package sqs
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/sqs"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckQueueEncryptionUsesCMK(t *testing.T) {
- tests := []struct {
- name string
- input sqs.SQS
- expected bool
- }{
- {
- name: "SQS Queue unencrypted",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: sqs.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- {
- name: "SQS Queue encrypted with default key",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: sqs.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("alias/aws/sqs", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "SQS Queue encrypted with proper key",
- input: sqs.SQS{
- Queues: []sqs.Queue{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: sqs.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.SQS = test.input
- results := CheckQueueEncryptionUsesCMK.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckQueueEncryptionUsesCMK.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.rego b/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.rego
new file mode 100644
index 00000000..3b60c7dc
--- /dev/null
+++ b/checks/cloud/aws/sqs/queue_encryption_with_cmk_test.rego
@@ -0,0 +1,26 @@
+package builtin.aws.sqs.aws0135_test
+
+import rego.v1
+
+import data.builtin.aws.sqs.aws0135 as check
+import data.lib.test
+
+test_allow_encrypted_with_cmk if {
+ inp := {"aws": {"sqs": {"queues": [{
+ "__defsec_metadata": {"managed": true},
+ "name": "test-queue",
+ "encryption": {"kmskeyid": {"value": "key-id"}},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_unencrypted_with_cmk if {
+ inp := {"aws": {"sqs": {"queues": [{
+ "__defsec_metadata": {"managed": true},
+ "name": "test-queue",
+ "encryption": {"kmskeyid": {"value": "alias/aws/sqs"}},
+ }]}}}
+
+ test.assert_equal_message("Queue is not encrypted with a customer managed key.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/sqs/sqs.go b/checks/cloud/aws/sqs/sqs.go
new file mode 100644
index 00000000..e63cf0e5
--- /dev/null
+++ b/checks/cloud/aws/sqs/sqs.go
@@ -0,0 +1 @@
+package sqs
diff --git a/checks/cloud/aws/ssm/secret_use_customer_key.go b/checks/cloud/aws/ssm/secret_use_customer_key.go
deleted file mode 100755
index fb3c5ff6..00000000
--- a/checks/cloud/aws/ssm/secret_use_customer_key.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package ssm
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/ssm"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckSecretUseCustomerKey = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0098",
- Provider: providers.AWSProvider,
- Service: "ssm",
- ShortCode: "secret-use-customer-key",
- Summary: "Secrets Manager should use customer managed keys",
- Impact: "Using AWS managed keys reduces the flexibility and control over the encryption key",
- Resolution: "Use customer managed keys",
- Explanation: `Secrets Manager encrypts secrets by default using a default key created by AWS. To ensure control and granularity of secret encryption, CMK's should be used explicitly.`,
- Links: []string{
- "https://docs.aws.amazon.com/kms/latest/developerguide/services-secrets-manager.html#asm-encrypt",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformSecretUseCustomerKeyGoodExamples,
- BadExamples: terraformSecretUseCustomerKeyBadExamples,
- Links: terraformSecretUseCustomerKeyLinks,
- RemediationMarkdown: terraformSecretUseCustomerKeyRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationSecretUseCustomerKeyGoodExamples,
- BadExamples: cloudFormationSecretUseCustomerKeyBadExamples,
- Links: cloudFormationSecretUseCustomerKeyLinks,
- RemediationMarkdown: cloudFormationSecretUseCustomerKeyRemediationMarkdown,
- },
- Severity: severity.Low,
- },
- func(s *state.State) (results scan.Results) {
- for _, secret := range s.AWS.SSM.Secrets {
- if secret.KMSKeyID.IsEmpty() {
- results.Add(
- "Secret is not encrypted with a customer managed key.",
- secret.KMSKeyID,
- )
- } else if secret.KMSKeyID.EqualTo(ssm.DefaultKMSKeyID) {
- results.Add(
- "Secret explicitly uses the default key.",
- secret.KMSKeyID,
- )
- } else {
- results.AddPassed(&secret)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/ssm/secret_use_customer_key.rego b/checks/cloud/aws/ssm/secret_use_customer_key.rego
new file mode 100644
index 00000000..51fc2fd3
--- /dev/null
+++ b/checks/cloud/aws/ssm/secret_use_customer_key.rego
@@ -0,0 +1,46 @@
+# METADATA
+# title: Secrets Manager should use customer managed keys
+# description: |
+#   Secrets Manager encrypts secrets by default using a default key created by AWS. To ensure control and granularity of secret encryption, CMKs should be used explicitly.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/kms/latest/developerguide/services-secrets-manager.html#asm-encrypt
+# custom:
+# id: AVD-AWS-0098
+# avd_id: AVD-AWS-0098
+# provider: aws
+# service: ssm
+# severity: LOW
+# short_code: secret-use-customer-key
+# recommended_action: Use customer managed keys
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: ssm
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret#kms_key_id
+# good_examples: checks/cloud/aws/ssm/secret_use_customer_key.tf.go
+# bad_examples: checks/cloud/aws/ssm/secret_use_customer_key.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/ssm/secret_use_customer_key.cf.go
+# bad_examples: checks/cloud/aws/ssm/secret_use_customer_key.cf.go
+package builtin.aws.ssm.aws0098
+
+import rego.v1
+
+deny contains res if {
+ some secret in input.aws.ssm.secrets
+ secret.kmskeyid.value == ""
+ res := result.new("Secret is not encrypted with a customer managed key.", secret.kmskeyid)
+}
+
+deny contains res if {
+ some secret in input.aws.ssm.secrets
+ secret.kmskeyid.value == "alias/aws/secretsmanager"
+ res := result.new("Secret explicitly uses the default key.", secret.kmskeyid)
+}
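Note: the deleted Go check compared against the ssm.DefaultKMSKeyID constant, while the Rego above repeats the alias/aws/secretsmanager literal. A hedged sketch of naming the alias once, assuming the same package and imports; default_kms_key_id is a hypothetical name:

default_kms_key_id := "alias/aws/secretsmanager"

deny contains res if {
	some secret in input.aws.ssm.secrets
	secret.kmskeyid.value == default_kms_key_id
	res := result.new("Secret explicitly uses the default key.", secret.kmskeyid)
}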
diff --git a/checks/cloud/aws/ssm/secret_use_customer_key_test.go b/checks/cloud/aws/ssm/secret_use_customer_key_test.go
deleted file mode 100644
index c73830cb..00000000
--- a/checks/cloud/aws/ssm/secret_use_customer_key_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package ssm
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/ssm"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckSecretUseCustomerKey(t *testing.T) {
- tests := []struct {
- name string
- input ssm.SSM
- expected bool
- }{
- {
- name: "AWS SSM missing KMS key",
- input: ssm.SSM{
- Secrets: []ssm.Secret{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("", trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS SSM with default KMS key",
- input: ssm.SSM{
- Secrets: []ssm.Secret{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String(ssm.DefaultKMSKeyID, trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS SSM with proper KMS key",
- input: ssm.SSM{
- Secrets: []ssm.Secret{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- KMSKeyID: trivyTypes.String("some-ok-key", trivyTypes.NewTestMetadata()),
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.SSM = test.input
- results := CheckSecretUseCustomerKey.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckSecretUseCustomerKey.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/ssm/secret_use_customer_key_test.rego b/checks/cloud/aws/ssm/secret_use_customer_key_test.rego
new file mode 100644
index 00000000..589b88dc
--- /dev/null
+++ b/checks/cloud/aws/ssm/secret_use_customer_key_test.rego
@@ -0,0 +1,24 @@
+package builtin.aws.ssm.aws0098_test
+
+import rego.v1
+
+import data.builtin.aws.ssm.aws0098 as check
+import data.lib.test
+
+test_deny_without_kms_key if {
+ inp := {"aws": {"ssm": {"secrets": [{"kmskeyid": {"value": ""}}]}}}
+
+ test.assert_equal_message("Secret is not encrypted with a customer managed key.", check.deny) with input as inp
+}
+
+test_deny_with_default_kms_key if {
+ inp := {"aws": {"ssm": {"secrets": [{"kmskeyid": {"value": "alias/aws/secretsmanager"}}]}}}
+
+ test.assert_equal_message("Secret explicitly uses the default key.", check.deny) with input as inp
+}
+
+test_allow_with_custom_kms_key if {
+ inp := {"aws": {"ssm": {"secrets": [{"kmskeyid": {"value": "arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"}}]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/ssm/ssm.go b/checks/cloud/aws/ssm/ssm.go
new file mode 100644
index 00000000..7c641180
--- /dev/null
+++ b/checks/cloud/aws/ssm/ssm.go
@@ -0,0 +1 @@
+package ssm
diff --git a/checks/cloud/aws/workspaces/enable_disk_encryption.go b/checks/cloud/aws/workspaces/enable_disk_encryption.go
deleted file mode 100755
index 1887702b..00000000
--- a/checks/cloud/aws/workspaces/enable_disk_encryption.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package workspaces
-
-import (
- "github.com/aquasecurity/trivy-checks/pkg/rules"
- "github.com/aquasecurity/trivy/pkg/iac/providers"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
- "github.com/aquasecurity/trivy/pkg/iac/severity"
- "github.com/aquasecurity/trivy/pkg/iac/state"
-)
-
-var CheckEnableDiskEncryption = rules.Register(
- scan.Rule{
- AVDID: "AVD-AWS-0109",
- Provider: providers.AWSProvider,
- Service: "workspaces",
- ShortCode: "enable-disk-encryption",
- Summary: "Root and user volumes on Workspaces should be encrypted",
- Impact: "Data can be freely read if compromised",
- Resolution: "Root and user volume encryption should be enabled",
- Explanation: `Workspace volumes for both user and root should be encrypted to protect the data stored on them.`,
- Links: []string{
- "https://docs.aws.amazon.com/workspaces/latest/adminguide/encrypt-workspaces.html",
- },
- Terraform: &scan.EngineMetadata{
- GoodExamples: terraformEnableDiskEncryptionGoodExamples,
- BadExamples: terraformEnableDiskEncryptionBadExamples,
- Links: terraformEnableDiskEncryptionLinks,
- RemediationMarkdown: terraformEnableDiskEncryptionRemediationMarkdown,
- },
- CloudFormation: &scan.EngineMetadata{
- GoodExamples: cloudFormationEnableDiskEncryptionGoodExamples,
- BadExamples: cloudFormationEnableDiskEncryptionBadExamples,
- Links: cloudFormationEnableDiskEncryptionLinks,
- RemediationMarkdown: cloudFormationEnableDiskEncryptionRemediationMarkdown,
- },
- Severity: severity.High,
- },
- func(s *state.State) (results scan.Results) {
- for _, workspace := range s.AWS.WorkSpaces.WorkSpaces {
- var fail bool
- if workspace.RootVolume.Encryption.Enabled.IsFalse() {
- results.Add(
- "Root volume does not have encryption enabled.",
- workspace.RootVolume.Encryption.Enabled,
- )
- fail = true
- }
- if workspace.UserVolume.Encryption.Enabled.IsFalse() {
- results.Add(
- "User volume does not have encryption enabled.",
- workspace.UserVolume.Encryption.Enabled,
- )
- fail = true
- }
- if !fail {
- results.AddPassed(&workspace)
- }
- }
- return
- },
-)
diff --git a/checks/cloud/aws/workspaces/enable_disk_encryption.rego b/checks/cloud/aws/workspaces/enable_disk_encryption.rego
new file mode 100644
index 00000000..635821c0
--- /dev/null
+++ b/checks/cloud/aws/workspaces/enable_disk_encryption.rego
@@ -0,0 +1,46 @@
+# METADATA
+# title: Root and user volumes on Workspaces should be encrypted
+# description: |
+# Workspace volumes for both user and root should be encrypted to protect the data stored on them.
+# scope: package
+# schemas:
+# - input: schema["cloud"]
+# related_resources:
+# - https://docs.aws.amazon.com/workspaces/latest/adminguide/encrypt-workspaces.html
+# custom:
+# id: AVD-AWS-0109
+# avd_id: AVD-AWS-0109
+# provider: aws
+# service: workspaces
+# severity: HIGH
+# short_code: enable-disk-encryption
+# recommended_action: Root and user volume encryption should be enabled
+# input:
+# selector:
+# - type: aws
+# subtypes:
+# - service: workspaces
+# provider: aws
+# terraform:
+# links:
+# - https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/workspaces_workspace#root_volume_encryption_enabled
+# good_examples: checks/cloud/aws/workspaces/enable_disk_encryption.tf.go
+# bad_examples: checks/cloud/aws/workspaces/enable_disk_encryption.tf.go
+# cloudformation:
+# good_examples: checks/cloud/aws/workspaces/enable_disk_encryption.cf.go
+# bad_examples: checks/cloud/aws/workspaces/enable_disk_encryption.cf.go
+package builtin.aws.workspaces.aws0109
+
+import rego.v1
+
+deny contains res if {
+ some workspace in input.aws.workspaces.workspaces
+ workspace.rootvolume.encryption.enabled.value == false
+ res := result.new("Root volume does not have encryption enabled.", workspace.rootvolume.encryption)
+}
+
+deny contains res if {
+ some workspace in input.aws.workspaces.workspaces
+ workspace.uservolume.encryption.enabled.value == false
+ res := result.new("User volume does not have encryption enabled.", workspace.uservolume.encryption)
+}
diff --git a/checks/cloud/aws/workspaces/enable_disk_encryption_test.go b/checks/cloud/aws/workspaces/enable_disk_encryption_test.go
deleted file mode 100644
index fe43bdb8..00000000
--- a/checks/cloud/aws/workspaces/enable_disk_encryption_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package workspaces
-
-import (
- "testing"
-
- trivyTypes "github.com/aquasecurity/trivy/pkg/iac/types"
-
- "github.com/aquasecurity/trivy/pkg/iac/state"
-
- "github.com/aquasecurity/trivy/pkg/iac/providers/aws/workspaces"
- "github.com/aquasecurity/trivy/pkg/iac/scan"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCheckEnableDiskEncryption(t *testing.T) {
- tests := []struct {
- name string
- input workspaces.WorkSpaces
- expected bool
- }{
- {
- name: "AWS Workspace with unencrypted root volume",
- input: workspaces.WorkSpaces{
- WorkSpaces: []workspaces.WorkSpace{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RootVolume: workspaces.Volume{
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: workspaces.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- UserVolume: workspaces.Volume{
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: workspaces.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: true,
- },
- {
- name: "AWS Workspace with unencrypted user volume",
- input: workspaces.WorkSpaces{
- WorkSpaces: []workspaces.WorkSpace{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RootVolume: workspaces.Volume{
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: workspaces.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- UserVolume: workspaces.Volume{
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: workspaces.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(false, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: true,
- },
-
- {
- name: "AWS Workspace with encrypted user and root volumes",
- input: workspaces.WorkSpaces{
- WorkSpaces: []workspaces.WorkSpace{
- {
- Metadata: trivyTypes.NewTestMetadata(),
- RootVolume: workspaces.Volume{
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: workspaces.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- UserVolume: workspaces.Volume{
- Metadata: trivyTypes.NewTestMetadata(),
- Encryption: workspaces.Encryption{
- Metadata: trivyTypes.NewTestMetadata(),
- Enabled: trivyTypes.Bool(true, trivyTypes.NewTestMetadata()),
- },
- },
- },
- },
- },
- expected: false,
- },
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- var testState state.State
- testState.AWS.WorkSpaces = test.input
- results := CheckEnableDiskEncryption.Evaluate(&testState)
- var found bool
- for _, result := range results {
- if result.Status() == scan.StatusFailed && result.Rule().LongID() == CheckEnableDiskEncryption.LongID() {
- found = true
- }
- }
- if test.expected {
- assert.True(t, found, "Rule should have been found")
- } else {
- assert.False(t, found, "Rule should not have been found")
- }
- })
- }
-}
diff --git a/checks/cloud/aws/workspaces/enable_disk_encryption_test.rego b/checks/cloud/aws/workspaces/enable_disk_encryption_test.rego
new file mode 100644
index 00000000..ac27b443
--- /dev/null
+++ b/checks/cloud/aws/workspaces/enable_disk_encryption_test.rego
@@ -0,0 +1,27 @@
+package builtin.aws.workspaces.aws0109_test
+
+import rego.v1
+
+import data.builtin.aws.workspaces.aws0109 as check
+import data.lib.test
+
+test_allow_encrypted if {
+ inp := {"aws": {"workspaces": {"workspaces": [{
+ "rootvolume": {"encryption": {"enabled": {"value": true}}},
+ "uservolume": {"encryption": {"enabled": {"value": true}}},
+ }]}}}
+
+ test.assert_empty(check.deny) with input as inp
+}
+
+test_deny_root_volume_unencrypted if {
+ inp := {"aws": {"workspaces": {"workspaces": [{"rootvolume": {"encryption": {"enabled": {"value": false}}}}]}}}
+
+ test.assert_equal_message("Root volume does not have encryption enabled.", check.deny) with input as inp
+}
+
+test_deny_user_volume_unencrypted if {
+ inp := {"aws": {"workspaces": {"workspaces": [{"uservolume": {"encryption": {"enabled": {"value": false}}}}]}}}
+
+ test.assert_equal_message("User volume does not have encryption enabled.", check.deny) with input as inp
+}
diff --git a/checks/cloud/aws/workspaces/workspaces.go b/checks/cloud/aws/workspaces/workspaces.go
new file mode 100644
index 00000000..b81be0ec
--- /dev/null
+++ b/checks/cloud/aws/workspaces/workspaces.go
@@ -0,0 +1 @@
+package workspaces
diff --git a/checks/kubernetes/cisbenchmarks/apiserver/deny_service_external_ips_plugin_test.rego b/checks/kubernetes/cisbenchmarks/apiserver/deny_service_external_ips_plugin_test.rego
index 57034549..5c728fb0 100644
--- a/checks/kubernetes/cisbenchmarks/apiserver/deny_service_external_ips_plugin_test.rego
+++ b/checks/kubernetes/cisbenchmarks/apiserver/deny_service_external_ips_plugin_test.rego
@@ -1,6 +1,6 @@
package builtin.kubernetes.KCV0003
-test_deny_service_external_ips_is_enabled {
+test_disallow_service_external_ips_is_enabled {
r := deny with input as {
"apiVersion": "v1",
"kind": "Pod",
@@ -65,7 +65,7 @@ test_enable_admission_plugins_is_not_configured_args {
count(r) == 0
}
-test_deny_service_external_ips_is_not_enabled {
+test_disallow_service_external_ips_is_not_enabled {
r := deny with input as {
"apiVersion": "v1",
"kind": "Pod",
@@ -86,7 +86,7 @@ test_deny_service_external_ips_is_not_enabled {
count(r) == 0
}
-test_deny_service_external_ips_is_enabled_with_others {
+test_disallow_service_external_ips_is_enabled_with_others {
r := deny with input as {
"apiVersion": "v1",
"kind": "Pod",
diff --git a/cmd/go2rego/main.go b/cmd/go2rego/main.go
new file mode 100644
index 00000000..2cb7440b
--- /dev/null
+++ b/cmd/go2rego/main.go
@@ -0,0 +1,473 @@
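+// Command go2rego generates Rego check scaffolding (OPA METADATA annotations,
+// package declarations and _test.rego stubs) for the Go checks registered in
+// trivy. With a single AVD ID argument it processes only that check, e.g.
+// `go run ./cmd/go2rego AVD-AWS-0109`; with no arguments it processes every
+// registered check.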
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/fs"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "strings"
+
+ "golang.org/x/exp/maps"
+
+ "github.com/aquasecurity/trivy/pkg/iac/framework"
+ "github.com/aquasecurity/trivy/pkg/iac/providers"
+ "github.com/aquasecurity/trivy/pkg/iac/rules"
+ "github.com/aquasecurity/trivy/pkg/iac/scan"
+ "github.com/open-policy-agent/opa/ast"
+ "github.com/open-policy-agent/opa/format"
+ "github.com/open-policy-agent/opa/loader"
+)
+
+var fileMappings = buildFileMappings()
+
+func main() {
+
+ if len(os.Args) == 2 {
+ checkID := os.Args[1]
+
+ rule := findCheckByID(checkID)
+ if rule == nil {
+ log.Fatal("Check not found")
+ }
+
+ goCheckToRego(rule)
+ } else {
+ log.Println("Total checks:", len(rules.GetRegistered(framework.ALL)))
+ for _, r := range rules.GetRegistered(framework.ALL) {
+ goCheckToRego(&r.Rule)
+ }
+ }
+
+}
+
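+// buildFileMappings walks the checks directory and maps each AVD ID (parsed
+// from the AVDID field in the Go source) to the path of that source file
+// without its extension. Test files and .tf.go/.cf.go example files are skipped.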
+func buildFileMappings() map[string]string {
+
+ m := make(map[string]string)
+
+ walkFn := func(path string, info fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if info.IsDir() {
+ return nil
+ }
+
+ if !strings.HasSuffix(path, ".go") || slices.ContainsFunc(
+ []string{"_test.go", ".tf.go", ".cf.go"},
+ func(s string) bool {
+ return strings.HasSuffix(path, s)
+ },
+ ) {
+ return nil
+ }
+
+		// read the file and extract the AVD ID from it
+
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ r := regexp.MustCompile(`AVDID:\s*"([^"]+)"`)
+
+ matches := r.FindStringSubmatch(string(b))
+ if len(matches) != 2 {
+ log.Printf("expected 2 matches, got %d. File path: %s", len(matches), path)
+ return nil
+ }
+ if _, ok := m[matches[1]]; ok {
+ log.Printf("duplicate check id %s. File path: %s", matches[1], path)
+ }
+
+ m[matches[1]] = removeExtension(path)
+
+ return nil
+ }
+
+ if err := filepath.WalkDir("checks", walkFn); err != nil {
+ log.Fatal(err)
+ }
+
+ return m
+}
+
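+// goCheckToRego creates or updates the Rego check and its companion
+// _test.rego file for the given rule, using the rule metadata to build the
+// package annotation.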
+func goCheckToRego(rule *scan.Rule) {
+ outputPath := buildOutputPath(rule)
+
+ goCheckPath := removeExtension(outputPath) + ".go"
+ if _, err := os.Stat(goCheckPath); errors.Is(err, os.ErrNotExist) {
+ log.Println("Go check file not found", goCheckPath)
+ }
+
+ pkg := buildRegoPackage(rule)
+ pkgPath := ast.MustParseRef(pkg) // TODO: why without builtin prefix
+
+ pkgAnnotation := buildPackageAnnotation(rule)
+ var comments []*ast.Comment
+
+ for i := 0; i < len(pkgAnnotation); i++ {
+ comments = append(comments, &ast.Comment{
+ Text: []byte(pkgAnnotation[i]),
+ Location: &ast.Location{
+ Row: i + 1,
+ },
+ })
+ }
+
+ if err := modifyOrCreateRegoCheck(outputPath, pkgPath, comments); err != nil {
+ log.Fatal(err)
+ }
+
+ regoTestPath := removeExtension(outputPath) + "_test.rego"
+ if err := createTestRegoCheck(regoTestPath, pkgPath); err != nil {
+ log.Fatal(err)
+ }
+}
+
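+// modifyTestRegoCheck re-parses an existing Rego test file and rewrites its
+// package declaration and imports in place.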
+func modifyTestRegoCheck(regoTestPath string, pkgPath ast.Ref) error {
+ if _, err := os.Stat(regoTestPath); err != nil {
+ return err
+ }
+
+ b, err := os.ReadFile(regoTestPath)
+ if err != nil {
+ return err
+ }
+
+ result, err := loader.NewFileLoader().
+ WithReader(bytes.NewReader(b)).
+ WithProcessAnnotation(true).
+ Filtered([]string{regoTestPath}, nil)
+
+ if err != nil {
+ return err
+ }
+
+ if len(result.Modules) != 1 {
+ return fmt.Errorf("expected 1 module, got %d", len(result.Modules))
+ }
+
+ module := maps.Values(result.ParsedModules())[0]
+
+ f, err := os.OpenFile(regoTestPath, os.O_WRONLY|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return updateAndWriteTestRegoCheck(f, pkgPath, module)
+}
+
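+// updateAndWriteTestRegoCheck points the module at the <pkg>_test package,
+// sets the standard imports (rego.v1, the check package aliased as "check",
+// and data.lib.test), then formats the module and writes it to f.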
+func updateAndWriteTestRegoCheck(f *os.File, pkgPath ast.Ref, module *ast.Module) error {
+
+ module.Package = &ast.Package{
+ Path: ast.MustParseRef(pkgPath.String() + "_test"),
+ Location: &ast.Location{
+ Row: 1,
+ },
+ }
+
+ module.Imports = []*ast.Import{
+ {
+ Path: ast.MustParseTerm("rego.v1"),
+ Location: &ast.Location{
+ Row: 3,
+ },
+ },
+ {
+ Path: ast.MustParseTerm(pkgPath.String()),
+ Alias: ast.Var("check"),
+ Location: &ast.Location{
+ Row: 5,
+ },
+ },
+ {
+ Path: ast.MustParseTerm("data.lib.test"),
+ Location: &ast.Location{
+ Row: 6,
+ },
+ },
+ }
+
+ formatted, err := format.Ast(module)
+ if err != nil {
+ return err
+ }
+
+ if _, err := f.Write(formatted); err != nil {
+ return err
+ }
+
+ return nil
+}
+
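+// createTestRegoCheck creates the _test.rego stub if it is missing, otherwise
+// delegates to modifyTestRegoCheck to update the existing file.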
+func createTestRegoCheck(regoTestPath string, pkgPath ast.Ref) error {
+ if _, err := os.Stat(regoTestPath); err == nil {
+ return modifyTestRegoCheck(regoTestPath, pkgPath)
+ } else if !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+
+ f, err := os.Create(regoTestPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ module := &ast.Module{}
+
+ return updateAndWriteTestRegoCheck(f, pkgPath, module)
+}
+
+func findCheckByID(id string) *scan.Rule {
+ for _, r := range rules.GetRegistered(framework.ALL) {
+ if r.Rule.AVDID == id {
+ return &r.Rule
+ }
+ }
+ return nil
+}
+
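+// modifyOrCreateRegoCheck creates the Rego check if it does not exist yet.
+// Otherwise it re-parses the existing module, replaces the stale metadata
+// comments above the package declaration with the freshly generated ones,
+// updates the package path and rewrites the file.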
+func modifyOrCreateRegoCheck(filePath string, pkgPath ast.Ref, annotationComments []*ast.Comment) error {
+	b, err := os.ReadFile(filePath)
+	if errors.Is(err, os.ErrNotExist) {
+		return createRegoCheck(filePath, pkgPath, annotationComments)
+	} else if err != nil {
+		return err
+	}
+
+ result, err := loader.NewFileLoader().
+ WithReader(bytes.NewReader(b)).
+ WithProcessAnnotation(true).
+ Filtered([]string{filePath}, nil)
+
+ if err != nil {
+ return err
+ }
+
+ if len(result.Modules) != 1 {
+ return fmt.Errorf("expected 1 module, got %d", len(result.Modules))
+ }
+
+ module := maps.Values(result.ParsedModules())[0]
+
+ module.Annotations = nil
+
+ var moduleComments []*ast.Comment
+
+ for _, c := range module.Comments {
+ if c.Location.Row > module.Package.Location.Row {
+ moduleComments = append(moduleComments, c)
+ }
+ }
+
+ module.Comments = append(moduleComments, annotationComments...)
+ module.Package.Path = pkgPath
+
+ formatted, err := format.Ast(module)
+ if err != nil {
+ return err
+ }
+
+ f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+	if _, err := f.Write(formatted); err != nil {
+		return err
+	}
+
+ return nil
+}
+
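+// createRegoCheck writes a new Rego module containing only the metadata
+// comments and the package declaration; the rule bodies themselves are not
+// generated.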
+func createRegoCheck(filePath string, pkgPath ast.Ref, annotationComments []*ast.Comment) error {
+
+ f, err := os.Create(filePath)
+ if err != nil {
+ return err
+ }
+
+ defer f.Close()
+
+ module := &ast.Module{
+ Package: &ast.Package{
+ Path: pkgPath,
+ Location: &ast.Location{
+ Row: len(annotationComments) + 1,
+ },
+ },
+ Comments: annotationComments,
+ }
+
+ formatted, err := format.Ast(module)
+ if err != nil {
+ return err
+ }
+
+ if _, err := f.Write(formatted); err != nil {
+ return err
+ }
+
+ return nil
+}
+
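+// buildOutputPath looks up the Go check path registered for the rule's AVD ID
+// and returns it with a .rego extension.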
+func buildOutputPath(rule *scan.Rule) string {
+
+ p, ok := fileMappings[rule.AVDID]
+ if !ok {
+		log.Fatalf("file mapping not found for %s", rule.AVDID)
+ }
+ return p + ".rego"
+}
+
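+// cleanExplanation splits the explanation into lines, trims whitespace and
+// tabs, and drops empty lines so the text can be embedded in the METADATA
+// description.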
+func cleanExplanation(s string) []string {
+ lines := strings.Split(s, "\n")
+
+ for i := 0; i < len(lines); i++ {
+ lines[i] = strings.TrimSpace(lines[i])
+ // Trim tabs
+ lines[i] = strings.ReplaceAll(lines[i], "\t", " ")
+
+ if lines[i] == "" {
+ lines = append(lines[:i], lines[i+1:]...)
+ i--
+ }
+ }
+
+ return lines
+}
+
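+// buildPackageAnnotation renders the rule as OPA METADATA comment lines:
+// title, description, schema, related resources, the custom block (IDs,
+// severity, recommended action, input selector, frameworks) and any
+// Terraform/CloudFormation engine metadata.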
+func buildPackageAnnotation(r *scan.Rule) []string {
+
+ var lines []string
+
+	var addLine = func(line string, indent int) {
+		lines = append(lines, strings.Repeat(" ", indent)+line)
+	}
+
+ addLine("METADATA", 1)
+
+ addLine("title: "+strings.ReplaceAll(r.Summary, "\n", " "), 1) // TODO
+ addLine("description: |", 1)
+ for _, line := range cleanExplanation(r.Explanation) {
+ addLine(line, 3)
+ }
+ addLine("scope: package", 1)
+ addLine("schemas:", 1)
+
+ switch r.Provider {
+ case providers.KubernetesProvider:
+ addLine("- input: schema[\"kubernetes\"]", 3)
+ default:
+ addLine("- input: schema[\"cloud\"]", 3)
+ }
+
+ if len(r.Links) > 0 {
+ addLine("related_resources:", 1)
+ for _, link := range r.Links {
+ if link == "" {
+ continue
+ }
+ addLine("- "+link, 3)
+ }
+ }
+
+ addLine("custom:", 1)
+ addLine("id: "+r.AVDID, 3)
+ addLine("avd_id: "+r.AVDID, 3)
+ addLine("provider: "+string(r.Provider), 3)
+ addLine("service: "+r.Service, 3)
+ addLine("severity: "+string(r.Severity), 3)
+ addLine("short_code: "+r.ShortCode, 3)
+ addLine("recommended_action: "+r.Resolution, 3)
+
+ generateFramework(r, &lines)
+
+ addLine("input:", 3)
+ addLine("selector:", 5)
+ addLine("- type: "+string(r.Provider), 7)
+ addLine("subtypes:", 9)
+ addLine("- service: "+r.Service, 11)
+ addLine("provider: "+string(r.Provider), 13)
+
+ if r.Terraform != nil {
+ addLine("terraform:", 3)
+ generateEngineMetadata(r, "tf", r.Terraform, &lines)
+ }
+
+ if r.CloudFormation != nil {
+ addLine("cloudformation:", 3)
+ generateEngineMetadata(r, "cf", r.CloudFormation, &lines)
+ }
+
+ return lines
+}
+
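+// generateFramework appends a frameworks block to the custom metadata unless
+// the rule only targets the default framework.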
+func generateFramework(r *scan.Rule, lines *[]string) {
+ if _, ok := r.Frameworks[framework.Default]; ok && len(r.Frameworks) == 1 {
+ return
+ }
+
+ if len(r.Frameworks) > 0 {
+
+ *lines = append(*lines, strings.Repeat(" ", 3)+"frameworks:")
+ for f, versions := range r.Frameworks {
+ if f == framework.Default {
+ continue
+ }
+
+ *lines = append(*lines, strings.Repeat(" ", 5)+string(f)+":")
+ for _, version := range versions {
+ *lines = append(*lines, strings.Repeat(" ", 7)+"- \""+version+"\"")
+ }
+ }
+ }
+}
+
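+// generateEngineMetadata appends the links and the good/bad example paths for
+// the given engine ("tf" or "cf").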
+func generateEngineMetadata(r *scan.Rule, typ string, meta *scan.EngineMetadata, lines *[]string) {
+ if meta == nil {
+ return
+ }
+
+ if len(meta.Links) > 0 {
+ *lines = append(*lines, strings.Repeat(" ", 5)+"links:")
+ for _, link := range meta.Links {
+ if link == "" {
+ continue
+ }
+ *lines = append(*lines, strings.Repeat(" ", 7)+"- "+link)
+ }
+ }
+
+ outputPath := buildOutputPath(r)
+ examplePath := removeExtension(outputPath) + "." + typ + ".go"
+
+ if len(meta.GoodExamples) > 0 {
+ *lines = append(*lines, strings.Repeat(" ", 5)+"good_examples: "+examplePath)
+ }
+
+ if len(meta.BadExamples) > 0 {
+ *lines = append(*lines, strings.Repeat(" ", 5)+"bad_examples: "+examplePath)
+ }
+
+	// TODO: support remediation markdown
+}
+
+func removeExtension(s string) string {
+ return s[0 : len(s)-len(filepath.Ext(s))]
+}
+
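+// buildRegoPackage derives the Rego package path from the provider, service
+// and AVD ID, e.g. data.builtin.aws.workspaces.aws0109.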
+func buildRegoPackage(r *scan.Rule) string {
+ id := strings.SplitN(r.AVDID, "-", 3)
+ service := strings.ReplaceAll(r.Service, "-", "")
+ switch r.Provider {
+ case providers.KubernetesProvider:
+ return strings.Join([]string{"data", "builtin", "kubernetes", id[1] + id[2]}, ".")
+ default:
+ return strings.Join([]string{"data", "builtin", string(r.Provider), service, string(r.Provider) + id[2]}, ".")
+ }
+}
diff --git a/cmd/opa/main.go b/cmd/opa/main.go
index 0fd8e057..9c536c9b 100644
--- a/cmd/opa/main.go
+++ b/cmd/opa/main.go
@@ -4,9 +4,8 @@ import (
"fmt"
"os"
- // register Built-in Functions from defsec
"github.com/aquasecurity/trivy-checks/pkg/rego"
- _ "github.com/aquasecurity/trivy/pkg/iac/rego"
+ _ "github.com/aquasecurity/trivy/pkg/iac/rego" // register Built-in Functions
"github.com/open-policy-agent/opa/cmd"
)
diff --git a/go.mod b/go.mod
index c76054e0..2527c7a6 100644
--- a/go.mod
+++ b/go.mod
@@ -1,53 +1,55 @@
module github.com/aquasecurity/trivy-checks
-go 1.22
+go 1.22.0
-toolchain go1.22.0
+toolchain go1.22.3
+
+replace github.com/aquasecurity/trivy => github.com/nikpivkin/trivy v0.0.0-20240607053708-0f8190560ced
require (
github.com/aquasecurity/trivy v0.51.2-0.20240514170658-7c22ee3df5ee
- github.com/docker/docker v26.0.2+incompatible
+ github.com/docker/docker v26.1.3+incompatible
github.com/liamg/iamgo v0.0.9
github.com/liamg/memoryfs v1.6.0
- github.com/open-policy-agent/opa v0.64.1
+ github.com/open-policy-agent/opa v0.65.0
github.com/owenrumney/squealer v1.2.2
github.com/stretchr/testify v1.9.0
- github.com/testcontainers/testcontainers-go v0.30.0
+ github.com/testcontainers/testcontainers-go v0.31.0
+ golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa
gopkg.in/yaml.v3 v3.0.1
mvdan.cc/sh/v3 v3.8.0
)
require (
cloud.google.com/go v0.112.1 // indirect
- cloud.google.com/go/compute v1.25.0 // indirect
- cloud.google.com/go/compute/metadata v0.2.3 // indirect
+ cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.6 // indirect
cloud.google.com/go/storage v1.39.1 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
- github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
- github.com/Microsoft/go-winio v0.6.1 // indirect
- github.com/Microsoft/hcsshim v0.11.4 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
+ github.com/Microsoft/hcsshim v0.12.0 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
- github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect
+ github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/alecthomas/chroma v0.10.0 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
- github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 // indirect
- github.com/aws/aws-sdk-go v1.51.25 // indirect
- github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect
+ github.com/aquasecurity/go-version v0.0.0-20240603093900-cf8a8d29271d // indirect
+ github.com/aws/aws-sdk-go v1.53.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3 // indirect
github.com/aws/smithy-go v1.20.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
- github.com/containerd/containerd v1.7.16 // indirect
+ github.com/containerd/containerd v1.7.17 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
@@ -61,16 +63,16 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
- github.com/fatih/color v1.16.0 // indirect
+ github.com/fatih/color v1.17.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
- github.com/go-git/go-git/v5 v5.11.0 // indirect
+ github.com/go-git/go-git/v5 v5.12.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
- github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/go-ole/go-ole v1.3.0 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.2.0 // indirect
@@ -83,30 +85,30 @@ require (
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
github.com/gorilla/mux v1.8.1 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-getter v1.7.4 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
- github.com/hashicorp/go-version v1.6.0 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/hashicorp/hcl/v2 v2.19.1 // indirect
+ github.com/hashicorp/hcl/v2 v2.20.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
- github.com/klauspost/compress v1.17.4 // indirect
+ github.com/klauspost/compress v1.17.7 // indirect
github.com/liamg/jfather v0.0.7 // indirect
- github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+ github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/mattn/go-runewidth v0.0.14 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
- github.com/moby/buildkit v0.12.5 // indirect
+ github.com/moby/buildkit v0.13.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
@@ -117,13 +119,13 @@ require (
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
- github.com/pelletier/go-toml/v2 v2.1.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/peterh/liner v1.2.2 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
- github.com/prometheus/client_golang v1.19.0 // indirect
+ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
+ github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
@@ -132,59 +134,57 @@ require (
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/samber/lo v1.39.0 // indirect
- github.com/sergi/go-diff v1.3.1 // indirect
- github.com/shirou/gopsutil/v3 v3.23.12 // indirect
+ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+ github.com/shirou/gopsutil/v3 v3.24.2 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- github.com/skeema/knownhosts v1.2.1 // indirect
+ github.com/skeema/knownhosts v1.2.2 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
- github.com/spf13/viper v1.18.2 // indirect
+ github.com/spf13/viper v1.19.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
- github.com/tklauser/go-sysconf v0.3.12 // indirect
- github.com/tklauser/numcpus v0.6.1 // indirect
+ github.com/tklauser/go-sysconf v0.3.13 // indirect
+ github.com/tklauser/numcpus v0.7.0 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/yashtewari/glob-intersection v0.2.0 // indirect
- github.com/yusufpapurcu/wmi v1.2.3 // indirect
+ github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zclconf/go-cty v1.14.4 // indirect
github.com/zclconf/go-cty-yaml v1.0.3 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel v1.24.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
+ go.opentelemetry.io/otel v1.27.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/sdk v1.24.0 // indirect
- go.opentelemetry.io/otel/trace v1.24.0 // indirect
- go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+ go.opentelemetry.io/otel/metric v1.27.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.27.0 // indirect
+ go.opentelemetry.io/otel/trace v1.27.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.2.0 // indirect
go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.22.0 // indirect
- golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
- golang.org/x/mod v0.16.0 // indirect
- golang.org/x/net v0.24.0 // indirect
- golang.org/x/oauth2 v0.18.0 // indirect
- golang.org/x/sync v0.6.0 // indirect
- golang.org/x/sys v0.19.0 // indirect
- golang.org/x/text v0.14.0 // indirect
+ golang.org/x/crypto v0.23.0 // indirect
+ golang.org/x/mod v0.17.0 // indirect
+ golang.org/x/net v0.25.0 // indirect
+ golang.org/x/oauth2 v0.20.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
+ golang.org/x/sys v0.20.0 // indirect
+ golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.19.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/api v0.172.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/grpc v1.63.2 // indirect
- google.golang.org/protobuf v1.34.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
+ google.golang.org/grpc v1.64.0 // indirect
+ google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index edb54139..20de2a84 100644
--- a/go.sum
+++ b/go.sum
@@ -68,10 +68,8 @@ cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
-cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU=
-cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE=
-cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
@@ -189,20 +187,20 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
-github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.12.0 h1:rbICA+XZFwrBef2Odk++0LjFvClNCJGRK+fsrP254Ts=
+github.com/Microsoft/hcsshim v0.12.0/go.mod h1:RZV12pcHCXQ42XnlQ3pz6FZfmrC1C+R4gaOHhRNML1g=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0=
-github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
+github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg=
+github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
@@ -216,20 +214,18 @@ github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4t
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
-github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 h1:rcEG5HI490FF0a7zuvxOxen52ddygCfNVjP0XOCMl+M=
-github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492/go.mod h1:9Beu8XsUNNfzml7WBf3QmyPToP1wm1Gj/Vc5UJKqTzU=
-github.com/aquasecurity/trivy v0.51.2-0.20240514170658-7c22ee3df5ee h1:Cs0OQO/ldEv1R9wPGhr5DemUJ18lk05Ly71zlaBDM88=
-github.com/aquasecurity/trivy v0.51.2-0.20240514170658-7c22ee3df5ee/go.mod h1:7UhbpzvSN7Ack4D4cX9X9XC5qFX4KP5O1xSskdZxGQY=
+github.com/aquasecurity/go-version v0.0.0-20240603093900-cf8a8d29271d h1:4zour5Sh9chOg+IqIinIcJ3qtr3cIf8FdFY6aArlXBw=
+github.com/aquasecurity/go-version v0.0.0-20240603093900-cf8a8d29271d/go.mod h1:1cPOp4BaQZ1G2F5fnw4dFz6pkOyXJI9KTuak8ghIl3U=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
-github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
+github.com/aws/aws-sdk-go v1.53.0 h1:MMo1x1ggPPxDfHMXJnQudTbGXYlD4UigUAud1DJxPVo=
+github.com/aws/aws-sdk-go v1.53.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3 h1:57NtjG+WLims0TxIQbjTqebZUKDM03DfM11ANAekW0s=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.54.3/go.mod h1:739CllldowZiPPsDFcJHNF4FXrVxaSGVnZ9Ez9Iz9hc=
github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -240,8 +236,8 @@ github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwN
github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@@ -265,11 +261,14 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
-github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
-github.com/containerd/containerd v1.7.16 h1:7Zsfe8Fkj4Wi2My6DXGQ87hiqIrmOXolm72ZEkFU5Mg=
-github.com/containerd/containerd v1.7.16/go.mod h1:NL49g7A/Fui7ccmxV6zkBWwqMgmMxFWzujYCc+JLt7k=
-github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
-github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
+github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
+github.com/containerd/containerd v1.7.17 h1:KjNnn0+tAVQHAoaWRjmdak9WlvnFR/8rU1CHHy8Rm2A=
+github.com/containerd/containerd v1.7.17/go.mod h1:vK+hhT4TIv2uejlcDlbVIc8+h/BqtKLIyNrtCZol8lI=
+github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
+github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
@@ -280,7 +279,6 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -303,8 +301,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/docker/docker v26.0.2+incompatible h1:yGVmKUFGgcxA6PXWAokO0sQL22BrQ67cgVjko8tGdXE=
-github.com/docker/docker v26.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
+github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -327,8 +325,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
+github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
@@ -341,28 +339,29 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
-github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
+github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE=
+github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
-github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
+github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -478,8 +477,8 @@ github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-getter v1.7.4 h1:3yQjWuxICvSpYwqSayAdKRFcvBl1y/vogCxczWSmix0=
@@ -488,14 +487,15 @@ github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhE
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI=
-github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE=
+github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
+github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -515,8 +515,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -524,16 +524,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/liamg/iamgo v0.0.9 h1:tADGm3xVotyRJmuKKaH4+zsBn7LOcvgdpuF3WsSKW3c=
github.com/liamg/iamgo v0.0.9/go.mod h1:Kk6ZxBF/GQqG9nnaUjIi6jf+WXNpeOTyhwc6gnguaZQ=
github.com/liamg/jfather v0.0.7 h1:Xf78zS263yfT+xr2VSo6+kyAy4ROlCacRqJG7s5jt4k=
github.com/liamg/jfather v0.0.7/go.mod h1:xXBGiBoiZ6tmHhfy5Jzw8sugzajwYdi6VosIpB3/cPM=
github.com/liamg/memoryfs v1.6.0 h1:jAFec2HI1PgMTem5gR7UT8zi9u4BfG5jorCRlLH06W8=
github.com/liamg/memoryfs v1.6.0/go.mod h1:z7mfqXFQS8eSeBBsFjYLlxYRMRyiPktytvYCYTb3BSk=
-github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI=
+github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
@@ -547,8 +546,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -560,16 +559,16 @@ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTS
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0=
-github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso=
+github.com/moby/buildkit v0.13.2 h1:nXNszM4qD9E7QtG7bFWPnDI1teUQFQglBzon/IU3SzI=
+github.com/moby/buildkit v0.13.2/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
-github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
-github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
+github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
@@ -578,12 +577,14 @@ github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/nikpivkin/trivy v0.0.0-20240607053708-0f8190560ced h1:4UDGBSZY0LcW9WHdWVZH5EYs0E/61hTrQDGX3HN96Qo=
+github.com/nikpivkin/trivy v0.0.0-20240607053708-0f8190560ced/go.mod h1:Okgwze9Nj0/MmkoEerxY9xnIM06J3AmOH2NvXIVD9k0=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
-github.com/open-policy-agent/opa v0.64.1 h1:n8IJTYlFWzqiOYx+JiawbErVxiqAyXohovcZxYbskxQ=
-github.com/open-policy-agent/opa v0.64.1/go.mod h1:j4VeLorVpKipnkQ2TDjWshEuV3cvP/rHzQhYaraUXZY=
+github.com/open-policy-agent/opa v0.65.0 h1:wnEU0pEk80YjFi3yoDbFTMluyNssgPI4VJNJetD9a4U=
+github.com/open-policy-agent/opa v0.65.0/go.mod h1:CNoLL44LuCH1Yot/zoeZXRKFylQtCJV+oGFiP2TeeEc=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -591,8 +592,8 @@ github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2sz
github.com/owenrumney/squealer v1.2.2 h1:zsnZSwkWi8Y2lgwmg77b565vlHQovlvBrSBzmAs3oiE=
github.com/owenrumney/squealer v1.2.2/go.mod h1:pDCW33bWJ2kDOuz7+2BSXDgY38qusVX0MtjPCSFtdSo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/peterh/liner v1.2.2 h1:aJ4AOodmL+JxOZZEL2u9iJf8omNRpqHc/EbrK+3mAXw=
github.com/peterh/liner v1.2.2/go.mod h1:xFwJyiKIXJZUKItq5dGHZSTBRAuG/CpeNpWLyiNRNwI=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
@@ -602,12 +603,13 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
+github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
@@ -624,7 +626,6 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
@@ -632,20 +633,19 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
-github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
-github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
-github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
-github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/shirou/gopsutil/v3 v3.24.2 h1:kcR0erMbLg5/3LcInpw0X/rrPSqq4CDPyI6A6ZRC18Y=
+github.com/shirou/gopsutil/v3 v3.24.2/go.mod h1:tSg/594BcA+8UdQU2XcW803GWYgdtauFFPgJCJKZlVk=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
-github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -665,11 +665,12 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
-github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
@@ -685,19 +686,20 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
-github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
-github.com/testcontainers/testcontainers-go v0.30.0/go.mod h1:K+kHNGiM5zjklKjgTtcrEetF3uhWbMUyqAQoyoh8Pf0=
-github.com/testcontainers/testcontainers-go/modules/localstack v0.28.0 h1:NOtK4tz2J1KbdAV6Lk9AQPUXB6Op8jGzKNfwVCThRxU=
-github.com/testcontainers/testcontainers-go/modules/localstack v0.28.0/go.mod h1:nLimAfgHTQfaDZ2cO8/B4Z1qr8e020sM3ybpSsOVAUY=
-github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U=
+github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI=
+github.com/testcontainers/testcontainers-go/modules/localstack v0.31.0 h1:pPz0J5Gbu7eAirpWP7QDT/v3s0zpNb/sNA8Ww/rjkoQ=
+github.com/testcontainers/testcontainers-go/modules/localstack v0.31.0/go.mod h1:vqOXktUtHpTte9ilzE5enoUO8wt4FYDpZ3ARIAp28PM=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
+github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
+github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
@@ -713,10 +715,12 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
-github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
+github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI=
+github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
github.com/zclconf/go-cty-yaml v1.0.3 h1:og/eOQ7lvA/WWhHGFETVWNduJM7Rjsv2RRpx1sdFMLc=
github.com/zclconf/go-cty-yaml v1.0.3/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -730,25 +734,25 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
+go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
+go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
-go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
+go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
+go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
+go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
+go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
+go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
-go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
+go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
+go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -763,8 +767,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
-golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -803,8 +807,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -853,8 +857,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -880,8 +884,8 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
-golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
+golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -896,8 +900,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -973,14 +977,14 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -990,10 +994,9 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1122,8 +1125,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1227,10 +1228,10 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz
google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1266,8 +1267,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1284,8 +1285,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
-google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/lib/aws.rego b/lib/aws.rego
new file mode 100644
index 00000000..f2dc956e
--- /dev/null
+++ b/lib/aws.rego
@@ -0,0 +1,37 @@
+package lib.aws
+
+import rego.v1
+
+multiregion_log_trails := [trail |
+	some trail in input.aws.cloudtrail.trails
+	trail.ismultiregion.value
+	trail.islogging.value
+]
+
+trails_without_filter(pattern) := [trail |
+	some trail in multiregion_log_trails
+	loggroup := _has_loggroup_for_trail(trail)
+	not _has_log_filter(loggroup, pattern)
+]
+
+trails_without_alarm_for_filter(pattern) := [trail |
+	some trail in multiregion_log_trails
+	loggroup := _has_loggroup_for_trail(trail)
+	filter := _has_log_filter(loggroup, pattern)
+	not _has_alarm_for_filter(filter)
+]
+
+_has_alarm_for_filter(filter) if {
+ some alarm in input.aws.cloudwatch.alarms
+ alarm.metricname.value == filter.filtername.value
+}
+
+_has_loggroup_for_trail(trail) := loggroup if {
+ some loggroup in input.aws.cloudwatch.loggroups
+ loggroup.arn.value == trail.cloudwatchlogsloggrouparn.value
+}
+
+_has_log_filter(loggroup, pattern) := filter if {
+ some filter in loggroup.metricfilters
+ filter.filterpattern.value == pattern
+}
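The helpers above are shared building blocks for the CloudTrail/CloudWatch checks: multiregion_log_trails selects active multi-region trails, and the two trails_without_* comprehensions narrow that list to trails missing a metric filter, or a matching filter's alarm, for a given pattern. A minimal sketch of how a check could consume them (the package name, filter pattern and messages are illustrative, not part of this change):

package user.cloudwatch.example_require_console_login_alarm

import rego.v1

import data.lib.aws

# Hypothetical filter pattern; a real check passes the pattern required by the benchmark it implements.
pattern := `{ ($.eventName = "ConsoleLogin") && ($.errorMessage = "Failed authentication") }`

deny contains msg if {
	some trail in aws.trails_without_filter(pattern)
	msg := sprintf("Trail log group %s has no metric filter for the required pattern", [trail.cloudwatchlogsloggrouparn.value])
}

deny contains msg if {
	some trail in aws.trails_without_alarm_for_filter(pattern)
	msg := sprintf("Trail log group %s has a matching metric filter but no alarm", [trail.cloudwatchlogsloggrouparn.value])
}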
diff --git a/lib/datetime.rego b/lib/datetime.rego
new file mode 100644
index 00000000..b7b17b77
--- /dev/null
+++ b/lib/datetime.rego
@@ -0,0 +1,15 @@
+package lib.datetime
+
+import rego.v1
+
+ns_in_day := 86400000000000
+
+zero_time_string := "0001-01-01T00:00:00Z"
+
+time_is_never(value) := value == zero_time_string # rego can't reliably parse the Go zero time, so compare the raw string
+
+time_diff_gt_days(value, days) := (time.now_ns() - time.parse_rfc3339_ns(value)) > days_to_ns(days)
+
+time_diff_lt_days(value, days) := (time.now_ns() - time.parse_rfc3339_ns(value)) < days_to_ns(days)
+
+days_to_ns(days) := days * ns_in_day
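These helpers do all comparisons in nanoseconds against time.now_ns(): days_to_ns(90) is 90 * 86400000000000, so time_diff_gt_days(ts, 90) holds when ts is an RFC3339 timestamp more than 90 days in the past. A small illustrative rule built on them (the rule name and 90-day threshold are assumptions, not part of this change):

package user.datetime_example

import rego.v1

import data.lib.datetime

# Flags a credential as stale if it was last rotated more than 90 days ago,
# treating the Go zero time ("never") as not stale on its own.
credential_is_stale(last_rotated) if {
	not datetime.time_is_never(last_rotated)
	datetime.time_diff_gt_days(last_rotated, 90)
}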
diff --git a/lib/iam.rego b/lib/iam.rego
new file mode 100644
index 00000000..c96caa60
--- /dev/null
+++ b/lib/iam.rego
@@ -0,0 +1,24 @@
+package lib.iam
+
+import rego.v1
+
+import data.lib.datetime
+
+is_user_logged_in(user) if {
+ # user.lastaccess.is_resolvable
+ not datetime.time_is_never(user.lastaccess.value)
+}
+
+user_has_mfa_devices(user) if count(user.mfadevices) > 0
+
+user_is_inactive(user, days) if {
+ is_user_logged_in(user)
+ datetime.time_diff_gt_days(user.lastaccess.value, days)
+}
+
+key_is_unused(key, days) if {
+ key.active.value
+ datetime.time_diff_gt_days(key.lastaccess.value, days)
+}
+
+is_root_user(user) := user.name.value == "root"
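The IAM helpers wrap the datetime library so checks can reason about user and access-key activity. A sketch of a hypothetical inactivity check built on them (the 90-day threshold and message are illustrative):

package user.iam_inactivity_example

import rego.v1

import data.lib.iam

deny contains msg if {
	some user in input.aws.iam.users
	not iam.is_root_user(user)
	iam.user_is_inactive(user, 90)
	msg := sprintf("User %s has not logged in for more than 90 days", [user.name.value])
}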
diff --git a/lib/s3.rego b/lib/s3.rego
new file mode 100644
index 00000000..20497051
--- /dev/null
+++ b/lib/s3.rego
@@ -0,0 +1,11 @@
+package lib.s3
+
+import rego.v1
+
+public_acls := {"public-read", "public-read-write", "website", "authenticated-read"}
+
+bucket_has_public_access(bucket) if {
+ bucket.acl.value in public_acls
+ not bucket.publicaccessblock.ignorepublicacls.value
+ not bucket.publicaccessblock.blockpublicacls.value
+}
diff --git a/lib/s3_test.rego b/lib/s3_test.rego
new file mode 100644
index 00000000..9ad052c5
--- /dev/null
+++ b/lib/s3_test.rego
@@ -0,0 +1,12 @@
+package lib.s3_test
+
+import rego.v1
+
+import data.lib.s3
+
+test_has_public_access if {
+ s3.bucket_has_public_access({"acl": {"value": "public-read"}})
+ not s3.bucket_has_public_access({"acl": {"value": "private"}})
+ s3.bucket_has_public_access({"acl": {"value": "public-read-write", "ignorepublicacls": {"value": true}}})
+ s3.bucket_has_public_access({"acl": {"value": "public-read-write", "blockpublicacls": {"value": true}}})
+}
diff --git a/lib/test.rego b/lib/test.rego
new file mode 100644
index 00000000..66f280eb
--- /dev/null
+++ b/lib/test.rego
@@ -0,0 +1,37 @@
+package lib.test
+
+import rego.v1
+
+assert_empty(v) if {
+ not assert_not_empty(v)
+}
+
+assert_not_empty(v) if {
+ count(v) > 0
+ trace_and_print(sprintf("assert_not_empty:\n %v", [v]))
+}
+
+assert_equal_message(expected, results) if {
+ assert_count(results, 1)
+ not _assert_equal_message(results, expected)
+}
+
+_assert_equal_message(results, expected) if {
+ msg := [res.msg | some res in results][0]
+ msg != expected # TODO: satisfy this
+ trace_and_print(sprintf("assert_equal_message:\n Got %q\n Expected %q", [msg, expected]))
+}
+
+assert_count(results, expected) if {
+ not _assert_count(results, expected)
+}
+
+_assert_count(results, expected) if {
+ count(results) != expected
+ trace_and_print(sprintf("assert_count:\n Got %v\n Expected %v", [count(results), expected]))
+}
+
+trace_and_print(v) if {
+ trace(v)
+ print(v)
+}
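These assertion helpers are intended for Rego unit tests of the ported checks: each assert_* rule succeeds when the expectation holds, and the _assert_* companions print a diagnostic via trace_and_print when it does not, so a failing test shows what the check actually produced. A rough usage sketch (the test name and the literal results set are invented for illustration; a real test would build results from a check's deny rule with a test input):

package user.example_check_test

import rego.v1

import data.lib.test

test_denies_when_analyzer_missing if {
	results := {{"msg": "Access Analyzer is not enabled."}}

	test.assert_count(results, 1)
	test.assert_not_empty(results)
	test.assert_empty(set())
}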