diff --git a/go.mod b/go.mod
index d08e0100d209..b431ae7af5a9 100644
--- a/go.mod
+++ b/go.mod
@@ -117,6 +117,8 @@ require (
 )
 
 require (
+	github.com/Masterminds/semver v1.5.0
+	github.com/alecthomas/chroma v0.10.0
 	github.com/apparentlymart/go-cidr v1.1.0
 	github.com/aws/smithy-go v1.19.0
 	github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c
@@ -127,6 +129,7 @@
 	github.com/liamg/memoryfs v1.6.0
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/olekukonko/tablewriter v0.0.5
+	github.com/owenrumney/squealer v1.2.1
 	github.com/zclconf/go-cty v1.13.0
 	github.com/zclconf/go-cty-yaml v1.0.3
 	golang.org/x/crypto v0.18.0
@@ -164,7 +167,6 @@ require (
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
 	github.com/agnivade/levenshtein v1.1.1 // indirect
-	github.com/alecthomas/chroma v0.10.0 // indirect
 	github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
 	github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
 	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
@@ -337,6 +339,7 @@
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/opencontainers/runc v1.1.5 // indirect
 	github.com/opencontainers/runtime-spec v1.1.0 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
@@ -430,3 +433,5 @@
 // testcontainers-go has a bug with versions v0.25.0 and v0.26.0
 // ref: https://github.com/testcontainers/testcontainers-go/issues/1782
 replace github.com/testcontainers/testcontainers-go => github.com/testcontainers/testcontainers-go v0.23.0
+
+replace github.com/aquasecurity/trivy-aws => /Users/simarpreetsingh/repos/trivy-aws
diff --git a/go.sum b/go.sum
index 19b3fa3f5ef7..62f6aee0ab20 100644
--- a/go.sum
+++ b/go.sum
@@ -344,8 +344,6 @@ github.com/aquasecurity/testdocker v0.0.0-20230111101738-e741bda259da h1:pj/adfN
 github.com/aquasecurity/testdocker v0.0.0-20230111101738-e741bda259da/go.mod h1:852lbQLpK2nCwlR4ZLYIccxYCfoQao6q9Nl6tjz54v8=
 github.com/aquasecurity/tml v0.6.1 h1:y2ZlGSfrhnn7t4ZJ/0rotuH+v5Jgv6BDDO5jB6A9gwo=
 github.com/aquasecurity/tml v0.6.1/go.mod h1:OnYMWY5lvI9ejU7yH9LCberWaaTBW7hBFsITiIMY2yY=
-github.com/aquasecurity/trivy-aws v0.7.1 h1:XElKZsP9Hqe2JVekQgGCIkFtgRgVlP+80wKL2JWBctk=
-github.com/aquasecurity/trivy-aws v0.7.1/go.mod h1:bJT7pzsqo9q5yi3arJSt789bAH0eDb7c+niFYMBNcMQ=
 github.com/aquasecurity/trivy-db v0.0.0-20231005141211-4fc651f7ac8d h1:fjI9mkoTUAkbGqpzt9nJsO24RAdfG+ZSiLFj0G2jO8c=
 github.com/aquasecurity/trivy-db v0.0.0-20231005141211-4fc651f7ac8d/go.mod h1:cj9/QmD9N3OZnKQMp+/DvdV+ym3HyIkd4e+F0ZM3ZGs=
 github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 h1:JVgBIuIYbwG+ekC5lUHUpGJboPYiCcxiz06RCtz8neI=
diff --git a/internal/adapters/arm/adapt.go b/internal/adapters/arm/adapt.go
new file mode 100644
index 000000000000..55184c51bd65
--- /dev/null
+++ b/internal/adapters/arm/adapt.go
@@ -0,0 +1,50 @@
+package arm
+
+import (
+	"context"
+
+	"github.com/aquasecurity/trivy/internal/adapters/arm/appservice"
"github.com/aquasecurity/trivy/internal/adapters/arm/authorization" + "github.com/aquasecurity/trivy/internal/adapters/arm/compute" + "github.com/aquasecurity/trivy/internal/adapters/arm/container" + "github.com/aquasecurity/trivy/internal/adapters/arm/database" + "github.com/aquasecurity/trivy/internal/adapters/arm/datafactory" + "github.com/aquasecurity/trivy/internal/adapters/arm/datalake" + "github.com/aquasecurity/trivy/internal/adapters/arm/keyvault" + "github.com/aquasecurity/trivy/internal/adapters/arm/monitor" + "github.com/aquasecurity/trivy/internal/adapters/arm/network" + "github.com/aquasecurity/trivy/internal/adapters/arm/securitycenter" + "github.com/aquasecurity/trivy/internal/adapters/arm/storage" + "github.com/aquasecurity/trivy/internal/adapters/arm/synapse" + + "github.com/aquasecurity/trivy/pkg/providers/azure" + scanner "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/state" +) + +// Adapt ... +func Adapt(ctx context.Context, deployment scanner.Deployment) *state.State { + return &state.State{ + Azure: adaptAzure(deployment), + } +} + +func adaptAzure(deployment scanner.Deployment) azure.Azure { + + return azure.Azure{ + AppService: appservice.Adapt(deployment), + Authorization: authorization.Adapt(deployment), + Compute: compute.Adapt(deployment), + Container: container.Adapt(deployment), + Database: database.Adapt(deployment), + DataFactory: datafactory.Adapt(deployment), + DataLake: datalake.Adapt(deployment), + KeyVault: keyvault.Adapt(deployment), + Monitor: monitor.Adapt(deployment), + Network: network.Adapt(deployment), + SecurityCenter: securitycenter.Adapt(deployment), + Storage: storage.Adapt(deployment), + Synapse: synapse.Adapt(deployment), + } + +} diff --git a/internal/adapters/arm/appservice/adapt.go b/internal/adapters/arm/appservice/adapt.go new file mode 100644 index 000000000000..7c9d0a20264f --- /dev/null +++ b/internal/adapters/arm/appservice/adapt.go @@ -0,0 +1,58 @@ +package appservice + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/appservice" + "github.com/aquasecurity/trivy/pkg/scanners/azure" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(deployment azure.Deployment) appservice.AppService { + return appservice.AppService{ + Services: adaptServices(deployment), + FunctionApps: adaptFunctionApps(deployment), + } +} + +func adaptFunctionApps(deployment azure.Deployment) []appservice.FunctionApp { + var functionApps []appservice.FunctionApp + + for _, resource := range deployment.GetResourcesByType("Microsoft.Web/sites") { + functionApps = append(functionApps, adaptFunctionApp(resource)) + } + return functionApps +} + +func adaptServices(deployment azure.Deployment) []appservice.Service { + var services []appservice.Service + for _, resource := range deployment.GetResourcesByType("Microsoft.Web/sites") { + services = append(services, adaptService(resource)) + } + return services +} + +func adaptFunctionApp(resource azure.Resource) appservice.FunctionApp { + return appservice.FunctionApp{ + Metadata: resource.Metadata, + HTTPSOnly: resource.Properties.GetMapValue("httpsOnly").AsBoolValue(false, resource.Properties.GetMetadata()), + } +} + +func adaptService(resource azure.Resource) appservice.Service { + return appservice.Service{ + Metadata: resource.Metadata, + EnableClientCert: 
diff --git a/internal/adapters/arm/authorization/adapt.go b/internal/adapters/arm/authorization/adapt.go
new file mode 100644
index 000000000000..03d9c594c6e2
--- /dev/null
+++ b/internal/adapters/arm/authorization/adapt.go
@@ -0,0 +1,38 @@
+package authorization
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/authorization"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func Adapt(deployment azure.Deployment) authorization.Authorization {
+	return authorization.Authorization{
+		RoleDefinitions: adaptRoleDefinitions(deployment),
+	}
+}
+
+func adaptRoleDefinitions(deployment azure.Deployment) (roleDefinitions []authorization.RoleDefinition) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Authorization/roleDefinitions") {
+		roleDefinitions = append(roleDefinitions, adaptRoleDefinition(resource))
+	}
+	return roleDefinitions
+}
+
+func adaptRoleDefinition(resource azure.Resource) authorization.RoleDefinition {
+
+	return authorization.RoleDefinition{
+		Metadata:         resource.Metadata,
+		Permissions:      adaptPermissions(resource),
+		AssignableScopes: resource.Properties.GetMapValue("assignableScopes").AsStringValuesList(""),
+	}
+}
+
+func adaptPermissions(resource azure.Resource) (permissions []authorization.Permission) {
+	for _, permission := range resource.Properties.GetMapValue("permissions").AsList() {
+		permissions = append(permissions, authorization.Permission{
+			Metadata: resource.Metadata,
+			Actions:  permission.GetMapValue("actions").AsStringValuesList(""),
+		})
+	}
+	return permissions
+}
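For the role-definition adapter, the input shape matters more than the code: `permissions` is a list of objects, each carrying its own `actions` list. A sketch of the mapping, assuming `azure.NewValue` also accepts `[]azure.Value` slices (the `AsList`/`AsStringValuesList` accessors above imply a list variant; the scope value is invented):

```go
package authorization

import (
	"testing"

	"github.com/aquasecurity/trivy/pkg/scanners/azure"
	"github.com/aquasecurity/trivy/pkg/types"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func Test_AdaptRoleDefinition(t *testing.T) {
	meta := types.NewTestMisconfigMetadata()

	input := azure.Deployment{
		Resources: []azure.Resource{
			{
				Type: azure.NewValue("Microsoft.Authorization/roleDefinitions", meta),
				Properties: azure.NewValue(map[string]azure.Value{
					// one permissions entry holding a single wildcard action
					"permissions": azure.NewValue([]azure.Value{
						azure.NewValue(map[string]azure.Value{
							"actions": azure.NewValue([]azure.Value{
								azure.NewValue("*", meta),
							}, meta),
						}, meta),
					}, meta),
					"assignableScopes": azure.NewValue([]azure.Value{
						azure.NewValue("/subscriptions/0000", meta), // hypothetical scope
					}, meta),
				}, meta),
			},
		},
	}

	output := Adapt(input)

	require.Len(t, output.RoleDefinitions, 1)
	require.Len(t, output.RoleDefinitions[0].Permissions, 1)
	assert.Equal(t, "*", output.RoleDefinitions[0].Permissions[0].Actions[0].Value())
}
```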
diff --git a/internal/adapters/arm/compute/adapt.go b/internal/adapters/arm/compute/adapt.go
new file mode 100644
index 000000000000..6f222a3c3b0d
--- /dev/null
+++ b/internal/adapters/arm/compute/adapt.go
@@ -0,0 +1,85 @@
+package compute
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/compute"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+func Adapt(deployment azure.Deployment) compute.Compute {
+	return compute.Compute{
+		LinuxVirtualMachines:   adaptLinuxVirtualMachines(deployment),
+		WindowsVirtualMachines: adaptWindowsVirtualMachines(deployment),
+		ManagedDisks:           adaptManagedDisks(deployment),
+	}
+}
+
+func adaptManagedDisks(deployment azure.Deployment) (managedDisks []compute.ManagedDisk) {
+
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Compute/disks") {
+		managedDisks = append(managedDisks, adaptManagedDisk(resource))
+	}
+
+	return managedDisks
+}
+
+func adaptManagedDisk(resource azure.Resource) compute.ManagedDisk {
+	hasEncryption := resource.Properties.HasKey("encryption")
+
+	return compute.ManagedDisk{
+		Metadata: resource.Metadata,
+		Encryption: compute.Encryption{
+			Metadata: resource.Metadata,
+			Enabled:  defsecTypes.Bool(hasEncryption, resource.Metadata),
+		},
+	}
+}
+
+func adaptWindowsVirtualMachines(deployment azure.Deployment) (windowsVirtualMachines []compute.WindowsVirtualMachine) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Compute/virtualMachines") {
+		if resource.Properties.GetMapValue("osProfile").GetMapValue("windowsConfiguration").AsMap() != nil {
+			windowsVirtualMachines = append(windowsVirtualMachines, adaptWindowsVirtualMachine(resource))
+		}
+	}
+
+	return windowsVirtualMachines
+}
+
+func adaptWindowsVirtualMachine(resource azure.Resource) compute.WindowsVirtualMachine {
+	return compute.WindowsVirtualMachine{
+		Metadata: resource.Metadata,
+		VirtualMachine: compute.VirtualMachine{
+			Metadata: resource.Metadata,
+			CustomData: resource.Properties.GetMapValue("osProfile").
+				GetMapValue("customData").AsStringValue("", resource.Metadata),
+		},
+	}
+}
+
+func adaptLinuxVirtualMachines(deployment azure.Deployment) (linuxVirtualMachines []compute.LinuxVirtualMachine) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Compute/virtualMachines") {
+		if resource.Properties.GetMapValue("osProfile").GetMapValue("linuxConfiguration").AsMap() != nil {
+			linuxVirtualMachines = append(linuxVirtualMachines, adaptLinuxVirtualMachine(resource))
+		}
+	}
+
+	return linuxVirtualMachines
+}
+
+func adaptLinuxVirtualMachine(resource azure.Resource) compute.LinuxVirtualMachine {
+	return compute.LinuxVirtualMachine{
+		Metadata: resource.Metadata,
+		VirtualMachine: compute.VirtualMachine{
+			Metadata: resource.Metadata,
+			CustomData: resource.Properties.GetMapValue("osProfile").
+				GetMapValue("customData").AsStringValue("", resource.Metadata),
+		},
+		OSProfileLinuxConfig: compute.OSProfileLinuxConfig{
+			Metadata: resource.Metadata,
+			DisablePasswordAuthentication: resource.Properties.GetMapValue("osProfile").
+				GetMapValue("linuxConfiguration").
+				GetMapValue("disablePasswordAuthentication").AsBoolValue(false, resource.Metadata),
+		},
+	}
+
+}
+ GetMapValue("disablePasswordAuthentication").AsBoolValue(false, resource.Metadata), + }, + } + +} diff --git a/internal/adapters/arm/compute/adapt_test.go b/internal/adapters/arm/compute/adapt_test.go new file mode 100644 index 000000000000..baf39bf45ddc --- /dev/null +++ b/internal/adapters/arm/compute/adapt_test.go @@ -0,0 +1,60 @@ +package compute + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" +) + +func Test_AdaptLinuxVM(t *testing.T) { + + input := azure.Deployment{ + Resources: []azure.Resource{ + { + Type: azure.NewValue("Microsoft.Compute/virtualMachines", types.NewTestMisconfigMetadata()), + Properties: azure.NewValue(map[string]azure.Value{ + "osProfile": azure.NewValue(map[string]azure.Value{ + "linuxConfiguration": azure.NewValue(map[string]azure.Value{ + "disablePasswordAuthentication": azure.NewValue(true, types.NewTestMisconfigMetadata()), + }, types.NewTestMisconfigMetadata()), + }, types.NewTestMisconfigMetadata()), + }, types.NewTestMisconfigMetadata()), + }, + }, + } + + output := Adapt(input) + + require.Len(t, output.LinuxVirtualMachines, 1) + require.Len(t, output.WindowsVirtualMachines, 0) + + linuxVM := output.LinuxVirtualMachines[0] + assert.True(t, linuxVM.OSProfileLinuxConfig.DisablePasswordAuthentication.IsTrue()) + +} + +func Test_AdaptWindowsVM(t *testing.T) { + + input := azure.Deployment{ + Resources: []azure.Resource{ + { + Type: azure.NewValue("Microsoft.Compute/virtualMachines", types.NewTestMisconfigMetadata()), + Properties: azure.NewValue(map[string]azure.Value{ + "osProfile": azure.NewValue(map[string]azure.Value{ + "windowsConfiguration": azure.NewValue(map[string]azure.Value{}, types.NewTestMisconfigMetadata()), + }, types.NewTestMisconfigMetadata()), + }, types.NewTestMisconfigMetadata()), + }, + }, + } + + output := Adapt(input) + + require.Len(t, output.LinuxVirtualMachines, 0) + require.Len(t, output.WindowsVirtualMachines, 1) +} diff --git a/internal/adapters/arm/container/adapt.go b/internal/adapters/arm/container/adapt.go new file mode 100644 index 000000000000..542caa4db1b6 --- /dev/null +++ b/internal/adapters/arm/container/adapt.go @@ -0,0 +1,17 @@ +package container + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/container" + "github.com/aquasecurity/trivy/pkg/scanners/azure" +) + +func Adapt(deployment azure.Deployment) container.Container { + return container.Container{ + KubernetesClusters: adaptKubernetesClusters(deployment), + } +} + +func adaptKubernetesClusters(deployment azure.Deployment) []container.KubernetesCluster { + + return nil +} diff --git a/internal/adapters/arm/database/adapt.go b/internal/adapters/arm/database/adapt.go new file mode 100644 index 000000000000..ccbc9a62a562 --- /dev/null +++ b/internal/adapters/arm/database/adapt.go @@ -0,0 +1,35 @@ +package database + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/database" + "github.com/aquasecurity/trivy/pkg/scanners/azure" +) + +func Adapt(deployment azure.Deployment) database.Database { + return database.Database{ + MSSQLServers: adaptMSSQLServers(deployment), + MariaDBServers: adaptMariaDBServers(deployment), + MySQLServers: adaptMySQLServers(deployment), + PostgreSQLServers: adaptPostgreSQLServers(deployment), + } +} + +func adaptMySQLServers(deployment azure.Deployment) (mysqlDbServers []database.MySQLServer) 
diff --git a/internal/adapters/arm/database/adapt.go b/internal/adapters/arm/database/adapt.go
new file mode 100644
index 000000000000..ccbc9a62a562
--- /dev/null
+++ b/internal/adapters/arm/database/adapt.go
@@ -0,0 +1,35 @@
+package database
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/database"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func Adapt(deployment azure.Deployment) database.Database {
+	return database.Database{
+		MSSQLServers:      adaptMSSQLServers(deployment),
+		MariaDBServers:    adaptMariaDBServers(deployment),
+		MySQLServers:      adaptMySQLServers(deployment),
+		PostgreSQLServers: adaptPostgreSQLServers(deployment),
+	}
+}
+
+func adaptMySQLServers(deployment azure.Deployment) (mysqlDbServers []database.MySQLServer) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.DBforMySQL/servers") {
+		mysqlDbServers = append(mysqlDbServers, adaptMySQLServer(resource, deployment))
+	}
+	return mysqlDbServers
+}
+
+func adaptMySQLServer(resource azure.Resource, deployment azure.Deployment) database.MySQLServer {
+	return database.MySQLServer{
+		Metadata: resource.Metadata,
+		Server: database.Server{
+			Metadata:                  resource.Metadata,
+			EnableSSLEnforcement:      resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
+			MinimumTLSVersion:         resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
+			EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
+			FirewallRules:             addFirewallRule(resource),
+		},
+	}
+}
diff --git a/internal/adapters/arm/database/firewall.go b/internal/adapters/arm/database/firewall.go
new file mode 100644
index 000000000000..e1387fde8107
--- /dev/null
+++ b/internal/adapters/arm/database/firewall.go
@@ -0,0 +1,18 @@
+package database
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/database"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func addFirewallRule(resource azure.Resource) []database.FirewallRule {
+	var rules []database.FirewallRule
+	for _, rule := range resource.Properties.GetMapValue("firewallRules").AsMap() {
+		rules = append(rules, database.FirewallRule{
+			Metadata: rule.MisconfigMetadata,
+			StartIP:  rule.GetMapValue("startIpAddress").AsStringValue("", rule.MisconfigMetadata),
+			EndIP:    rule.GetMapValue("endIpAddress").AsStringValue("", rule.MisconfigMetadata),
+		})
+	}
+	return rules
+}
diff --git a/internal/adapters/arm/database/maria.go b/internal/adapters/arm/database/maria.go
new file mode 100644
index 000000000000..1c8af37978ea
--- /dev/null
+++ b/internal/adapters/arm/database/maria.go
@@ -0,0 +1,27 @@
+package database
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/database"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func adaptMariaDBServers(deployment azure.Deployment) (mariaDbServers []database.MariaDBServer) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.DBforMariaDB/servers") {
+		mariaDbServers = append(mariaDbServers, adaptMariaDBServer(resource, deployment))
+	}
+	return mariaDbServers
+
+}
+
+func adaptMariaDBServer(resource azure.Resource, deployment azure.Deployment) database.MariaDBServer {
+	return database.MariaDBServer{
+		Metadata: resource.Metadata,
+		Server: database.Server{
+			Metadata:                  resource.Metadata,
+			EnableSSLEnforcement:      resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
+			MinimumTLSVersion:         resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
+			EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
+			FirewallRules:             addFirewallRule(resource),
+		},
+	}
+}
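`addFirewallRule` is shared by the MySQL/MariaDB adapters above and the MSSQL/PostgreSQL ones below. A sketch of the property shape it walks — a `firewallRules` map whose values are rule objects — using the same test helpers as the rest of this diff (rule name and IPs are made up):

```go
package database

import (
	"testing"

	"github.com/aquasecurity/trivy/pkg/scanners/azure"
	"github.com/aquasecurity/trivy/pkg/types"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func Test_AddFirewallRule(t *testing.T) {
	meta := types.NewTestMisconfigMetadata()

	resource := azure.Resource{
		Properties: azure.NewValue(map[string]azure.Value{
			"firewallRules": azure.NewValue(map[string]azure.Value{
				"AllowAll": azure.NewValue(map[string]azure.Value{
					"startIpAddress": azure.NewValue("0.0.0.0", meta),
					"endIpAddress":   azure.NewValue("255.255.255.255", meta),
				}, meta),
			}, meta),
		}, meta),
	}

	rules := addFirewallRule(resource)

	require.Len(t, rules, 1)
	// Each rule carries its own metadata (rule.MisconfigMetadata), not the
	// server resource's, so findings point at the rule itself.
	assert.Equal(t, "0.0.0.0", rules[0].StartIP.Value())
	assert.Equal(t, "255.255.255.255", rules[0].EndIP.Value())
}
```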
diff --git a/internal/adapters/arm/database/mssql.go b/internal/adapters/arm/database/mssql.go
new file mode 100644
index 000000000000..397ead59ea03
--- /dev/null
+++ b/internal/adapters/arm/database/mssql.go
@@ -0,0 +1,61 @@
+package database
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/database"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+func adaptMSSQLServers(deployment azure.Deployment) (msSQlServers []database.MSSQLServer) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Sql/servers") {
+		msSQlServers = append(msSQlServers, adaptMSSQLServer(resource, deployment))
+	}
+	return msSQlServers
+}
+
+func adaptMSSQLServer(resource azure.Resource, deployment azure.Deployment) database.MSSQLServer {
+	return database.MSSQLServer{
+		Metadata: resource.Metadata,
+		Server: database.Server{
+			Metadata:                  resource.Metadata,
+			EnableSSLEnforcement:      resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
+			MinimumTLSVersion:         resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
+			EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
+			FirewallRules:             addFirewallRule(resource),
+		},
+		ExtendedAuditingPolicies: adaptExtendedAuditingPolicies(resource, deployment),
+		SecurityAlertPolicies:    adaptSecurityAlertPolicies(resource, deployment),
+	}
+}
+
+func adaptExtendedAuditingPolicies(resource azure.Resource, deployment azure.Deployment) (policies []database.ExtendedAuditingPolicy) {
+
+	for _, policy := range deployment.GetResourcesByType("Microsoft.Sql/servers/extendedAuditingSettings") {
+		policies = append(policies, database.ExtendedAuditingPolicy{
+			Metadata:        policy.Metadata,
+			RetentionInDays: policy.Properties.GetMapValue("retentionDays").AsIntValue(0, policy.Metadata),
+		})
+	}
+
+	return policies
+}
+
+func adaptSecurityAlertPolicies(resource azure.Resource, deployment azure.Deployment) (policies []database.SecurityAlertPolicy) {
+	for _, policy := range deployment.GetResourcesByType("Microsoft.Sql/servers/securityAlertPolicies") {
+		policies = append(policies, database.SecurityAlertPolicy{
+			Metadata:           policy.Metadata,
+			EmailAddresses:     adaptStringList(policy.Properties.GetMapValue("emailAddresses")),
+			DisabledAlerts:     adaptStringList(policy.Properties.GetMapValue("disabledAlerts")),
+			EmailAccountAdmins: policy.Properties.GetMapValue("emailAccountAdmins").AsBoolValue(false, policy.Metadata),
+		})
+	}
+	return policies
+}
+
+func adaptStringList(value azure.Value) []defsecTypes.StringValue {
+	var list []defsecTypes.StringValue
+	for _, v := range value.AsList() {
+		list = append(list, v.AsStringValue("", value.MisconfigMetadata))
+	}
+	return list
+}
diff --git a/internal/adapters/arm/database/postgresql.go b/internal/adapters/arm/database/postgresql.go
new file mode 100644
index 000000000000..3ac0bbc25379
--- /dev/null
+++ b/internal/adapters/arm/database/postgresql.go
@@ -0,0 +1,64 @@
+package database
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aquasecurity/trivy/pkg/providers/azure/database"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+func adaptPostgreSQLServers(deployment azure.Deployment) (databases []database.PostgreSQLServer) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.DBforPostgreSQL/servers") {
+		databases = append(databases, adaptPostgreSQLServer(resource, deployment))
+	}
+
+	return databases
+}
+
+func adaptPostgreSQLServer(resource azure.Resource, deployment azure.Deployment) database.PostgreSQLServer {
+	return database.PostgreSQLServer{
+		Metadata: resource.Metadata,
+		Server: database.Server{
+			Metadata:                  resource.Metadata,
+			EnableSSLEnforcement:      resource.Properties.GetMapValue("sslEnforcement").AsBoolValue(false, resource.Metadata),
+			MinimumTLSVersion:         resource.Properties.GetMapValue("minimalTlsVersion").AsStringValue("TLSEnforcementDisabled", resource.Metadata),
+			EnablePublicNetworkAccess: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(false, resource.Metadata),
+			FirewallRules:             addFirewallRule(resource),
+		},
+		Config: adaptPostgreSQLConfiguration(resource, deployment),
+	}
+}
+
+func adaptPostgreSQLConfiguration(resource azure.Resource, deployment azure.Deployment) database.PostgresSQLConfig {
+
+	parent := fmt.Sprintf("%s/", resource.Name.AsString())
+
+	config := database.PostgresSQLConfig{
+		Metadata:             resource.Metadata,
+		LogCheckpoints:       defsecTypes.BoolDefault(false, resource.Metadata),
+		ConnectionThrottling: defsecTypes.BoolDefault(false, resource.Metadata),
+		LogConnections:       defsecTypes.BoolDefault(false, resource.Metadata),
+	}
+
+	for _, configuration := range deployment.GetResourcesByType("Microsoft.DBforPostgreSQL/servers/configurations") {
+		if strings.HasPrefix(configuration.Name.AsString(), parent) {
+			val := configuration.Properties.GetMapValue("value")
+			if strings.HasSuffix(configuration.Name.AsString(), "log_checkpoints") {
+				config.LogCheckpoints = val.AsBoolValue(false, configuration.Metadata)
+				continue
+			}
+			if strings.HasSuffix(configuration.Name.AsString(), "log_connections") {
+				config.LogConnections = val.AsBoolValue(false, configuration.Metadata)
+				continue
+			}
+			if strings.HasSuffix(configuration.Name.AsString(), "connection_throttling") {
+				config.ConnectionThrottling = val.AsBoolValue(false, configuration.Metadata)
+				continue
+			}
+		}
+	}
+
+	return config
+}
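The PostgreSQL configuration lookup leans entirely on naming conventions: child configuration resources are named `<server>/<setting>`. A standalone illustration of the prefix/suffix checks (server name hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// adaptPostgreSQLConfiguration builds parent as "<server name>/" and
	// matches child configuration resources by prefix, then routes the
	// value by suffix.
	parent := fmt.Sprintf("%s/", "psql01")
	name := "psql01/log_checkpoints"

	fmt.Println(strings.HasPrefix(name, parent))            // true: the configuration belongs to psql01
	fmt.Println(strings.HasSuffix(name, "log_checkpoints")) // true: its "value" feeds config.LogCheckpoints
}
```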
diff --git a/internal/adapters/arm/datafactory/adapt.go b/internal/adapters/arm/datafactory/adapt.go
new file mode 100644
index 000000000000..35679ef17193
--- /dev/null
+++ b/internal/adapters/arm/datafactory/adapt.go
@@ -0,0 +1,27 @@
+package datafactory
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/datafactory"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func Adapt(deployment azure.Deployment) datafactory.DataFactory {
+
+	return datafactory.DataFactory{
+		DataFactories: adaptDataFactories(deployment),
+	}
+}
+
+func adaptDataFactories(deployment azure.Deployment) (factories []datafactory.Factory) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.DataFactory/factories") {
+		factories = append(factories, adaptDataFactory(resource))
+	}
+	return factories
+}
+
+func adaptDataFactory(resource azure.Resource) datafactory.Factory {
+	return datafactory.Factory{
+		Metadata:            resource.Metadata,
+		EnablePublicNetwork: resource.Properties.GetMapValue("publicNetworkAccess").AsBoolValue(true, resource.Metadata),
+	}
+}
diff --git a/internal/adapters/arm/datalake/adapt.go b/internal/adapters/arm/datalake/adapt.go
new file mode 100644
index 000000000000..7c5438016b9c
--- /dev/null
+++ b/internal/adapters/arm/datalake/adapt.go
@@ -0,0 +1,28 @@
+package datalake
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/datalake"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func Adapt(deployment azure.Deployment) datalake.DataLake {
+
+	return datalake.DataLake{
+		Stores: adaptStores(deployment),
+	}
+}
+
+func adaptStores(deployment azure.Deployment) (stores []datalake.Store) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.DataLakeStore/accounts") {
+		stores = append(stores, adaptStore(resource))
+	}
+
+	return stores
+}
+
+func adaptStore(resource azure.Resource) datalake.Store {
+	return datalake.Store{
+		Metadata:         resource.Metadata,
+		EnableEncryption: resource.Properties.GetMapValue("encryptionState").AsBoolValue(false, resource.Metadata),
+	}
+}
diff --git a/internal/adapters/arm/keyvault/adapt.go b/internal/adapters/arm/keyvault/adapt.go
new file mode 100644
index 000000000000..23bc215cb042
--- /dev/null
+++ b/internal/adapters/arm/keyvault/adapt.go
@@ -0,0 +1,64 @@
+package keyvault
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/keyvault"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func Adapt(deployment azure.Deployment) keyvault.KeyVault {
+	return keyvault.KeyVault{
+		Vaults: adaptVaults(deployment),
+	}
+}
+
+func adaptVaults(deployment azure.Deployment) (vaults []keyvault.Vault) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.KeyVault/vaults") {
+		vaults = append(vaults, adaptVault(resource, deployment))
+	}
+
+	return vaults
+}
+
+func adaptVault(resource azure.Resource, deployment azure.Deployment) keyvault.Vault {
+	return keyvault.Vault{
+		Metadata:                resource.Metadata,
+		Secrets:                 adaptSecrets(resource, deployment),
+		Keys:                    adaptKeys(resource, deployment),
+		EnablePurgeProtection:   resource.Properties.GetMapValue("enablePurgeProtection").AsBoolValue(false, resource.Metadata),
+		SoftDeleteRetentionDays: resource.Properties.GetMapValue("softDeleteRetentionInDays").AsIntValue(7, resource.Metadata),
+		NetworkACLs: keyvault.NetworkACLs{
+			Metadata:      resource.Metadata,
+			DefaultAction: resource.Properties.GetMapValue("properties").GetMapValue("networkAcls").GetMapValue("defaultAction").AsStringValue("", resource.Metadata),
+		},
+	}
+}
+
+func adaptKeys(resource azure.Resource, deployment azure.Deployment) (keys []keyvault.Key) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.KeyVault/vaults/keys") {
+		keys = append(keys, adaptKey(resource))
+	}
+
+	return keys
+}
+
+func adaptKey(resource azure.Resource) keyvault.Key {
+	return keyvault.Key{
+		Metadata:   resource.Metadata,
+		ExpiryDate: resource.Properties.GetMapValue("attributes").GetMapValue("exp").AsTimeValue(resource.Metadata),
+	}
+}
+
+func adaptSecrets(resource azure.Resource, deployment azure.Deployment) (secrets []keyvault.Secret) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.KeyVault/vaults/secrets") {
+		secrets = append(secrets, adaptSecret(resource))
+	}
+	return secrets
+}
+
+func adaptSecret(resource azure.Resource) keyvault.Secret {
+	return keyvault.Secret{
+		Metadata:    resource.Metadata,
+		ContentType: resource.Properties.GetMapValue("contentType").AsStringValue("", resource.Metadata),
+		ExpiryDate:  resource.Properties.GetMapValue("attributes").GetMapValue("exp").AsTimeValue(resource.Metadata),
+	}
+}
diff --git a/internal/adapters/arm/monitor/adapt.go b/internal/adapters/arm/monitor/adapt.go
new file mode 100644
index 000000000000..7b7ece0cd477
--- /dev/null
+++ b/internal/adapters/arm/monitor/adapt.go
@@ -0,0 +1,45 @@
+package monitor
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/monitor"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func Adapt(deployment azure.Deployment) monitor.Monitor {
+	return monitor.Monitor{
+		LogProfiles: adaptLogProfiles(deployment),
+	}
+}
+
+func adaptLogProfiles(deployment azure.Deployment) (logProfiles []monitor.LogProfile) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Insights/logProfiles") {
+		logProfiles = append(logProfiles, adaptLogProfile(resource))
+	}
+	return logProfiles
+}
+
+func adaptLogProfile(resource azure.Resource) monitor.LogProfile {
+	categories := resource.Properties.GetMapValue("categories").AsList()
+	var categoriesList []types.StringValue
+	for _, category := range categories {
+		categoriesList = append(categoriesList, category.AsStringValue("", category.MisconfigMetadata))
+	}
+
+	locations := resource.Properties.GetMapValue("locations").AsList()
+	var locationsList []types.StringValue
+	for _, location := range locations {
+		locationsList = append(locationsList, location.AsStringValue("", location.MisconfigMetadata))
+	}
+
+	return monitor.LogProfile{
+		Metadata: resource.Metadata,
+		RetentionPolicy: monitor.RetentionPolicy{
+			Metadata: resource.Metadata,
+			Enabled:  resource.Properties.GetMapValue("retentionPolicy").GetMapValue("enabled").AsBoolValue(false, resource.Metadata),
+			Days:     resource.Properties.GetMapValue("retentionPolicy").GetMapValue("days").AsIntValue(0, resource.Metadata),
+		},
+		Categories: categoriesList,
+		Locations:  locationsList,
+	}
+}
diff --git a/internal/adapters/arm/network/adapt.go b/internal/adapters/arm/network/adapt.go
new file mode 100644
index 000000000000..05950a9b9d56
--- /dev/null
+++ b/internal/adapters/arm/network/adapt.go
@@ -0,0 +1,126 @@
+package network
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/aquasecurity/trivy/pkg/providers/azure/network"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+func Adapt(deployment azure.Deployment) network.Network {
+	return network.Network{
+		SecurityGroups:         adaptSecurityGroups(deployment),
+		NetworkWatcherFlowLogs: adaptNetworkWatcherFlowLogs(deployment),
+	}
+}
+
+func adaptSecurityGroups(deployment azure.Deployment) (sgs []network.SecurityGroup) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Network/networkSecurityGroups") {
+		sgs = append(sgs, adaptSecurityGroup(resource, deployment))
+	}
+	return sgs
+
+}
+
+func adaptSecurityGroup(resource azure.Resource, deployment azure.Deployment) network.SecurityGroup {
+	return network.SecurityGroup{
+		Metadata: resource.Metadata,
+		Rules:    adaptSecurityGroupRules(resource, deployment),
+	}
+}
+
+func adaptSecurityGroupRules(resource azure.Resource, deployment azure.Deployment) (rules []network.SecurityGroupRule) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Network/networkSecurityGroups/securityRules") {
+		rules = append(rules, adaptSecurityGroupRule(resource))
+	}
+	return rules
+}
+
+func adaptSecurityGroupRule(resource azure.Resource) network.SecurityGroupRule {
+	sourceAddressPrefixes := resource.Properties.GetMapValue("sourceAddressPrefixes").AsStringValuesList("")
+	sourceAddressPrefixes = append(sourceAddressPrefixes, resource.Properties.GetMapValue("sourceAddressPrefix").AsStringValue("", resource.Metadata))
+
+	var sourcePortRanges []network.PortRange
+	for _, portRange := range resource.Properties.GetMapValue("sourcePortRanges").AsList() {
+		sourcePortRanges = append(sourcePortRanges, expandRange(portRange.AsString(), resource.Metadata))
+	}
+	sourcePortRanges = append(sourcePortRanges, expandRange(resource.Properties.GetMapValue("sourcePortRange").AsString(), resource.Metadata))
+
+	destinationAddressPrefixes := resource.Properties.GetMapValue("destinationAddressPrefixes").AsStringValuesList("")
+	destinationAddressPrefixes = append(destinationAddressPrefixes, resource.Properties.GetMapValue("destinationAddressPrefix").AsStringValue("", resource.Metadata))
+
+	var destinationPortRanges []network.PortRange
+	for _, portRange := range resource.Properties.GetMapValue("destinationPortRanges").AsList() {
+		destinationPortRanges = append(destinationPortRanges, expandRange(portRange.AsString(), resource.Metadata))
+	}
+	destinationPortRanges = append(destinationPortRanges, expandRange(resource.Properties.GetMapValue("destinationPortRange").AsString(), resource.Metadata))
+
+	allow := defsecTypes.BoolDefault(false, resource.Metadata)
+	if resource.Properties.GetMapValue("access").AsString() == "Allow" {
+		allow = defsecTypes.Bool(true, resource.Metadata)
+	}
+
+	outbound := defsecTypes.BoolDefault(false, resource.Metadata)
+	if resource.Properties.GetMapValue("direction").AsString() == "Outbound" {
+		outbound = defsecTypes.Bool(true, resource.Metadata)
+	}
+
+	return network.SecurityGroupRule{
+		Metadata:             resource.Metadata,
+		Outbound:             outbound,
+		Allow:                allow,
+		SourceAddresses:      sourceAddressPrefixes,
+		SourcePorts:          sourcePortRanges,
+		DestinationAddresses: destinationAddressPrefixes,
+		DestinationPorts:     destinationPortRanges,
+		Protocol:             resource.Properties.GetMapValue("protocol").AsStringValue("", resource.Metadata),
+	}
+}
+
+func adaptNetworkWatcherFlowLogs(deployment azure.Deployment) (flowLogs []network.NetworkWatcherFlowLog) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Network/networkWatchers/flowLogs") {
+		flowLogs = append(flowLogs, adaptNetworkWatcherFlowLog(resource))
+	}
+	return flowLogs
+}
+
+func adaptNetworkWatcherFlowLog(resource azure.Resource) network.NetworkWatcherFlowLog {
+	return network.NetworkWatcherFlowLog{
+		Metadata: resource.Metadata,
+		RetentionPolicy: network.RetentionPolicy{
+			Metadata: resource.Metadata,
+			Enabled:  resource.Properties.GetMapValue("retentionPolicy").GetMapValue("enabled").AsBoolValue(false, resource.Metadata),
+			Days:     resource.Properties.GetMapValue("retentionPolicy").GetMapValue("days").AsIntValue(0, resource.Metadata),
+		},
+	}
+}
+
+func expandRange(r string, m defsecTypes.MisconfigMetadata) network.PortRange {
+	start := 0
+	end := 65535
+	switch {
+	case r == "*":
+	case strings.Contains(r, "-"):
+		if parts := strings.Split(r, "-"); len(parts) == 2 {
+			if p1, err := strconv.ParseInt(parts[0], 10, 32); err == nil {
+				start = int(p1)
+			}
+			if p2, err := strconv.ParseInt(parts[1], 10, 32); err == nil {
+				end = int(p2)
+			}
+		}
+	default:
+		if val, err := strconv.ParseInt(r, 10, 32); err == nil {
+			start = int(val)
+			end = int(val)
+		}
+	}
+
+	return network.PortRange{
+		Metadata: m,
+		Start:    start,
+		End:      end,
+	}
+}
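`expandRange` is the interesting piece of this adapter: it accepts `*`, a single port, or a `low-high` span, and silently keeps the full 0–65535 default for anything it cannot parse. A test-style sketch (same helpers as the other tests in this diff):

```go
package network

import (
	"testing"

	"github.com/aquasecurity/trivy/pkg/types"

	"github.com/stretchr/testify/assert"
)

func Test_ExpandRange(t *testing.T) {
	m := types.NewTestMisconfigMetadata()

	all := expandRange("*", m)          // wildcard: full range
	one := expandRange("443", m)        // single port: Start == End
	span := expandRange("1024-2048", m) // dash-separated bounds parsed individually
	bad := expandRange("junk", m)       // unparseable: defaults survive

	assert.Equal(t, 0, all.Start)
	assert.Equal(t, 65535, all.End)
	assert.Equal(t, 443, one.Start)
	assert.Equal(t, 443, one.End)
	assert.Equal(t, 1024, span.Start)
	assert.Equal(t, 2048, span.End)
	assert.Equal(t, 0, bad.Start)
	assert.Equal(t, 65535, bad.End)
}
```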
diff --git a/internal/adapters/arm/securitycenter/adapt.go b/internal/adapters/arm/securitycenter/adapt.go
new file mode 100644
index 000000000000..7706ed2d7443
--- /dev/null
+++ b/internal/adapters/arm/securitycenter/adapt.go
@@ -0,0 +1,43 @@
+package securitycenter
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/securitycenter"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+)
+
+func Adapt(deployment azure.Deployment) securitycenter.SecurityCenter {
+	return securitycenter.SecurityCenter{
+		Contacts:      adaptContacts(deployment),
+		Subscriptions: adaptSubscriptions(deployment),
+	}
+}
+
+func adaptContacts(deployment azure.Deployment) (contacts []securitycenter.Contact) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Security/securityContacts") {
+		contacts = append(contacts, adaptContact(resource))
+	}
+
+	return contacts
+}
+
+func adaptContact(resource azure.Resource) securitycenter.Contact {
+	return securitycenter.Contact{
+		Metadata:                 resource.Metadata,
+		EnableAlertNotifications: resource.Properties.GetMapValue("email").AsBoolValue(false, resource.Metadata),
+		Phone:                    resource.Properties.GetMapValue("phone").AsStringValue("", resource.Metadata),
+	}
+}
+
+func adaptSubscriptions(deployment azure.Deployment) (subscriptions []securitycenter.SubscriptionPricing) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Security/pricings") {
+		subscriptions = append(subscriptions, adaptSubscription(resource))
+	}
+	return subscriptions
+}
+
+func adaptSubscription(resource azure.Resource) securitycenter.SubscriptionPricing {
+	return securitycenter.SubscriptionPricing{
+		Metadata: resource.Metadata,
+		Tier:     resource.Properties.GetMapValue("pricingTier").AsStringValue("Free", resource.Metadata),
+	}
+}
diff --git a/internal/adapters/arm/storage/adapt.go b/internal/adapters/arm/storage/adapt.go
new file mode 100644
index 000000000000..14f51d2a51b7
--- /dev/null
+++ b/internal/adapters/arm/storage/adapt.go
@@ -0,0 +1,69 @@
+package storage
+
+import (
+	"strings"
+
+	"github.com/aquasecurity/trivy/pkg/providers/azure/storage"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func Adapt(deployment azure.Deployment) storage.Storage {
+	return storage.Storage{
+		Accounts: adaptAccounts(deployment),
+	}
+}
+
+func adaptAccounts(deployment azure.Deployment) []storage.Account {
+	var accounts []storage.Account
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Storage/storageAccounts") {
+
+		var networkRules []storage.NetworkRule
+		for _, acl := range resource.Properties.GetMapValue("networkAcls").AsList() {
+
+			var bypasses []types.StringValue
+			bypassProp := acl.GetMapValue("bypass")
+			for _, bypass := range strings.Split(bypassProp.AsString(), ",") {
+				bypasses = append(bypasses, types.String(bypass, bypassProp.GetMetadata()))
+			}
+
+			networkRules = append(networkRules, storage.NetworkRule{
+				Metadata:       acl.GetMetadata(),
+				Bypass:         bypasses,
+				AllowByDefault: types.Bool(acl.GetMapValue("defaultAction").EqualTo("Allow"), acl.GetMetadata()),
+			})
+		}
+
+		var queues []storage.Queue
+		for _, queueResource := range resource.GetResourcesByType("queueServices/queues") {
+			queues = append(queues, storage.Queue{
+				Metadata: queueResource.Metadata,
+				Name:     queueResource.Name.AsStringValue("", queueResource.Metadata),
+			})
+		}
+
+		var containers []storage.Container
+		for _, containerResource := range resource.GetResourcesByType("containerServices/containers") {
+			containers = append(containers, storage.Container{
+				Metadata:     containerResource.Metadata,
+				PublicAccess: containerResource.Properties.GetMapValue("publicAccess").AsStringValue("None", containerResource.Metadata),
+			})
+		}
+
+		account := storage.Account{
+			Metadata:     resource.Metadata,
+			NetworkRules: networkRules,
+			EnforceHTTPS: resource.Properties.GetMapValue("supportsHttpsTrafficOnly").AsBoolValue(false, resource.Properties.GetMetadata()),
+			Containers:   containers,
+			QueueProperties: storage.QueueProperties{
+				Metadata:      resource.Properties.GetMetadata(),
+				EnableLogging: types.BoolDefault(false, resource.Properties.GetMetadata()),
+			},
+			MinimumTLSVersion: resource.Properties.GetMapValue("minimumTlsVersion").AsStringValue("TLS1_0", resource.Properties.GetMetadata()),
+			Queues:            queues,
+		}
+		accounts = append(accounts, account)
+	}
+
+	return accounts
+}
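Two details of the storage adapter worth calling out: the ARM `bypass` property arrives as a single comma-separated string, and every derived `StringValue` shares that property's metadata. A sketch of just the splitting step, mirroring the loop above:

```go
package storage

import (
	"strings"
	"testing"

	"github.com/aquasecurity/trivy/pkg/scanners/azure"
	"github.com/aquasecurity/trivy/pkg/types"

	"github.com/stretchr/testify/assert"
)

func Test_SplitBypass(t *testing.T) {
	bypassProp := azure.NewValue("AzureServices,Metrics", types.NewTestMisconfigMetadata())

	var bypasses []types.StringValue
	for _, bypass := range strings.Split(bypassProp.AsString(), ",") {
		bypasses = append(bypasses, types.String(bypass, bypassProp.GetMetadata()))
	}

	// "AzureServices,Metrics" becomes two values, both anchored to the
	// original bypass property's metadata.
	assert.Equal(t, "AzureServices", bypasses[0].Value())
	assert.Equal(t, "Metrics", bypasses[1].Value())
}
```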
diff --git a/internal/adapters/arm/storage/adapt_test.go b/internal/adapters/arm/storage/adapt_test.go
new file mode 100644
index 000000000000..b4d76cc93453
--- /dev/null
+++ b/internal/adapters/arm/storage/adapt_test.go
@@ -0,0 +1,59 @@
+package storage
+
+import (
+	"testing"
+
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aquasecurity/trivy/pkg/types"
+
+	"github.com/stretchr/testify/require"
+)
+
+func Test_AdaptStorageDefaults(t *testing.T) {
+
+	input := azure.Deployment{
+		Resources: []azure.Resource{
+			{
+				Type:       azure.NewValue("Microsoft.Storage/storageAccounts", types.NewTestMisconfigMetadata()),
+				Properties: azure.NewValue(map[string]azure.Value{}, types.NewTestMisconfigMetadata()),
+			},
+		},
+	}
+
+	output := Adapt(input)
+
+	require.Len(t, output.Accounts, 1)
+
+	account := output.Accounts[0]
+	assert.Equal(t, "TLS1_0", account.MinimumTLSVersion.Value())
+	assert.Equal(t, false, account.EnforceHTTPS.Value())
+
+}
+
+func Test_AdaptStorage(t *testing.T) {
+
+	input := azure.Deployment{
+		Resources: []azure.Resource{
+			{
+				Type: azure.NewValue("Microsoft.Storage/storageAccounts", types.NewTestMisconfigMetadata()),
+				Name: azure.Value{},
+				Properties: azure.NewValue(map[string]azure.Value{
+					"minimumTlsVersion":        azure.NewValue("TLS1_2", types.NewTestMisconfigMetadata()),
+					"supportsHttpsTrafficOnly": azure.NewValue(true, types.NewTestMisconfigMetadata()),
+				}, types.NewTestMisconfigMetadata()),
+			},
+		},
+	}
+
+	output := Adapt(input)
+
+	require.Len(t, output.Accounts, 1)
+
+	account := output.Accounts[0]
+	assert.Equal(t, "TLS1_2", account.MinimumTLSVersion.Value())
+	assert.Equal(t, true, account.EnforceHTTPS.Value())
+
+}
diff --git a/internal/adapters/arm/synapse/adapt.go b/internal/adapters/arm/synapse/adapt.go
new file mode 100644
index 000000000000..07bcb1e78ae0
--- /dev/null
+++ b/internal/adapters/arm/synapse/adapt.go
@@ -0,0 +1,34 @@
+package synapse
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/azure/synapse"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure"
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func Adapt(deployment azure.Deployment) synapse.Synapse {
+	return synapse.Synapse{
+		Workspaces: adaptWorkspaces(deployment),
+	}
+}
+
+func adaptWorkspaces(deployment azure.Deployment) (workspaces []synapse.Workspace) {
+	for _, resource := range deployment.GetResourcesByType("Microsoft.Synapse/workspaces") {
+		workspaces = append(workspaces, adaptWorkspace(resource))
+	}
+	return workspaces
+}
+
+func adaptWorkspace(resource azure.Resource) synapse.Workspace {
+
+	managedVirtualNetwork := resource.Properties.GetMapValue("managedVirtualNetwork").AsString()
+	enableManagedVirtualNetwork := types.BoolDefault(false, resource.Metadata)
+	if managedVirtualNetwork == "default" {
+		enableManagedVirtualNetwork = types.Bool(true, resource.Metadata)
+	}
+
+	return synapse.Workspace{
+		Metadata:                    resource.Metadata,
+		EnableManagedVirtualNetwork: enableManagedVirtualNetwork,
+	}
+}
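The Synapse flag is derived from a string comparison, so only the exact value `"default"` enables the managed virtual network. An illustrative test in the same style as the others in this diff (not part of the patch):

```go
package synapse

import (
	"testing"

	"github.com/aquasecurity/trivy/pkg/scanners/azure"
	"github.com/aquasecurity/trivy/pkg/types"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func Test_AdaptWorkspace(t *testing.T) {
	input := azure.Deployment{
		Resources: []azure.Resource{
			{
				Type: azure.NewValue("Microsoft.Synapse/workspaces", types.NewTestMisconfigMetadata()),
				Properties: azure.NewValue(map[string]azure.Value{
					"managedVirtualNetwork": azure.NewValue("default", types.NewTestMisconfigMetadata()),
				}, types.NewTestMisconfigMetadata()),
			},
		},
	}

	output := Adapt(input)

	require.Len(t, output.Workspaces, 1)
	// Any other string, or a missing property, leaves the
	// BoolDefault(false, ...) in place.
	assert.True(t, output.Workspaces[0].EnableManagedVirtualNetwork.IsTrue())
}
```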
"github.com/aquasecurity/trivy/pkg/state" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) *state.State { + return &state.State{ + AWS: aws.Adapt(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/accessanalyzer/accessanalyzer.go b/internal/adapters/cloudformation/aws/accessanalyzer/accessanalyzer.go new file mode 100644 index 000000000000..4f1cd10d1268 --- /dev/null +++ b/internal/adapters/cloudformation/aws/accessanalyzer/accessanalyzer.go @@ -0,0 +1,13 @@ +package accessanalyzer + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/accessanalyzer" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) accessanalyzer.AccessAnalyzer { + return accessanalyzer.AccessAnalyzer{ + Analyzers: getAccessAnalyzer(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/accessanalyzer/analyzer.go b/internal/adapters/cloudformation/aws/accessanalyzer/analyzer.go new file mode 100644 index 000000000000..712a9bf60cfd --- /dev/null +++ b/internal/adapters/cloudformation/aws/accessanalyzer/analyzer.go @@ -0,0 +1,24 @@ +package accessanalyzer + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/accessanalyzer" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getAccessAnalyzer(ctx parser.FileContext) (analyzers []accessanalyzer.Analyzer) { + + analyzersList := ctx.GetResourcesByType("AWS::AccessAnalyzer::Analyzer") + + for _, r := range analyzersList { + aa := accessanalyzer.Analyzer{ + Metadata: r.Metadata(), + Name: r.GetStringProperty("AnalyzerName"), + ARN: r.StringDefault(""), + Active: types.BoolDefault(false, r.Metadata()), + } + + analyzers = append(analyzers, aa) + } + return analyzers +} diff --git a/internal/adapters/cloudformation/aws/adapt.go b/internal/adapters/cloudformation/aws/adapt.go new file mode 100644 index 000000000000..31db0069154c --- /dev/null +++ b/internal/adapters/cloudformation/aws/adapt.go @@ -0,0 +1,74 @@ +package aws + +import ( + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/apigateway" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/athena" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/cloudfront" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/cloudtrail" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/cloudwatch" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/codebuild" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/config" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/documentdb" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/dynamodb" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/ec2" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/ecr" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/ecs" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/efs" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/eks" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/elasticache" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/elasticsearch" + 
"github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/elb" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/iam" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/kinesis" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/lambda" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/mq" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/msk" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/neptune" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/rds" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/redshift" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/s3" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/sam" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/sns" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/sqs" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/ssm" + "github.com/aquasecurity/trivy/internal/adapters/cloudformation/aws/workspaces" + "github.com/aquasecurity/trivy/pkg/providers/aws" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) aws.AWS { + return aws.AWS{ + APIGateway: apigateway.Adapt(cfFile), + Athena: athena.Adapt(cfFile), + Cloudfront: cloudfront.Adapt(cfFile), + CloudTrail: cloudtrail.Adapt(cfFile), + CloudWatch: cloudwatch.Adapt(cfFile), + CodeBuild: codebuild.Adapt(cfFile), + Config: config.Adapt(cfFile), + DocumentDB: documentdb.Adapt(cfFile), + DynamoDB: dynamodb.Adapt(cfFile), + EC2: ec2.Adapt(cfFile), + ECR: ecr.Adapt(cfFile), + ECS: ecs.Adapt(cfFile), + EFS: efs.Adapt(cfFile), + IAM: iam.Adapt(cfFile), + EKS: eks.Adapt(cfFile), + ElastiCache: elasticache.Adapt(cfFile), + Elasticsearch: elasticsearch.Adapt(cfFile), + ELB: elb.Adapt(cfFile), + MSK: msk.Adapt(cfFile), + MQ: mq.Adapt(cfFile), + Kinesis: kinesis.Adapt(cfFile), + Lambda: lambda.Adapt(cfFile), + Neptune: neptune.Adapt(cfFile), + RDS: rds.Adapt(cfFile), + Redshift: redshift.Adapt(cfFile), + S3: s3.Adapt(cfFile), + SAM: sam.Adapt(cfFile), + SNS: sns.Adapt(cfFile), + SQS: sqs.Adapt(cfFile), + SSM: ssm.Adapt(cfFile), + WorkSpaces: workspaces.Adapt(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/apigateway/apigateway.go b/internal/adapters/cloudformation/aws/apigateway/apigateway.go new file mode 100644 index 000000000000..5d47a1f43872 --- /dev/null +++ b/internal/adapters/cloudformation/aws/apigateway/apigateway.go @@ -0,0 +1,21 @@ +package apigateway + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway" + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
diff --git a/internal/adapters/cloudformation/aws/apigateway/apigateway.go b/internal/adapters/cloudformation/aws/apigateway/apigateway.go
new file mode 100644
index 000000000000..5d47a1f43872
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/apigateway/apigateway.go
@@ -0,0 +1,21 @@
+package apigateway
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/apigateway"
+	v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1"
+	v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+)
+
+// Adapt adapts the API Gateway resources in a CloudFormation file; only v2 APIs are adapted.
+func Adapt(cfFile parser.FileContext) apigateway.APIGateway {
+	return apigateway.APIGateway{
+		V1: v1.APIGateway{
+			APIs:        nil,
+			DomainNames: nil,
+		},
+		V2: v2.APIGateway{
+			APIs: getApis(cfFile),
+		},
+	}
+}
diff --git a/internal/adapters/cloudformation/aws/apigateway/stage.go b/internal/adapters/cloudformation/aws/apigateway/stage.go
new file mode 100644
index 000000000000..6d1f3eee6422
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/apigateway/stage.go
@@ -0,0 +1,68 @@
+package apigateway
+
+import (
+	v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func getApis(cfFile parser.FileContext) (apis []v2.API) {
+
+	apiResources := cfFile.GetResourcesByType("AWS::ApiGatewayV2::Api")
+	for _, apiRes := range apiResources {
+		api := v2.API{
+			Metadata:     apiRes.Metadata(),
+			Name:         types.StringDefault("", apiRes.Metadata()),
+			ProtocolType: types.StringDefault("", apiRes.Metadata()),
+			Stages:       getStages(apiRes.ID(), cfFile),
+		}
+		apis = append(apis, api)
+	}
+
+	return apis
+}
+
+func getStages(apiId string, cfFile parser.FileContext) []v2.Stage {
+	var apiStages []v2.Stage
+
+	stageResources := cfFile.GetResourcesByType("AWS::ApiGatewayV2::Stage")
+	for _, r := range stageResources {
+		stageApiId := r.GetStringProperty("ApiId")
+		if stageApiId.Value() != apiId {
+			continue
+		}
+
+		s := v2.Stage{
+			Metadata:      r.Metadata(),
+			Name:          r.GetStringProperty("StageName"),
+			AccessLogging: getAccessLogging(r),
+		}
+		apiStages = append(apiStages, s)
+	}
+
+	return apiStages
+}
+
+func getAccessLogging(r *parser.Resource) v2.AccessLogging {
+
+	loggingProp := r.GetProperty("AccessLogSettings")
+	if loggingProp.IsNil() {
+		return v2.AccessLogging{
+			Metadata:              r.Metadata(),
+			CloudwatchLogGroupARN: types.StringDefault("", r.Metadata()),
+		}
+	}
+
+	destinationProp := r.GetProperty("AccessLogSettings.DestinationArn")
+
+	if destinationProp.IsNil() {
+		return v2.AccessLogging{
+			Metadata:              loggingProp.Metadata(),
+			CloudwatchLogGroupARN: types.StringDefault("", r.Metadata()),
+		}
+	}
+	return v2.AccessLogging{
+		Metadata:              destinationProp.Metadata(),
+		CloudwatchLogGroupARN: destinationProp.AsStringValue(),
+	}
+}
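How a template wires these pieces together: `getStages` only attaches a stage whose `ApiId` resolves to the API's logical ID, and `DestinationArn` is what ends up in `CloudwatchLogGroupARN`. A hypothetical fragment (assuming the parser resolves `!Ref` to the logical resource ID, which is what the `apiRes.ID()` comparison above relies on; names and the ARN are invented):

```go
package main

import "fmt"

// Hypothetical CloudFormation fragment for the adapter above.
const template = `
Resources:
  MyApi:
    Type: AWS::ApiGatewayV2::Api
  MyStage:
    Type: AWS::ApiGatewayV2::Stage
    Properties:
      ApiId: !Ref MyApi
      StageName: prod
      AccessLogSettings:
        DestinationArn: arn:aws:logs:us-east-1:123456789012:log-group:api-logs
`

func main() {
	fmt.Print(template)
}
```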
diff --git a/internal/adapters/cloudformation/aws/athena/athena.go b/internal/adapters/cloudformation/aws/athena/athena.go
new file mode 100644
index 000000000000..0c44f7e4e602
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/athena/athena.go
@@ -0,0 +1,14 @@
+package athena
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/athena"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+)
+
+// Adapt adapts the Athena workgroups in a CloudFormation file; databases are not adapted.
+func Adapt(cfFile parser.FileContext) athena.Athena {
+	return athena.Athena{
+		Databases:  nil,
+		Workgroups: getWorkGroups(cfFile),
+	}
+}
diff --git a/internal/adapters/cloudformation/aws/athena/workgroup.go b/internal/adapters/cloudformation/aws/athena/workgroup.go
new file mode 100644
index 000000000000..a39bdc114d92
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/athena/workgroup.go
@@ -0,0 +1,30 @@
+package athena
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/athena"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+)
+
+func getWorkGroups(cfFile parser.FileContext) []athena.Workgroup {
+
+	var workgroups []athena.Workgroup
+
+	workgroupResources := cfFile.GetResourcesByType("AWS::Athena::WorkGroup")
+
+	for _, r := range workgroupResources {
+
+		wg := athena.Workgroup{
+			Metadata: r.Metadata(),
+			Name:     r.GetStringProperty("Name"),
+			Encryption: athena.EncryptionConfiguration{
+				Metadata: r.Metadata(),
+				Type:     r.GetStringProperty("WorkGroupConfiguration.ResultConfiguration.EncryptionConfiguration.EncryptionOption"),
+			},
+			EnforceConfiguration: r.GetBoolProperty("WorkGroupConfiguration.EnforceWorkGroupConfiguration"),
+		}
+
+		workgroups = append(workgroups, wg)
+	}
+
+	return workgroups
+}
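Worth noting that `GetStringProperty` takes dotted paths, which is how the workgroup adapter reaches three levels into `WorkGroupConfiguration` in a single call. A hypothetical template showing the nesting those paths traverse (workgroup name invented):

```go
package main

import "fmt"

// The two dotted lookups above resolve here to EncryptionOption ("SSE_KMS")
// and EnforceWorkGroupConfiguration (true).
const workgroup = `
Resources:
  Analytics:
    Type: AWS::Athena::WorkGroup
    Properties:
      Name: analytics
      WorkGroupConfiguration:
        EnforceWorkGroupConfiguration: true
        ResultConfiguration:
          EncryptionConfiguration:
            EncryptionOption: SSE_KMS
`

func main() {
	fmt.Print(workgroup)
}
```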
ViewerProtocolPolicy: types.StringDefault("allow-all", r.Metadata()), + } + } + protoProp := r.GetProperty("DistributionConfig.DefaultCacheBehavior.ViewerProtocolPolicy") + if protoProp.IsNotString() { + return cloudfront.CacheBehaviour{ + Metadata: r.Metadata(), + ViewerProtocolPolicy: types.StringDefault("allow-all", r.Metadata()), + } + } + + return cloudfront.CacheBehaviour{ + Metadata: r.Metadata(), + ViewerProtocolPolicy: protoProp.AsStringValue(), + } +} diff --git a/internal/adapters/cloudformation/aws/cloudtrail/cloudtrail.go b/internal/adapters/cloudformation/aws/cloudtrail/cloudtrail.go new file mode 100644 index 000000000000..514849b55631 --- /dev/null +++ b/internal/adapters/cloudformation/aws/cloudtrail/cloudtrail.go @@ -0,0 +1,13 @@ +package cloudtrail + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudtrail" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) cloudtrail.CloudTrail { + return cloudtrail.CloudTrail{ + Trails: getCloudTrails(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/cloudtrail/trails.go b/internal/adapters/cloudformation/aws/cloudtrail/trails.go new file mode 100644 index 000000000000..8cc6547be50f --- /dev/null +++ b/internal/adapters/cloudformation/aws/cloudtrail/trails.go @@ -0,0 +1,27 @@ +package cloudtrail + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudtrail" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getCloudTrails(ctx parser.FileContext) (trails []cloudtrail.Trail) { + + cloudtrailResources := ctx.GetResourcesByType("AWS::CloudTrail::Trail") + + for _, r := range cloudtrailResources { + ct := cloudtrail.Trail{ + Metadata: r.Metadata(), + Name: r.GetStringProperty("TrailName"), + EnableLogFileValidation: r.GetBoolProperty("EnableLogFileValidation"), + IsMultiRegion: r.GetBoolProperty("IsMultiRegionTrail"), + KMSKeyID: r.GetStringProperty("KmsKeyId"), + CloudWatchLogsLogGroupArn: r.GetStringProperty("CloudWatchLogsLogGroupArn"), + IsLogging: r.GetBoolProperty("IsLogging"), + BucketName: r.GetStringProperty("S3BucketName"), + } + + trails = append(trails, ct) + } + return trails +} diff --git a/internal/adapters/cloudformation/aws/cloudwatch/cloudwatch.go b/internal/adapters/cloudformation/aws/cloudwatch/cloudwatch.go new file mode 100644 index 000000000000..de542dda9f06 --- /dev/null +++ b/internal/adapters/cloudformation/aws/cloudwatch/cloudwatch.go @@ -0,0 +1,14 @@ +package cloudwatch + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudwatch" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
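+// It collects AWS::Logs::LogGroup resources into log groups; alarms are
+// left nil, as they are not adapted from CloudFormation.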
+func Adapt(cfFile parser.FileContext) cloudwatch.CloudWatch { + return cloudwatch.CloudWatch{ + LogGroups: getLogGroups(cfFile), + Alarms: nil, + } +} diff --git a/internal/adapters/cloudformation/aws/cloudwatch/log_group.go b/internal/adapters/cloudformation/aws/cloudwatch/log_group.go new file mode 100644 index 000000000000..c7a1766fdba1 --- /dev/null +++ b/internal/adapters/cloudformation/aws/cloudwatch/log_group.go @@ -0,0 +1,26 @@ +package cloudwatch + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudwatch" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getLogGroups(ctx parser.FileContext) (logGroups []cloudwatch.LogGroup) { + + logGroupResources := ctx.GetResourcesByType("AWS::Logs::LogGroup") + + for _, r := range logGroupResources { + group := cloudwatch.LogGroup{ + Metadata: r.Metadata(), + Arn: types.StringDefault("", r.Metadata()), + Name: r.GetStringProperty("LogGroupName"), + KMSKeyID: r.GetStringProperty("KmsKeyId"), + RetentionInDays: r.GetIntProperty("RetentionInDays", 0), + MetricFilters: nil, + } + logGroups = append(logGroups, group) + } + + return logGroups +} diff --git a/internal/adapters/cloudformation/aws/codebuild/codebuild.go b/internal/adapters/cloudformation/aws/codebuild/codebuild.go new file mode 100644 index 000000000000..01b8472b2d56 --- /dev/null +++ b/internal/adapters/cloudformation/aws/codebuild/codebuild.go @@ -0,0 +1,13 @@ +package codebuild + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/codebuild" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) codebuild.CodeBuild { + return codebuild.CodeBuild{ + Projects: getProjects(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/codebuild/project.go b/internal/adapters/cloudformation/aws/codebuild/project.go new file mode 100644 index 000000000000..23f1f485b884 --- /dev/null +++ b/internal/adapters/cloudformation/aws/codebuild/project.go @@ -0,0 +1,63 @@ +package codebuild + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/codebuild" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getProjects(ctx parser.FileContext) (projects []codebuild.Project) { + + projectResources := ctx.GetResourcesByType("AWS::CodeBuild::Project") + + for _, r := range projectResources { + project := codebuild.Project{ + Metadata: r.Metadata(), + ArtifactSettings: getArtifactSettings(r), + SecondaryArtifactSettings: getSecondaryArtifactSettings(r), + } + + projects = append(projects, project) + } + + return projects +} + +func getSecondaryArtifactSettings(r *parser.Resource) (secondaryArtifacts []codebuild.ArtifactSettings) { + secondaryArtifactsList := r.GetProperty("SecondaryArtifacts") + if secondaryArtifactsList.IsNil() || !secondaryArtifactsList.IsList() { + return + } + + for _, a := range secondaryArtifactsList.AsList() { + settings := codebuild.ArtifactSettings{ + Metadata: secondaryArtifactsList.Metadata(), + EncryptionEnabled: types.BoolDefault(true, secondaryArtifactsList.Metadata()), + } + encryptionDisabled := a.GetProperty("EncryptionDisabled") + if encryptionDisabled.IsBool() { + settings.EncryptionEnabled = types.Bool(!encryptionDisabled.AsBool(), encryptionDisabled.Metadata()) + } + secondaryArtifacts = append(secondaryArtifacts, settings) + } + + return 
secondaryArtifacts +} + +func getArtifactSettings(r *parser.Resource) codebuild.ArtifactSettings { + + settings := codebuild.ArtifactSettings{ + Metadata: r.Metadata(), + EncryptionEnabled: types.BoolDefault(true, r.Metadata()), + } + + artifactsProperty := r.GetProperty("Artifacts") + if artifactsProperty.IsNotNil() { + encryptionDisabled := artifactsProperty.GetProperty("EncryptionDisabled") + if encryptionDisabled.IsBool() { + settings.EncryptionEnabled = types.Bool(!encryptionDisabled.AsBool(), encryptionDisabled.Metadata()) + } + } + + return settings +} diff --git a/internal/adapters/cloudformation/aws/config/adapt_test.go b/internal/adapters/cloudformation/aws/config/adapt_test.go new file mode 100644 index 000000000000..39c350e94368 --- /dev/null +++ b/internal/adapters/cloudformation/aws/config/adapt_test.go @@ -0,0 +1,71 @@ +package config + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/aws/config" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +func TestAdapt(t *testing.T) { + tests := []struct { + name string + source string + expected config.Config + }{ + { + name: "Config aggregator with AccountAggregationSources", + source: `AWSTemplateFormatVersion: "2010-09-09" +Resources: + ConfigurationAggregator: + Type: AWS::Config::ConfigurationAggregator + Properties: + AccountAggregationSources: + - AllAwsRegions: "true" +`, + expected: config.Config{ + ConfigurationAggregrator: config.ConfigurationAggregrator{ + Metadata: types.NewTestMisconfigMetadata(), + SourceAllRegions: types.Bool(true, types.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "Config aggregator with OrganizationAggregationSource", + source: `AWSTemplateFormatVersion: "2010-09-09" +Resources: + ConfigurationAggregator: + Type: AWS::Config::ConfigurationAggregator + Properties: + OrganizationAggregationSource: + AllAwsRegions: "true" +`, + expected: config.Config{ + ConfigurationAggregrator: config.ConfigurationAggregrator{ + Metadata: types.NewTestMisconfigMetadata(), + SourceAllRegions: types.Bool(true, types.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "template.yaml": tt.source, + }) + + p := parser.New() + fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml") + require.NoError(t, err) + + testutil.AssertDefsecEqual(t, tt.expected, Adapt(*fctx)) + }) + } + +} diff --git a/internal/adapters/cloudformation/aws/config/aggregator.go b/internal/adapters/cloudformation/aws/config/aggregator.go new file mode 100644 index 000000000000..046a20798034 --- /dev/null +++ b/internal/adapters/cloudformation/aws/config/aggregator.go @@ -0,0 +1,41 @@ +package config + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/config" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getConfigurationAggregator(ctx parser.FileContext) config.ConfigurationAggregrator { + + aggregator := config.ConfigurationAggregrator{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + SourceAllRegions: defsecTypes.BoolDefault(false, ctx.Metadata()), + } + + aggregatorResources := ctx.GetResourcesByType("AWS::Config::ConfigurationAggregator") + + if 
len(aggregatorResources) == 0 { + return aggregator + } + + return config.ConfigurationAggregrator{ + Metadata: aggregatorResources[0].Metadata(), + SourceAllRegions: isSourcingAllRegions(aggregatorResources[0]), + } +} + +func isSourcingAllRegions(r *parser.Resource) defsecTypes.BoolValue { + accountProp := r.GetProperty("AccountAggregationSources") + + if accountProp.IsNotNil() && accountProp.IsList() { + for _, a := range accountProp.AsList() { + regionsProp := a.GetProperty("AllAwsRegions") + if regionsProp.IsNotNil() { + return a.GetBoolProperty("AllAwsRegions") + } + } + } + + return r.GetBoolProperty("OrganizationAggregationSource.AllAwsRegions") +} diff --git a/internal/adapters/cloudformation/aws/config/config.go b/internal/adapters/cloudformation/aws/config/config.go new file mode 100644 index 000000000000..f3fc2cb4a096 --- /dev/null +++ b/internal/adapters/cloudformation/aws/config/config.go @@ -0,0 +1,13 @@ +package config + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/config" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) config.Config { + return config.Config{ + ConfigurationAggregrator: getConfigurationAggregator(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/documentdb/cluster.go b/internal/adapters/cloudformation/aws/documentdb/cluster.go new file mode 100644 index 000000000000..bf3a89b49187 --- /dev/null +++ b/internal/adapters/cloudformation/aws/documentdb/cluster.go @@ -0,0 +1,58 @@ +package documentdb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/documentdb" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getClusters(ctx parser.FileContext) (clusters []documentdb.Cluster) { + + clusterResources := ctx.GetResourcesByType("AWS::DocDB::DBCluster") + + for _, r := range clusterResources { + cluster := documentdb.Cluster{ + Metadata: r.Metadata(), + Identifier: r.GetStringProperty("DBClusterIdentifier"), + EnabledLogExports: getLogExports(r), + Instances: nil, + BackupRetentionPeriod: r.GetIntProperty("BackupRetentionPeriod", 1), + StorageEncrypted: r.GetBoolProperty("StorageEncrypted"), + KMSKeyID: r.GetStringProperty("KmsKeyId"), + } + + updateInstancesOnCluster(&cluster, ctx) + + clusters = append(clusters, cluster) + } + return clusters +} + +func updateInstancesOnCluster(cluster *documentdb.Cluster, ctx parser.FileContext) { + + instanceResources := ctx.GetResourcesByType("AWS::DocDB::DBInstance") + + for _, r := range instanceResources { + clusterIdentifier := r.GetStringProperty("DBClusterIdentifier") + if clusterIdentifier == cluster.Identifier { + cluster.Instances = append(cluster.Instances, documentdb.Instance{ + Metadata: r.Metadata(), + KMSKeyID: cluster.KMSKeyID, + }) + } + } +} + +func getLogExports(r *parser.Resource) (logExports []types.StringValue) { + + exportsList := r.GetProperty("EnableCloudwatchLogsExports") + + if exportsList.IsNil() || exportsList.IsNotList() { + return logExports + } + + for _, export := range exportsList.AsList() { + logExports = append(logExports, export.AsStringValue()) + } + return logExports +} diff --git a/internal/adapters/cloudformation/aws/documentdb/documentdb.go b/internal/adapters/cloudformation/aws/documentdb/documentdb.go new file mode 100644 index 000000000000..08d26d9976d0 --- /dev/null +++ b/internal/adapters/cloudformation/aws/documentdb/documentdb.go @@ -0,0 +1,13 @@ 
+package documentdb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/documentdb" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) documentdb.DocumentDB { + return documentdb.DocumentDB{ + Clusters: getClusters(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/dynamodb/cluster.go b/internal/adapters/cloudformation/aws/dynamodb/cluster.go new file mode 100644 index 000000000000..4f8a65a4547e --- /dev/null +++ b/internal/adapters/cloudformation/aws/dynamodb/cluster.go @@ -0,0 +1,36 @@ +package dynamodb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/dynamodb" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getClusters(file parser.FileContext) (clusters []dynamodb.DAXCluster) { + + clusterResources := file.GetResourcesByType("AWS::DAX::Cluster") + + for _, r := range clusterResources { + cluster := dynamodb.DAXCluster{ + Metadata: r.Metadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: r.Metadata(), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + KMSKeyID: defsecTypes.StringDefault("", r.Metadata()), + }, + PointInTimeRecovery: defsecTypes.BoolUnresolvable(r.Metadata()), + } + + if sseProp := r.GetProperty("SSESpecification"); sseProp.IsNotNil() { + cluster.ServerSideEncryption = dynamodb.ServerSideEncryption{ + Metadata: sseProp.Metadata(), + Enabled: r.GetBoolProperty("SSESpecification.SSEEnabled"), + KMSKeyID: defsecTypes.StringUnresolvable(sseProp.Metadata()), + } + } + + clusters = append(clusters, cluster) + } + + return clusters +} diff --git a/internal/adapters/cloudformation/aws/dynamodb/dynamodb.go b/internal/adapters/cloudformation/aws/dynamodb/dynamodb.go new file mode 100644 index 000000000000..98df1b4f10e5 --- /dev/null +++ b/internal/adapters/cloudformation/aws/dynamodb/dynamodb.go @@ -0,0 +1,13 @@ +package dynamodb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/dynamodb" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
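+// It maps AWS::DAX::Cluster resources to DAX clusters, including their
+// server-side encryption settings.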
+func Adapt(cfFile parser.FileContext) dynamodb.DynamoDB { + return dynamodb.DynamoDB{ + DAXClusters: getClusters(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/ec2/adapt_test.go b/internal/adapters/cloudformation/aws/ec2/adapt_test.go new file mode 100644 index 000000000000..aabc14b56a99 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/adapt_test.go @@ -0,0 +1,176 @@ +package ec2 + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +func TestAdapt(t *testing.T) { + tests := []struct { + name string + source string + expected ec2.EC2 + }{ + { + name: "ec2 instance", + source: `AWSTemplateFormatVersion: 2010-09-09 +Resources: + MyEC2Instance: + Type: AWS::EC2::Instance + Properties: + ImageId: "ami-79fd7eee" + KeyName: "testkey" + BlockDeviceMappings: + - DeviceName: "/dev/sdm" + Ebs: + VolumeType: "io1" + Iops: "200" + DeleteOnTermination: "false" + VolumeSize: "20" + Encrypted: true + - DeviceName: "/dev/sdk" + NoDevice: {} +`, + expected: ec2.EC2{ + Instances: []ec2.Instance{ + { + Metadata: types.NewTestMisconfigMetadata(), + MetadataOptions: ec2.MetadataOptions{ + HttpEndpoint: types.StringDefault("enabled", types.NewTestMisconfigMetadata()), + HttpTokens: types.StringDefault("optional", types.NewTestMisconfigMetadata()), + }, + RootBlockDevice: &ec2.BlockDevice{ + Metadata: types.NewTestMisconfigMetadata(), + Encrypted: types.BoolDefault(true, types.NewTestMisconfigMetadata()), + }, + EBSBlockDevices: []*ec2.BlockDevice{ + { + Metadata: types.NewTestMisconfigMetadata(), + Encrypted: types.BoolDefault(false, types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + { + name: "ec2 instance with launch template, ref to name", + source: `AWSTemplateFormatVersion: 2010-09-09 +Resources: + MyLaunchTemplate: + Type: AWS::EC2::LaunchTemplate + Properties: + LaunchTemplateName: MyTemplate + LaunchTemplateData: + MetadataOptions: + HttpEndpoint: enabled + HttpTokens: required + MyEC2Instance: + Type: AWS::EC2::Instance + Properties: + ImageId: "ami-79fd7eee" + LaunchTemplate: + LaunchTemplateName: MyTemplate +`, + expected: ec2.EC2{ + LaunchTemplates: []ec2.LaunchTemplate{ + { + Metadata: types.NewTestMisconfigMetadata(), + Name: types.String("MyTemplate", types.NewTestMisconfigMetadata()), + Instance: ec2.Instance{ + Metadata: types.NewTestMisconfigMetadata(), + MetadataOptions: ec2.MetadataOptions{ + HttpEndpoint: types.String("enabled", types.NewTestMisconfigMetadata()), + HttpTokens: types.String("required", types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + Instances: []ec2.Instance{ + { + Metadata: types.NewTestMisconfigMetadata(), + MetadataOptions: ec2.MetadataOptions{ + HttpEndpoint: types.String("enabled", types.NewTestMisconfigMetadata()), + HttpTokens: types.String("required", types.NewTestMisconfigMetadata()), + }, + RootBlockDevice: &ec2.BlockDevice{ + Metadata: types.NewTestMisconfigMetadata(), + Encrypted: types.Bool(false, types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + { + name: "ec2 instance with launch template, ref to id", + source: `AWSTemplateFormatVersion: 2010-09-09 +Resources: + MyLaunchTemplate: + Type: AWS::EC2::LaunchTemplate + Properties: + LaunchTemplateName: MyTemplate + LaunchTemplateData: + MetadataOptions: + 
HttpEndpoint: enabled + HttpTokens: required + MyEC2Instance: + Type: AWS::EC2::Instance + Properties: + ImageId: "ami-79fd7eee" + LaunchTemplate: + LaunchTemplateId: !Ref MyLaunchTemplate +`, + expected: ec2.EC2{ + LaunchTemplates: []ec2.LaunchTemplate{ + { + Metadata: types.NewTestMisconfigMetadata(), + Name: types.String("MyTemplate", types.NewTestMisconfigMetadata()), + Instance: ec2.Instance{ + Metadata: types.NewTestMisconfigMetadata(), + MetadataOptions: ec2.MetadataOptions{ + HttpEndpoint: types.String("enabled", types.NewTestMisconfigMetadata()), + HttpTokens: types.String("required", types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + Instances: []ec2.Instance{ + { + Metadata: types.NewTestMisconfigMetadata(), + MetadataOptions: ec2.MetadataOptions{ + HttpEndpoint: types.String("enabled", types.NewTestMisconfigMetadata()), + HttpTokens: types.String("required", types.NewTestMisconfigMetadata()), + }, + RootBlockDevice: &ec2.BlockDevice{ + Metadata: types.NewTestMisconfigMetadata(), + Encrypted: types.Bool(false, types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + fsys := testutil.CreateFS(t, map[string]string{ + "main.yaml": tt.source, + }) + + fctx, err := parser.New().ParseFile(context.TODO(), fsys, "main.yaml") + require.NoError(t, err) + + adapted := Adapt(*fctx) + testutil.AssertDefsecEqual(t, tt.expected, adapted) + }) + } + +} diff --git a/internal/adapters/cloudformation/aws/ec2/ec2.go b/internal/adapters/cloudformation/aws/ec2/ec2.go new file mode 100644 index 000000000000..303653f25d50 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/ec2.go @@ -0,0 +1,20 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
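+// It gathers launch configurations, launch templates, instances, network
+// ACLs, security groups, subnets and volumes from the template; VPCs are
+// left nil, as they are not adapted here.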
+func Adapt(cfFile parser.FileContext) ec2.EC2 { + return ec2.EC2{ + LaunchConfigurations: getLaunchConfigurations(cfFile), + LaunchTemplates: getLaunchTemplates(cfFile), + Instances: getInstances(cfFile), + VPCs: nil, + NetworkACLs: getNetworkACLs(cfFile), + SecurityGroups: getSecurityGroups(cfFile), + Subnets: getSubnets(cfFile), + Volumes: getVolumes(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/ec2/instance.go b/internal/adapters/cloudformation/aws/ec2/instance.go new file mode 100644 index 000000000000..caf703bf5863 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/instance.go @@ -0,0 +1,106 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getInstances(ctx parser.FileContext) (instances []ec2.Instance) { + instanceResources := ctx.GetResourcesByType("AWS::EC2::Instance") + + for _, r := range instanceResources { + instance := ec2.Instance{ + Metadata: r.Metadata(), + // metadata not supported by CloudFormation at the moment - + // https://github.com/aws-cloudformation/cloudformation-coverage-roadmap/issues/655 + MetadataOptions: ec2.MetadataOptions{ + Metadata: r.Metadata(), + HttpTokens: defsecTypes.StringDefault("optional", r.Metadata()), + HttpEndpoint: defsecTypes.StringDefault("enabled", r.Metadata()), + }, + UserData: r.GetStringProperty("UserData"), + } + + if launchTemplate, ok := findRelatedLaunchTemplate(ctx, r); ok { + instance = launchTemplate.Instance + } + + if instance.RootBlockDevice == nil { + instance.RootBlockDevice = &ec2.BlockDevice{ + Metadata: r.Metadata(), + Encrypted: defsecTypes.BoolDefault(false, r.Metadata()), + } + } + + blockDevices := getBlockDevices(r) + for i, device := range blockDevices { + copyDevice := device + if i == 0 { + instance.RootBlockDevice = copyDevice + continue + } + instance.EBSBlockDevices = append(instance.EBSBlockDevices, device) + } + instances = append(instances, instance) + } + + return instances +} + +func findRelatedLaunchTemplate(fctx parser.FileContext, r *parser.Resource) (ec2.LaunchTemplate, bool) { + launchTemplateRef := r.GetProperty("LaunchTemplate.LaunchTemplateName") + if launchTemplateRef.IsString() { + res := findLaunchTemplateByName(fctx, launchTemplateRef) + if res != nil { + return adaptLaunchTemplate(res), true + } + } + + launchTemplateRef = r.GetProperty("LaunchTemplate.LaunchTemplateId") + if !launchTemplateRef.IsString() { + return ec2.LaunchTemplate{}, false + } + + resource := fctx.GetResourceByLogicalID(launchTemplateRef.AsString()) + if resource == nil { + return ec2.LaunchTemplate{}, false + } + return adaptLaunchTemplate(resource), true +} + +func findLaunchTemplateByName(fctx parser.FileContext, prop *parser.Property) *parser.Resource { + for _, res := range fctx.GetResourcesByType("AWS::EC2::LaunchTemplate") { + templateName := res.GetProperty("LaunchTemplateName") + if templateName.IsNotString() { + continue + } + + if prop.EqualTo(templateName.AsString()) { + return res + } + } + + return nil +} + +func getBlockDevices(r *parser.Resource) []*ec2.BlockDevice { + var blockDevices []*ec2.BlockDevice + + devicesProp := r.GetProperty("BlockDeviceMappings") + + if devicesProp.IsNil() { + return blockDevices + } + + for _, d := range devicesProp.AsList() { + device := &ec2.BlockDevice{ + Metadata: d.Metadata(), + Encrypted: d.GetBoolProperty("Ebs.Encrypted"), + } + + 
blockDevices = append(blockDevices, device) + } + + return blockDevices +} diff --git a/internal/adapters/cloudformation/aws/ec2/launch_configuration.go b/internal/adapters/cloudformation/aws/ec2/launch_configuration.go new file mode 100644 index 000000000000..3e034cc8d428 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/launch_configuration.go @@ -0,0 +1,48 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getLaunchConfigurations(file parser.FileContext) (launchConfigurations []ec2.LaunchConfiguration) { + launchConfigResources := file.GetResourcesByType("AWS::AutoScaling::LaunchConfiguration") + + for _, r := range launchConfigResources { + + launchConfig := ec2.LaunchConfiguration{ + Metadata: r.Metadata(), + Name: r.GetStringProperty("Name"), + AssociatePublicIP: r.GetBoolProperty("AssociatePublicIpAddress"), + MetadataOptions: ec2.MetadataOptions{ + Metadata: r.Metadata(), + HttpTokens: types.StringDefault("optional", r.Metadata()), + HttpEndpoint: types.StringDefault("enabled", r.Metadata()), + }, + UserData: r.GetStringProperty("UserData", ""), + } + + if opts := r.GetProperty("MetadataOptions"); opts.IsNotNil() { + launchConfig.MetadataOptions = ec2.MetadataOptions{ + Metadata: opts.Metadata(), + HttpTokens: opts.GetStringProperty("HttpTokens", "optional"), + HttpEndpoint: opts.GetStringProperty("HttpEndpoint", "enabled"), + } + } + + blockDevices := getBlockDevices(r) + for i, device := range blockDevices { + copyDevice := device + if i == 0 { + launchConfig.RootBlockDevice = copyDevice + continue + } + launchConfig.EBSBlockDevices = append(launchConfig.EBSBlockDevices, device) + } + + launchConfigurations = append(launchConfigurations, launchConfig) + + } + return launchConfigurations +} diff --git a/internal/adapters/cloudformation/aws/ec2/launch_template.go b/internal/adapters/cloudformation/aws/ec2/launch_template.go new file mode 100644 index 000000000000..726b7b0e574f --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/launch_template.go @@ -0,0 +1,56 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getLaunchTemplates(file parser.FileContext) (templates []ec2.LaunchTemplate) { + launchConfigResources := file.GetResourcesByType("AWS::EC2::LaunchTemplate") + + for _, r := range launchConfigResources { + templates = append(templates, adaptLaunchTemplate(r)) + } + return templates +} + +func adaptLaunchTemplate(r *parser.Resource) ec2.LaunchTemplate { + launchTemplate := ec2.LaunchTemplate{ + Metadata: r.Metadata(), + Name: r.GetStringProperty("LaunchTemplateName", ""), + Instance: ec2.Instance{ + Metadata: r.Metadata(), + MetadataOptions: ec2.MetadataOptions{ + Metadata: r.Metadata(), + HttpTokens: types.StringDefault("optional", r.Metadata()), + HttpEndpoint: types.StringDefault("enabled", r.Metadata()), + }, + UserData: types.StringDefault("", r.Metadata()), + }, + } + + if data := r.GetProperty("LaunchTemplateData"); data.IsNotNil() { + if opts := data.GetProperty("MetadataOptions"); opts.IsNotNil() { + launchTemplate.MetadataOptions = ec2.MetadataOptions{ + Metadata: opts.Metadata(), + HttpTokens: opts.GetStringProperty("HttpTokens", "optional"), + HttpEndpoint: 
opts.GetStringProperty("HttpEndpoint", "enabled"), + } + } + + launchTemplate.Instance.UserData = data.GetStringProperty("UserData", "") + + blockDevices := getBlockDevices(r) + for i, device := range blockDevices { + copyDevice := device + if i == 0 { + launchTemplate.RootBlockDevice = copyDevice + } else { + launchTemplate.EBSBlockDevices = append(launchTemplate.EBSBlockDevices, device) + } + } + } + + return launchTemplate +} diff --git a/internal/adapters/cloudformation/aws/ec2/nacl.go b/internal/adapters/cloudformation/aws/ec2/nacl.go new file mode 100644 index 000000000000..75f782c5a6e7 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/nacl.go @@ -0,0 +1,71 @@ +package ec2 + +import ( + "strconv" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getNetworkACLs(ctx parser.FileContext) (acls []ec2.NetworkACL) { + for _, aclResource := range ctx.GetResourcesByType("AWS::EC2::NetworkAcl") { + acl := ec2.NetworkACL{ + Metadata: aclResource.Metadata(), + Rules: getRules(aclResource.ID(), ctx), + IsDefaultRule: defsecTypes.BoolDefault(false, aclResource.Metadata()), + } + acls = append(acls, acl) + } + return acls +} + +func getRules(id string, ctx parser.FileContext) (rules []ec2.NetworkACLRule) { + for _, ruleResource := range ctx.GetResourcesByType("AWS::EC2::NetworkAclEntry") { + aclID := ruleResource.GetProperty("NetworkAclId") + if aclID.IsString() && aclID.AsString() == id { + + rule := ec2.NetworkACLRule{ + Metadata: ruleResource.Metadata(), + Type: defsecTypes.StringDefault(ec2.TypeIngress, ruleResource.Metadata()), + Action: defsecTypes.StringDefault(ec2.ActionAllow, ruleResource.Metadata()), + Protocol: defsecTypes.String("-1", ruleResource.Metadata()), + CIDRs: nil, + } + + if egressProperty := ruleResource.GetProperty("Egress"); egressProperty.IsBool() { + if egressProperty.AsBool() { + rule.Type = defsecTypes.String(ec2.TypeEgress, egressProperty.Metadata()) + } else { + rule.Type = defsecTypes.String(ec2.TypeIngress, egressProperty.Metadata()) + } + } + + if actionProperty := ruleResource.GetProperty("RuleAction"); actionProperty.IsString() { + if actionProperty.AsString() == ec2.ActionAllow { + rule.Action = defsecTypes.String(ec2.ActionAllow, actionProperty.Metadata()) + } else { + rule.Action = defsecTypes.String(ec2.ActionDeny, actionProperty.Metadata()) + } + } + + if protocolProperty := ruleResource.GetProperty("Protocol"); protocolProperty.IsInt() { + protocol := protocolProperty.AsIntValue().Value() + rule.Protocol = defsecTypes.String(strconv.Itoa(protocol), protocolProperty.Metadata()) + } + + if ipv4Cidr := ruleResource.GetProperty("CidrBlock"); ipv4Cidr.IsString() { + rule.CIDRs = append(rule.CIDRs, ipv4Cidr.AsStringValue()) + } + + if ipv6Cidr := ruleResource.GetProperty("Ipv6CidrBlock"); ipv6Cidr.IsString() { + rule.CIDRs = append(rule.CIDRs, ipv6Cidr.AsStringValue()) + } + + rules = append(rules, rule) + } + } + return rules +} diff --git a/internal/adapters/cloudformation/aws/ec2/security_group.go b/internal/adapters/cloudformation/aws/ec2/security_group.go new file mode 100644 index 000000000000..d5c9f7955030 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/security_group.go @@ -0,0 +1,68 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + 
"github.com/aquasecurity/trivy/pkg/types" +) + +func getSecurityGroups(ctx parser.FileContext) (groups []ec2.SecurityGroup) { + for _, r := range ctx.GetResourcesByType("AWS::EC2::SecurityGroup") { + group := ec2.SecurityGroup{ + Metadata: r.Metadata(), + Description: r.GetStringProperty("GroupDescription"), + IngressRules: getIngressRules(r), + EgressRules: getEgressRules(r), + IsDefault: types.Bool(r.GetStringProperty("GroupName").EqualTo("default"), r.Metadata()), + VPCID: r.GetStringProperty("VpcId"), + } + + groups = append(groups, group) + } + return groups +} + +func getIngressRules(r *parser.Resource) (sgRules []ec2.SecurityGroupRule) { + if ingressProp := r.GetProperty("SecurityGroupIngress"); ingressProp.IsList() { + for _, ingress := range ingressProp.AsList() { + rule := ec2.SecurityGroupRule{ + Metadata: ingress.Metadata(), + Description: ingress.GetStringProperty("Description"), + CIDRs: nil, + } + v4Cidr := ingress.GetProperty("CidrIp") + if v4Cidr.IsString() && v4Cidr.AsStringValue().IsNotEmpty() { + rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v4Cidr.AsString(), v4Cidr.Metadata())) + } + v6Cidr := ingress.GetProperty("CidrIpv6") + if v6Cidr.IsString() && v6Cidr.AsStringValue().IsNotEmpty() { + rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v6Cidr.AsString(), v6Cidr.Metadata())) + } + + sgRules = append(sgRules, rule) + } + } + return sgRules +} + +func getEgressRules(r *parser.Resource) (sgRules []ec2.SecurityGroupRule) { + if egressProp := r.GetProperty("SecurityGroupEgress"); egressProp.IsList() { + for _, egress := range egressProp.AsList() { + rule := ec2.SecurityGroupRule{ + Metadata: egress.Metadata(), + Description: egress.GetStringProperty("Description"), + } + v4Cidr := egress.GetProperty("CidrIp") + if v4Cidr.IsString() && v4Cidr.AsStringValue().IsNotEmpty() { + rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v4Cidr.AsString(), v4Cidr.Metadata())) + } + v6Cidr := egress.GetProperty("CidrIpv6") + if v6Cidr.IsString() && v6Cidr.AsStringValue().IsNotEmpty() { + rule.CIDRs = append(rule.CIDRs, types.StringExplicit(v6Cidr.AsString(), v6Cidr.Metadata())) + } + + sgRules = append(sgRules, rule) + } + } + return sgRules +} diff --git a/internal/adapters/cloudformation/aws/ec2/subnet.go b/internal/adapters/cloudformation/aws/ec2/subnet.go new file mode 100644 index 000000000000..9a6e97b82562 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/subnet.go @@ -0,0 +1,21 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getSubnets(ctx parser.FileContext) (subnets []ec2.Subnet) { + + subnetResources := ctx.GetResourcesByType("AWS::EC2::Subnet") + for _, r := range subnetResources { + + subnet := ec2.Subnet{ + Metadata: r.Metadata(), + MapPublicIpOnLaunch: r.GetBoolProperty("MapPublicIpOnLaunch"), + } + + subnets = append(subnets, subnet) + } + return subnets +} diff --git a/internal/adapters/cloudformation/aws/ec2/volume.go b/internal/adapters/cloudformation/aws/ec2/volume.go new file mode 100644 index 000000000000..d084c18f768e --- /dev/null +++ b/internal/adapters/cloudformation/aws/ec2/volume.go @@ -0,0 +1,25 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getVolumes(ctx parser.FileContext) (volumes []ec2.Volume) { + + volumeResources := ctx.GetResourcesByType("AWS::EC2::Volume") + 
for _, r := range volumeResources { + + volume := ec2.Volume{ + Metadata: r.Metadata(), + Encryption: ec2.Encryption{ + Metadata: r.Metadata(), + Enabled: r.GetBoolProperty("Encrypted"), + KMSKeyID: r.GetStringProperty("KmsKeyId"), + }, + } + + volumes = append(volumes, volume) + } + return volumes +} diff --git a/internal/adapters/cloudformation/aws/ecr/ecr.go b/internal/adapters/cloudformation/aws/ecr/ecr.go new file mode 100644 index 000000000000..d48c8c8eaeaf --- /dev/null +++ b/internal/adapters/cloudformation/aws/ecr/ecr.go @@ -0,0 +1,13 @@ +package ecr + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ecr" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) ecr.ECR { + return ecr.ECR{ + Repositories: getRepositories(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/ecr/repository.go b/internal/adapters/cloudformation/aws/ecr/repository.go new file mode 100644 index 000000000000..b98082ca3d6d --- /dev/null +++ b/internal/adapters/cloudformation/aws/ecr/repository.go @@ -0,0 +1,93 @@ +package ecr + +import ( + "fmt" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + + "github.com/aquasecurity/trivy/pkg/providers/aws/ecr" + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + + "github.com/liamg/iamgo" +) + +func getRepositories(ctx parser.FileContext) (repositories []ecr.Repository) { + + repositoryResources := ctx.GetResourcesByType("AWS::ECR::Repository") + + for _, r := range repositoryResources { + + repository := ecr.Repository{ + Metadata: r.Metadata(), + ImageScanning: ecr.ImageScanning{ + Metadata: r.Metadata(), + ScanOnPush: defsecTypes.BoolDefault(false, r.Metadata()), + }, + ImageTagsImmutable: hasImmutableImageTags(r), + Policies: nil, + Encryption: ecr.Encryption{ + Metadata: r.Metadata(), + Type: defsecTypes.StringDefault(ecr.EncryptionTypeAES256, r.Metadata()), + KMSKeyID: defsecTypes.StringDefault("", r.Metadata()), + }, + } + + if imageScanningProp := r.GetProperty("ImageScanningConfiguration"); imageScanningProp.IsNotNil() { + repository.ImageScanning = ecr.ImageScanning{ + Metadata: imageScanningProp.Metadata(), + ScanOnPush: imageScanningProp.GetBoolProperty("ScanOnPush", false), + } + } + + if encProp := r.GetProperty("EncryptionConfiguration"); encProp.IsNotNil() { + repository.Encryption = ecr.Encryption{ + Metadata: encProp.Metadata(), + Type: encProp.GetStringProperty("EncryptionType", ecr.EncryptionTypeAES256), + KMSKeyID: encProp.GetStringProperty("KmsKey", ""), + } + } + + if policy, err := getPolicy(r); err == nil { + repository.Policies = append(repository.Policies, *policy) + } + + repositories = append(repositories, repository) + } + + return repositories +} + +func getPolicy(r *parser.Resource) (*iam.Policy, error) { + policyProp := r.GetProperty("RepositoryPolicyText") + if policyProp.IsNil() { + return nil, fmt.Errorf("missing policy") + } + + parsed, err := iamgo.Parse(policyProp.GetJsonBytes()) + if err != nil { + return nil, err + } + + return &iam.Policy{ + Metadata: policyProp.Metadata(), + Name: defsecTypes.StringDefault("", policyProp.Metadata()), + Document: iam.Document{ + Metadata: policyProp.Metadata(), + Parsed: *parsed, + }, + Builtin: defsecTypes.Bool(false, policyProp.Metadata()), + }, nil +} + +func hasImmutableImageTags(r *parser.Resource) defsecTypes.BoolValue { + mutabilityProp := 
r.GetProperty("ImageTagMutability") + if mutabilityProp.IsNil() { + return defsecTypes.BoolDefault(false, r.Metadata()) + } + if !mutabilityProp.EqualTo("IMMUTABLE") { + return defsecTypes.Bool(false, mutabilityProp.Metadata()) + } + return defsecTypes.Bool(true, mutabilityProp.Metadata()) +} diff --git a/internal/adapters/cloudformation/aws/ecs/cluster.go b/internal/adapters/cloudformation/aws/ecs/cluster.go new file mode 100644 index 000000000000..2037c7cf6e56 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ecs/cluster.go @@ -0,0 +1,57 @@ +package ecs + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ecs" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getClusters(ctx parser.FileContext) (clusters []ecs.Cluster) { + + clusterResources := ctx.GetResourcesByType("AWS::ECS::Cluster") + + for _, r := range clusterResources { + + cluster := ecs.Cluster{ + Metadata: r.Metadata(), + Settings: getClusterSettings(r), + } + + clusters = append(clusters, cluster) + + } + + return clusters +} + +func getClusterSettings(r *parser.Resource) ecs.ClusterSettings { + + clusterSettings := ecs.ClusterSettings{ + Metadata: r.Metadata(), + ContainerInsightsEnabled: types.BoolDefault(false, r.Metadata()), + } + + clusterSettingMap := r.GetProperty("ClusterSettings") + if clusterSettingMap.IsNil() || clusterSettingMap.IsNotList() { + return clusterSettings + } + + clusterSettings.Metadata = clusterSettingMap.Metadata() + + for _, setting := range clusterSettingMap.AsList() { + checkProperty(setting, &clusterSettings) + } + + return clusterSettings +} + +func checkProperty(setting *parser.Property, clusterSettings *ecs.ClusterSettings) { + settingMap := setting.AsMap() + name := settingMap["Name"] + if name.IsNotNil() && name.EqualTo("containerInsights") { + value := settingMap["Value"] + if value.IsNotNil() && value.EqualTo("enabled") { + clusterSettings.ContainerInsightsEnabled = types.Bool(true, value.Metadata()) + } + } +} diff --git a/internal/adapters/cloudformation/aws/ecs/ecs.go b/internal/adapters/cloudformation/aws/ecs/ecs.go new file mode 100644 index 000000000000..7457d6bcbd11 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ecs/ecs.go @@ -0,0 +1,14 @@ +package ecs + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ecs" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
+func Adapt(cfFile parser.FileContext) ecs.ECS { + return ecs.ECS{ + Clusters: getClusters(cfFile), + TaskDefinitions: getTaskDefinitions(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/ecs/task_definition.go b/internal/adapters/cloudformation/aws/ecs/task_definition.go new file mode 100644 index 000000000000..6b96bf0958fe --- /dev/null +++ b/internal/adapters/cloudformation/aws/ecs/task_definition.go @@ -0,0 +1,86 @@ +package ecs + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ecs" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getTaskDefinitions(ctx parser.FileContext) (taskDefinitions []ecs.TaskDefinition) { + + taskDefResources := ctx.GetResourcesByType("AWS::ECS::TaskDefinition") + + for _, r := range taskDefResources { + definitions, _ := getContainerDefinitions(r) + taskDef := ecs.TaskDefinition{ + Metadata: r.Metadata(), + Volumes: getVolumes(r), + ContainerDefinitions: definitions, + } + taskDefinitions = append(taskDefinitions, taskDef) + } + + return taskDefinitions +} + +func getContainerDefinitions(r *parser.Resource) ([]ecs.ContainerDefinition, error) { + var definitions []ecs.ContainerDefinition + containerDefs := r.GetProperty("ContainerDefinitions") + if containerDefs.IsNil() || containerDefs.IsNotList() { + return definitions, nil + } + for _, containerDef := range containerDefs.AsList() { + + var envVars []ecs.EnvVar + envVarsList := containerDef.GetProperty("Environment") + if envVarsList.IsNotNil() && envVarsList.IsList() { + for _, envVar := range envVarsList.AsList() { + envVars = append(envVars, ecs.EnvVar{ + Name: envVar.GetStringProperty("Name", "").Value(), + Value: envVar.GetStringProperty("Value", "").Value(), + }) + } + } + definition := ecs.ContainerDefinition{ + Metadata: containerDef.Metadata(), + Name: containerDef.GetStringProperty("Name", ""), + Image: containerDef.GetStringProperty("Image", ""), + CPU: containerDef.GetIntProperty("CPU", 1), + Memory: containerDef.GetIntProperty("Memory", 128), + Essential: containerDef.GetBoolProperty("Essential", false), + Privileged: containerDef.GetBoolProperty("Privileged", false), + Environment: envVars, + PortMappings: nil, + } + definitions = append(definitions, definition) + } + if containerDefs.IsNotNil() && containerDefs.IsString() { + return ecs.CreateDefinitionsFromString(r.Metadata(), containerDefs.AsString()) + } + return definitions, nil +} + +func getVolumes(r *parser.Resource) (volumes []ecs.Volume) { + + volumesList := r.GetProperty("Volumes") + if volumesList.IsNil() || volumesList.IsNotList() { + return volumes + } + + for _, v := range volumesList.AsList() { + volume := ecs.Volume{ + Metadata: r.Metadata(), + EFSVolumeConfiguration: ecs.EFSVolumeConfiguration{ + Metadata: r.Metadata(), + TransitEncryptionEnabled: types.BoolDefault(false, r.Metadata()), + }, + } + transitProp := v.GetProperty("EFSVolumeConfiguration.TransitEncryption") + if transitProp.IsNotNil() && transitProp.EqualTo("enabled", parser.IgnoreCase) { + volume.EFSVolumeConfiguration.TransitEncryptionEnabled = types.Bool(true, transitProp.Metadata()) + } + + volumes = append(volumes, volume) + } + return volumes +} diff --git a/internal/adapters/cloudformation/aws/efs/efs.go b/internal/adapters/cloudformation/aws/efs/efs.go new file mode 100644 index 000000000000..94139fa537e4 --- /dev/null +++ b/internal/adapters/cloudformation/aws/efs/efs.go @@ -0,0 +1,13 @@ +package efs + +import ( + 
"github.com/aquasecurity/trivy/pkg/providers/aws/efs" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) efs.EFS { + return efs.EFS{ + FileSystems: getFileSystems(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/efs/filesystem.go b/internal/adapters/cloudformation/aws/efs/filesystem.go new file mode 100644 index 000000000000..c6f37b433936 --- /dev/null +++ b/internal/adapters/cloudformation/aws/efs/filesystem.go @@ -0,0 +1,23 @@ +package efs + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/efs" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getFileSystems(ctx parser.FileContext) (filesystems []efs.FileSystem) { + + filesystemResources := ctx.GetResourcesByType("AWS::EFS::FileSystem") + + for _, r := range filesystemResources { + + filesystem := efs.FileSystem{ + Metadata: r.Metadata(), + Encrypted: r.GetBoolProperty("Encrypted"), + } + + filesystems = append(filesystems, filesystem) + } + + return filesystems +} diff --git a/internal/adapters/cloudformation/aws/eks/cluster.go b/internal/adapters/cloudformation/aws/eks/cluster.go new file mode 100644 index 000000000000..5cf1a1a3516c --- /dev/null +++ b/internal/adapters/cloudformation/aws/eks/cluster.go @@ -0,0 +1,56 @@ +package eks + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/eks" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getClusters(ctx parser.FileContext) (clusters []eks.Cluster) { + + clusterResources := ctx.GetResourcesByType("AWS::EKS::Cluster") + + for _, r := range clusterResources { + cluster := eks.Cluster{ + Metadata: r.Metadata(), + // Logging not supported for cloudformation https://github.com/aws/containers-roadmap/issues/242 + Logging: eks.Logging{ + Metadata: r.Metadata(), + API: defsecTypes.BoolUnresolvable(r.Metadata()), + Audit: defsecTypes.BoolUnresolvable(r.Metadata()), + Authenticator: defsecTypes.BoolUnresolvable(r.Metadata()), + ControllerManager: defsecTypes.BoolUnresolvable(r.Metadata()), + Scheduler: defsecTypes.BoolUnresolvable(r.Metadata()), + }, + Encryption: getEncryptionConfig(r), + // endpoint protection not supported - https://github.com/aws/containers-roadmap/issues/242 + PublicAccessEnabled: defsecTypes.BoolUnresolvable(r.Metadata()), + PublicAccessCIDRs: nil, + } + + clusters = append(clusters, cluster) + } + return clusters +} + +func getEncryptionConfig(r *parser.Resource) eks.Encryption { + + encryption := eks.Encryption{ + Metadata: r.Metadata(), + Secrets: defsecTypes.BoolDefault(false, r.Metadata()), + KMSKeyID: defsecTypes.StringDefault("", r.Metadata()), + } + + if encProp := r.GetProperty("EncryptionConfig"); encProp.IsNotNil() { + encryption.Metadata = encProp.Metadata() + encryption.KMSKeyID = encProp.GetStringProperty("Provider.KeyArn") + resourcesProp := encProp.GetProperty("Resources") + if resourcesProp.IsList() { + if resourcesProp.Contains("secrets") { + encryption.Secrets = defsecTypes.Bool(true, resourcesProp.Metadata()) + } + } + } + + return encryption +} diff --git a/internal/adapters/cloudformation/aws/eks/eks.go b/internal/adapters/cloudformation/aws/eks/eks.go new file mode 100644 index 000000000000..53b660ceafaa --- /dev/null +++ b/internal/adapters/cloudformation/aws/eks/eks.go @@ -0,0 +1,13 @@ +package eks + +import ( + 
"github.com/aquasecurity/trivy/pkg/providers/aws/eks" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) eks.EKS { + return eks.EKS{ + Clusters: getClusters(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/elasticache/cluster.go b/internal/adapters/cloudformation/aws/elasticache/cluster.go new file mode 100644 index 000000000000..0096e61777b0 --- /dev/null +++ b/internal/adapters/cloudformation/aws/elasticache/cluster.go @@ -0,0 +1,24 @@ +package elasticache + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticache" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getClusterGroups(ctx parser.FileContext) (clusters []elasticache.Cluster) { + + clusterResources := ctx.GetResourcesByType("AWS::ElastiCache::CacheCluster") + + for _, r := range clusterResources { + cluster := elasticache.Cluster{ + Metadata: r.Metadata(), + Engine: r.GetStringProperty("Engine"), + NodeType: r.GetStringProperty("CacheNodeType"), + SnapshotRetentionLimit: r.GetIntProperty("SnapshotRetentionLimit"), + } + + clusters = append(clusters, cluster) + } + + return clusters +} diff --git a/internal/adapters/cloudformation/aws/elasticache/elasticache.go b/internal/adapters/cloudformation/aws/elasticache/elasticache.go new file mode 100644 index 000000000000..e934e141712c --- /dev/null +++ b/internal/adapters/cloudformation/aws/elasticache/elasticache.go @@ -0,0 +1,15 @@ +package elasticache + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticache" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) elasticache.ElastiCache { + return elasticache.ElastiCache{ + Clusters: getClusterGroups(cfFile), + ReplicationGroups: getReplicationGroups(cfFile), + SecurityGroups: getSecurityGroups(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/elasticache/replication_group.go b/internal/adapters/cloudformation/aws/elasticache/replication_group.go new file mode 100644 index 000000000000..dda6f148ef7d --- /dev/null +++ b/internal/adapters/cloudformation/aws/elasticache/replication_group.go @@ -0,0 +1,23 @@ +package elasticache + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticache" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getReplicationGroups(ctx parser.FileContext) (replicationGroups []elasticache.ReplicationGroup) { + + replicationGroupResources := ctx.GetResourcesByType("AWS::ElastiCache::ReplicationGroup") + + for _, r := range replicationGroupResources { + replicationGroup := elasticache.ReplicationGroup{ + Metadata: r.Metadata(), + TransitEncryptionEnabled: r.GetBoolProperty("TransitEncryptionEnabled"), + AtRestEncryptionEnabled: r.GetBoolProperty("AtRestEncryptionEnabled"), + } + + replicationGroups = append(replicationGroups, replicationGroup) + } + + return replicationGroups +} diff --git a/internal/adapters/cloudformation/aws/elasticache/security_group.go b/internal/adapters/cloudformation/aws/elasticache/security_group.go new file mode 100644 index 000000000000..7bee357b19e5 --- /dev/null +++ b/internal/adapters/cloudformation/aws/elasticache/security_group.go @@ -0,0 +1,22 @@ +package elasticache + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticache" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + 
+func getSecurityGroups(ctx parser.FileContext) (securityGroups []elasticache.SecurityGroup) { + + sgResources := ctx.GetResourcesByType("AWS::ElastiCache::SecurityGroup") + + for _, r := range sgResources { + + sg := elasticache.SecurityGroup{ + Metadata: r.Metadata(), + Description: r.GetStringProperty("Description"), + } + securityGroups = append(securityGroups, sg) + } + + return securityGroups +} diff --git a/internal/adapters/cloudformation/aws/elasticsearch/domain.go b/internal/adapters/cloudformation/aws/elasticsearch/domain.go new file mode 100644 index 000000000000..649ff708542e --- /dev/null +++ b/internal/adapters/cloudformation/aws/elasticsearch/domain.go @@ -0,0 +1,84 @@ +package elasticsearch + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticsearch" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getDomains(ctx parser.FileContext) (domains []elasticsearch.Domain) { + + domainResources := ctx.GetResourcesByType("AWS::Elasticsearch::Domain", "AWS::OpenSearchService::Domain") + + for _, r := range domainResources { + + domain := elasticsearch.Domain{ + Metadata: r.Metadata(), + DomainName: r.GetStringProperty("DomainName"), + AccessPolicies: r.GetStringProperty("AccessPolicies"), + DedicatedMasterEnabled: r.GetBoolProperty("ElasticsearchClusterConfig.DedicatedMasterEnabled"), + VpcId: defsecTypes.String("", r.Metadata()), + LogPublishing: elasticsearch.LogPublishing{ + Metadata: r.Metadata(), + AuditEnabled: defsecTypes.BoolDefault(false, r.Metadata()), + CloudWatchLogGroupArn: defsecTypes.String("", r.Metadata()), + }, + TransitEncryption: elasticsearch.TransitEncryption{ + Metadata: r.Metadata(), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + }, + AtRestEncryption: elasticsearch.AtRestEncryption{ + Metadata: r.Metadata(), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + KmsKeyId: defsecTypes.String("", r.Metadata()), + }, + Endpoint: elasticsearch.Endpoint{ + Metadata: r.Metadata(), + EnforceHTTPS: defsecTypes.BoolDefault(false, r.Metadata()), + TLSPolicy: defsecTypes.StringDefault("Policy-Min-TLS-1-0-2019-07", r.Metadata()), + }, + ServiceSoftwareOptions: elasticsearch.ServiceSoftwareOptions{ + Metadata: r.Metadata(), + CurrentVersion: defsecTypes.String("", r.Metadata()), + NewVersion: defsecTypes.String("", r.Metadata()), + UpdateStatus: defsecTypes.String("", r.Metadata()), + UpdateAvailable: defsecTypes.Bool(false, r.Metadata()), + }, + } + + if prop := r.GetProperty("LogPublishingOptions"); prop.IsNotNil() { + domain.LogPublishing = elasticsearch.LogPublishing{ + Metadata: prop.Metadata(), + AuditEnabled: prop.GetBoolProperty("AUDIT_LOGS.Enabled", false), + CloudWatchLogGroupArn: prop.GetStringProperty("CloudWatchLogsLogGroupArn"), + } + } + + if prop := r.GetProperty("NodeToNodeEncryptionOptions"); prop.IsNotNil() { + domain.TransitEncryption = elasticsearch.TransitEncryption{ + Metadata: prop.Metadata(), + Enabled: prop.GetBoolProperty("Enabled", false), + } + } + + if prop := r.GetProperty("EncryptionAtRestOptions"); prop.IsNotNil() { + domain.AtRestEncryption = elasticsearch.AtRestEncryption{ + Metadata: prop.Metadata(), + Enabled: prop.GetBoolProperty("Enabled", false), + KmsKeyId: prop.GetStringProperty("KmsKeyId"), + } + } + + if prop := r.GetProperty("DomainEndpointOptions"); prop.IsNotNil() { + domain.Endpoint = elasticsearch.Endpoint{ + Metadata: prop.Metadata(), + EnforceHTTPS: 
prop.GetBoolProperty("EnforceHTTPS", false), + TLSPolicy: prop.GetStringProperty("TLSSecurityPolicy", "Policy-Min-TLS-1-0-2019-07"), + } + } + + domains = append(domains, domain) + } + + return domains +} diff --git a/internal/adapters/cloudformation/aws/elasticsearch/elasticsearch.go b/internal/adapters/cloudformation/aws/elasticsearch/elasticsearch.go new file mode 100644 index 000000000000..805caef3debd --- /dev/null +++ b/internal/adapters/cloudformation/aws/elasticsearch/elasticsearch.go @@ -0,0 +1,13 @@ +package elasticsearch + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticsearch" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) elasticsearch.Elasticsearch { + return elasticsearch.Elasticsearch{ + Domains: getDomains(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/elb/adapt_test.go b/internal/adapters/cloudformation/aws/elb/adapt_test.go new file mode 100644 index 000000000000..2f2a1255c82a --- /dev/null +++ b/internal/adapters/cloudformation/aws/elb/adapt_test.go @@ -0,0 +1,73 @@ +package elb + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/aws/elb" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +func TestAdapt(t *testing.T) { + tests := []struct { + name string + source string + expected elb.ELB + }{ + { + name: "LoadBalancer", + source: `AWSTemplateFormatVersion: "2010-09-09" +Resources: + LoadBalancer: + Type: AWS::ElasticLoadBalancingV2::LoadBalancer + DependsOn: + - ALBLogsBucketPermission + Properties: + Name: "k8s-dev" + IpAddressType: ipv4 + LoadBalancerAttributes: + - Key: routing.http2.enabled + Value: "true" + - Key: deletion_protection.enabled + Value: "true" + - Key: routing.http.drop_invalid_header_fields.enabled + Value: "true" + - Key: access_logs.s3.enabled + Value: "true" + Tags: + - Key: ingress.k8s.aws/resource + Value: LoadBalancer + - Key: elbv2.k8s.aws/cluster + Value: "biomage-dev" + Type: application +`, + expected: elb.ELB{ + LoadBalancers: []elb.LoadBalancer{ + { + Metadata: types.NewTestMisconfigMetadata(), + Type: types.String("application", types.NewTestMisconfigMetadata()), + DropInvalidHeaderFields: types.Bool(true, types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "template.yaml": tt.source, + }) + + p := parser.New() + fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml") + require.NoError(t, err) + + testutil.AssertDefsecEqual(t, tt.expected, Adapt(*fctx)) + }) + } +} diff --git a/internal/adapters/cloudformation/aws/elb/elb.go b/internal/adapters/cloudformation/aws/elb/elb.go new file mode 100644 index 000000000000..3c586554f6a3 --- /dev/null +++ b/internal/adapters/cloudformation/aws/elb/elb.go @@ -0,0 +1,13 @@ +package elb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/elb" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
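+// It maps AWS::ElasticLoadBalancingV2::LoadBalancer resources together
+// with their attached AWS::ElasticLoadBalancingV2::Listener resources.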
+func Adapt(cfFile parser.FileContext) elb.ELB {
+	return elb.ELB{
+		LoadBalancers: getLoadBalancers(cfFile),
+	}
+}
diff --git a/internal/adapters/cloudformation/aws/elb/loadbalancer.go b/internal/adapters/cloudformation/aws/elb/loadbalancer.go
new file mode 100644
index 000000000000..9d185d8cc38e
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/elb/loadbalancer.go
@@ -0,0 +1,81 @@
+package elb
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/elb"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func getLoadBalancers(ctx parser.FileContext) (loadBalancers []elb.LoadBalancer) {
+
+	loadBalancerResources := ctx.GetResourcesByType("AWS::ElasticLoadBalancingV2::LoadBalancer")
+
+	for _, r := range loadBalancerResources {
+		lb := elb.LoadBalancer{
+			Metadata:                r.Metadata(),
+			Type:                    r.GetStringProperty("Type", "application"),
+			DropInvalidHeaderFields: checkForDropInvalidHeaders(r),
+			Internal:                isInternal(r),
+			Listeners:               getListeners(r, ctx),
+		}
+		loadBalancers = append(loadBalancers, lb)
+	}
+
+	return loadBalancers
+}
+
+func getListeners(lbr *parser.Resource, ctx parser.FileContext) (listeners []elb.Listener) {
+
+	listenerResources := ctx.GetResourcesByType("AWS::ElasticLoadBalancingV2::Listener")
+
+	for _, r := range listenerResources {
+		if r.GetStringProperty("LoadBalancerArn").Value() == lbr.ID() {
+			listener := elb.Listener{
+				Metadata:       r.Metadata(),
+				Protocol:       r.GetStringProperty("Protocol", "HTTP"),
+				TLSPolicy:      r.GetStringProperty("SslPolicy", ""),
+				DefaultActions: getDefaultListenerActions(r),
+			}
+
+			listeners = append(listeners, listener)
+		}
+	}
+	return listeners
+}
+
+func getDefaultListenerActions(r *parser.Resource) (actions []elb.Action) {
+	defaultActionsProp := r.GetProperty("DefaultActions")
+	if defaultActionsProp.IsNotList() {
+		return actions
+	}
+	for _, action := range defaultActionsProp.AsList() {
+		actions = append(actions, elb.Action{
+			Metadata: action.Metadata(),
+			Type:     action.GetProperty("Type").AsStringValue(),
+		})
+	}
+	return actions
+}
+
+func isInternal(r *parser.Resource) types.BoolValue {
+	schemeProp := r.GetProperty("Scheme")
+	if schemeProp.IsNotString() {
+		return r.BoolDefault(false)
+	}
+	return types.Bool(schemeProp.EqualTo("internal", parser.IgnoreCase), schemeProp.Metadata())
+}
+
+func checkForDropInvalidHeaders(r *parser.Resource) types.BoolValue {
+	attributesProp := r.GetProperty("LoadBalancerAttributes")
+	if attributesProp.IsNotList() {
+		return types.BoolDefault(false, r.Metadata())
+	}
+
+	for _, attr := range attributesProp.AsList() {
+		if attr.GetStringProperty("Key").Value() == "routing.http.drop_invalid_header_fields.enabled" {
+			return attr.GetBoolProperty("Value")
+		}
+	}
+
+	return r.BoolDefault(false)
+}
diff --git a/internal/adapters/cloudformation/aws/iam/iam.go b/internal/adapters/cloudformation/aws/iam/iam.go
new file mode 100644
index 000000000000..9920f9284666
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/iam/iam.go
@@ -0,0 +1,27 @@
+package iam
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/iam"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+// Adapt ...
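+// No password policy is read from the template, so unmanaged defaults are
+// returned alongside the adapted policies, groups, users and roles.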
+func Adapt(cfFile parser.FileContext) iam.IAM {
+	return iam.IAM{
+		PasswordPolicy: iam.PasswordPolicy{
+			Metadata:             defsecTypes.NewUnmanagedMisconfigMetadata(),
+			ReusePreventionCount: defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMisconfigMetadata()),
+			RequireLowercase:     defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()),
+			RequireUppercase:     defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()),
+			RequireNumbers:       defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()),
+			RequireSymbols:       defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()),
+			MaxAgeDays:           defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMisconfigMetadata()),
+			MinimumLength:        defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMisconfigMetadata()),
+		},
+		Policies: getPolicies(cfFile),
+		Groups:   getGroups(cfFile),
+		Users:    getUsers(cfFile),
+		Roles:    getRoles(cfFile),
+	}
+}
diff --git a/internal/adapters/cloudformation/aws/iam/policy.go b/internal/adapters/cloudformation/aws/iam/policy.go
new file mode 100644
index 000000000000..cc0596ba8e83
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/iam/policy.go
@@ -0,0 +1,125 @@
+package iam
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/iam"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+	"github.com/liamg/iamgo"
+)
+
+func getPolicies(ctx parser.FileContext) (policies []iam.Policy) {
+	for _, policyResource := range ctx.GetResourcesByType("AWS::IAM::Policy") {
+
+		policy := iam.Policy{
+			Metadata: policyResource.Metadata(),
+			Name:     policyResource.GetStringProperty("PolicyName"),
+			Document: iam.Document{
+				Metadata: policyResource.Metadata(),
+				Parsed:   iamgo.Document{},
+			},
+			Builtin: defsecTypes.Bool(false, policyResource.Metadata()),
+		}
+
+		if policyProp := policyResource.GetProperty("PolicyDocument"); policyProp.IsNotNil() {
+			doc, err := iamgo.Parse(policyProp.GetJsonBytes())
+			if err != nil {
+				continue
+			}
+			policy.Document.Parsed = *doc
+		}
+
+		policies = append(policies, policy)
+	}
+	return policies
+}
+
+func getRoles(ctx parser.FileContext) (roles []iam.Role) {
+	for _, roleResource := range ctx.GetResourcesByType("AWS::IAM::Role") {
+		policyProp := roleResource.GetProperty("Policies")
+		roleName := roleResource.GetStringProperty("RoleName")
+
+		roles = append(roles, iam.Role{
+			Metadata: roleResource.Metadata(),
+			Name:     roleName,
+			Policies: getPoliciesDocs(policyProp),
+		})
+	}
+	return roles
+}
+
+func getUsers(ctx parser.FileContext) (users []iam.User) {
+	for _, userResource := range ctx.GetResourcesByType("AWS::IAM::User") {
+		policyProp := userResource.GetProperty("Policies")
+		userName := userResource.GetStringProperty("UserName")
+
+		users = append(users, iam.User{
+			Metadata:   userResource.Metadata(),
+			Name:       userName,
+			LastAccess: defsecTypes.TimeUnresolvable(userResource.Metadata()),
+			Policies:   getPoliciesDocs(policyProp),
+			AccessKeys: getAccessKeys(ctx, userName.Value()),
+		})
+	}
+	return users
+}
+
+func getAccessKeys(ctx parser.FileContext, username string) (accessKeys []iam.AccessKey) {
+	for _, keyResource := range ctx.GetResourcesByType("AWS::IAM::AccessKey") {
+		keyUsername := keyResource.GetStringProperty("UserName")
+		if !keyUsername.EqualTo(username) {
+			continue
+		}
+		active := defsecTypes.BoolDefault(false, keyResource.Metadata())
+		if statusProp := keyResource.GetProperty("Status"); statusProp.IsString() {
+			active 
= defsecTypes.Bool(statusProp.AsString() == "Active", statusProp.Metadata()) + } + + accessKeys = append(accessKeys, iam.AccessKey{ + Metadata: keyResource.Metadata(), + AccessKeyId: defsecTypes.StringUnresolvable(keyResource.Metadata()), + CreationDate: defsecTypes.TimeUnresolvable(keyResource.Metadata()), + LastAccess: defsecTypes.TimeUnresolvable(keyResource.Metadata()), + Active: active, + }) + } + return accessKeys +} + +func getGroups(ctx parser.FileContext) (groups []iam.Group) { + for _, groupResource := range ctx.GetResourcesByType("AWS::IAM::Group") { + policyProp := groupResource.GetProperty("Policies") + groupName := groupResource.GetStringProperty("GroupName") + + groups = append(groups, iam.Group{ + Metadata: groupResource.Metadata(), + Name: groupName, + Policies: getPoliciesDocs(policyProp), + }) + } + return groups +} + +func getPoliciesDocs(policiesProp *parser.Property) []iam.Policy { + var policies []iam.Policy + + for _, policy := range policiesProp.AsList() { + policyProp := policy.GetProperty("PolicyDocument") + policyName := policy.GetStringProperty("PolicyName") + + doc, err := iamgo.Parse(policyProp.GetJsonBytes()) + if err != nil { + continue + } + + policies = append(policies, iam.Policy{ + Metadata: policyProp.Metadata(), + Name: policyName, + Document: iam.Document{ + Metadata: policyProp.Metadata(), + Parsed: *doc, + }, + Builtin: defsecTypes.Bool(false, policyProp.Metadata()), + }) + } + return policies +} diff --git a/internal/adapters/cloudformation/aws/kinesis/kinesis.go b/internal/adapters/cloudformation/aws/kinesis/kinesis.go new file mode 100644 index 000000000000..b998e855c143 --- /dev/null +++ b/internal/adapters/cloudformation/aws/kinesis/kinesis.go @@ -0,0 +1,13 @@ +package kinesis + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/kinesis" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
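+// It maps each AWS::Kinesis::Stream resource, defaulting the encryption type
+// to KMS unless an explicit StreamEncryption block overrides it.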
+func Adapt(cfFile parser.FileContext) kinesis.Kinesis { + return kinesis.Kinesis{ + Streams: getStreams(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/kinesis/stream.go b/internal/adapters/cloudformation/aws/kinesis/stream.go new file mode 100644 index 000000000000..b4864c9ff63d --- /dev/null +++ b/internal/adapters/cloudformation/aws/kinesis/stream.go @@ -0,0 +1,36 @@ +package kinesis + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/kinesis" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getStreams(ctx parser.FileContext) (streams []kinesis.Stream) { + + streamResources := ctx.GetResourcesByType("AWS::Kinesis::Stream") + + for _, r := range streamResources { + + stream := kinesis.Stream{ + Metadata: r.Metadata(), + Encryption: kinesis.Encryption{ + Metadata: r.Metadata(), + Type: types.StringDefault("KMS", r.Metadata()), + KMSKeyID: types.StringDefault("", r.Metadata()), + }, + } + + if prop := r.GetProperty("StreamEncryption"); prop.IsNotNil() { + stream.Encryption = kinesis.Encryption{ + Metadata: prop.Metadata(), + Type: prop.GetStringProperty("EncryptionType", "KMS"), + KMSKeyID: prop.GetStringProperty("KeyId"), + } + } + + streams = append(streams, stream) + } + + return streams +} diff --git a/internal/adapters/cloudformation/aws/lambda/function.go b/internal/adapters/cloudformation/aws/lambda/function.go new file mode 100644 index 000000000000..f14e8142c705 --- /dev/null +++ b/internal/adapters/cloudformation/aws/lambda/function.go @@ -0,0 +1,53 @@ +package lambda + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/lambda" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getFunctions(ctx parser.FileContext) (functions []lambda.Function) { + + functionResources := ctx.GetResourcesByType("AWS::Lambda::Function") + + for _, r := range functionResources { + + function := lambda.Function{ + Metadata: r.Metadata(), + Tracing: lambda.Tracing{ + Metadata: r.Metadata(), + Mode: types.StringDefault("PassThrough", r.Metadata()), + }, + Permissions: getPermissions(r, ctx), + } + + if prop := r.GetProperty("TracingConfig"); prop.IsNotNil() { + function.Tracing = lambda.Tracing{ + Metadata: prop.Metadata(), + Mode: prop.GetStringProperty("Mode", "PassThrough"), + } + } + + functions = append(functions, function) + } + + return functions +} + +func getPermissions(funcR *parser.Resource, ctx parser.FileContext) (perms []lambda.Permission) { + + permissionResources := ctx.GetResourcesByType("AWS::Lambda::Permission") + + for _, r := range permissionResources { + if prop := r.GetStringProperty("FunctionName"); prop.EqualTo(funcR.ID()) { + perm := lambda.Permission{ + Metadata: r.Metadata(), + Principal: r.GetStringProperty("Principal"), + SourceARN: r.GetStringProperty("SourceArn"), + } + perms = append(perms, perm) + } + } + + return perms +} diff --git a/internal/adapters/cloudformation/aws/lambda/lambda.go b/internal/adapters/cloudformation/aws/lambda/lambda.go new file mode 100644 index 000000000000..12198bc16dea --- /dev/null +++ b/internal/adapters/cloudformation/aws/lambda/lambda.go @@ -0,0 +1,13 @@ +package lambda + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/lambda" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
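+// It adapts every AWS::Lambda::Function resource, including its tracing mode
+// and any AWS::Lambda::Permission resources that target the function.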
+func Adapt(cfFile parser.FileContext) lambda.Lambda { + return lambda.Lambda{ + Functions: getFunctions(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/mq/broker.go b/internal/adapters/cloudformation/aws/mq/broker.go new file mode 100644 index 000000000000..2a21be16d2b8 --- /dev/null +++ b/internal/adapters/cloudformation/aws/mq/broker.go @@ -0,0 +1,33 @@ +package mq + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/mq" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getBrokers(ctx parser.FileContext) (brokers []mq.Broker) { + for _, r := range ctx.GetResourcesByType("AWS::AmazonMQ::Broker") { + + broker := mq.Broker{ + Metadata: r.Metadata(), + PublicAccess: r.GetBoolProperty("PubliclyAccessible"), + Logging: mq.Logging{ + Metadata: r.Metadata(), + General: types.BoolDefault(false, r.Metadata()), + Audit: types.BoolDefault(false, r.Metadata()), + }, + } + + if prop := r.GetProperty("Logs"); prop.IsNotNil() { + broker.Logging = mq.Logging{ + Metadata: prop.Metadata(), + General: prop.GetBoolProperty("General"), + Audit: prop.GetBoolProperty("Audit"), + } + } + + brokers = append(brokers, broker) + } + return brokers +} diff --git a/internal/adapters/cloudformation/aws/mq/mq.go b/internal/adapters/cloudformation/aws/mq/mq.go new file mode 100644 index 000000000000..c94e5021f356 --- /dev/null +++ b/internal/adapters/cloudformation/aws/mq/mq.go @@ -0,0 +1,13 @@ +package mq + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/mq" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... +func Adapt(cfFile parser.FileContext) mq.MQ { + return mq.MQ{ + Brokers: getBrokers(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/msk/cluster.go b/internal/adapters/cloudformation/aws/msk/cluster.go new file mode 100644 index 000000000000..9cc22163eac4 --- /dev/null +++ b/internal/adapters/cloudformation/aws/msk/cluster.go @@ -0,0 +1,80 @@ +package msk + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/msk" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getClusters(ctx parser.FileContext) (clusters []msk.Cluster) { + for _, r := range ctx.GetResourcesByType("AWS::MSK::Cluster") { + + cluster := msk.Cluster{ + Metadata: r.Metadata(), + EncryptionInTransit: msk.EncryptionInTransit{ + Metadata: r.Metadata(), + ClientBroker: defsecTypes.StringDefault("TLS", r.Metadata()), + }, + EncryptionAtRest: msk.EncryptionAtRest{ + Metadata: r.Metadata(), + KMSKeyARN: defsecTypes.StringDefault("", r.Metadata()), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + }, + Logging: msk.Logging{ + Metadata: r.Metadata(), + Broker: msk.BrokerLogging{ + Metadata: r.Metadata(), + S3: msk.S3Logging{ + Metadata: r.Metadata(), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + }, + Cloudwatch: msk.CloudwatchLogging{ + Metadata: r.Metadata(), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + }, + Firehose: msk.FirehoseLogging{ + Metadata: r.Metadata(), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + }, + }, + }, + } + + if encProp := r.GetProperty("EncryptionInfo.EncryptionInTransit"); encProp.IsNotNil() { + cluster.EncryptionInTransit = msk.EncryptionInTransit{ + Metadata: encProp.Metadata(), + ClientBroker: encProp.GetStringProperty("ClientBroker", "TLS"), + } + } + + if 
encAtRestProp := r.GetProperty("EncryptionInfo.EncryptionAtRest"); encAtRestProp.IsNotNil() { + cluster.EncryptionAtRest = msk.EncryptionAtRest{ + Metadata: encAtRestProp.Metadata(), + KMSKeyARN: encAtRestProp.GetStringProperty("DataVolumeKMSKeyId", ""), + Enabled: defsecTypes.BoolDefault(true, encAtRestProp.Metadata()), + } + } + + if loggingProp := r.GetProperty("LoggingInfo"); loggingProp.IsNotNil() { + cluster.Logging.Metadata = loggingProp.Metadata() + if brokerLoggingProp := loggingProp.GetProperty("BrokerLogs"); brokerLoggingProp.IsNotNil() { + cluster.Logging.Broker.Metadata = brokerLoggingProp.Metadata() + if s3Prop := brokerLoggingProp.GetProperty("S3"); s3Prop.IsNotNil() { + cluster.Logging.Broker.S3.Metadata = s3Prop.Metadata() + cluster.Logging.Broker.S3.Enabled = s3Prop.GetBoolProperty("Enabled", false) + } + if cwProp := brokerLoggingProp.GetProperty("CloudWatchLogs"); cwProp.IsNotNil() { + cluster.Logging.Broker.Cloudwatch.Metadata = cwProp.Metadata() + cluster.Logging.Broker.Cloudwatch.Enabled = cwProp.GetBoolProperty("Enabled", false) + } + if fhProp := brokerLoggingProp.GetProperty("Firehose"); fhProp.IsNotNil() { + cluster.Logging.Broker.Firehose.Metadata = fhProp.Metadata() + cluster.Logging.Broker.Firehose.Enabled = fhProp.GetBoolProperty("Enabled", false) + } + } + } + + clusters = append(clusters, cluster) + } + return clusters +} diff --git a/internal/adapters/cloudformation/aws/msk/msk.go b/internal/adapters/cloudformation/aws/msk/msk.go new file mode 100644 index 000000000000..e46003f1905b --- /dev/null +++ b/internal/adapters/cloudformation/aws/msk/msk.go @@ -0,0 +1,13 @@ +package msk + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/msk" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
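+// It adapts each AWS::MSK::Cluster, covering in-transit and at-rest encryption
+// settings as well as S3, CloudWatch and Firehose broker log destinations.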
+func Adapt(cfFile parser.FileContext) msk.MSK { + return msk.MSK{ + Clusters: getClusters(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/neptune/cluster.go b/internal/adapters/cloudformation/aws/neptune/cluster.go new file mode 100644 index 000000000000..1b8ac30f7623 --- /dev/null +++ b/internal/adapters/cloudformation/aws/neptune/cluster.go @@ -0,0 +1,34 @@ +package neptune + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/neptune" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getClusters(ctx parser.FileContext) (clusters []neptune.Cluster) { + for _, r := range ctx.GetResourcesByType("AWS::Neptune::DBCluster") { + + cluster := neptune.Cluster{ + Metadata: r.Metadata(), + Logging: neptune.Logging{ + Metadata: r.Metadata(), + Audit: getAuditLog(r), + }, + StorageEncrypted: r.GetBoolProperty("StorageEncrypted"), + KMSKeyID: r.GetStringProperty("KmsKeyId"), + } + clusters = append(clusters, cluster) + } + return clusters +} + +func getAuditLog(r *parser.Resource) types.BoolValue { + if logsProp := r.GetProperty("EnableCloudwatchLogsExports"); logsProp.IsList() { + if logsProp.Contains("audit") { + return types.Bool(true, logsProp.Metadata()) + } + } + + return types.BoolDefault(false, r.Metadata()) +} diff --git a/internal/adapters/cloudformation/aws/neptune/neptune.go b/internal/adapters/cloudformation/aws/neptune/neptune.go new file mode 100644 index 000000000000..46898e78e403 --- /dev/null +++ b/internal/adapters/cloudformation/aws/neptune/neptune.go @@ -0,0 +1,13 @@ +package neptune + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/neptune" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
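+// It adapts each AWS::Neptune::DBCluster, reading storage encryption, the KMS
+// key and whether audit logs are exported to CloudWatch.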
+func Adapt(cfFile parser.FileContext) neptune.Neptune { + return neptune.Neptune{ + Clusters: getClusters(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/rds/adapt_test.go b/internal/adapters/cloudformation/aws/rds/adapt_test.go new file mode 100644 index 000000000000..ed40ac182e0c --- /dev/null +++ b/internal/adapters/cloudformation/aws/rds/adapt_test.go @@ -0,0 +1,158 @@ +package rds + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/aws/rds" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +func TestAdapt(t *testing.T) { + tests := []struct { + name string + source string + expected rds.RDS + }{ + { + name: "cluster with instances", + source: `AWSTemplateFormatVersion: 2010-09-09 +Resources: + RDSCluster: + Type: 'AWS::RDS::DBCluster' + Properties: + DBClusterIdentifier: my-cluster1 + Engine: aurora-postgresql + StorageEncrypted: true + KmsKeyId: "your-kms-key-id" + PerformanceInsightsEnabled: true + PerformanceInsightsKmsKeyId: "test-kms-key-id" + PublicAccess: true + DeletionProtection: true + BackupRetentionPeriod: 2 + RDSDBInstance1: + Type: 'AWS::RDS::DBInstance' + Properties: + Engine: aurora-mysql + EngineVersion: "5.7.12" + DBInstanceIdentifier: test + DBClusterIdentifier: + Ref: RDSCluster + PubliclyAccessible: 'false' + DBInstanceClass: db.r3.xlarge + StorageEncrypted: true + KmsKeyId: "your-kms-key-id" + EnablePerformanceInsights: true + PerformanceInsightsKMSKeyId: "test-kms-key-id2" + MultiAZ: true + AutoMinorVersionUpgrade: true + DBInstanceArn: "arn:aws:rds:us-east-2:123456789012:db:my-mysql-instance-1" + EnableIAMDatabaseAuthentication: true + EnableCloudwatchLogsExports: + - "error" + - "general" + DBParameterGroupName: "testgroup" + Tags: + - Key: "keyname1" + Value: "value1" + - Key: "keyname2" + Value: "value2" + RDSDBParameterGroup: + Type: 'AWS::RDS::DBParameterGroup' + Properties: + Description: "CloudFormation Sample MySQL Parameter Group" + DBParameterGroupName: "testgroup" +`, + expected: rds.RDS{ + ParameterGroups: []rds.ParameterGroups{ + { + Metadata: types.NewTestMisconfigMetadata(), + DBParameterGroupName: types.String("testgroup", types.NewTestMisconfigMetadata()), + }, + }, + Clusters: []rds.Cluster{ + { + Metadata: types.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: types.Int(2, types.NewTestMisconfigMetadata()), + Engine: types.String("aurora-postgresql", types.NewTestMisconfigMetadata()), + Encryption: rds.Encryption{ + EncryptStorage: types.Bool(true, types.NewTestMisconfigMetadata()), + KMSKeyID: types.String("your-kms-key-id", types.NewTestMisconfigMetadata()), + }, + PerformanceInsights: rds.PerformanceInsights{ + Metadata: types.NewTestMisconfigMetadata(), + Enabled: types.Bool(true, types.NewTestMisconfigMetadata()), + KMSKeyID: types.String("test-kms-key-id", types.NewTestMisconfigMetadata()), + }, + PublicAccess: types.Bool(false, types.NewTestMisconfigMetadata()), + DeletionProtection: types.Bool(true, types.NewTestMisconfigMetadata()), + Instances: []rds.ClusterInstance{ + { + Instance: rds.Instance{ + Metadata: types.NewTestMisconfigMetadata(), + StorageEncrypted: types.Bool(true, types.NewTestMisconfigMetadata()), + Encryption: rds.Encryption{ + EncryptStorage: types.Bool(true, types.NewTestMisconfigMetadata()), + KMSKeyID: types.String("your-kms-key-id", 
types.NewTestMisconfigMetadata()), + }, + DBInstanceIdentifier: types.String("test", types.NewTestMisconfigMetadata()), + PubliclyAccessible: types.Bool(false, types.NewTestMisconfigMetadata()), + PublicAccess: types.BoolDefault(false, types.NewTestMisconfigMetadata()), + BackupRetentionPeriodDays: types.IntDefault(1, types.NewTestMisconfigMetadata()), + Engine: types.StringDefault("aurora-mysql", types.NewTestMisconfigMetadata()), + EngineVersion: types.String("5.7.12", types.NewTestMisconfigMetadata()), + MultiAZ: types.Bool(true, types.NewTestMisconfigMetadata()), + AutoMinorVersionUpgrade: types.Bool(true, types.NewTestMisconfigMetadata()), + DBInstanceArn: types.String("arn:aws:rds:us-east-2:123456789012:db:my-mysql-instance-1", types.NewTestMisconfigMetadata()), + IAMAuthEnabled: types.Bool(true, types.NewTestMisconfigMetadata()), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: types.NewTestMisconfigMetadata(), + Enabled: types.Bool(true, types.NewTestMisconfigMetadata()), + KMSKeyID: types.String("test-kms-key-id2", types.NewTestMisconfigMetadata()), + }, + EnabledCloudwatchLogsExports: []types.StringValue{ + types.String("error", types.NewTestMisconfigMetadata()), + types.String("general", types.NewTestMisconfigMetadata()), + }, + DBParameterGroups: []rds.DBParameterGroupsList{ + { + DBParameterGroupName: types.String("testgroup", types.NewTestMisconfigMetadata()), + }, + }, + TagList: []rds.TagList{ + { + Metadata: types.NewTestMisconfigMetadata(), + }, + { + Metadata: types.NewTestMisconfigMetadata(), + }, + }, + }, + ClusterIdentifier: types.String("RDSCluster", types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "template.yaml": tt.source, + }) + + p := parser.New() + fctx, err := p.ParseFile(context.TODO(), fs, "template.yaml") + require.NoError(t, err) + + testutil.AssertDefsecEqual(t, tt.expected, Adapt(*fctx)) + }) + } + +} diff --git a/internal/adapters/cloudformation/aws/rds/cluster.go b/internal/adapters/cloudformation/aws/rds/cluster.go new file mode 100644 index 000000000000..9b403ed58276 --- /dev/null +++ b/internal/adapters/cloudformation/aws/rds/cluster.go @@ -0,0 +1,48 @@ +package rds + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/rds" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getClusters(ctx parser.FileContext) (clusters map[string]rds.Cluster) { + clusters = make(map[string]rds.Cluster) + for _, clusterResource := range ctx.GetResourcesByType("AWS::RDS::DBCluster") { + clusters[clusterResource.ID()] = rds.Cluster{ + Metadata: clusterResource.Metadata(), + BackupRetentionPeriodDays: clusterResource.GetIntProperty("BackupRetentionPeriod", 1), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: clusterResource.Metadata(), + Enabled: clusterResource.GetBoolProperty("PerformanceInsightsEnabled"), + KMSKeyID: clusterResource.GetStringProperty("PerformanceInsightsKmsKeyId"), + }, + Encryption: rds.Encryption{ + Metadata: clusterResource.Metadata(), + EncryptStorage: clusterResource.GetBoolProperty("StorageEncrypted"), + KMSKeyID: clusterResource.GetStringProperty("KmsKeyId"), + }, + PublicAccess: defsecTypes.BoolDefault(false, clusterResource.Metadata()), + Engine: clusterResource.GetStringProperty("Engine", rds.EngineAurora), + LatestRestorableTime: 
defsecTypes.TimeUnresolvable(clusterResource.Metadata()), + DeletionProtection: clusterResource.GetBoolProperty("DeletionProtection"), + } + } + return clusters +} + +func getClassic(ctx parser.FileContext) rds.Classic { + return rds.Classic{ + DBSecurityGroups: getClassicSecurityGroups(ctx), + } +} + +func getClassicSecurityGroups(ctx parser.FileContext) (groups []rds.DBSecurityGroup) { + for _, dbsgResource := range ctx.GetResourcesByType("AWS::RDS::DBSecurityGroup") { + group := rds.DBSecurityGroup{ + Metadata: dbsgResource.Metadata(), + } + groups = append(groups, group) + } + return groups +} diff --git a/internal/adapters/cloudformation/aws/rds/instance.go b/internal/adapters/cloudformation/aws/rds/instance.go new file mode 100644 index 000000000000..ba31387e38f3 --- /dev/null +++ b/internal/adapters/cloudformation/aws/rds/instance.go @@ -0,0 +1,130 @@ +package rds + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/rds" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getClustersAndInstances(ctx parser.FileContext) ([]rds.Cluster, []rds.Instance) { + + clusterMap := getClusters(ctx) + + var orphans []rds.Instance + + for _, r := range ctx.GetResourcesByType("AWS::RDS::DBInstance") { + + instance := rds.Instance{ + Metadata: r.Metadata(), + BackupRetentionPeriodDays: r.GetIntProperty("BackupRetentionPeriod", 1), + ReplicationSourceARN: r.GetStringProperty("SourceDBInstanceIdentifier"), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: r.Metadata(), + Enabled: r.GetBoolProperty("EnablePerformanceInsights"), + KMSKeyID: r.GetStringProperty("PerformanceInsightsKMSKeyId"), + }, + Encryption: rds.Encryption{ + Metadata: r.Metadata(), + EncryptStorage: r.GetBoolProperty("StorageEncrypted"), + KMSKeyID: r.GetStringProperty("KmsKeyId"), + }, + PublicAccess: r.GetBoolProperty("PubliclyAccessible", true), + Engine: r.GetStringProperty("Engine"), + IAMAuthEnabled: r.GetBoolProperty("EnableIAMDatabaseAuthentication"), + DeletionProtection: r.GetBoolProperty("DeletionProtection", false), + DBInstanceArn: r.GetStringProperty("DBInstanceArn"), + StorageEncrypted: r.GetBoolProperty("StorageEncrypted", false), + DBInstanceIdentifier: r.GetStringProperty("DBInstanceIdentifier"), + DBParameterGroups: getDBParameterGroups(ctx, r), + TagList: getTagList(r), + EnabledCloudwatchLogsExports: getEnabledCloudwatchLogsExports(r), + EngineVersion: r.GetStringProperty("EngineVersion"), + AutoMinorVersionUpgrade: r.GetBoolProperty("AutoMinorVersionUpgrade"), + MultiAZ: r.GetBoolProperty("MultiAZ"), + PubliclyAccessible: r.GetBoolProperty("PubliclyAccessible"), + LatestRestorableTime: types.TimeUnresolvable(r.Metadata()), + ReadReplicaDBInstanceIdentifiers: getReadReplicaDBInstanceIdentifiers(r), + } + + if clusterID := r.GetProperty("DBClusterIdentifier"); clusterID.IsString() { + if cluster, exist := clusterMap[clusterID.AsString()]; exist { + cluster.Instances = append(cluster.Instances, rds.ClusterInstance{ + Instance: instance, + ClusterIdentifier: clusterID.AsStringValue(), + }) + clusterMap[clusterID.AsString()] = cluster + } + } else { + orphans = append(orphans, instance) + } + } + + clusters := make([]rds.Cluster, 0, len(clusterMap)) + + for _, cluster := range clusterMap { + clusters = append(clusters, cluster) + } + + return clusters, orphans +} + +func getDBParameterGroups(ctx parser.FileContext, r *parser.Resource) (dbParameterGroup []rds.DBParameterGroupsList) { + + 
dbParameterGroupName := r.GetStringProperty("DBParameterGroupName")
+
+	for _, groupResource := range ctx.GetResourcesByType("AWS::RDS::DBParameterGroup") {
+		name := groupResource.GetStringProperty("DBParameterGroupName")
+		if !dbParameterGroupName.EqualTo(name.Value()) {
+			continue
+		}
+		dbpmgl := rds.DBParameterGroupsList{
+			Metadata:             groupResource.Metadata(),
+			DBParameterGroupName: name,
+			KMSKeyID:             types.StringUnresolvable(groupResource.Metadata()),
+		}
+		dbParameterGroup = append(dbParameterGroup, dbpmgl)
+	}
+
+	return dbParameterGroup
+}
+
+func getEnabledCloudwatchLogsExports(r *parser.Resource) (exports []types.StringValue) {
+	exportsProp := r.GetProperty("EnableCloudwatchLogsExports")
+
+	if exportsProp.IsNil() || exportsProp.IsNotList() {
+		return exports
+	}
+
+	for _, export := range exportsProp.AsList() {
+		exports = append(exports, export.AsStringValue())
+	}
+	return exports
+}
+
+func getTagList(r *parser.Resource) (tagList []rds.TagList) {
+	tagsProp := r.GetProperty("Tags")
+
+	if tagsProp.IsNil() || tagsProp.IsNotList() {
+		return tagList
+	}
+
+	for _, tag := range tagsProp.AsList() {
+		tagList = append(tagList, rds.TagList{
+			Metadata: tag.Metadata(),
+		})
+	}
+	return tagList
+}
+
+func getReadReplicaDBInstanceIdentifiers(r *parser.Resource) (identifiers []types.StringValue) {
+	replicaProp := r.GetProperty("SourceDBInstanceIdentifier")
+
+	if replicaProp.IsNil() || replicaProp.IsNotList() {
+		return identifiers
+	}
+
+	for _, replica := range replicaProp.AsList() {
+		identifiers = append(identifiers, replica.AsStringValue())
+	}
+	return identifiers
+}
diff --git a/internal/adapters/cloudformation/aws/rds/parameter_groups.go b/internal/adapters/cloudformation/aws/rds/parameter_groups.go
new file mode 100644
index 000000000000..5fc46bd204b2
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/rds/parameter_groups.go
@@ -0,0 +1,42 @@
+package rds
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/rds"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func getParameterGroups(ctx parser.FileContext) (parameterGroups []rds.ParameterGroups) {
+
+	for _, r := range ctx.GetResourcesByType("AWS::RDS::DBParameterGroup") {
+
+		paramGroup := rds.ParameterGroups{
+			Metadata:               r.Metadata(),
+			DBParameterGroupName:   r.GetStringProperty("DBParameterGroupName"),
+			DBParameterGroupFamily: r.GetStringProperty("DBParameterGroupFamily"),
+			Parameters:             getParameters(r),
+		}
+
+		parameterGroups = append(parameterGroups, paramGroup)
+	}
+
+	return parameterGroups
+}
+
+func getParameters(r *parser.Resource) (parameters []rds.Parameters) {
+
+	paramsProp := r.GetProperty("Parameters")
+
+	if paramsProp.IsNil() || paramsProp.IsNotList() {
+		return parameters
+	}
+
+	for _, param := range paramsProp.AsList() {
+		parameters = append(parameters, rds.Parameters{
+			Metadata:       param.Metadata(),
+			ParameterName:  types.StringDefault("", param.Metadata()),
+			ParameterValue: types.StringDefault("", param.Metadata()),
+		})
+	}
+	return parameters
+}
diff --git a/internal/adapters/cloudformation/aws/rds/rds.go b/internal/adapters/cloudformation/aws/rds/rds.go
new file mode 100644
index 000000000000..6d67d28be093
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/rds/rds.go
@@ -0,0 +1,18 @@
+package rds
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/rds"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+)
+
+// Adapt ...
+func Adapt(cfFile parser.FileContext) rds.RDS {
+	clusters, orphans := getClustersAndInstances(cfFile)
+	return rds.RDS{
+		Instances:       orphans,
+		Clusters:        clusters,
+		Classic:         getClassic(cfFile),
+		ParameterGroups: getParameterGroups(cfFile),
+		Snapshots:       nil,
+	}
+}
diff --git a/internal/adapters/cloudformation/aws/redshift/cluster.go b/internal/adapters/cloudformation/aws/redshift/cluster.go
new file mode 100644
index 000000000000..5d319897f387
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/redshift/cluster.go
@@ -0,0 +1,54 @@
+package redshift
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/redshift"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func getClusters(ctx parser.FileContext) (clusters []redshift.Cluster) {
+	for _, r := range ctx.GetResourcesByType("AWS::Redshift::Cluster") {
+
+		cluster := redshift.Cluster{
+			Metadata:                         r.Metadata(),
+			ClusterIdentifier:                r.GetStringProperty("ClusterIdentifier"),
+			AllowVersionUpgrade:              r.GetBoolProperty("AllowVersionUpgrade"),
+			NodeType:                         r.GetStringProperty("NodeType"),
+			NumberOfNodes:                    r.GetIntProperty("NumberOfNodes"),
+			PubliclyAccessible:               r.GetBoolProperty("PubliclyAccessible"),
+			MasterUsername:                   r.GetStringProperty("MasterUsername"),
+			VpcId:                            types.String("", r.Metadata()),
+			LoggingEnabled:                   types.Bool(false, r.Metadata()),
+			AutomatedSnapshotRetentionPeriod: r.GetIntProperty("AutomatedSnapshotRetentionPeriod"),
+			Encryption: redshift.Encryption{
+				Metadata: r.Metadata(),
+				Enabled:  r.GetBoolProperty("Encrypted"),
+				KMSKeyID: r.GetStringProperty("KmsKeyId"),
+			},
+			EndPoint: redshift.EndPoint{
+				Metadata: r.Metadata(),
+				Port:     r.GetIntProperty("Endpoint.Port"),
+			},
+			SubnetGroupName: r.GetStringProperty("ClusterSubnetGroupName", ""),
+		}
+
+		clusters = append(clusters, cluster)
+	}
+	return clusters
+}
+
+func getParameters(ctx parser.FileContext) (parameters []redshift.ClusterParameter) {
+
+	paraRes := ctx.GetResourcesByType("AWS::Redshift::ClusterParameterGroup")
+	for _, r := range paraRes {
+		for _, par := range r.GetProperty("Parameters").AsList() {
+			parameters = append(parameters, redshift.ClusterParameter{
+				Metadata:       par.Metadata(),
+				ParameterName:  par.GetStringProperty("ParameterName"),
+				ParameterValue: par.GetStringProperty("ParameterValue"),
+			})
+		}
+	}
+	return parameters
+}
diff --git a/internal/adapters/cloudformation/aws/redshift/redshift.go b/internal/adapters/cloudformation/aws/redshift/redshift.go
new file mode 100644
index 000000000000..d58bab8fa7f5
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/redshift/redshift.go
@@ -0,0 +1,16 @@
+package redshift
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/redshift"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+)
+
+// Adapt ...
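+// It adapts AWS::Redshift::Cluster, AWS::Redshift::ClusterSecurityGroup and
+// AWS::Redshift::ClusterParameterGroup resources; reserved nodes are not
+// adapted here and are left nil.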
+func Adapt(cfFile parser.FileContext) redshift.Redshift { + return redshift.Redshift{ + Clusters: getClusters(cfFile), + SecurityGroups: getSecurityGroups(cfFile), + ClusterParameters: getParameters(cfFile), + ReservedNodes: nil, + } +} diff --git a/internal/adapters/cloudformation/aws/redshift/security_group.go b/internal/adapters/cloudformation/aws/redshift/security_group.go new file mode 100644 index 000000000000..c37961fc00b9 --- /dev/null +++ b/internal/adapters/cloudformation/aws/redshift/security_group.go @@ -0,0 +1,17 @@ +package redshift + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/redshift" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getSecurityGroups(ctx parser.FileContext) (groups []redshift.SecurityGroup) { + for _, groupResource := range ctx.GetResourcesByType("AWS::Redshift::ClusterSecurityGroup") { + group := redshift.SecurityGroup{ + Metadata: groupResource.Metadata(), + Description: groupResource.GetProperty("Description").AsStringValue(), + } + groups = append(groups, group) + } + return groups +} diff --git a/internal/adapters/cloudformation/aws/s3/bucket.go b/internal/adapters/cloudformation/aws/s3/bucket.go new file mode 100644 index 000000000000..074adf6cd3e2 --- /dev/null +++ b/internal/adapters/cloudformation/aws/s3/bucket.go @@ -0,0 +1,148 @@ +package s3 + +import ( + "regexp" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/s3" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +var aclConvertRegex = regexp.MustCompile(`[A-Z][^A-Z]*`) + +func getBuckets(cfFile parser.FileContext) []s3.Bucket { + var buckets []s3.Bucket + bucketResources := cfFile.GetResourcesByType("AWS::S3::Bucket") + + for _, r := range bucketResources { + s3b := s3.Bucket{ + Metadata: r.Metadata(), + Name: r.GetStringProperty("BucketName"), + PublicAccessBlock: getPublicAccessBlock(r), + Encryption: getEncryption(r, cfFile), + Versioning: s3.Versioning{ + Metadata: r.Metadata(), + Enabled: hasVersioning(r), + MFADelete: defsecTypes.BoolUnresolvable(r.Metadata()), + }, + Logging: getLogging(r), + ACL: convertAclValue(r.GetStringProperty("AccessControl", "private")), + LifecycleConfiguration: getLifecycle(r), + AccelerateConfigurationStatus: r.GetStringProperty("AccelerateConfiguration.AccelerationStatus"), + Website: getWebsite(r), + BucketLocation: defsecTypes.String("", r.Metadata()), + Objects: nil, + } + + buckets = append(buckets, s3b) + } + return buckets +} + +func getPublicAccessBlock(r *parser.Resource) *s3.PublicAccessBlock { + if block := r.GetProperty("PublicAccessBlockConfiguration"); block.IsNil() { + return nil + } + + return &s3.PublicAccessBlock{ + Metadata: r.Metadata(), + BlockPublicACLs: r.GetBoolProperty("PublicAccessBlockConfiguration.BlockPublicAcls"), + BlockPublicPolicy: r.GetBoolProperty("PublicAccessBlockConfiguration.BlockPublicPolicy"), + IgnorePublicACLs: r.GetBoolProperty("PublicAccessBlockConfiguration.IgnorePublicAcls"), + RestrictPublicBuckets: r.GetBoolProperty("PublicAccessBlockConfiguration.RestrictPublicBuckets"), + } +} + +func convertAclValue(aclValue defsecTypes.StringValue) defsecTypes.StringValue { + matches := aclConvertRegex.FindAllString(aclValue.Value(), -1) + + return defsecTypes.String(strings.ToLower(strings.Join(matches, "-")), aclValue.GetMetadata()) +} + +func getLogging(r *parser.Resource) s3.Logging { + + logging := s3.Logging{ + Metadata: 
r.Metadata(),
+		Enabled:      defsecTypes.BoolDefault(false, r.Metadata()),
+		TargetBucket: defsecTypes.StringDefault("", r.Metadata()),
+	}
+
+	if config := r.GetProperty("LoggingConfiguration"); config.IsNotNil() {
+		logging.TargetBucket = config.GetStringProperty("DestinationBucketName")
+		if logging.TargetBucket.IsNotEmpty() || !logging.TargetBucket.GetMetadata().IsResolvable() {
+			logging.Enabled = defsecTypes.Bool(true, config.Metadata())
+		}
+	}
+	return logging
+}
+
+func hasVersioning(r *parser.Resource) defsecTypes.BoolValue {
+	versioningProp := r.GetProperty("VersioningConfiguration.Status")
+
+	if versioningProp.IsNil() {
+		return defsecTypes.BoolDefault(false, r.Metadata())
+	}
+
+	versioningEnabled := false
+	if versioningProp.EqualTo("Enabled") {
+		versioningEnabled = true
+
+	}
+	return defsecTypes.Bool(versioningEnabled, versioningProp.Metadata())
+}
+
+func getEncryption(r *parser.Resource, _ parser.FileContext) s3.Encryption {
+
+	encryption := s3.Encryption{
+		Metadata:  r.Metadata(),
+		Enabled:   defsecTypes.BoolDefault(false, r.Metadata()),
+		Algorithm: defsecTypes.StringDefault("", r.Metadata()),
+		KMSKeyId:  defsecTypes.StringDefault("", r.Metadata()),
+	}
+
+	if encryptProps := r.GetProperty("BucketEncryption.ServerSideEncryptionConfiguration"); encryptProps.IsNotNil() {
+		for _, rule := range encryptProps.AsList() {
+			if algo := rule.GetProperty("ServerSideEncryptionByDefault.SSEAlgorithm"); algo.EqualTo("AES256") {
+				encryption.Enabled = defsecTypes.Bool(true, algo.Metadata())
+			} else if kmsKeyProp := rule.GetProperty("ServerSideEncryptionByDefault.KMSMasterKeyID"); !kmsKeyProp.IsEmpty() && kmsKeyProp.IsString() {
+				encryption.KMSKeyId = kmsKeyProp.AsStringValue()
+			}
+			if encryption.Enabled.IsFalse() {
+				encryption.Enabled = rule.GetBoolProperty("BucketKeyEnabled", false)
+			}
+		}
+	}
+
+	return encryption
+}
+
+func getLifecycle(resource *parser.Resource) []s3.Rules {
+	lifecycleProp := resource.GetProperty("LifecycleConfiguration")
+	ruleProp := lifecycleProp.GetProperty("Rules")
+
+	var rules []s3.Rules
+
+	if ruleProp.IsNil() || ruleProp.IsNotList() {
+		return rules
+	}
+
+	for _, r := range ruleProp.AsList() {
+		rules = append(rules, s3.Rules{
+			Metadata: r.Metadata(),
+			Status:   r.GetStringProperty("Status"),
+		})
+	}
+	return rules
+}
+
+func getWebsite(r *parser.Resource) *s3.Website {
+	block := r.GetProperty("WebsiteConfiguration")
+	if block.IsNil() {
+		return nil
+	}
+	return &s3.Website{
+		Metadata: block.Metadata(),
+	}
+}
diff --git a/internal/adapters/cloudformation/aws/s3/s3.go b/internal/adapters/cloudformation/aws/s3/s3.go
new file mode 100644
index 000000000000..1bd7e00cc43e
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/s3/s3.go
@@ -0,0 +1,13 @@
+package s3
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/s3"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+)
+
+// Adapt ...
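+// It adapts every AWS::S3::Bucket resource, including its public access block,
+// encryption, versioning, logging, ACL, lifecycle and website configuration.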
+func Adapt(cfFile parser.FileContext) s3.S3 { + return s3.S3{ + Buckets: getBuckets(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/sam/api.go b/internal/adapters/cloudformation/aws/sam/api.go new file mode 100644 index 000000000000..13917c709175 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sam/api.go @@ -0,0 +1,96 @@ +package sam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/sam" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getApis(cfFile parser.FileContext) (apis []sam.API) { + + apiResources := cfFile.GetResourcesByType("AWS::Serverless::Api") + for _, r := range apiResources { + api := sam.API{ + Metadata: r.Metadata(), + Name: r.GetStringProperty("Name", ""), + TracingEnabled: r.GetBoolProperty("TracingEnabled"), + DomainConfiguration: getDomainConfiguration(r), + AccessLogging: getAccessLogging(r), + RESTMethodSettings: getRestMethodSettings(r), + } + + apis = append(apis, api) + } + + return apis +} + +func getRestMethodSettings(r *parser.Resource) sam.RESTMethodSettings { + + settings := sam.RESTMethodSettings{ + Metadata: r.Metadata(), + CacheDataEncrypted: defsecTypes.BoolDefault(false, r.Metadata()), + LoggingEnabled: defsecTypes.BoolDefault(false, r.Metadata()), + DataTraceEnabled: defsecTypes.BoolDefault(false, r.Metadata()), + MetricsEnabled: defsecTypes.BoolDefault(false, r.Metadata()), + } + + settingsProp := r.GetProperty("MethodSettings") + if settingsProp.IsNotNil() { + + settings = sam.RESTMethodSettings{ + Metadata: settingsProp.Metadata(), + CacheDataEncrypted: settingsProp.GetBoolProperty("CacheDataEncrypted"), + LoggingEnabled: defsecTypes.BoolDefault(false, settingsProp.Metadata()), + DataTraceEnabled: settingsProp.GetBoolProperty("DataTraceEnabled"), + MetricsEnabled: settingsProp.GetBoolProperty("MetricsEnabled"), + } + + if loggingLevel := settingsProp.GetProperty("LoggingLevel"); loggingLevel.IsNotNil() { + if loggingLevel.EqualTo("OFF", parser.IgnoreCase) { + settings.LoggingEnabled = defsecTypes.Bool(false, loggingLevel.Metadata()) + } else { + settings.LoggingEnabled = defsecTypes.Bool(true, loggingLevel.Metadata()) + } + } + } + + return settings +} + +func getAccessLogging(r *parser.Resource) sam.AccessLogging { + + logging := sam.AccessLogging{ + Metadata: r.Metadata(), + CloudwatchLogGroupARN: defsecTypes.StringDefault("", r.Metadata()), + } + + if access := r.GetProperty("AccessLogSetting"); access.IsNotNil() { + logging = sam.AccessLogging{ + Metadata: access.Metadata(), + CloudwatchLogGroupARN: access.GetStringProperty("DestinationArn", ""), + } + } + + return logging +} + +func getDomainConfiguration(r *parser.Resource) sam.DomainConfiguration { + + domainConfig := sam.DomainConfiguration{ + Metadata: r.Metadata(), + Name: defsecTypes.StringDefault("", r.Metadata()), + SecurityPolicy: defsecTypes.StringDefault("TLS_1_0", r.Metadata()), + } + + if domain := r.GetProperty("Domain"); domain.IsNotNil() { + domainConfig = sam.DomainConfiguration{ + Metadata: domain.Metadata(), + Name: domain.GetStringProperty("DomainName", ""), + SecurityPolicy: domain.GetStringProperty("SecurityPolicy", "TLS_1_0"), + } + } + + return domainConfig + +} diff --git a/internal/adapters/cloudformation/aws/sam/function.go b/internal/adapters/cloudformation/aws/sam/function.go new file mode 100644 index 000000000000..0ef43eb3cf29 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sam/function.go @@ -0,0 +1,58 @@ 
+package sam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/sam" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + "github.com/liamg/iamgo" +) + +func getFunctions(cfFile parser.FileContext) (functions []sam.Function) { + + functionResources := cfFile.GetResourcesByType("AWS::Serverless::Function") + for _, r := range functionResources { + function := sam.Function{ + Metadata: r.Metadata(), + FunctionName: r.GetStringProperty("FunctionName"), + Tracing: r.GetStringProperty("Tracing", sam.TracingModePassThrough), + ManagedPolicies: nil, + Policies: nil, + } + + setFunctionPolicies(r, &function) + functions = append(functions, function) + } + + return functions +} + +func setFunctionPolicies(r *parser.Resource, function *sam.Function) { + policies := r.GetProperty("Policies") + if policies.IsNotNil() { + if policies.IsString() { + function.ManagedPolicies = append(function.ManagedPolicies, policies.AsStringValue()) + } else if policies.IsList() { + for _, property := range policies.AsList() { + if property.IsMap() { + parsed, err := iamgo.Parse(property.GetJsonBytes(true)) + if err != nil { + continue + } + policy := iam.Policy{ + Metadata: property.Metadata(), + Name: defsecTypes.StringDefault("", property.Metadata()), + Document: iam.Document{ + Metadata: property.Metadata(), + Parsed: *parsed, + }, + Builtin: defsecTypes.Bool(false, property.Metadata()), + } + function.Policies = append(function.Policies, policy) + } else if property.IsString() { + function.ManagedPolicies = append(function.ManagedPolicies, property.AsStringValue()) + } + } + } + } +} diff --git a/internal/adapters/cloudformation/aws/sam/http_api.go b/internal/adapters/cloudformation/aws/sam/http_api.go new file mode 100644 index 000000000000..a83b0ec559c3 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sam/http_api.go @@ -0,0 +1,64 @@ +package sam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/sam" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getHttpApis(cfFile parser.FileContext) (apis []sam.HttpAPI) { + + apiResources := cfFile.GetResourcesByType("AWS::Serverless::HttpApi") + for _, r := range apiResources { + api := sam.HttpAPI{ + Metadata: r.Metadata(), + Name: r.GetStringProperty("Name", ""), + DomainConfiguration: getDomainConfiguration(r), + AccessLogging: getAccessLoggingV2(r), + DefaultRouteSettings: getRouteSettings(r), + } + + apis = append(apis, api) + } + + return apis +} + +func getAccessLoggingV2(r *parser.Resource) sam.AccessLogging { + + logging := sam.AccessLogging{ + Metadata: r.Metadata(), + CloudwatchLogGroupARN: types.StringDefault("", r.Metadata()), + } + + if access := r.GetProperty("AccessLogSettings"); access.IsNotNil() { + logging = sam.AccessLogging{ + Metadata: access.Metadata(), + CloudwatchLogGroupARN: access.GetStringProperty("DestinationArn", ""), + } + } + + return logging +} + +func getRouteSettings(r *parser.Resource) sam.RouteSettings { + + routeSettings := sam.RouteSettings{ + Metadata: r.Metadata(), + LoggingEnabled: types.BoolDefault(false, r.Metadata()), + DataTraceEnabled: types.BoolDefault(false, r.Metadata()), + DetailedMetricsEnabled: types.BoolDefault(false, r.Metadata()), + } + + if route := r.GetProperty("DefaultRouteSettings"); route.IsNotNil() { + routeSettings 
= sam.RouteSettings{
+			Metadata:               route.Metadata(),
+			LoggingEnabled:         types.BoolDefault(false, route.Metadata()),
+			DataTraceEnabled:       route.GetBoolProperty("DataTraceEnabled"),
+			DetailedMetricsEnabled: route.GetBoolProperty("DetailedMetricsEnabled"),
+		}
+		// LoggingLevel is a string ("OFF", "ERROR" or "INFO") rather than a
+		// boolean, so treat any level other than "OFF" as logging enabled.
+		if level := route.GetProperty("LoggingLevel"); level.IsNotNil() {
+			routeSettings.LoggingEnabled = types.Bool(!level.EqualTo("OFF", parser.IgnoreCase), level.Metadata())
+		}
+	}
+
+	return routeSettings
+
+}
diff --git a/internal/adapters/cloudformation/aws/sam/sam.go b/internal/adapters/cloudformation/aws/sam/sam.go
new file mode 100644
index 000000000000..df913b9a3abc
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/sam/sam.go
@@ -0,0 +1,17 @@
+package sam
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/sam"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+)
+
+// Adapt ...
+func Adapt(cfFile parser.FileContext) sam.SAM {
+	return sam.SAM{
+		APIs:          getApis(cfFile),
+		HttpAPIs:      getHttpApis(cfFile),
+		Functions:     getFunctions(cfFile),
+		StateMachines: getStateMachines(cfFile),
+		SimpleTables:  getSimpleTables(cfFile),
+	}
+}
diff --git a/internal/adapters/cloudformation/aws/sam/state_machines.go b/internal/adapters/cloudformation/aws/sam/state_machines.go
new file mode 100644
index 000000000000..ffc77a6fc6da
--- /dev/null
+++ b/internal/adapters/cloudformation/aws/sam/state_machines.go
@@ -0,0 +1,80 @@
+package sam
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/iam"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/sam"
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+	"github.com/liamg/iamgo"
+)
+
+func getStateMachines(cfFile parser.FileContext) (stateMachines []sam.StateMachine) {
+
+	stateMachineResources := cfFile.GetResourcesByType("AWS::Serverless::StateMachine")
+	for _, r := range stateMachineResources {
+		stateMachine := sam.StateMachine{
+			Metadata: r.Metadata(),
+			Name:     r.GetStringProperty("Name"),
+			LoggingConfiguration: sam.LoggingConfiguration{
+				Metadata:       r.Metadata(),
+				LoggingEnabled: defsecTypes.BoolDefault(false, r.Metadata()),
+			},
+			ManagedPolicies: nil,
+			Policies:        nil,
+			Tracing:         getTracingConfiguration(r),
+		}
+
+		if logging := r.GetProperty("Logging"); logging.IsNotNil() {
+			stateMachine.LoggingConfiguration.Metadata = logging.Metadata()
+			if level := logging.GetProperty("Level"); level.IsNotNil() {
+				stateMachine.LoggingConfiguration.LoggingEnabled = defsecTypes.Bool(!level.EqualTo("OFF"), level.Metadata())
+			}
+		}
+
+		setStateMachinePolicies(r, &stateMachine)
+		stateMachines = append(stateMachines, stateMachine)
+	}
+
+	return stateMachines
+}
+
+func getTracingConfiguration(r *parser.Resource) sam.TracingConfiguration {
+	tracing := r.GetProperty("Tracing")
+	if tracing.IsNil() {
+		return sam.TracingConfiguration{
+			Metadata: r.Metadata(),
+			Enabled:  defsecTypes.BoolDefault(false, r.Metadata()),
+		}
+	}
+
+	return sam.TracingConfiguration{
+		Metadata: tracing.Metadata(),
+		Enabled:  tracing.GetBoolProperty("Enabled"),
+	}
+}
+
+func setStateMachinePolicies(r *parser.Resource, stateMachine *sam.StateMachine) {
+	policies := r.GetProperty("Policies")
+	if policies.IsNotNil() {
+		if policies.IsString() {
+			stateMachine.ManagedPolicies = append(stateMachine.ManagedPolicies, policies.AsStringValue())
+		} else if policies.IsList() {
+			for _, property := range policies.AsList() {
+				parsed, err := iamgo.Parse(property.GetJsonBytes(true))
+				if err != nil {
+					continue
+				}
+				policy := iam.Policy{
+					Metadata: property.Metadata(),
+					Name:     defsecTypes.StringDefault("", property.Metadata()),
+					Document: 
iam.Document{ + Metadata: property.Metadata(), + Parsed: *parsed, + }, + Builtin: defsecTypes.Bool(false, property.Metadata()), + } + stateMachine.Policies = append(stateMachine.Policies, policy) + } + } + } +} diff --git a/internal/adapters/cloudformation/aws/sam/tables.go b/internal/adapters/cloudformation/aws/sam/tables.go new file mode 100644 index 000000000000..3ab84d790143 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sam/tables.go @@ -0,0 +1,42 @@ +package sam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/sam" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func getSimpleTables(cfFile parser.FileContext) (tables []sam.SimpleTable) { + + tableResources := cfFile.GetResourcesByType("AWS::Serverless::SimpleTable") + for _, r := range tableResources { + table := sam.SimpleTable{ + Metadata: r.Metadata(), + TableName: r.GetStringProperty("TableName"), + SSESpecification: getSSESpecification(r), + } + + tables = append(tables, table) + } + + return tables +} + +func getSSESpecification(r *parser.Resource) sam.SSESpecification { + + spec := sam.SSESpecification{ + Metadata: r.Metadata(), + Enabled: defsecTypes.BoolDefault(false, r.Metadata()), + KMSMasterKeyID: defsecTypes.StringDefault("", r.Metadata()), + } + + if sse := r.GetProperty("SSESpecification"); sse.IsNotNil() { + spec = sam.SSESpecification{ + Metadata: sse.Metadata(), + Enabled: sse.GetBoolProperty("SSEEnabled"), + KMSMasterKeyID: sse.GetStringProperty("KMSMasterKeyID"), + } + } + + return spec +} diff --git a/internal/adapters/cloudformation/aws/sns/sns.go b/internal/adapters/cloudformation/aws/sns/sns.go new file mode 100644 index 000000000000..3ec134e84316 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sns/sns.go @@ -0,0 +1,13 @@ +package sns + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/sns" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
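+// It adapts each AWS::SNS::Topic resource and records the KMS key, if any,
+// used for server-side encryption.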
+func Adapt(cfFile parser.FileContext) sns.SNS { + return sns.SNS{ + Topics: getTopics(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/sns/topic.go b/internal/adapters/cloudformation/aws/sns/topic.go new file mode 100644 index 000000000000..4dba10df6105 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sns/topic.go @@ -0,0 +1,24 @@ +package sns + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/sns" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + "github.com/aquasecurity/trivy/pkg/types" +) + +func getTopics(ctx parser.FileContext) (topics []sns.Topic) { + for _, r := range ctx.GetResourcesByType("AWS::SNS::Topic") { + + topic := sns.Topic{ + Metadata: r.Metadata(), + ARN: types.StringDefault("", r.Metadata()), + Encryption: sns.Encryption{ + Metadata: r.Metadata(), + KMSKeyID: r.GetStringProperty("KmsMasterKeyId"), + }, + } + + topics = append(topics, topic) + } + return topics +} diff --git a/internal/adapters/cloudformation/aws/sqs/queue.go b/internal/adapters/cloudformation/aws/sqs/queue.go new file mode 100644 index 000000000000..91263f2a64f0 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sqs/queue.go @@ -0,0 +1,66 @@ +package sqs + +import ( + "fmt" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" + + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/sqs" + + "github.com/liamg/iamgo" +) + +func getQueues(ctx parser.FileContext) (queues []sqs.Queue) { + for _, r := range ctx.GetResourcesByType("AWS::SQS::Queue") { + queue := sqs.Queue{ + Metadata: r.Metadata(), + QueueURL: defsecTypes.StringDefault("", r.Metadata()), + Encryption: sqs.Encryption{ + Metadata: r.Metadata(), + ManagedEncryption: defsecTypes.Bool(false, r.Metadata()), + KMSKeyID: r.GetStringProperty("KmsMasterKeyId"), + }, + Policies: []iam.Policy{}, + } + if policy, err := getPolicy(r.ID(), ctx); err == nil { + queue.Policies = append(queue.Policies, *policy) + } + queues = append(queues, queue) + } + return queues +} + +func getPolicy(id string, ctx parser.FileContext) (*iam.Policy, error) { + for _, policyResource := range ctx.GetResourcesByType("AWS::SQS::QueuePolicy") { + documentProp := policyResource.GetProperty("PolicyDocument") + if documentProp.IsNil() { + continue + } + queuesProp := policyResource.GetProperty("Queues") + if queuesProp.IsNil() { + continue + } + for _, queueRef := range queuesProp.AsList() { + if queueRef.IsString() && queueRef.AsString() == id { + raw := documentProp.GetJsonBytes() + parsed, err := iamgo.Parse(raw) + if err != nil { + continue + } + return &iam.Policy{ + Metadata: documentProp.Metadata(), + Name: defsecTypes.StringDefault("", documentProp.Metadata()), + Document: iam.Document{ + Metadata: documentProp.Metadata(), + Parsed: *parsed, + }, + Builtin: defsecTypes.Bool(false, documentProp.Metadata()), + }, nil + } + } + } + return nil, fmt.Errorf("no matching policy found") +} diff --git a/internal/adapters/cloudformation/aws/sqs/sqs.go b/internal/adapters/cloudformation/aws/sqs/sqs.go new file mode 100644 index 000000000000..db217c44a651 --- /dev/null +++ b/internal/adapters/cloudformation/aws/sqs/sqs.go @@ -0,0 +1,13 @@ +package sqs + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/sqs" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt ... 
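+// It adapts each AWS::SQS::Queue resource together with any
+// AWS::SQS::QueuePolicy document that references the queue.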
+func Adapt(cfFile parser.FileContext) sqs.SQS { + return sqs.SQS{ + Queues: getQueues(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/ssm/secret.go b/internal/adapters/cloudformation/aws/ssm/secret.go new file mode 100644 index 000000000000..f016dfa4c2bd --- /dev/null +++ b/internal/adapters/cloudformation/aws/ssm/secret.go @@ -0,0 +1,18 @@ +package ssm + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ssm" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getSecrets(ctx parser.FileContext) (secrets []ssm.Secret) { + for _, r := range ctx.GetResourcesByType("AWS::SecretsManager::Secret") { + secret := ssm.Secret{ + Metadata: r.Metadata(), + KMSKeyID: r.GetStringProperty("KmsKeyId"), + } + + secrets = append(secrets, secret) + } + return secrets +} diff --git a/internal/adapters/cloudformation/aws/ssm/ssm.go b/internal/adapters/cloudformation/aws/ssm/ssm.go new file mode 100644 index 000000000000..e262ec004f29 --- /dev/null +++ b/internal/adapters/cloudformation/aws/ssm/ssm.go @@ -0,0 +1,13 @@ +package ssm + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ssm" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt builds the SSM provider representation from the parsed CloudFormation template; AWS::SecretsManager::Secret resources are modelled under ssm. +func Adapt(cfFile parser.FileContext) ssm.SSM { + return ssm.SSM{ + Secrets: getSecrets(cfFile), + } +} diff --git a/internal/adapters/cloudformation/aws/workspaces/workspace.go b/internal/adapters/cloudformation/aws/workspaces/workspace.go new file mode 100644 index 000000000000..a21387c6f2b1 --- /dev/null +++ b/internal/adapters/cloudformation/aws/workspaces/workspace.go @@ -0,0 +1,31 @@ +package workspaces + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/workspaces" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func getWorkSpaces(ctx parser.FileContext) (workSpaces []workspaces.WorkSpace) { + for _, r := range ctx.GetResourcesByType("AWS::WorkSpaces::Workspace") { + workspace := workspaces.WorkSpace{ + Metadata: r.Metadata(), + RootVolume: workspaces.Volume{ + Metadata: r.Metadata(), + Encryption: workspaces.Encryption{ + Metadata: r.Metadata(), + Enabled: r.GetBoolProperty("RootVolumeEncryptionEnabled"), + }, + }, + UserVolume: workspaces.Volume{ + Metadata: r.Metadata(), + Encryption: workspaces.Encryption{ + Metadata: r.Metadata(), + Enabled: r.GetBoolProperty("UserVolumeEncryptionEnabled"), + }, + }, + } + + workSpaces = append(workSpaces, workspace) + } + return workSpaces +} diff --git a/internal/adapters/cloudformation/aws/workspaces/workspaces.go b/internal/adapters/cloudformation/aws/workspaces/workspaces.go new file mode 100644 index 000000000000..0d36a0d1eb4b --- /dev/null +++ b/internal/adapters/cloudformation/aws/workspaces/workspaces.go @@ -0,0 +1,13 @@ +package workspaces + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/workspaces" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +// Adapt collects every AWS::WorkSpaces::Workspace in the template into the WorkSpaces provider representation.
+func Adapt(cfFile parser.FileContext) workspaces.WorkSpaces { + return workspaces.WorkSpaces{ + WorkSpaces: getWorkSpaces(cfFile), + } +} diff --git a/internal/adapters/terraform/adapt.go b/internal/adapters/terraform/adapt.go new file mode 100644 index 000000000000..2b44889ae716 --- /dev/null +++ b/internal/adapters/terraform/adapt.go @@ -0,0 +1,31 @@ +package terraform + +import ( + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws" + "github.com/aquasecurity/trivy/internal/adapters/terraform/azure" + "github.com/aquasecurity/trivy/internal/adapters/terraform/cloudstack" + "github.com/aquasecurity/trivy/internal/adapters/terraform/digitalocean" + "github.com/aquasecurity/trivy/internal/adapters/terraform/github" + "github.com/aquasecurity/trivy/internal/adapters/terraform/google" + "github.com/aquasecurity/trivy/internal/adapters/terraform/kubernetes" + "github.com/aquasecurity/trivy/internal/adapters/terraform/nifcloud" + "github.com/aquasecurity/trivy/internal/adapters/terraform/openstack" + "github.com/aquasecurity/trivy/internal/adapters/terraform/oracle" + "github.com/aquasecurity/trivy/pkg/state" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) *state.State { + return &state.State{ + AWS: aws.Adapt(modules), + Azure: azure.Adapt(modules), + CloudStack: cloudstack.Adapt(modules), + DigitalOcean: digitalocean.Adapt(modules), + GitHub: github.Adapt(modules), + Google: google.Adapt(modules), + Kubernetes: kubernetes.Adapt(modules), + Nifcloud: nifcloud.Adapt(modules), + OpenStack: openstack.Adapt(modules), + Oracle: oracle.Adapt(modules), + } +} diff --git a/internal/adapters/terraform/aws/accessanalyzer/accessanalyzer.go b/internal/adapters/terraform/aws/accessanalyzer/accessanalyzer.go new file mode 100644 index 000000000000..fa1cc7e64741 --- /dev/null +++ b/internal/adapters/terraform/aws/accessanalyzer/accessanalyzer.go @@ -0,0 +1,40 @@ +package accessanalyzer + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/accessanalyzer" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) accessanalyzer.AccessAnalyzer { + return accessanalyzer.AccessAnalyzer{ + Analyzers: adaptAnalyzers(modules), + } +} + +func adaptAnalyzers(modules terraform.Modules) []accessanalyzer.Analyzer { + var analyzers []accessanalyzer.Analyzer + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_accessanalyzer_analyzer") { + analyzers = append(analyzers, adaptAnalyzer(resource)) + } + } + return analyzers +} + +func adaptAnalyzer(resource *terraform.Block) accessanalyzer.Analyzer { + + analyzerName := resource.GetAttribute("analyzer_name") + analyzerNameAttr := analyzerName.AsStringValueOrDefault("", resource) + + arnAnalyzer := resource.GetAttribute("arn") + arnAnalyzerAttr := arnAnalyzer.AsStringValueOrDefault("", resource) + + return accessanalyzer.Analyzer{ + Metadata: resource.GetMetadata(), + Name: analyzerNameAttr, + ARN: arnAnalyzerAttr, + Active: types.BoolDefault(false, resource.GetMetadata()), + } +} diff --git a/internal/adapters/terraform/aws/adapt.go b/internal/adapters/terraform/aws/adapt.go new file mode 100644 index 000000000000..4735034cf665 --- /dev/null +++ b/internal/adapters/terraform/aws/adapt.go @@ -0,0 +1,79 @@ +package aws + +import (
"github.com/aquasecurity/trivy/internal/adapters/terraform/aws/apigateway" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/athena" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/cloudfront" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/cloudtrail" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/cloudwatch" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/codebuild" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/config" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/documentdb" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/dynamodb" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/ec2" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/ecr" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/ecs" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/efs" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/eks" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/elasticache" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/elasticsearch" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/elb" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/emr" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/iam" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/kinesis" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/kms" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/lambda" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/mq" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/msk" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/neptune" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/provider" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/rds" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/redshift" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/s3" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/sns" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/sqs" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/ssm" + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/workspaces" + "github.com/aquasecurity/trivy/pkg/providers/aws" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) aws.AWS { + return aws.AWS{ + Meta: aws.Meta{ + TFProviders: provider.Adapt(modules), + }, + APIGateway: apigateway.Adapt(modules), + Athena: athena.Adapt(modules), + Cloudfront: cloudfront.Adapt(modules), + CloudTrail: cloudtrail.Adapt(modules), + CloudWatch: cloudwatch.Adapt(modules), + CodeBuild: codebuild.Adapt(modules), + Config: config.Adapt(modules), + DocumentDB: documentdb.Adapt(modules), + DynamoDB: dynamodb.Adapt(modules), + EC2: ec2.Adapt(modules), + ECR: ecr.Adapt(modules), + ECS: ecs.Adapt(modules), + EFS: efs.Adapt(modules), + EKS: eks.Adapt(modules), + ElastiCache: elasticache.Adapt(modules), + Elasticsearch: elasticsearch.Adapt(modules), + ELB: elb.Adapt(modules), + EMR: emr.Adapt(modules), + IAM: 
iam.Adapt(modules), + Kinesis: kinesis.Adapt(modules), + KMS: kms.Adapt(modules), + Lambda: lambda.Adapt(modules), + MQ: mq.Adapt(modules), + MSK: msk.Adapt(modules), + Neptune: neptune.Adapt(modules), + RDS: rds.Adapt(modules), + Redshift: redshift.Adapt(modules), + S3: s3.Adapt(modules), + SNS: sns.Adapt(modules), + SQS: sqs.Adapt(modules), + SSM: ssm.Adapt(modules), + WorkSpaces: workspaces.Adapt(modules), + } +} diff --git a/internal/adapters/terraform/aws/apigateway/adapt.go b/internal/adapters/terraform/aws/apigateway/adapt.go new file mode 100644 index 000000000000..a6bd12909e8f --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/adapt.go @@ -0,0 +1,21 @@ +package apigateway + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway" + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) apigateway.APIGateway { + return apigateway.APIGateway{ + V1: v1.APIGateway{ + APIs: adaptAPIsV1(modules), + DomainNames: adaptDomainNamesV1(modules), + }, + V2: v2.APIGateway{ + APIs: adaptAPIsV2(modules), + DomainNames: adaptDomainNamesV2(modules), + }, + } +} diff --git a/internal/adapters/terraform/aws/apigateway/adapt_test.go b/internal/adapters/terraform/aws/apigateway/adapt_test.go new file mode 100644 index 000000000000..6d7737a38c0f --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/adapt_test.go @@ -0,0 +1,233 @@ +package apigateway + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway" + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected apigateway.APIGateway + }{ + { + name: "basic", + terraform: ` +resource "aws_api_gateway_rest_api" "MyDemoAPI" { + name = "MyDemoAPI" + description = "This is my API for demonstration purposes" +} +resource "aws_api_gateway_resource" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id +} +resource "aws_api_gateway_method" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id + resource_id = aws_api_gateway_resource.example.id + http_method = "GET" + authorization = "NONE" +} +resource "aws_apigatewayv2_api" "example" { + name = "tfsec" + protocol_type = "HTTP" +} + + +resource "aws_apigatewayv2_stage" "example" { + api_id = aws_apigatewayv2_api.example.id + name = "tfsec" + access_log_settings { + destination_arn = "arn:123" + } +} + +resource "aws_api_gateway_domain_name" "example" { + domain_name = "v1.com" + security_policy = "TLS_1_0" +} + +resource "aws_apigatewayv2_domain_name" "example" { + domain_name = "v2.com" + domain_name_configuration { + security_policy = "TLS_1_2" + } +} +`, + expected: apigateway.APIGateway{ + V1: v1.APIGateway{ + APIs: []v1.API{ + { + Metadata: defsecTypes.MisconfigMetadata{}, + Name: String("MyDemoAPI"), + Resources: []v1.Resource{ + { + Methods: []v1.Method{ + { + HTTPMethod: String("GET"), + AuthorizationType: 
String("NONE"), + APIKeyRequired: Bool(false), + }, + }, + }, + }, + }, + }, + DomainNames: []v1.DomainName{ + { + Name: String("v1.com"), + SecurityPolicy: String("TLS_1_0"), + }, + }, + }, + V2: v2.APIGateway{ + APIs: []v2.API{ + { + Name: String("tfsec"), + ProtocolType: String("HTTP"), + Stages: []v2.Stage{ + { + Name: String("tfsec"), + AccessLogging: v2.AccessLogging{ + CloudwatchLogGroupARN: String("arn:123"), + }, + }, + }, + }, + }, + DomainNames: []v2.DomainName{ + { + Name: String("v2.com"), + SecurityPolicy: String("TLS_1_2"), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Int(i int) defsecTypes.IntValue { + return defsecTypes.Int(i, defsecTypes.NewTestMisconfigMetadata()) +} + +func Bool(b bool) defsecTypes.BoolValue { + return defsecTypes.Bool(b, defsecTypes.NewTestMisconfigMetadata()) +} + +func String(s string) defsecTypes.StringValue { + return defsecTypes.String(s, defsecTypes.NewTestMisconfigMetadata()) +} +func TestLines(t *testing.T) { + src := ` + resource "aws_api_gateway_rest_api" "MyDemoAPI" { + name = "MyDemoAPI" + description = "This is my API for demonstration purposes" + } + + resource "aws_api_gateway_resource" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id + } + + resource "aws_api_gateway_method" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id + resource_id = aws_api_gateway_resource.example.id + http_method = "GET" + authorization = "NONE" + api_key_required = true + } + + resource "aws_apigatewayv2_api" "example" { + name = "tfsec" + protocol_type = "HTTP" + } + + resource "aws_apigatewayv2_stage" "example" { + api_id = aws_apigatewayv2_api.example.id + name = "tfsec" + access_log_settings { + destination_arn = "arn:123" + } + } + + resource "aws_api_gateway_domain_name" "example" { + domain_name = "v1.com" + security_policy = "TLS_1_0" + } + + ` + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.V1.APIs, 1) + require.Len(t, adapted.V2.APIs, 1) + require.Len(t, adapted.V1.DomainNames, 1) + + apiV1 := adapted.V1.APIs[0] + apiV2 := adapted.V2.APIs[0] + domainName := adapted.V1.DomainNames[0] + + assert.Equal(t, 2, apiV1.Metadata.Range().GetStartLine()) + assert.Equal(t, 5, apiV1.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, apiV1.Name.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, apiV1.Name.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, apiV1.Resources[0].Methods[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 17, apiV1.Resources[0].Methods[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 14, apiV1.Resources[0].Methods[0].HTTPMethod.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 14, apiV1.Resources[0].Methods[0].HTTPMethod.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 15, apiV1.Resources[0].Methods[0].AuthorizationType.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 15, apiV1.Resources[0].Methods[0].AuthorizationType.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 16, apiV1.Resources[0].Methods[0].APIKeyRequired.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 16, apiV1.Resources[0].Methods[0].APIKeyRequired.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 19, apiV2.Metadata.Range().GetStartLine()) + assert.Equal(t, 22, 
apiV2.Metadata.Range().GetEndLine()) + + assert.Equal(t, 20, apiV2.Name.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 20, apiV2.Name.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 21, apiV2.ProtocolType.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 21, apiV2.ProtocolType.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 24, apiV2.Stages[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 30, apiV2.Stages[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 26, apiV2.Stages[0].Name.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 26, apiV2.Stages[0].Name.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 27, apiV2.Stages[0].AccessLogging.Metadata.Range().GetStartLine()) + assert.Equal(t, 29, apiV2.Stages[0].AccessLogging.Metadata.Range().GetEndLine()) + + assert.Equal(t, 28, apiV2.Stages[0].AccessLogging.CloudwatchLogGroupARN.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 28, apiV2.Stages[0].AccessLogging.CloudwatchLogGroupARN.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 32, domainName.Metadata.Range().GetStartLine()) + assert.Equal(t, 35, domainName.Metadata.Range().GetEndLine()) + + assert.Equal(t, 33, domainName.Name.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 33, domainName.Name.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 34, domainName.SecurityPolicy.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 34, domainName.SecurityPolicy.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/aws/apigateway/apiv1.go b/internal/adapters/terraform/aws/apigateway/apiv1.go new file mode 100644 index 000000000000..d20d363ce40b --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/apiv1.go @@ -0,0 +1,115 @@ +package apigateway + +import ( + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptAPIResourcesV1(modules terraform.Modules, apiBlock *terraform.Block) []v1.Resource { + var resources []v1.Resource + for _, resourceBlock := range modules.GetReferencingResources(apiBlock, "aws_api_gateway_resource", "rest_api_id") { + method := v1.Resource{ + Metadata: resourceBlock.GetMetadata(), + Methods: adaptAPIMethodsV1(modules, resourceBlock), + } + resources = append(resources, method) + } + return resources +} + +func adaptAPIMethodsV1(modules terraform.Modules, resourceBlock *terraform.Block) []v1.Method { + var methods []v1.Method + for _, methodBlock := range modules.GetReferencingResources(resourceBlock, "aws_api_gateway_method", "resource_id") { + method := v1.Method{ + Metadata: methodBlock.GetMetadata(), + HTTPMethod: methodBlock.GetAttribute("http_method").AsStringValueOrDefault("", methodBlock), + AuthorizationType: methodBlock.GetAttribute("authorization").AsStringValueOrDefault("", methodBlock), + APIKeyRequired: methodBlock.GetAttribute("api_key_required").AsBoolValueOrDefault(false, methodBlock), + } + methods = append(methods, method) + } + return methods +} + +func adaptAPIsV1(modules terraform.Modules) []v1.API { + + var apis []v1.API + apiStageIDs := modules.GetChildResourceIDMapByType("aws_api_gateway_stage") + + for _, apiBlock := range modules.GetResourcesByType("aws_api_gateway_rest_api") { + api := v1.API{ + Metadata: apiBlock.GetMetadata(), + Name: apiBlock.GetAttribute("name").AsStringValueOrDefault("", apiBlock), + Stages: nil, + Resources: adaptAPIResourcesV1(modules, apiBlock), + } + + 
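+ // Attach each stage that references this REST API, marking it resolved so it is not later treated as an orphan.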
for _, stageBlock := range modules.GetReferencingResources(apiBlock, "aws_api_gateway_stage", "rest_api_id") { + apiStageIDs.Resolve(stageBlock.ID()) + stage := adaptStageV1(stageBlock, modules) + + api.Stages = append(api.Stages, stage) + } + + apis = append(apis, api) + } + + orphanResources := modules.GetResourceByIDs(apiStageIDs.Orphans()...) + + if len(orphanResources) > 0 { + // Stages that reference no known REST API are grouped under a synthetic unmanaged API so they are still scanned. + orphanage := v1.API{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Name: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + } + for _, stage := range orphanResources { + orphanage.Stages = append(orphanage.Stages, adaptStageV1(stage, modules)) + } + apis = append(apis, orphanage) + } + + return apis +} + +func adaptStageV1(stageBlock *terraform.Block, modules terraform.Modules) v1.Stage { + stage := v1.Stage{ + Metadata: stageBlock.GetMetadata(), + Name: stageBlock.GetAttribute("stage_name").AsStringValueOrDefault("", stageBlock), + AccessLogging: v1.AccessLogging{ + Metadata: stageBlock.GetMetadata(), + CloudwatchLogGroupARN: defsecTypes.StringDefault("", stageBlock.GetMetadata()), + }, + XRayTracingEnabled: stageBlock.GetAttribute("xray_tracing_enabled").AsBoolValueOrDefault(false, stageBlock), + } + for _, methodSettings := range modules.GetReferencingResources(stageBlock, "aws_api_gateway_method_settings", "stage_name") { + + restMethodSettings := v1.RESTMethodSettings{ + Metadata: methodSettings.GetMetadata(), + Method: defsecTypes.String("", methodSettings.GetMetadata()), + CacheDataEncrypted: defsecTypes.BoolDefault(false, methodSettings.GetMetadata()), + CacheEnabled: defsecTypes.BoolDefault(false, methodSettings.GetMetadata()), + } + + if settings := methodSettings.GetBlock("settings"); settings.IsNotNil() { + if encrypted := settings.GetAttribute("cache_data_encrypted"); encrypted.IsNotNil() { + restMethodSettings.CacheDataEncrypted = encrypted.AsBoolValueOrDefault(false, settings) + } + if enabled := settings.GetAttribute("caching_enabled"); enabled.IsNotNil() { + restMethodSettings.CacheEnabled = enabled.AsBoolValueOrDefault(false, settings) + } + } + + stage.RESTMethodSettings = append(stage.RESTMethodSettings, restMethodSettings) + } + + if accessLogging := stageBlock.GetBlock("access_log_settings"); accessLogging.IsNotNil() { + stage.AccessLogging.Metadata = accessLogging.GetMetadata() + stage.AccessLogging.CloudwatchLogGroupARN = accessLogging.GetAttribute("destination_arn").AsStringValueOrDefault("", accessLogging) + } else { + stage.AccessLogging.Metadata = stageBlock.GetMetadata() + stage.AccessLogging.CloudwatchLogGroupARN = defsecTypes.StringDefault("", stageBlock.GetMetadata()) + } + + return stage +} diff --git a/internal/adapters/terraform/aws/apigateway/apiv1_test.go b/internal/adapters/terraform/aws/apigateway/apiv1_test.go new file mode 100644 index 000000000000..706fee63a3e2 --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/apiv1_test.go @@ -0,0 +1,125 @@ +package apigateway + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptAPIMethodsV1(t *testing.T) { + tests := []struct { + name string + terraform string + expected []v1.Method + }{ + { + name: "defaults", + terraform:
` +resource "aws_api_gateway_rest_api" "MyDemoAPI" { + name = "MyDemoAPI" + description = "This is my API for demonstration purposes" +} + +resource "aws_api_gateway_resource" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id +} + +resource "aws_api_gateway_method" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id + resource_id = aws_api_gateway_resource.example.id + http_method = "GET" + authorization = "NONE" +} +`, + expected: []v1.Method{ + { + HTTPMethod: String("GET"), + AuthorizationType: String("NONE"), + APIKeyRequired: Bool(false), + }, + }, + }, + { + name: "basic", + terraform: ` +resource "aws_api_gateway_rest_api" "MyDemoAPI" { + name = "MyDemoAPI" + description = "This is my API for demonstration purposes" +} + +resource "aws_api_gateway_resource" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id +} + +resource "aws_api_gateway_method" "example" { + rest_api_id = aws_api_gateway_rest_api.MyDemoAPI.id + resource_id = aws_api_gateway_resource.example.id + http_method = "GET" + authorization = "NONE" + api_key_required = true +} +`, + expected: []v1.Method{ + { + HTTPMethod: String("GET"), + AuthorizationType: String("NONE"), + APIKeyRequired: Bool(true), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + restApiBlock := modules.GetBlocks()[1] + adapted := adaptAPIMethodsV1(modules, restApiBlock) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptAPIsV1(t *testing.T) { + tests := []struct { + name string + terraform string + expected []v1.API + }{ + { + name: "defaults", + terraform: ` +resource "aws_api_gateway_rest_api" "example" { + +} +`, + expected: []v1.API{ + { + Name: String(""), + }, + }, + }, + { + name: "full", + terraform: ` +resource "aws_api_gateway_rest_api" "example" { + name = "tfsec" +} +`, + expected: []v1.API{ + { + Name: String("tfsec"), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptAPIsV1(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/aws/apigateway/apiv2.go b/internal/adapters/terraform/aws/apigateway/apiv2.go new file mode 100644 index 000000000000..121c5c55feac --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/apiv2.go @@ -0,0 +1,69 @@ +package apigateway + +import ( + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptAPIsV2(modules terraform.Modules) []v2.API { + + var apis []v2.API + apiStageIDs := modules.GetChildResourceIDMapByType("aws_apigatewayv2_stage") + + for _, module := range modules { + for _, apiBlock := range module.GetResourcesByType("aws_apigatewayv2_api") { + api := v2.API{ + Metadata: apiBlock.GetMetadata(), + Name: apiBlock.GetAttribute("name").AsStringValueOrDefault("", apiBlock), + ProtocolType: apiBlock.GetAttribute("protocol_type").AsStringValueOrDefault("", apiBlock), + Stages: nil, + } + + for _, stageBlock := range module.GetReferencingResources(apiBlock, "aws_apigatewayv2_stage", "api_id") { + apiStageIDs.Resolve(stageBlock.ID()) + + stage := adaptStageV2(stageBlock) + + api.Stages = append(api.Stages, stage) + } + + apis = append(apis, api) + } + } + + 
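+ // Stages whose api_id resolves to no known API are grouped under a synthetic unmanaged API so they are still scanned.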
orphanResources := modules.GetResourceByIDs(apiStageIDs.Orphans()...) + if len(orphanResources) > 0 { + orphanage := v2.API{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Name: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + ProtocolType: defsecTypes.StringUnresolvable(defsecTypes.NewUnmanagedMisconfigMetadata()), + Stages: nil, + } + for _, stage := range orphanResources { + orphanage.Stages = append(orphanage.Stages, adaptStageV2(stage)) + } + apis = append(apis, orphanage) + } + + return apis +} + +func adaptStageV2(stageBlock *terraform.Block) v2.Stage { + stage := v2.Stage{ + Metadata: stageBlock.GetMetadata(), + Name: stageBlock.GetAttribute("name").AsStringValueOrDefault("", stageBlock), + AccessLogging: v2.AccessLogging{ + Metadata: stageBlock.GetMetadata(), + CloudwatchLogGroupARN: defsecTypes.StringDefault("", stageBlock.GetMetadata()), + }, + } + if accessLogging := stageBlock.GetBlock("access_log_settings"); accessLogging.IsNotNil() { + stage.AccessLogging.Metadata = accessLogging.GetMetadata() + stage.AccessLogging.CloudwatchLogGroupARN = accessLogging.GetAttribute("destination_arn").AsStringValueOrDefault("", accessLogging) + } else { + stage.AccessLogging.Metadata = stageBlock.GetMetadata() + stage.AccessLogging.CloudwatchLogGroupARN = defsecTypes.StringDefault("", stageBlock.GetMetadata()) + } + return stage +} diff --git a/internal/adapters/terraform/aws/apigateway/apiv2_test.go b/internal/adapters/terraform/aws/apigateway/apiv2_test.go new file mode 100644 index 000000000000..cb25e4b948bf --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/apiv2_test.go @@ -0,0 +1,103 @@ +package apigateway + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptAPIsV2(t *testing.T) { + tests := []struct { + name string + terraform string + expected []v2.API + }{ + { + name: "defaults", + terraform: ` +resource "aws_apigatewayv2_api" "example" { + protocol_type = "HTTP" +} +`, + expected: []v2.API{ + { + Name: String(""), + ProtocolType: String("HTTP"), + }, + }, + }, + { + name: "full", + terraform: ` +resource "aws_apigatewayv2_api" "example" { + name = "tfsec" + protocol_type = "HTTP" +} +`, + expected: []v2.API{ + { + Name: String("tfsec"), + ProtocolType: String("HTTP"), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptAPIsV2(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptStageV2(t *testing.T) { + tests := []struct { + name string + terraform string + expected v2.Stage + }{ + { + name: "defaults", + terraform: ` +resource "aws_apigatewayv2_stage" "example" { + +} +`, + expected: v2.Stage{ + Name: String(""), + AccessLogging: v2.AccessLogging{ + CloudwatchLogGroupARN: String(""), + }, + }, + }, + { + name: "basics", + terraform: ` +resource "aws_apigatewayv2_stage" "example" { + name = "tfsec" + access_log_settings { + destination_arn = "arn:123" + } +} +`, + expected: v2.Stage{ + Name: String("tfsec"), + AccessLogging: v2.AccessLogging{ + CloudwatchLogGroupARN: String("arn:123"), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + 
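+ // the stage is the only resource in the fixture, so it is the first parsed block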
adapted := adaptStageV2(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/aws/apigateway/namesv1.go b/internal/adapters/terraform/aws/apigateway/namesv1.go new file mode 100644 index 000000000000..7a92c519d129 --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/namesv1.go @@ -0,0 +1,24 @@ +package apigateway + +import ( + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptDomainNamesV1(modules terraform.Modules) []v1.DomainName { + + var domainNames []v1.DomainName + + for _, module := range modules { + for _, nameBlock := range module.GetResourcesByType("aws_api_gateway_domain_name") { + domainName := v1.DomainName{ + Metadata: nameBlock.GetMetadata(), + Name: nameBlock.GetAttribute("domain_name").AsStringValueOrDefault("", nameBlock), + SecurityPolicy: nameBlock.GetAttribute("security_policy").AsStringValueOrDefault("TLS_1_0", nameBlock), + } + domainNames = append(domainNames, domainName) + } + } + + return domainNames +} diff --git a/internal/adapters/terraform/aws/apigateway/namesv1_test.go b/internal/adapters/terraform/aws/apigateway/namesv1_test.go new file mode 100644 index 000000000000..b8409065863d --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/namesv1_test.go @@ -0,0 +1,54 @@ +package apigateway + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptDomainNamesV1(t *testing.T) { + tests := []struct { + name string + terraform string + expected []v1.DomainName + }{ + { + name: "defaults", + terraform: ` +resource "aws_api_gateway_domain_name" "example" { +} +`, + expected: []v1.DomainName{ + { + Name: String(""), + SecurityPolicy: String("TLS_1_0"), + }, + }, + }, + { + name: "basic", + terraform: ` +resource "aws_api_gateway_domain_name" "example" { + domain_name = "testing.com" + security_policy = "TLS_1_2" +} +`, + expected: []v1.DomainName{ + { + Name: String("testing.com"), + SecurityPolicy: String("TLS_1_2"), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptDomainNamesV1(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/aws/apigateway/namesv2.go b/internal/adapters/terraform/aws/apigateway/namesv2.go new file mode 100644 index 000000000000..92bb981ef67b --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/namesv2.go @@ -0,0 +1,28 @@ +package apigateway + +import ( + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptDomainNamesV2(modules terraform.Modules) []v2.DomainName { + + var domainNames []v2.DomainName + + for _, module := range modules { + for _, nameBlock := range module.GetResourcesByType("aws_apigatewayv2_domain_name") { + domainName := v2.DomainName{ + Metadata: nameBlock.GetMetadata(), + Name: nameBlock.GetAttribute("domain_name").AsStringValueOrDefault("", nameBlock), + SecurityPolicy: types.StringDefault("TLS_1_0", nameBlock.GetMetadata()), + } + if config := 
nameBlock.GetBlock("domain_name_configuration"); config.IsNotNil() { + domainName.SecurityPolicy = config.GetAttribute("security_policy").AsStringValueOrDefault("TLS_1_0", config) + } + domainNames = append(domainNames, domainName) + } + } + + return domainNames +} diff --git a/internal/adapters/terraform/aws/apigateway/namesv2_test.go b/internal/adapters/terraform/aws/apigateway/namesv2_test.go new file mode 100644 index 000000000000..be146c8b516e --- /dev/null +++ b/internal/adapters/terraform/aws/apigateway/namesv2_test.go @@ -0,0 +1,56 @@ +package apigateway + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptDomainNamesV2(t *testing.T) { + tests := []struct { + name string + terraform string + expected []v2.DomainName + }{ + { + name: "defaults", + terraform: ` +resource "aws_apigatewayv2_domain_name" "example" { +} +`, + expected: []v2.DomainName{ + { + Name: String(""), + SecurityPolicy: String("TLS_1_0"), + }, + }, + }, + { + name: "fully populated", + terraform: ` +resource "aws_apigatewayv2_domain_name" "example" { + domain_name = "testing.com" + domain_name_configuration { + security_policy = "TLS_1_2" + } +} +`, + expected: []v2.DomainName{ + { + Name: String("testing.com"), + SecurityPolicy: String("TLS_1_2"), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptDomainNamesV2(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/aws/athena/adapt.go b/internal/adapters/terraform/aws/athena/adapt.go new file mode 100644 index 000000000000..70fd7298c8e9 --- /dev/null +++ b/internal/adapters/terraform/aws/athena/adapt.go @@ -0,0 +1,80 @@ +package athena + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/athena" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) athena.Athena { + return athena.Athena{ + Databases: adaptDatabases(modules), + Workgroups: adaptWorkgroups(modules), + } +} + +func adaptDatabases(modules terraform.Modules) []athena.Database { + var databases []athena.Database + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_athena_database") { + databases = append(databases, adaptDatabase(resource)) + } + } + return databases +} + +func adaptWorkgroups(modules terraform.Modules) []athena.Workgroup { + var workgroups []athena.Workgroup + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_athena_workgroup") { + workgroups = append(workgroups, adaptWorkgroup(resource)) + } + } + return workgroups +} + +func adaptDatabase(resource *terraform.Block) athena.Database { + database := athena.Database{ + Metadata: resource.GetMetadata(), + Name: resource.GetAttribute("name").AsStringValueOrDefault("", resource), + Encryption: athena.EncryptionConfiguration{ + Metadata: resource.GetMetadata(), + Type: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + } + if encryptionConfigBlock := resource.GetBlock("encryption_configuration"); encryptionConfigBlock.IsNotNil() { + database.Encryption.Metadata = encryptionConfigBlock.GetMetadata() + encryptionOptionAttr := 
encryptionConfigBlock.GetAttribute("encryption_option") + database.Encryption.Type = encryptionOptionAttr.AsStringValueOrDefault("", encryptionConfigBlock) + } + + return database +} + +func adaptWorkgroup(resource *terraform.Block) athena.Workgroup { + workgroup := athena.Workgroup{ + Metadata: resource.GetMetadata(), + Name: resource.GetAttribute("name").AsStringValueOrDefault("", resource), + Encryption: athena.EncryptionConfiguration{ + Metadata: resource.GetMetadata(), + Type: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + EnforceConfiguration: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } + + if configBlock := resource.GetBlock("configuration"); configBlock.IsNotNil() { + + // the AWS provider defaults enforce_workgroup_configuration to true when a configuration block is present + enforceWGConfigAttr := configBlock.GetAttribute("enforce_workgroup_configuration") + workgroup.EnforceConfiguration = enforceWGConfigAttr.AsBoolValueOrDefault(true, configBlock) + + if resultConfigBlock := configBlock.GetBlock("result_configuration"); resultConfigBlock.IsNotNil() { + if encryptionConfigBlock := resultConfigBlock.GetBlock("encryption_configuration"); encryptionConfigBlock.IsNotNil() { + encryptionOptionAttr := encryptionConfigBlock.GetAttribute("encryption_option") + workgroup.Encryption.Metadata = encryptionConfigBlock.GetMetadata() + workgroup.Encryption.Type = encryptionOptionAttr.AsStringValueOrDefault("", encryptionConfigBlock) + } + } + } + + return workgroup +} diff --git a/internal/adapters/terraform/aws/athena/adapt_test.go b/internal/adapters/terraform/aws/athena/adapt_test.go new file mode 100644 index 000000000000..10fbd8c779bd --- /dev/null +++ b/internal/adapters/terraform/aws/athena/adapt_test.go @@ -0,0 +1,211 @@ +package athena + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/athena" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptDatabase(t *testing.T) { + tests := []struct { + name string + terraform string + expected athena.Database + }{ + { + name: "athena database", + terraform: ` + resource "aws_athena_database" "my_wg" { + name = "database_name" + + encryption_configuration { + encryption_option = "SSE_KMS" + } + } +`, + expected: athena.Database{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("database_name", defsecTypes.NewTestMisconfigMetadata()), + Encryption: athena.EncryptionConfiguration{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String(athena.EncryptionTypeSSEKMS, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptDatabase(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptWorkgroup(t *testing.T) { + tests := []struct { + name string + terraform string + expected athena.Workgroup + }{ + { + name: "encryption type SSE KMS", + terraform: ` + resource "aws_athena_workgroup" "my_wg" { + name = "example" + + configuration { + enforce_workgroup_configuration = true + + result_configuration { + encryption_configuration { + encryption_option = "SSE_KMS" + } + } + } + } +`, + expected: athena.Workgroup{ + Metadata: defsecTypes.NewTestMisconfigMetadata(),
Name: defsecTypes.String("example", defsecTypes.NewTestMisconfigMetadata()), + Encryption: athena.EncryptionConfiguration{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String(athena.EncryptionTypeSSEKMS, defsecTypes.NewTestMisconfigMetadata()), + }, + EnforceConfiguration: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "configuration not enforced", + terraform: ` + resource "aws_athena_workgroup" "my_wg" { + name = "example" + + configuration { + enforce_workgroup_configuration = false + + result_configuration { + encryption_configuration { + encryption_option = "SSE_KMS" + } + } + } + } +`, + expected: athena.Workgroup{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("example", defsecTypes.NewTestMisconfigMetadata()), + Encryption: athena.EncryptionConfiguration{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String(athena.EncryptionTypeSSEKMS, defsecTypes.NewTestMisconfigMetadata()), + }, + EnforceConfiguration: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "enforce configuration defaults to true", + terraform: ` + resource "aws_athena_workgroup" "my_wg" { + name = "example" + + configuration { + result_configuration { + encryption_configuration { + encryption_option = "" + } + } + } + } +`, + expected: athena.Workgroup{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("example", defsecTypes.NewTestMisconfigMetadata()), + Encryption: athena.EncryptionConfiguration{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String(athena.EncryptionTypeNone, defsecTypes.NewTestMisconfigMetadata()), + }, + EnforceConfiguration: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "missing configuration block", + terraform: ` + resource "aws_athena_workgroup" "my_wg" { + name = "example" + } +`, + expected: athena.Workgroup{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("example", defsecTypes.NewTestMisconfigMetadata()), + Encryption: athena.EncryptionConfiguration{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String(athena.EncryptionTypeNone, defsecTypes.NewTestMisconfigMetadata()), + }, + EnforceConfiguration: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptWorkgroup(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_athena_database" "good_example" { + name = "database_name" + bucket = aws_s3_bucket.hoge.bucket + + encryption_configuration { + encryption_option = "SSE_KMS" + kms_key_arn = aws_kms_key.example.arn + } + } + + resource "aws_athena_workgroup" "good_example" { + name = "example" + + configuration { + enforce_workgroup_configuration = true + publish_cloudwatch_metrics_enabled = true + + result_configuration { + output_location = "s3://${aws_s3_bucket.example.bucket}/output/" + + encryption_configuration { + encryption_option = "SSE_KMS" + kms_key_arn = aws_kms_key.example.arn + } + } + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Databases, 1) + require.Len(t, adapted.Workgroups, 1) + + assert.Equal(t, 7, 
adapted.Databases[0].Encryption.Type.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, adapted.Databases[0].Encryption.Type.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 16, adapted.Workgroups[0].EnforceConfiguration.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 16, adapted.Workgroups[0].EnforceConfiguration.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 23, adapted.Workgroups[0].Encryption.Type.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 23, adapted.Workgroups[0].Encryption.Type.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/cloudfront/adapt.go b/internal/adapters/terraform/aws/cloudfront/adapt.go new file mode 100644 index 000000000000..e4608bfbc135 --- /dev/null +++ b/internal/adapters/terraform/aws/cloudfront/adapt.go @@ -0,0 +1,79 @@ +package cloudfront + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudfront" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) cloudfront.Cloudfront { + return cloudfront.Cloudfront{ + Distributions: adaptDistributions(modules), + } +} + +func adaptDistributions(modules terraform.Modules) []cloudfront.Distribution { + var distributions []cloudfront.Distribution + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_cloudfront_distribution") { + distributions = append(distributions, adaptDistribution(resource)) + } + } + return distributions +} + +func adaptDistribution(resource *terraform.Block) cloudfront.Distribution { + + distribution := cloudfront.Distribution{ + Metadata: resource.GetMetadata(), + WAFID: types.StringDefault("", resource.GetMetadata()), + Logging: cloudfront.Logging{ + Metadata: resource.GetMetadata(), + Bucket: types.StringDefault("", resource.GetMetadata()), + }, + DefaultCacheBehaviour: cloudfront.CacheBehaviour{ + Metadata: resource.GetMetadata(), + ViewerProtocolPolicy: types.String("allow-all", resource.GetMetadata()), + }, + OrdererCacheBehaviours: nil, + ViewerCertificate: cloudfront.ViewerCertificate{ + Metadata: resource.GetMetadata(), + MinimumProtocolVersion: types.StringDefault("TLSv1", resource.GetMetadata()), + }, + } + + distribution.WAFID = resource.GetAttribute("web_acl_id").AsStringValueOrDefault("", resource) + + if loggingBlock := resource.GetBlock("logging_config"); loggingBlock.IsNotNil() { + distribution.Logging.Metadata = loggingBlock.GetMetadata() + bucketAttr := loggingBlock.GetAttribute("bucket") + distribution.Logging.Bucket = bucketAttr.AsStringValueOrDefault("", loggingBlock) + } + + if defaultCacheBlock := resource.GetBlock("default_cache_behavior"); defaultCacheBlock.IsNotNil() { + distribution.DefaultCacheBehaviour.Metadata = defaultCacheBlock.GetMetadata() + viewerProtocolPolicyAttr := defaultCacheBlock.GetAttribute("viewer_protocol_policy") + distribution.DefaultCacheBehaviour.ViewerProtocolPolicy = viewerProtocolPolicyAttr.AsStringValueOrDefault("allow-all", defaultCacheBlock) + } + + orderedCacheBlocks := resource.GetBlocks("ordered_cache_behavior") + for _, orderedCacheBlock := range orderedCacheBlocks { + viewerProtocolPolicyAttr := orderedCacheBlock.GetAttribute("viewer_protocol_policy") + viewerProtocolPolicyVal := viewerProtocolPolicyAttr.AsStringValueOrDefault("allow-all", orderedCacheBlock) + distribution.OrdererCacheBehaviours = append(distribution.OrdererCacheBehaviours, cloudfront.CacheBehaviour{ + Metadata: 
orderedCacheBlock.GetMetadata(), + ViewerProtocolPolicy: viewerProtocolPolicyVal, + }) + } + + if viewerCertBlock := resource.GetBlock("viewer_certificate"); viewerCertBlock.IsNotNil() { + distribution.ViewerCertificate = cloudfront.ViewerCertificate{ + Metadata: viewerCertBlock.GetMetadata(), + MinimumProtocolVersion: viewerCertBlock.GetAttribute("minimum_protocol_version").AsStringValueOrDefault("TLSv1", viewerCertBlock), + SSLSupportMethod: viewerCertBlock.GetAttribute("ssl_support_method").AsStringValueOrDefault("", viewerCertBlock), + CloudfrontDefaultCertificate: viewerCertBlock.GetAttribute("cloudfront_default_certificate").AsBoolValueOrDefault(false, viewerCertBlock), + } + } + + return distribution +} diff --git a/internal/adapters/terraform/aws/cloudfront/adapt_test.go b/internal/adapters/terraform/aws/cloudfront/adapt_test.go new file mode 100644 index 000000000000..00f659ec49ff --- /dev/null +++ b/internal/adapters/terraform/aws/cloudfront/adapt_test.go @@ -0,0 +1,163 @@ +package cloudfront + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudfront" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptDistribution(t *testing.T) { + tests := []struct { + name string + terraform string + expected cloudfront.Distribution + }{ + { + name: "configured", + terraform: ` + resource "aws_cloudfront_distribution" "example" { + logging_config { + bucket = "mylogs.s3.amazonaws.com" + } + + web_acl_id = "waf_id" + + default_cache_behavior { + viewer_protocol_policy = "redirect-to-https" + } + + ordered_cache_behavior { + viewer_protocol_policy = "redirect-to-https" + } + + viewer_certificate { + cloudfront_default_certificate = true + minimum_protocol_version = "TLSv1.2_2021" + ssl_support_method = "sni-only" + } + } +`, + expected: cloudfront.Distribution{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + WAFID: defsecTypes.String("waf_id", defsecTypes.NewTestMisconfigMetadata()), + Logging: cloudfront.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Bucket: defsecTypes.String("mylogs.s3.amazonaws.com", defsecTypes.NewTestMisconfigMetadata()), + }, + DefaultCacheBehaviour: cloudfront.CacheBehaviour{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ViewerProtocolPolicy: defsecTypes.String("redirect-to-https", defsecTypes.NewTestMisconfigMetadata()), + }, + OrdererCacheBehaviours: []cloudfront.CacheBehaviour{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ViewerProtocolPolicy: defsecTypes.String("redirect-to-https", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + ViewerCertificate: cloudfront.ViewerCertificate{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + MinimumProtocolVersion: defsecTypes.String("TLSv1.2_2021", defsecTypes.NewTestMisconfigMetadata()), + CloudfrontDefaultCertificate: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + SSLSupportMethod: defsecTypes.String("sni-only", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_cloudfront_distribution" "example" { + } +`, + expected: cloudfront.Distribution{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + WAFID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Logging: cloudfront.Logging{ + 
Metadata: defsecTypes.NewTestMisconfigMetadata(), + Bucket: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + DefaultCacheBehaviour: cloudfront.CacheBehaviour{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ViewerProtocolPolicy: defsecTypes.String("allow-all", defsecTypes.NewTestMisconfigMetadata()), + }, + + ViewerCertificate: cloudfront.ViewerCertificate{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + MinimumProtocolVersion: defsecTypes.String("TLSv1", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptDistribution(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_cloudfront_distribution" "example" { + logging_config { + bucket = "mylogs.s3.amazonaws.com" + } + + web_acl_id = "waf_id" + + default_cache_behavior { + viewer_protocol_policy = "redirect-to-https" + } + + ordered_cache_behavior { + viewer_protocol_policy = "redirect-to-https" + } + + viewer_certificate { + cloudfront_default_certificate = true + minimum_protocol_version = "TLSv1.2_2021" + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Distributions, 1) + distribution := adapted.Distributions[0] + + assert.Equal(t, 2, distribution.Metadata.Range().GetStartLine()) + assert.Equal(t, 21, distribution.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, distribution.Logging.Metadata.Range().GetStartLine()) + assert.Equal(t, 5, distribution.Logging.Metadata.Range().GetEndLine()) + + assert.Equal(t, 7, distribution.WAFID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, distribution.WAFID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 9, distribution.DefaultCacheBehaviour.Metadata.Range().GetStartLine()) + assert.Equal(t, 11, distribution.DefaultCacheBehaviour.Metadata.Range().GetEndLine()) + + assert.Equal(t, 10, distribution.DefaultCacheBehaviour.ViewerProtocolPolicy.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, distribution.DefaultCacheBehaviour.ViewerProtocolPolicy.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, distribution.OrdererCacheBehaviours[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 15, distribution.OrdererCacheBehaviours[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 14, distribution.OrdererCacheBehaviours[0].ViewerProtocolPolicy.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 14, distribution.OrdererCacheBehaviours[0].ViewerProtocolPolicy.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 17, distribution.ViewerCertificate.Metadata.Range().GetStartLine()) + assert.Equal(t, 20, distribution.ViewerCertificate.Metadata.Range().GetEndLine()) + + assert.Equal(t, 19, distribution.ViewerCertificate.MinimumProtocolVersion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 19, distribution.ViewerCertificate.MinimumProtocolVersion.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/cloudtrail/adapt.go b/internal/adapters/terraform/aws/cloudtrail/adapt.go new file mode 100644 index 000000000000..c5c2b7087764 --- /dev/null +++ b/internal/adapters/terraform/aws/cloudtrail/adapt.go @@ -0,0 +1,67 @@ +package cloudtrail + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudtrail" + 
"github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) cloudtrail.CloudTrail { + return cloudtrail.CloudTrail{ + Trails: adaptTrails(modules), + } +} + +func adaptTrails(modules terraform.Modules) []cloudtrail.Trail { + var trails []cloudtrail.Trail + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_cloudtrail") { + trails = append(trails, adaptTrail(resource)) + } + } + return trails +} + +func adaptTrail(resource *terraform.Block) cloudtrail.Trail { + nameAttr := resource.GetAttribute("name") + nameVal := nameAttr.AsStringValueOrDefault("", resource) + + enableLogFileValidationAttr := resource.GetAttribute("enable_log_file_validation") + enableLogFileValidationVal := enableLogFileValidationAttr.AsBoolValueOrDefault(false, resource) + + isMultiRegionAttr := resource.GetAttribute("is_multi_region_trail") + isMultiRegionVal := isMultiRegionAttr.AsBoolValueOrDefault(false, resource) + + KMSKeyIDAttr := resource.GetAttribute("kms_key_id") + KMSKeyIDVal := KMSKeyIDAttr.AsStringValueOrDefault("", resource) + + var selectors []cloudtrail.EventSelector + for _, selBlock := range resource.GetBlocks("event_selector") { + var resources []cloudtrail.DataResource + for _, resBlock := range selBlock.GetBlocks("data_resource") { + resources = append(resources, cloudtrail.DataResource{ + Metadata: resBlock.GetMetadata(), + Type: resBlock.GetAttribute("type").AsStringValueOrDefault("", resBlock), + Values: resBlock.GetAttribute("values").AsStringValues(), + }) + } + selector := cloudtrail.EventSelector{ + Metadata: selBlock.GetMetadata(), + DataResources: resources, + ReadWriteType: selBlock.GetAttribute("read_write_type").AsStringValueOrDefault("All", selBlock), + } + selectors = append(selectors, selector) + } + + return cloudtrail.Trail{ + Metadata: resource.GetMetadata(), + Name: nameVal, + EnableLogFileValidation: enableLogFileValidationVal, + IsMultiRegion: isMultiRegionVal, + KMSKeyID: KMSKeyIDVal, + CloudWatchLogsLogGroupArn: resource.GetAttribute("cloud_watch_logs_group_arn").AsStringValueOrDefault("", resource), + IsLogging: resource.GetAttribute("enable_logging").AsBoolValueOrDefault(true, resource), + BucketName: resource.GetAttribute("s3_bucket_name").AsStringValueOrDefault("", resource), + EventSelectors: selectors, + } +} diff --git a/internal/adapters/terraform/aws/cloudtrail/adapt_test.go b/internal/adapters/terraform/aws/cloudtrail/adapt_test.go new file mode 100644 index 000000000000..08ee24417bc4 --- /dev/null +++ b/internal/adapters/terraform/aws/cloudtrail/adapt_test.go @@ -0,0 +1,106 @@ +package cloudtrail + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudtrail" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptTrail(t *testing.T) { + tests := []struct { + name string + terraform string + expected cloudtrail.Trail + }{ + { + name: "configured", + terraform: ` + resource "aws_cloudtrail" "example" { + name = "example" + is_multi_region_trail = true + + enable_log_file_validation = true + kms_key_id = "kms-key" + s3_bucket_name = "abcdefgh" + cloud_watch_logs_group_arn = "abc" + enable_logging = false + } +`, + expected: cloudtrail.Trail{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + 
Name: defsecTypes.String("example", defsecTypes.NewTestMisconfigMetadata()), + EnableLogFileValidation: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + IsMultiRegion: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("kms-key", defsecTypes.NewTestMisconfigMetadata()), + CloudWatchLogsLogGroupArn: defsecTypes.String("abc", defsecTypes.NewTestMisconfigMetadata()), + IsLogging: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + BucketName: defsecTypes.String("abcdefgh", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_cloudtrail" "example" { + } +`, + expected: cloudtrail.Trail{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + EnableLogFileValidation: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + IsMultiRegion: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + BucketName: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + CloudWatchLogsLogGroupArn: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + IsLogging: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptTrail(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_cloudtrail" "example" { + name = "example" + is_multi_region_trail = true + + enable_log_file_validation = true + kms_key_id = "kms-key" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Trails, 1) + trail := adapted.Trails[0] + + assert.Equal(t, 2, trail.Metadata.Range().GetStartLine()) + assert.Equal(t, 8, trail.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, trail.Name.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, trail.Name.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, trail.IsMultiRegion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, trail.IsMultiRegion.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, trail.EnableLogFileValidation.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, trail.EnableLogFileValidation.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 7, trail.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, trail.KMSKeyID.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/cloudwatch/adapt.go b/internal/adapters/terraform/aws/cloudwatch/adapt.go new file mode 100644 index 000000000000..e9c775c8061a --- /dev/null +++ b/internal/adapters/terraform/aws/cloudwatch/adapt.go @@ -0,0 +1,47 @@ +package cloudwatch + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudwatch" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) cloudwatch.CloudWatch { + return cloudwatch.CloudWatch{ + LogGroups: adaptLogGroups(modules), + } +} + +func adaptLogGroups(modules terraform.Modules) []cloudwatch.LogGroup { + var logGroups []cloudwatch.LogGroup + for _, module := range modules { + for _, resource := range 
module.GetResourcesByType("aws_cloudwatch_log_group") { + logGroups = append(logGroups, adaptLogGroup(resource, module)) + } + } + return logGroups +} + +func adaptLogGroup(resource *terraform.Block, module *terraform.Module) cloudwatch.LogGroup { + nameAttr := resource.GetAttribute("name") + nameVal := nameAttr.AsStringValueOrDefault("", resource) + + KMSKeyIDAttr := resource.GetAttribute("kms_key_id") + KMSKeyIDVal := KMSKeyIDAttr.AsStringValueOrDefault("", resource) + + if keyBlock, err := module.GetReferencedBlock(KMSKeyIDAttr, resource); err == nil { + KMSKeyIDVal = types.String(keyBlock.FullName(), keyBlock.GetMetadata()) + } + + retentionInDaysAttr := resource.GetAttribute("retention_in_days") + retentionInDaysVal := retentionInDaysAttr.AsIntValueOrDefault(0, resource) + + return cloudwatch.LogGroup{ + Metadata: resource.GetMetadata(), + Arn: types.StringDefault("", resource.GetMetadata()), + Name: nameVal, + KMSKeyID: KMSKeyIDVal, + RetentionInDays: retentionInDaysVal, + MetricFilters: nil, + } +} diff --git a/internal/adapters/terraform/aws/cloudwatch/adapt_test.go b/internal/adapters/terraform/aws/cloudwatch/adapt_test.go new file mode 100644 index 000000000000..096b8c17c0fe --- /dev/null +++ b/internal/adapters/terraform/aws/cloudwatch/adapt_test.go @@ -0,0 +1,114 @@ +package cloudwatch + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudwatch" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptLogGroups(t *testing.T) { + tests := []struct { + name string + terraform string + expected []cloudwatch.LogGroup + }{ + { + name: "key referencing block", + terraform: ` + resource "aws_cloudwatch_log_group" "my-group" { + name = "my-group" + kms_key_id = aws_kms_key.log_key.arn + } + + resource "aws_kms_key" "log_key" { + } +`, + expected: []cloudwatch.LogGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Arn: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Name: defsecTypes.String("my-group", defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("aws_kms_key.log_key", defsecTypes.NewTestMisconfigMetadata()), + RetentionInDays: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + MetricFilters: nil, + }, + }, + }, + { + name: "key as string", + terraform: ` + resource "aws_cloudwatch_log_group" "my-group" { + name = "my-group" + kms_key_id = "key-as-string" + } +`, + expected: []cloudwatch.LogGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Arn: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Name: defsecTypes.String("my-group", defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("key-as-string", defsecTypes.NewTestMisconfigMetadata()), + RetentionInDays: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "missing key", + terraform: ` + resource "aws_cloudwatch_log_group" "my-group" { + name = "my-group" + retention_in_days = 3 + } +`, + expected: []cloudwatch.LogGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Arn: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Name: defsecTypes.String("my-group", defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", 
defsecTypes.NewTestMisconfigMetadata()),
+					RetentionInDays: defsecTypes.Int(3, defsecTypes.NewTestMisconfigMetadata()),
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf")
+			adapted := adaptLogGroups(modules)
+			testutil.AssertDefsecEqual(t, test.expected, adapted)
+		})
+	}
+}
+
+func TestLines(t *testing.T) {
+	src := `
+	resource "aws_cloudwatch_log_group" "my-group" {
+		name = "my-group"
+		kms_key_id = aws_kms_key.log_key.arn
+		retention_in_days = 3
+
+	}`
+
+	modules := tftestutil.CreateModulesFromSource(t, src, ".tf")
+	adapted := Adapt(modules)
+	require.Len(t, adapted.LogGroups, 1)
+	logGroup := adapted.LogGroups[0]
+
+	assert.Equal(t, 3, logGroup.Name.GetMetadata().Range().GetStartLine())
+	assert.Equal(t, 3, logGroup.Name.GetMetadata().Range().GetEndLine())
+
+	assert.Equal(t, 4, logGroup.KMSKeyID.GetMetadata().Range().GetStartLine())
+	assert.Equal(t, 4, logGroup.KMSKeyID.GetMetadata().Range().GetEndLine())
+
+	assert.Equal(t, 5, logGroup.RetentionInDays.GetMetadata().Range().GetStartLine())
+	assert.Equal(t, 5, logGroup.RetentionInDays.GetMetadata().Range().GetEndLine())
+}
diff --git a/internal/adapters/terraform/aws/codebuild/adapt.go b/internal/adapters/terraform/aws/codebuild/adapt.go
new file mode 100644
index 000000000000..fd72e470c3e2
--- /dev/null
+++ b/internal/adapters/terraform/aws/codebuild/adapt.go
@@ -0,0 +1,66 @@
+package codebuild
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/codebuild"
+	"github.com/aquasecurity/trivy/pkg/terraform"
+	"github.com/aquasecurity/trivy/pkg/types"
+)
+
+func Adapt(modules terraform.Modules) codebuild.CodeBuild {
+	return codebuild.CodeBuild{
+		Projects: adaptProjects(modules),
+	}
+}
+
+func adaptProjects(modules terraform.Modules) []codebuild.Project {
+	var projects []codebuild.Project
+	for _, module := range modules {
+		for _, resource := range module.GetResourcesByType("aws_codebuild_project") {
+			projects = append(projects, adaptProject(resource))
+		}
+	}
+	return projects
+}
+
+func adaptProject(resource *terraform.Block) codebuild.Project {
+
+	project := codebuild.Project{
+		Metadata: resource.GetMetadata(),
+		ArtifactSettings: codebuild.ArtifactSettings{
+			Metadata:          resource.GetMetadata(),
+			EncryptionEnabled: types.BoolDefault(true, resource.GetMetadata()),
+		},
+		SecondaryArtifactSettings: nil,
+	}
+
+	var hasArtifacts bool
+
+	if artifactsBlock := resource.GetBlock("artifacts"); artifactsBlock.IsNotNil() {
+		project.ArtifactSettings.Metadata = artifactsBlock.GetMetadata()
+		typeAttr := artifactsBlock.GetAttribute("type")
+		encryptionDisabledAttr := artifactsBlock.GetAttribute("encryption_disabled")
+		hasArtifacts = typeAttr.NotEqual("NO_ARTIFACTS")
+		if encryptionDisabledAttr.IsTrue() && hasArtifacts {
+			project.ArtifactSettings.EncryptionEnabled = types.Bool(false, artifactsBlock.GetMetadata())
+		} else {
+			project.ArtifactSettings.EncryptionEnabled = types.Bool(true, artifactsBlock.GetMetadata())
+		}
+	}
+
+	secondaryArtifactBlocks := resource.GetBlocks("secondary_artifacts")
+	for _, secondaryArtifactBlock := range secondaryArtifactBlocks {
+
+		secondaryEncryptionEnabled := types.BoolDefault(true, secondaryArtifactBlock.GetMetadata())
+		secondaryEncryptionDisabledAttr := secondaryArtifactBlock.GetAttribute("encryption_disabled")
+		if secondaryEncryptionDisabledAttr.IsTrue() && hasArtifacts {
+			secondaryEncryptionEnabled = types.Bool(false,
secondaryArtifactBlock.GetMetadata()) + } + + project.SecondaryArtifactSettings = append(project.SecondaryArtifactSettings, codebuild.ArtifactSettings{ + Metadata: secondaryArtifactBlock.GetMetadata(), + EncryptionEnabled: secondaryEncryptionEnabled, + }) + } + + return project +} diff --git a/internal/adapters/terraform/aws/codebuild/adapt_test.go b/internal/adapters/terraform/aws/codebuild/adapt_test.go new file mode 100644 index 000000000000..6466569fa9ce --- /dev/null +++ b/internal/adapters/terraform/aws/codebuild/adapt_test.go @@ -0,0 +1,116 @@ +package codebuild + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/codebuild" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptProject(t *testing.T) { + tests := []struct { + name string + terraform string + expected codebuild.Project + }{ + { + name: "configured", + terraform: ` + resource "aws_codebuild_project" "codebuild" { + + artifacts { + encryption_disabled = false + } + + secondary_artifacts { + encryption_disabled = false + } + secondary_artifacts { + encryption_disabled = true + } + } +`, + expected: codebuild.Project{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ArtifactSettings: codebuild.ArtifactSettings{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptionEnabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + SecondaryArtifactSettings: []codebuild.ArtifactSettings{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptionEnabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptionEnabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + { + name: "defaults - encryption enabled", + terraform: ` + resource "aws_codebuild_project" "codebuild" { + } +`, + expected: codebuild.Project{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ArtifactSettings: codebuild.ArtifactSettings{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptionEnabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptProject(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_codebuild_project" "codebuild" { + artifacts { + encryption_disabled = false + } + + secondary_artifacts { + encryption_disabled = false + } + + secondary_artifacts { + encryption_disabled = true + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Projects, 1) + project := adapted.Projects[0] + + assert.Equal(t, 2, project.Metadata.Range().GetStartLine()) + assert.Equal(t, 14, project.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, project.ArtifactSettings.Metadata.Range().GetStartLine()) + assert.Equal(t, 5, project.ArtifactSettings.Metadata.Range().GetEndLine()) + + assert.Equal(t, 7, project.SecondaryArtifactSettings[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 9, 
project.SecondaryArtifactSettings[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 11, project.SecondaryArtifactSettings[1].Metadata.Range().GetStartLine()) + assert.Equal(t, 13, project.SecondaryArtifactSettings[1].Metadata.Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/config/adapt.go b/internal/adapters/terraform/aws/config/adapt.go new file mode 100644 index 000000000000..f0deb44c048d --- /dev/null +++ b/internal/adapters/terraform/aws/config/adapt.go @@ -0,0 +1,33 @@ +package config + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/config" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) config.Config { + return config.Config{ + ConfigurationAggregrator: adaptConfigurationAggregrator(modules), + } +} + +func adaptConfigurationAggregrator(modules terraform.Modules) config.ConfigurationAggregrator { + configurationAggregrator := config.ConfigurationAggregrator{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + SourceAllRegions: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + } + + for _, resource := range modules.GetResourcesByType("aws_config_configuration_aggregator") { + configurationAggregrator.Metadata = resource.GetMetadata() + aggregationBlock := resource.GetFirstMatchingBlock("account_aggregation_source", "organization_aggregation_source") + if aggregationBlock.IsNil() { + configurationAggregrator.SourceAllRegions = defsecTypes.Bool(false, resource.GetMetadata()) + } else { + allRegionsAttr := aggregationBlock.GetAttribute("all_regions") + allRegionsVal := allRegionsAttr.AsBoolValueOrDefault(false, aggregationBlock) + configurationAggregrator.SourceAllRegions = allRegionsVal + } + } + return configurationAggregrator +} diff --git a/internal/adapters/terraform/aws/config/adapt_test.go b/internal/adapters/terraform/aws/config/adapt_test.go new file mode 100644 index 000000000000..a760bb631b1c --- /dev/null +++ b/internal/adapters/terraform/aws/config/adapt_test.go @@ -0,0 +1,81 @@ +package config + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/config" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" +) + +func Test_adaptConfigurationAggregrator(t *testing.T) { + tests := []struct { + name string + terraform string + expected config.ConfigurationAggregrator + }{ + { + name: "configured", + terraform: ` + resource "aws_config_configuration_aggregator" "example" { + name = "example" + + account_aggregation_source { + account_ids = ["123456789012"] + all_regions = true + } + } +`, + expected: config.ConfigurationAggregrator{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SourceAllRegions: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_config_configuration_aggregator" "example" { + } +`, + expected: config.ConfigurationAggregrator{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SourceAllRegions: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := 
adaptConfigurationAggregrator(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_config_configuration_aggregator" "example" { + name = "example" + + account_aggregation_source { + account_ids = ["123456789012"] + all_regions = true + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + aggregator := adapted.ConfigurationAggregrator + + assert.Equal(t, 2, aggregator.Metadata.Range().GetStartLine()) + assert.Equal(t, 9, aggregator.Metadata.Range().GetEndLine()) + + assert.Equal(t, 7, aggregator.SourceAllRegions.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, aggregator.SourceAllRegions.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/documentdb/adapt.go b/internal/adapters/terraform/aws/documentdb/adapt.go new file mode 100644 index 000000000000..cfac32a033ac --- /dev/null +++ b/internal/adapters/terraform/aws/documentdb/adapt.go @@ -0,0 +1,63 @@ +package documentdb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/documentdb" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) documentdb.DocumentDB { + return documentdb.DocumentDB{ + Clusters: adaptClusters(modules), + } +} + +func adaptClusters(modules terraform.Modules) []documentdb.Cluster { + var clusters []documentdb.Cluster + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_docdb_cluster") { + clusters = append(clusters, adaptCluster(resource, module)) + } + } + return clusters +} + +func adaptCluster(resource *terraform.Block, module *terraform.Module) documentdb.Cluster { + identifierAttr := resource.GetAttribute("cluster_identifier") + identifierVal := identifierAttr.AsStringValueOrDefault("", resource) + + var enabledLogExports []types.StringValue + var instances []documentdb.Instance + + enabledLogExportsAttr := resource.GetAttribute("enabled_cloudwatch_logs_exports") + for _, logExport := range enabledLogExportsAttr.AsStringValues() { + enabledLogExports = append(enabledLogExports, logExport) + } + + instancesRes := module.GetReferencingResources(resource, "aws_docdb_cluster_instance", "cluster_identifier") + for _, instanceRes := range instancesRes { + keyIDAttr := instanceRes.GetAttribute("kms_key_id") + keyIDVal := keyIDAttr.AsStringValueOrDefault("", instanceRes) + + instances = append(instances, documentdb.Instance{ + Metadata: instanceRes.GetMetadata(), + KMSKeyID: keyIDVal, + }) + } + + storageEncryptedAttr := resource.GetAttribute("storage_encrypted") + storageEncryptedVal := storageEncryptedAttr.AsBoolValueOrDefault(false, resource) + + KMSKeyIDAttr := resource.GetAttribute("kms_key_id") + KMSKeyIDVal := KMSKeyIDAttr.AsStringValueOrDefault("", resource) + + return documentdb.Cluster{ + Metadata: resource.GetMetadata(), + Identifier: identifierVal, + EnabledLogExports: enabledLogExports, + BackupRetentionPeriod: resource.GetAttribute("backup_retention_period").AsIntValueOrDefault(0, resource), + Instances: instances, + StorageEncrypted: storageEncryptedVal, + KMSKeyID: KMSKeyIDVal, + } +} diff --git a/internal/adapters/terraform/aws/documentdb/adapt_test.go b/internal/adapters/terraform/aws/documentdb/adapt_test.go new file mode 100644 index 000000000000..a809de50012e --- /dev/null +++ b/internal/adapters/terraform/aws/documentdb/adapt_test.go @@ -0,0 +1,125 @@ +package documentdb + 
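+// The cases below exercise the DocumentDB adapter twice: once with a fully
+// configured aws_docdb_cluster plus a referencing aws_docdb_cluster_instance,
+// and once relying purely on the adapter's defaults.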
+import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/documentdb" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptCluster(t *testing.T) { + tests := []struct { + name string + terraform string + expected documentdb.Cluster + }{ + { + name: "configured", + terraform: ` + resource "aws_docdb_cluster" "docdb" { + cluster_identifier = "my-docdb-cluster" + kms_key_id = "kms-key" + enabled_cloudwatch_logs_exports = "audit" + storage_encrypted = true + } + + resource "aws_docdb_cluster_instance" "cluster_instances" { + count = 1 + identifier = "my-docdb-cluster" + cluster_identifier = aws_docdb_cluster.docdb.id + kms_key_id = "kms-key#1" + } +`, + expected: documentdb.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Identifier: defsecTypes.String("my-docdb-cluster", defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("kms-key", defsecTypes.NewTestMisconfigMetadata()), + EnabledLogExports: []defsecTypes.StringValue{ + defsecTypes.String("audit", defsecTypes.NewTestMisconfigMetadata()), + }, + Instances: []documentdb.Instance{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + KMSKeyID: defsecTypes.String("kms-key#1", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + StorageEncrypted: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_docdb_cluster" "docdb" { + } +`, + expected: documentdb.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Identifier: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + StorageEncrypted: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptCluster(modules.GetBlocks()[0], modules[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_docdb_cluster" "docdb" { + cluster_identifier = "my-docdb-cluster" + kms_key_id = "kms-key" + enabled_cloudwatch_logs_exports = "audit" + storage_encrypted = true + } + + resource "aws_docdb_cluster_instance" "cluster_instances" { + count = 1 + identifier = "my-docdb-cluster" + cluster_identifier = aws_docdb_cluster.docdb.id + kms_key_id = "kms-key" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Clusters, 1) + require.Len(t, adapted.Clusters[0].Instances, 1) + + cluster := adapted.Clusters[0] + instance := cluster.Instances[0] + + assert.Equal(t, 2, cluster.Metadata.Range().GetStartLine()) + assert.Equal(t, 7, cluster.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, cluster.Identifier.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, cluster.Identifier.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, cluster.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, cluster.KMSKeyID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, cluster.EnabledLogExports[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, 
cluster.EnabledLogExports[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, cluster.StorageEncrypted.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, cluster.StorageEncrypted.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 9, instance.Metadata.Range().GetStartLine()) + assert.Equal(t, 14, instance.Metadata.Range().GetEndLine()) + + assert.Equal(t, 13, instance.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 13, instance.KMSKeyID.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/dynamodb/adapt.go b/internal/adapters/terraform/aws/dynamodb/adapt.go new file mode 100644 index 000000000000..c77a51c0067c --- /dev/null +++ b/internal/adapters/terraform/aws/dynamodb/adapt.go @@ -0,0 +1,94 @@ +package dynamodb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/dynamodb" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) dynamodb.DynamoDB { + return dynamodb.DynamoDB{ + DAXClusters: adaptClusters(modules), + Tables: adaptTables(modules), + } +} + +func adaptClusters(modules terraform.Modules) []dynamodb.DAXCluster { + var clusters []dynamodb.DAXCluster + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_dax_cluster") { + clusters = append(clusters, adaptCluster(resource, module)) + } + } + return clusters +} + +func adaptTables(modules terraform.Modules) []dynamodb.Table { + var tables []dynamodb.Table + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_dynamodb_table") { + tables = append(tables, adaptTable(resource, module)) + } + } + return tables +} + +func adaptCluster(resource *terraform.Block, module *terraform.Module) dynamodb.DAXCluster { + + cluster := dynamodb.DAXCluster{ + Metadata: resource.GetMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + KMSKeyID: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + PointInTimeRecovery: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } + + if ssEncryptionBlock := resource.GetBlock("server_side_encryption"); ssEncryptionBlock.IsNotNil() { + cluster.ServerSideEncryption.Metadata = ssEncryptionBlock.GetMetadata() + enabledAttr := ssEncryptionBlock.GetAttribute("enabled") + cluster.ServerSideEncryption.Enabled = enabledAttr.AsBoolValueOrDefault(false, ssEncryptionBlock) + } + + if recoveryBlock := resource.GetBlock("point_in_time_recovery"); recoveryBlock.IsNotNil() { + recoveryEnabledAttr := recoveryBlock.GetAttribute("enabled") + cluster.PointInTimeRecovery = recoveryEnabledAttr.AsBoolValueOrDefault(false, recoveryBlock) + } + + return cluster +} + +func adaptTable(resource *terraform.Block, module *terraform.Module) dynamodb.Table { + + table := dynamodb.Table{ + Metadata: resource.GetMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + KMSKeyID: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + PointInTimeRecovery: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } + + if ssEncryptionBlock := resource.GetBlock("server_side_encryption"); ssEncryptionBlock.IsNotNil() { + table.ServerSideEncryption.Metadata = ssEncryptionBlock.GetMetadata() + enabledAttr := 
ssEncryptionBlock.GetAttribute("enabled") + table.ServerSideEncryption.Enabled = enabledAttr.AsBoolValueOrDefault(false, ssEncryptionBlock) + + kmsKeyIdAttr := ssEncryptionBlock.GetAttribute("kms_key_arn") + table.ServerSideEncryption.KMSKeyID = kmsKeyIdAttr.AsStringValueOrDefault("alias/aws/dynamodb", ssEncryptionBlock) + + kmsBlock, err := module.GetReferencedBlock(kmsKeyIdAttr, resource) + if err == nil && kmsBlock.IsNotNil() { + table.ServerSideEncryption.KMSKeyID = defsecTypes.String(kmsBlock.FullName(), kmsBlock.GetMetadata()) + } + } + + if recoveryBlock := resource.GetBlock("point_in_time_recovery"); recoveryBlock.IsNotNil() { + recoveryEnabledAttr := recoveryBlock.GetAttribute("enabled") + table.PointInTimeRecovery = recoveryEnabledAttr.AsBoolValueOrDefault(false, recoveryBlock) + } + + return table +} diff --git a/internal/adapters/terraform/aws/dynamodb/adapt_test.go b/internal/adapters/terraform/aws/dynamodb/adapt_test.go new file mode 100644 index 000000000000..faa6bfc39711 --- /dev/null +++ b/internal/adapters/terraform/aws/dynamodb/adapt_test.go @@ -0,0 +1,176 @@ +package dynamodb + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/dynamodb" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptCluster(t *testing.T) { + tests := []struct { + name string + terraform string + expected dynamodb.DAXCluster + }{ + { + name: "cluster", + terraform: ` + resource "aws_dax_cluster" "example" { + server_side_encryption { + enabled = true + } + } +`, + expected: dynamodb.DAXCluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + PointInTimeRecovery: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptCluster(modules.GetBlocks()[0], modules[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptTable(t *testing.T) { + tests := []struct { + name string + terraform string + expected dynamodb.Table + }{ + { + name: "table", + terraform: ` + resource "aws_dynamodb_table" "example" { + name = "example" + + server_side_encryption { + enabled = true + kms_key_arn = "key-string" + } + + point_in_time_recovery { + enabled = true + } + } +`, + expected: dynamodb.Table{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("key-string", defsecTypes.NewTestMisconfigMetadata()), + }, + PointInTimeRecovery: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "table no kms", + terraform: ` + resource "aws_dax_cluster" "example" { + server_side_encryption { + enabled = true + } + } +`, + expected: dynamodb.Table{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + 
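+				// No kms_key_arn is set here, so the adapter is expected to fall back to
+				// the "alias/aws/dynamodb" default used by adaptTable above.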
ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("alias/aws/dynamodb", defsecTypes.NewTestMisconfigMetadata()), + }, + PointInTimeRecovery: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "reference key", + terraform: ` + resource "aws_dynamodb_table" "example" { + name = "example" + + server_side_encryption { + enabled = true + kms_key_arn = aws_kms_key.a.arn + } + } + + resource "aws_kms_key" "a" { + } +`, + expected: dynamodb.Table{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ServerSideEncryption: dynamodb.ServerSideEncryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("aws_kms_key.a", defsecTypes.NewTestMisconfigMetadata()), + }, + PointInTimeRecovery: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptTable(modules.GetBlocks()[0], modules[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_dynamodb_table" "example" { + name = "example" + + server_side_encryption { + enabled = true + kms_key_arn = "key-string" + } + + point_in_time_recovery { + enabled = true + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.DAXClusters, 0) + require.Len(t, adapted.Tables, 1) + table := adapted.Tables[0] + + assert.Equal(t, 2, table.Metadata.Range().GetStartLine()) + assert.Equal(t, 13, table.Metadata.Range().GetEndLine()) + + assert.Equal(t, 5, table.ServerSideEncryption.Metadata.Range().GetStartLine()) + assert.Equal(t, 8, table.ServerSideEncryption.Metadata.Range().GetEndLine()) + + assert.Equal(t, 6, table.ServerSideEncryption.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, table.ServerSideEncryption.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 7, table.ServerSideEncryption.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, table.ServerSideEncryption.KMSKeyID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, table.PointInTimeRecovery.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, table.PointInTimeRecovery.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/ec2/adapt.go b/internal/adapters/terraform/aws/ec2/adapt.go new file mode 100644 index 000000000000..9fb054f81ad1 --- /dev/null +++ b/internal/adapters/terraform/aws/ec2/adapt.go @@ -0,0 +1,102 @@ +package ec2 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) ec2.EC2 { + + naclAdapter := naclAdapter{naclRuleIDs: modules.GetChildResourceIDMapByType("aws_network_acl_rule")} + sgAdapter := sgAdapter{sgRuleIDs: modules.GetChildResourceIDMapByType("aws_security_group_rule")} + + return ec2.EC2{ + Instances: getInstances(modules), + VPCs: adaptVPCs(modules), + SecurityGroups: sgAdapter.adaptSecurityGroups(modules), + Subnets: adaptSubnets(modules), + NetworkACLs: naclAdapter.adaptNetworkACLs(modules), + 
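+		// The two stateful adapters above track which standalone rule resources were
+		// matched to a parent, so any orphaned aws_security_group_rule and
+		// aws_network_acl_rule resources can still be reported on their own.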
LaunchConfigurations: adaptLaunchConfigurations(modules), + LaunchTemplates: adaptLaunchTemplates(modules), + Volumes: adaptVolumes(modules), + } +} + +func getInstances(modules terraform.Modules) []ec2.Instance { + var instances []ec2.Instance + + blocks := modules.GetResourcesByType("aws_instance") + + for _, b := range blocks { + instance := ec2.Instance{ + Metadata: b.GetMetadata(), + MetadataOptions: getMetadataOptions(b), + UserData: b.GetAttribute("user_data").AsStringValueOrDefault("", b), + } + + if launchTemplate := findRelatedLaunchTemplate(modules, b); launchTemplate != nil { + instance = launchTemplate.Instance + } + + if instance.RootBlockDevice == nil { + instance.RootBlockDevice = &ec2.BlockDevice{ + Metadata: b.GetMetadata(), + Encrypted: types.BoolDefault(false, b.GetMetadata()), + } + } + + if rootBlockDevice := b.GetBlock("root_block_device"); rootBlockDevice.IsNotNil() { + instance.RootBlockDevice = &ec2.BlockDevice{ + Metadata: rootBlockDevice.GetMetadata(), + Encrypted: rootBlockDevice.GetAttribute("encrypted").AsBoolValueOrDefault(false, b), + } + } + + for _, ebsBlock := range b.GetBlocks("ebs_block_device") { + instance.EBSBlockDevices = append(instance.EBSBlockDevices, &ec2.BlockDevice{ + Metadata: ebsBlock.GetMetadata(), + Encrypted: ebsBlock.GetAttribute("encrypted").AsBoolValueOrDefault(false, b), + }) + } + + for _, resource := range modules.GetResourcesByType("aws_ebs_encryption_by_default") { + if resource.GetAttribute("enabled").NotEqual(false) { + instance.RootBlockDevice.Encrypted = types.BoolDefault(true, resource.GetMetadata()) + for i := 0; i < len(instance.EBSBlockDevices); i++ { + ebs := instance.EBSBlockDevices[i] + ebs.Encrypted = types.BoolDefault(true, resource.GetMetadata()) + } + } + } + + instances = append(instances, instance) + } + + return instances +} + +func findRelatedLaunchTemplate(modules terraform.Modules, instanceBlock *terraform.Block) *ec2.LaunchTemplate { + launchTemplateBlock := instanceBlock.GetBlock("launch_template") + if launchTemplateBlock.IsNil() { + return nil + } + + templateRef := launchTemplateBlock.GetAttribute("name") + + if !templateRef.IsResolvable() { + templateRef = launchTemplateBlock.GetAttribute("id") + } + + if templateRef.IsString() { + for _, r := range modules.GetResourcesByType("aws_launch_template") { + templateName := r.GetAttribute("name").AsStringValueOrDefault("", r).Value() + if templateRef.Equals(r.ID()) || templateRef.Equals(templateName) { + launchTemplate := adaptLaunchTemplate(r) + return &launchTemplate + } + } + } + + return nil +} diff --git a/internal/adapters/terraform/aws/ec2/adapt_test.go b/internal/adapters/terraform/aws/ec2/adapt_test.go new file mode 100644 index 000000000000..82692aea19c7 --- /dev/null +++ b/internal/adapters/terraform/aws/ec2/adapt_test.go @@ -0,0 +1,255 @@ +package ec2 + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected ec2.EC2 + }{ + { + name: "configured", + terraform: ` + resource "aws_instance" "example" { + ami = "ami-7f89a64f" + instance_type = "t1.micro" + + root_block_device { + encrypted = true + } + + metadata_options { 
+ http_tokens = "required" + http_endpoint = "disabled" + } + + ebs_block_device { + encrypted = true + } + + user_data = < 0 { + orphanage := ec2.SecurityGroup{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Description: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + IngressRules: nil, + EgressRules: nil, + IsDefault: defsecTypes.BoolUnresolvable(defsecTypes.NewUnmanagedMisconfigMetadata()), + VPCID: defsecTypes.StringUnresolvable(defsecTypes.NewUnmanagedMisconfigMetadata()), + } + for _, sgRule := range orphanResources { + if sgRule.GetAttribute("type").Equals("ingress") { + orphanage.IngressRules = append(orphanage.IngressRules, adaptSGRule(sgRule, modules)) + } else if sgRule.GetAttribute("type").Equals("egress") { + orphanage.EgressRules = append(orphanage.EgressRules, adaptSGRule(sgRule, modules)) + } + } + securityGroups = append(securityGroups, orphanage) + } + + return securityGroups +} + +func (a *naclAdapter) adaptNetworkACLs(modules terraform.Modules) []ec2.NetworkACL { + var networkACLs []ec2.NetworkACL + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_network_acl") { + networkACLs = append(networkACLs, a.adaptNetworkACL(resource, module)) + } + } + + orphanResources := modules.GetResourceByIDs(a.naclRuleIDs.Orphans()...) + if len(orphanResources) > 0 { + orphanage := ec2.NetworkACL{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Rules: nil, + IsDefaultRule: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + } + for _, naclRule := range orphanResources { + orphanage.Rules = append(orphanage.Rules, adaptNetworkACLRule(naclRule)) + } + networkACLs = append(networkACLs, orphanage) + } + + return networkACLs +} + +func (a *sgAdapter) adaptSecurityGroup(resource *terraform.Block, module terraform.Modules) ec2.SecurityGroup { + var ingressRules []ec2.SecurityGroupRule + var egressRules []ec2.SecurityGroupRule + + descriptionAttr := resource.GetAttribute("description") + descriptionVal := descriptionAttr.AsStringValueOrDefault("Managed by Terraform", resource) + + ingressBlocks := resource.GetBlocks("ingress") + for _, ingressBlock := range ingressBlocks { + ingressRules = append(ingressRules, adaptSGRule(ingressBlock, module)) + } + + egressBlocks := resource.GetBlocks("egress") + for _, egressBlock := range egressBlocks { + egressRules = append(egressRules, adaptSGRule(egressBlock, module)) + } + + rulesBlocks := module.GetReferencingResources(resource, "aws_security_group_rule", "security_group_id") + for _, ruleBlock := range rulesBlocks { + a.sgRuleIDs.Resolve(ruleBlock.ID()) + if ruleBlock.GetAttribute("type").Equals("ingress") { + ingressRules = append(ingressRules, adaptSGRule(ruleBlock, module)) + } else if ruleBlock.GetAttribute("type").Equals("egress") { + egressRules = append(egressRules, adaptSGRule(ruleBlock, module)) + } + } + + return ec2.SecurityGroup{ + Metadata: resource.GetMetadata(), + Description: descriptionVal, + IngressRules: ingressRules, + EgressRules: egressRules, + IsDefault: defsecTypes.Bool(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + VPCID: resource.GetAttribute("vpc_id").AsStringValueOrDefault("", resource), + } +} + +func adaptSGRule(resource *terraform.Block, modules terraform.Modules) ec2.SecurityGroupRule { + ruleDescAttr := resource.GetAttribute("description") + ruleDescVal := ruleDescAttr.AsStringValueOrDefault("", resource) + + var cidrs []defsecTypes.StringValue + + cidrBlocks := 
resource.GetAttribute("cidr_blocks") + ipv6cidrBlocks := resource.GetAttribute("ipv6_cidr_blocks") + varBlocks := modules.GetBlocks().OfType("variable") + + for _, vb := range varBlocks { + if cidrBlocks.IsNotNil() && cidrBlocks.ReferencesBlock(vb) { + cidrBlocks = vb.GetAttribute("default") + } + if ipv6cidrBlocks.IsNotNil() && ipv6cidrBlocks.ReferencesBlock(vb) { + ipv6cidrBlocks = vb.GetAttribute("default") + } + } + + if cidrBlocks.IsNotNil() { + cidrs = cidrBlocks.AsStringValues() + } + + if ipv6cidrBlocks.IsNotNil() { + cidrs = append(cidrs, ipv6cidrBlocks.AsStringValues()...) + } + + return ec2.SecurityGroupRule{ + Metadata: resource.GetMetadata(), + Description: ruleDescVal, + CIDRs: cidrs, + } +} + +func (a *naclAdapter) adaptNetworkACL(resource *terraform.Block, module *terraform.Module) ec2.NetworkACL { + var networkRules []ec2.NetworkACLRule + rulesBlocks := module.GetReferencingResources(resource, "aws_network_acl_rule", "network_acl_id") + for _, ruleBlock := range rulesBlocks { + a.naclRuleIDs.Resolve(ruleBlock.ID()) + networkRules = append(networkRules, adaptNetworkACLRule(ruleBlock)) + } + return ec2.NetworkACL{ + Metadata: resource.GetMetadata(), + Rules: networkRules, + IsDefaultRule: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } +} + +func adaptNetworkACLRule(resource *terraform.Block) ec2.NetworkACLRule { + var cidrs []defsecTypes.StringValue + + typeVal := defsecTypes.StringDefault("ingress", resource.GetMetadata()) + + egressAtrr := resource.GetAttribute("egress") + if egressAtrr.IsTrue() { + typeVal = defsecTypes.String("egress", egressAtrr.GetMetadata()) + } else if egressAtrr.IsNotNil() { + typeVal = defsecTypes.String("ingress", egressAtrr.GetMetadata()) + } + + actionAttr := resource.GetAttribute("rule_action") + actionVal := actionAttr.AsStringValueOrDefault("", resource) + + protocolAtrr := resource.GetAttribute("protocol") + protocolVal := protocolAtrr.AsStringValueOrDefault("-1", resource) + + cidrAttr := resource.GetAttribute("cidr_block") + if cidrAttr.IsNotNil() { + cidrs = append(cidrs, cidrAttr.AsStringValueOrDefault("", resource)) + } + ipv4cidrAttr := resource.GetAttribute("ipv6_cidr_block") + if ipv4cidrAttr.IsNotNil() { + cidrs = append(cidrs, ipv4cidrAttr.AsStringValueOrDefault("", resource)) + } + + return ec2.NetworkACLRule{ + Metadata: resource.GetMetadata(), + Type: typeVal, + Action: actionVal, + Protocol: protocolVal, + CIDRs: cidrs, + } +} diff --git a/internal/adapters/terraform/aws/ec2/vpc_test.go b/internal/adapters/terraform/aws/ec2/vpc_test.go new file mode 100644 index 000000000000..dba58ec99e2a --- /dev/null +++ b/internal/adapters/terraform/aws/ec2/vpc_test.go @@ -0,0 +1,339 @@ +package ec2 + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_AdaptVPC(t *testing.T) { + tests := []struct { + name string + terraform string + expected ec2.EC2 + }{ + { + name: "defined", + terraform: ` + resource "aws_flow_log" "this" { + vpc_id = aws_vpc.main.id + } + resource "aws_default_vpc" "default" { + tags = { + Name = "Default VPC" + } + } + + resource "aws_vpc" "main" { + cidr_block = "4.5.6.7/32" + } + + resource "aws_security_group" "example" { + name = "http" + description = 
"Allow inbound HTTP traffic" + + ingress { + description = "Rule #1" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = [aws_vpc.main.cidr_block] + } + + egress { + cidr_blocks = ["1.2.3.4/32"] + } + } + + resource "aws_network_acl_rule" "example" { + egress = false + protocol = "tcp" + from_port = 22 + to_port = 22 + rule_action = "allow" + cidr_block = "10.0.0.0/16" + } + + resource "aws_security_group_rule" "example" { + type = "ingress" + description = "Rule #2" + security_group_id = aws_security_group.example.id + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = [ + "1.2.3.4/32", + "4.5.6.7/32", + ] + } +`, + expected: ec2.EC2{ + VPCs: []ec2.VPC{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + IsDefault: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + ID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + FlowLogsEnabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + IsDefault: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + ID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + FlowLogsEnabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + SecurityGroups: []ec2.SecurityGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("Allow inbound HTTP traffic", defsecTypes.NewTestMisconfigMetadata()), + IsDefault: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + VPCID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + IngressRules: []ec2.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + + Description: defsecTypes.String("Rule #1", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("4.5.6.7/32", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + + Description: defsecTypes.String("Rule #2", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("1.2.3.4/32", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("4.5.6.7/32", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + + EgressRules: []ec2.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("1.2.3.4/32", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + NetworkACLs: []ec2.NetworkACL{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Rules: []ec2.NetworkACLRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String("ingress", defsecTypes.NewTestMisconfigMetadata()), + Action: defsecTypes.String("allow", defsecTypes.NewTestMisconfigMetadata()), + Protocol: defsecTypes.String("tcp", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("10.0.0.0/16", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + IsDefaultRule: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_security_group" "example" { + ingress { + } + + egress { + } + } + + resource "aws_network_acl_rule" "example" { + } +`, + expected: ec2.EC2{ + SecurityGroups: []ec2.SecurityGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: 
defsecTypes.String("Managed by Terraform", defsecTypes.NewTestMisconfigMetadata()), + IsDefault: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + VPCID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + IngressRules: []ec2.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + + EgressRules: []ec2.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + NetworkACLs: []ec2.NetworkACL{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Rules: []ec2.NetworkACLRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String("ingress", defsecTypes.NewTestMisconfigMetadata()), + Action: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Protocol: defsecTypes.String("-1", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + IsDefaultRule: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + { + name: "aws_flow_log refer to locals", + terraform: ` +locals { + vpc_id = try(aws_vpc.this.id, "") +} + +resource "aws_vpc" "this" { +} + +resource "aws_flow_log" "this" { + vpc_id = local.vpc_id +} +`, + expected: ec2.EC2{ + VPCs: []ec2.VPC{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + IsDefault: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + ID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + FlowLogsEnabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestVPCLines(t *testing.T) { + src := ` + resource "aws_default_vpc" "default" { + } + + resource "aws_vpc" "main" { + cidr_block = "4.5.6.7/32" + } + + resource "aws_security_group" "example" { + name = "http" + description = "Allow inbound HTTP traffic" + + ingress { + description = "HTTP from VPC" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = [aws_vpc.main.cidr_block] + } + + egress { + cidr_blocks = ["1.2.3.4/32"] + } + } + + resource "aws_security_group_rule" "example" { + type = "ingress" + security_group_id = aws_security_group.example.id + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = [ + "1.2.3.4/32", + "4.5.6.7/32", + ] + } + + resource "aws_network_acl_rule" "example" { + egress = false + protocol = "tcp" + from_port = 22 + to_port = 22 + rule_action = "allow" + cidr_block = "10.0.0.0/16" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.VPCs, 2) + require.Len(t, adapted.SecurityGroups, 1) + require.Len(t, adapted.NetworkACLs, 1) + + defaultVPC := adapted.VPCs[0] + securityGroup := adapted.SecurityGroups[0] + networkACL := adapted.NetworkACLs[0] + + assert.Equal(t, 2, defaultVPC.Metadata.Range().GetStartLine()) + assert.Equal(t, 3, defaultVPC.Metadata.Range().GetEndLine()) + + assert.Equal(t, 9, securityGroup.Metadata.Range().GetStartLine()) + assert.Equal(t, 24, securityGroup.Metadata.Range().GetEndLine()) + + assert.Equal(t, 11, securityGroup.Description.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, 
securityGroup.Description.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, securityGroup.IngressRules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 19, securityGroup.IngressRules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 14, securityGroup.IngressRules[0].Description.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 14, securityGroup.IngressRules[0].Description.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 18, securityGroup.IngressRules[0].CIDRs[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 18, securityGroup.IngressRules[0].CIDRs[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 26, securityGroup.IngressRules[1].Metadata.Range().GetStartLine()) + assert.Equal(t, 36, securityGroup.IngressRules[1].Metadata.Range().GetEndLine()) + + assert.Equal(t, 32, securityGroup.IngressRules[1].CIDRs[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 35, securityGroup.IngressRules[1].CIDRs[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 21, securityGroup.EgressRules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 23, securityGroup.EgressRules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 22, securityGroup.EgressRules[0].CIDRs[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 22, securityGroup.EgressRules[0].CIDRs[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 38, networkACL.Rules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 45, networkACL.Rules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 39, networkACL.Rules[0].Type.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 39, networkACL.Rules[0].Type.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 40, networkACL.Rules[0].Protocol.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 40, networkACL.Rules[0].Protocol.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 43, networkACL.Rules[0].Action.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 43, networkACL.Rules[0].Action.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 44, networkACL.Rules[0].CIDRs[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 44, networkACL.Rules[0].CIDRs[0].GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/ecr/adapt.go b/internal/adapters/terraform/aws/ecr/adapt.go new file mode 100644 index 000000000000..8613538628ac --- /dev/null +++ b/internal/adapters/terraform/aws/ecr/adapt.go @@ -0,0 +1,113 @@ +package ecr + +import ( + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/ecr" + iamp "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + "github.com/liamg/iamgo" +) + +func Adapt(modules terraform.Modules) ecr.ECR { + return ecr.ECR{ + Repositories: adaptRepositories(modules), + } +} + +func adaptRepositories(modules terraform.Modules) []ecr.Repository { + var repositories []ecr.Repository + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_ecr_repository") { + repositories = append(repositories, adaptRepository(resource, module, modules)) + } + } + return repositories +} + +func adaptRepository(resource *terraform.Block, module *terraform.Module, modules terraform.Modules) ecr.Repository { + repo := ecr.Repository{ + Metadata: resource.GetMetadata(), + ImageScanning: ecr.ImageScanning{ + Metadata: 
resource.GetMetadata(), + ScanOnPush: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + ImageTagsImmutable: defsecTypes.BoolDefault(false, resource.GetMetadata()), + Policies: nil, + Encryption: ecr.Encryption{ + Metadata: resource.GetMetadata(), + Type: defsecTypes.StringDefault("AES256", resource.GetMetadata()), + KMSKeyID: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + } + + if imageScanningBlock := resource.GetBlock("image_scanning_configuration"); imageScanningBlock.IsNotNil() { + repo.ImageScanning.Metadata = imageScanningBlock.GetMetadata() + scanOnPushAttr := imageScanningBlock.GetAttribute("scan_on_push") + repo.ImageScanning.ScanOnPush = scanOnPushAttr.AsBoolValueOrDefault(false, imageScanningBlock) + } + + mutabilityAttr := resource.GetAttribute("image_tag_mutability") + if mutabilityAttr.Equals("IMMUTABLE") { + repo.ImageTagsImmutable = defsecTypes.Bool(true, mutabilityAttr.GetMetadata()) + } else if mutabilityAttr.Equals("MUTABLE") { + repo.ImageTagsImmutable = defsecTypes.Bool(false, mutabilityAttr.GetMetadata()) + } + + policyBlocks := module.GetReferencingResources(resource, "aws_ecr_repository_policy", "repository") + for _, policyRes := range policyBlocks { + if policyAttr := policyRes.GetAttribute("policy"); policyAttr.IsString() { + + dataBlock, err := module.GetBlockByID(policyAttr.Value().AsString()) + if err != nil { + + parsed, err := iamgo.ParseString(policyAttr.Value().AsString()) + if err != nil { + continue + } + + policy := iamp.Policy{ + Metadata: policyRes.GetMetadata(), + Name: defsecTypes.StringDefault("", policyRes.GetMetadata()), + Document: iamp.Document{ + Parsed: *parsed, + Metadata: policyAttr.GetMetadata(), + }, + Builtin: defsecTypes.Bool(false, policyRes.GetMetadata()), + } + + repo.Policies = append(repo.Policies, policy) + } else if dataBlock.Type() == "data" && dataBlock.TypeLabel() == "aws_iam_policy_document" { + if doc, err := iam.ConvertTerraformDocument(modules, dataBlock); err == nil { + policy := iamp.Policy{ + Metadata: policyRes.GetMetadata(), + Name: defsecTypes.StringDefault("", policyRes.GetMetadata()), + Document: iamp.Document{ + Parsed: doc.Document, + Metadata: doc.Source.GetMetadata(), + IsOffset: true, + }, + Builtin: defsecTypes.Bool(false, policyRes.GetMetadata()), + } + repo.Policies = append(repo.Policies, policy) + } + } + } + } + + if encryptBlock := resource.GetBlock("encryption_configuration"); encryptBlock.IsNotNil() { + repo.Encryption.Metadata = encryptBlock.GetMetadata() + encryptionTypeAttr := encryptBlock.GetAttribute("encryption_type") + repo.Encryption.Type = encryptionTypeAttr.AsStringValueOrDefault("AES256", encryptBlock) + + kmsKeyAttr := encryptBlock.GetAttribute("kms_key") + repo.Encryption.KMSKeyID = kmsKeyAttr.AsStringValueOrDefault("", encryptBlock) + if kmsKeyAttr.IsResourceBlockReference("aws_kms_key") { + if keyBlock, err := module.GetReferencedBlock(kmsKeyAttr, encryptBlock); err == nil { + repo.Encryption.KMSKeyID = defsecTypes.String(keyBlock.FullName(), keyBlock.GetMetadata()) + } + } + } + + return repo +} diff --git a/internal/adapters/terraform/aws/ecr/adapt_test.go b/internal/adapters/terraform/aws/ecr/adapt_test.go new file mode 100644 index 000000000000..ecf0f932e9fa --- /dev/null +++ b/internal/adapters/terraform/aws/ecr/adapt_test.go @@ -0,0 +1,248 @@ +package ecr + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/ecr" + 
"github.com/aquasecurity/trivy/pkg/providers/aws/iam" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/liamg/iamgo" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptRepository(t *testing.T) { + tests := []struct { + name string + terraform string + expected ecr.Repository + }{ + { + name: "configured", + terraform: ` + resource "aws_kms_key" "ecr_kms" { + enable_key_rotation = true + } + + resource "aws_ecr_repository" "foo" { + name = "bar" + image_tag_mutability = "MUTABLE" + + image_scanning_configuration { + scan_on_push = true + } + + encryption_configuration { + encryption_type = "KMS" + kms_key = aws_kms_key.ecr_kms.key_id + } + } + + resource "aws_ecr_repository_policy" "foopolicy" { + repository = aws_ecr_repository.foo.name + + policy = < 0 { + var volumes []ecs.Volume + for _, volumeBlock := range volumeBlocks { + volumes = append(volumes, ecs.Volume{ + Metadata: volumeBlock.GetMetadata(), + EFSVolumeConfiguration: adaptEFSVolumeConfiguration(volumeBlock), + }) + } + return volumes + } + + return []ecs.Volume{} +} + +func adaptEFSVolumeConfiguration(volumeBlock *terraform.Block) ecs.EFSVolumeConfiguration { + EFSVolumeConfiguration := ecs.EFSVolumeConfiguration{ + Metadata: volumeBlock.GetMetadata(), + TransitEncryptionEnabled: types.BoolDefault(true, volumeBlock.GetMetadata()), + } + + if EFSConfigBlock := volumeBlock.GetBlock("efs_volume_configuration"); EFSConfigBlock.IsNotNil() { + EFSVolumeConfiguration.Metadata = EFSConfigBlock.GetMetadata() + transitEncryptionAttr := EFSConfigBlock.GetAttribute("transit_encryption") + EFSVolumeConfiguration.TransitEncryptionEnabled = types.Bool(transitEncryptionAttr.Equals("ENABLED"), EFSConfigBlock.GetMetadata()) + if transitEncryptionAttr.IsNotNil() { + EFSVolumeConfiguration.TransitEncryptionEnabled = types.Bool(transitEncryptionAttr.Equals("ENABLED"), transitEncryptionAttr.GetMetadata()) + } + } + + return EFSVolumeConfiguration +} diff --git a/internal/adapters/terraform/aws/ecs/adapt_test.go b/internal/adapters/terraform/aws/ecs/adapt_test.go new file mode 100644 index 000000000000..4db70a7a9319 --- /dev/null +++ b/internal/adapters/terraform/aws/ecs/adapt_test.go @@ -0,0 +1,246 @@ +package ecs + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/ecs" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptClusterSettings(t *testing.T) { + tests := []struct { + name string + terraform string + expected ecs.ClusterSettings + }{ + { + name: "container insights enabled", + terraform: ` + resource "aws_ecs_cluster" "example" { + name = "services-cluster" + + setting { + name = "containerInsights" + value = "enabled" + } + } +`, + expected: ecs.ClusterSettings{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ContainerInsightsEnabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "invalid name", + terraform: ` + resource "aws_ecs_cluster" "example" { + name = "services-cluster" + + setting { + name = "invalidName" + value = "enabled" + } + } +`, + expected: ecs.ClusterSettings{ + Metadata: 
defsecTypes.NewTestMisconfigMetadata(), + ContainerInsightsEnabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_ecs_cluster" "example" { + } +`, + expected: ecs.ClusterSettings{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ContainerInsightsEnabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptClusterSettings(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptTaskDefinitionResource(t *testing.T) { + tests := []struct { + name string + terraform string + expected ecs.TaskDefinition + }{ + { + name: "configured", + terraform: ` + resource "aws_ecs_task_definition" "example" { + family = "service" + container_definitions = < 0 { + orphanage := elb.LoadBalancer{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Type: defsecTypes.StringDefault(elb.TypeApplication, defsecTypes.NewUnmanagedMisconfigMetadata()), + DropInvalidHeaderFields: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Internal: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Listeners: nil, + } + for _, listenerResource := range orphanResources { + orphanage.Listeners = append(orphanage.Listeners, adaptListener(listenerResource, "application")) + } + loadBalancers = append(loadBalancers, orphanage) + } + + return loadBalancers +} + +func (a *adapter) adaptLoadBalancer(resource *terraform.Block, module terraform.Modules) elb.LoadBalancer { + var listeners []elb.Listener + + typeAttr := resource.GetAttribute("load_balancer_type") + typeVal := typeAttr.AsStringValueOrDefault("application", resource) + + dropInvalidHeadersAttr := resource.GetAttribute("drop_invalid_header_fields") + dropInvalidHeadersVal := dropInvalidHeadersAttr.AsBoolValueOrDefault(false, resource) + + internalAttr := resource.GetAttribute("internal") + internalVal := internalAttr.AsBoolValueOrDefault(false, resource) + + listenerBlocks := module.GetReferencingResources(resource, "aws_lb_listener", "load_balancer_arn") + listenerBlocks = append(listenerBlocks, module.GetReferencingResources(resource, "aws_alb_listener", "load_balancer_arn")...) 
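+	// Both aws_lb_listener and aws_alb_listener are gathered here: the aws_alb*
+	// names are aliases for the aws_lb* resources in the AWS provider, so a
+	// listener declared under either name belongs to this load balancer.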
+ + for _, listenerBlock := range listenerBlocks { + a.listenerIDs.Resolve(listenerBlock.ID()) + listeners = append(listeners, adaptListener(listenerBlock, typeVal.Value())) + } + return elb.LoadBalancer{ + Metadata: resource.GetMetadata(), + Type: typeVal, + DropInvalidHeaderFields: dropInvalidHeadersVal, + Internal: internalVal, + Listeners: listeners, + } +} + +func (a *adapter) adaptClassicLoadBalancer(resource *terraform.Block, module terraform.Modules) elb.LoadBalancer { + internalAttr := resource.GetAttribute("internal") + internalVal := internalAttr.AsBoolValueOrDefault(false, resource) + + return elb.LoadBalancer{ + Metadata: resource.GetMetadata(), + Type: defsecTypes.String("classic", resource.GetMetadata()), + DropInvalidHeaderFields: defsecTypes.BoolDefault(false, resource.GetMetadata()), + Internal: internalVal, + Listeners: nil, + } +} + +func adaptListener(listenerBlock *terraform.Block, typeVal string) elb.Listener { + listener := elb.Listener{ + Metadata: listenerBlock.GetMetadata(), + Protocol: defsecTypes.StringDefault("", listenerBlock.GetMetadata()), + TLSPolicy: defsecTypes.StringDefault("", listenerBlock.GetMetadata()), + DefaultActions: nil, + } + + protocolAttr := listenerBlock.GetAttribute("protocol") + if typeVal == "application" { + listener.Protocol = protocolAttr.AsStringValueOrDefault("HTTP", listenerBlock) + } + + sslPolicyAttr := listenerBlock.GetAttribute("ssl_policy") + listener.TLSPolicy = sslPolicyAttr.AsStringValueOrDefault("", listenerBlock) + + for _, defaultActionBlock := range listenerBlock.GetBlocks("default_action") { + action := elb.Action{ + Metadata: defaultActionBlock.GetMetadata(), + Type: defaultActionBlock.GetAttribute("type").AsStringValueOrDefault("", defaultActionBlock), + } + listener.DefaultActions = append(listener.DefaultActions, action) + } + + return listener +} diff --git a/internal/adapters/terraform/aws/elb/adapt_test.go b/internal/adapters/terraform/aws/elb/adapt_test.go new file mode 100644 index 000000000000..2e3c80554439 --- /dev/null +++ b/internal/adapters/terraform/aws/elb/adapt_test.go @@ -0,0 +1,161 @@ +package elb + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/elb" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected elb.ELB + }{ + { + name: "configured", + terraform: ` + resource "aws_alb" "example" { + name = "good_alb" + internal = true + load_balancer_type = "application" + + access_logs { + bucket = aws_s3_bucket.lb_logs.bucket + prefix = "test-lb" + enabled = true + } + + drop_invalid_header_fields = true + } + + resource "aws_alb_listener" "example" { + load_balancer_arn = aws_alb.example.arn + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-TLS-1-1-2017-01" + + default_action { + type = "forward" + } + } +`, + expected: elb.ELB{ + LoadBalancers: []elb.LoadBalancer{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String("application", defsecTypes.NewTestMisconfigMetadata()), + DropInvalidHeaderFields: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Internal: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Listeners: []elb.Listener{ + { + Metadata: 
defsecTypes.NewTestMisconfigMetadata(), + Protocol: defsecTypes.String("HTTPS", defsecTypes.NewTestMisconfigMetadata()), + TLSPolicy: defsecTypes.String("ELBSecurityPolicy-TLS-1-1-2017-01", defsecTypes.NewTestMisconfigMetadata()), + DefaultActions: []elb.Action{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String("forward", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_alb" "example" { + } +`, + expected: elb.ELB{ + LoadBalancers: []elb.LoadBalancer{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String("application", defsecTypes.NewTestMisconfigMetadata()), + DropInvalidHeaderFields: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Internal: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Listeners: nil, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_alb" "example" { + name = "good_alb" + internal = true + load_balancer_type = "application" + drop_invalid_header_fields = true + + access_logs { + bucket = aws_s3_bucket.lb_logs.bucket + prefix = "test-lb" + enabled = true + } + } + + resource "aws_alb_listener" "example" { + load_balancer_arn = aws_alb.example.arn + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-TLS-1-1-2017-01" + + default_action { + type = "forward" + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.LoadBalancers, 1) + loadBalancer := adapted.LoadBalancers[0] + + assert.Equal(t, 2, loadBalancer.Metadata.Range().GetStartLine()) + assert.Equal(t, 13, loadBalancer.Metadata.Range().GetEndLine()) + + assert.Equal(t, 4, loadBalancer.Internal.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, loadBalancer.Internal.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, loadBalancer.Type.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, loadBalancer.Type.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, loadBalancer.DropInvalidHeaderFields.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, loadBalancer.DropInvalidHeaderFields.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 15, loadBalancer.Listeners[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 23, loadBalancer.Listeners[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 17, loadBalancer.Listeners[0].Protocol.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, loadBalancer.Listeners[0].Protocol.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 18, loadBalancer.Listeners[0].TLSPolicy.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 18, loadBalancer.Listeners[0].TLSPolicy.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 20, loadBalancer.Listeners[0].DefaultActions[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 22, loadBalancer.Listeners[0].DefaultActions[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 21, loadBalancer.Listeners[0].DefaultActions[0].Type.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 21, loadBalancer.Listeners[0].DefaultActions[0].Type.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/aws/emr/adapt.go 
b/internal/adapters/terraform/aws/emr/adapt.go new file mode 100644 index 000000000000..a038385864a9 --- /dev/null +++ b/internal/adapters/terraform/aws/emr/adapt.go @@ -0,0 +1,49 @@ +package emr + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/emr" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) emr.EMR { + return emr.EMR{ + Clusters: adaptClusters(modules), + SecurityConfiguration: adaptSecurityConfigurations(modules), + } +} +func adaptClusters(modules terraform.Modules) []emr.Cluster { + var clusters []emr.Cluster + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_emr_cluster") { + clusters = append(clusters, adaptCluster(resource)) + } + } + return clusters +} + +func adaptCluster(resource *terraform.Block) emr.Cluster { + + return emr.Cluster{ + Metadata: resource.GetMetadata(), + } +} + +func adaptSecurityConfigurations(modules terraform.Modules) []emr.SecurityConfiguration { + var securityConfiguration []emr.SecurityConfiguration + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_emr_security_configuration") { + securityConfiguration = append(securityConfiguration, adaptSecurityConfiguration(resource)) + } + } + return securityConfiguration +} + +func adaptSecurityConfiguration(resource *terraform.Block) emr.SecurityConfiguration { + + return emr.SecurityConfiguration{ + Metadata: resource.GetMetadata(), + Name: resource.GetAttribute("name").AsStringValueOrDefault("", resource), + Configuration: resource.GetAttribute("configuration").AsStringValueOrDefault("", resource), + } + +} diff --git a/internal/adapters/terraform/aws/emr/adapt_test.go b/internal/adapters/terraform/aws/emr/adapt_test.go new file mode 100644 index 000000000000..41376143d9e3 --- /dev/null +++ b/internal/adapters/terraform/aws/emr/adapt_test.go @@ -0,0 +1,116 @@ +package emr + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/emr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptSecurityConfiguration(t *testing.T) { + tests := []struct { + name string + terraform string + expected emr.SecurityConfiguration + }{ + { + name: "test", + terraform: ` + resource "aws_emr_security_configuration" "foo" { + name = "emrsc_test" + configuration = < 0 { + return &iam.Document{ + Parsed: documents[0].Document, + Metadata: documents[0].Source.GetMetadata(), + IsOffset: true, + }, nil + } + + if attr.IsString() { + + dataBlock, err := modules.GetBlockById(attr.Value().AsString()) + if err != nil { + parsed, err := iamgo.Parse([]byte(unescapeVars(attr.Value().AsString()))) + if err != nil { + return nil, err + } + return &iam.Document{ + Parsed: *parsed, + Metadata: attr.GetMetadata(), + IsOffset: false, + HasRefs: len(attr.AllReferences()) > 0, + }, nil + } else if dataBlock.Type() == "data" && dataBlock.TypeLabel() == "aws_iam_policy_document" { + if doc, err := ConvertTerraformDocument(modules, dataBlock); err == nil { + return &iam.Document{ + Metadata: dataBlock.GetMetadata(), + Parsed: doc.Document, + IsOffset: true, + HasRefs: false, + }, nil + } + } + } + + return &iam.Document{ + Metadata: owner.GetMetadata(), + }, nil +} + +func unescapeVars(input string) string { 
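+	// A note on the replacement below (inferred from the call sites, not a
+	// documented contract): unresolved "${...}" interpolations arrive here
+	// escaped as "&{...}", so swapping the marker back lets iamgo parse policy
+	// JSON that still embeds Terraform references, e.g.
+	//
+	//   {"Resource": "&{aws_s3_bucket.example.arn}"}
+	//
+	// becomes
+	//
+	//   {"Resource": "${aws_s3_bucket.example.arn}"}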
+ return strings.ReplaceAll(input, "&{", "${") +} + +// ConvertTerraformDocument converts a terraform data policy into an iamgo policy https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document +func ConvertTerraformDocument(modules terraform.Modules, block *terraform.Block) (*wrappedDocument, error) { + + builder := iamgo.NewPolicyBuilder() + + if sourceAttr := block.GetAttribute("source_json"); sourceAttr.IsString() { + doc, err := iamgo.ParseString(sourceAttr.Value().AsString()) + if err != nil { + return nil, err + } + builder = iamgo.PolicyBuilderFromDocument(*doc) + } + + if sourceDocumentsAttr := block.GetAttribute("source_policy_documents"); sourceDocumentsAttr.IsIterable() { + docs := findAllPolicies(modules, block, sourceDocumentsAttr) + for _, doc := range docs { + statements, _ := doc.Document.Statements() + for _, statement := range statements { + builder.WithStatement(statement) + } + } + } + + if idAttr := block.GetAttribute("policy_id"); idAttr.IsString() { + r := idAttr.GetMetadata().Range() + builder.WithId(idAttr.Value().AsString(), r.GetStartLine(), r.GetEndLine()) + } + + if versionAttr := block.GetAttribute("version"); versionAttr.IsString() { + r := versionAttr.GetMetadata().Range() + builder.WithVersion(versionAttr.Value().AsString(), r.GetStartLine(), r.GetEndLine()) + } + + for _, statementBlock := range block.GetBlocks("statement") { + statement := parseStatement(statementBlock) + builder.WithStatement(statement, statement.Range().StartLine, statement.Range().EndLine) + } + + if overrideDocumentsAttr := block.GetAttribute("override_policy_documents"); overrideDocumentsAttr.IsIterable() { + docs := findAllPolicies(modules, block, overrideDocumentsAttr) + for _, doc := range docs { + statements, _ := doc.Document.Statements() + for _, statement := range statements { + builder.WithStatement(statement, statement.Range().StartLine, statement.Range().EndLine) + } + } + } + + return &wrappedDocument{Document: builder.Build(), Source: block}, nil +} + +// nolint +func parseStatement(statementBlock *terraform.Block) iamgo.Statement { + + metadata := statementBlock.GetMetadata() + + builder := iamgo.NewStatementBuilder() + builder.WithRange(metadata.Range().GetStartLine(), metadata.Range().GetEndLine()) + + if sidAttr := statementBlock.GetAttribute("sid"); sidAttr.IsString() { + r := sidAttr.GetMetadata().Range() + builder.WithSid(sidAttr.Value().AsString(), r.GetStartLine(), r.GetEndLine()) + } + if actionsAttr := statementBlock.GetAttribute("actions"); actionsAttr.IsIterable() { + r := actionsAttr.GetMetadata().Range() + values := actionsAttr.AsStringValues().AsStrings() + builder.WithActions(values, r.GetStartLine(), r.GetEndLine()) + } + if notActionsAttr := statementBlock.GetAttribute("not_actions"); notActionsAttr.IsIterable() { + r := notActionsAttr.GetMetadata().Range() + values := notActionsAttr.AsStringValues().AsStrings() + builder.WithNotActions(values, r.GetStartLine(), r.GetEndLine()) + } + if resourcesAttr := statementBlock.GetAttribute("resources"); resourcesAttr.IsIterable() { + r := resourcesAttr.GetMetadata().Range() + values := resourcesAttr.AsStringValues().AsStrings() + builder.WithResources(values, r.GetStartLine(), r.GetEndLine()) + } + if notResourcesAttr := statementBlock.GetAttribute("not_resources"); notResourcesAttr.IsIterable() { + r := notResourcesAttr.GetMetadata().Range() + values := notResourcesAttr.AsStringValues().AsStrings() + builder.WithNotResources(values, r.GetStartLine(), r.GetEndLine()) + } 
+ if effectAttr := statementBlock.GetAttribute("effect"); effectAttr.IsString() { + r := effectAttr.GetMetadata().Range() + builder.WithEffect(effectAttr.Value().AsString(), r.GetStartLine(), r.GetEndLine()) + } else { + builder.WithEffect(iamgo.EffectAllow) + } + + for _, principalBlock := range statementBlock.GetBlocks("principals") { + typeAttr := principalBlock.GetAttribute("type") + if !typeAttr.IsString() { + continue + } + identifiersAttr := principalBlock.GetAttribute("identifiers") + if !identifiersAttr.IsIterable() { + continue + } + r := principalBlock.GetMetadata().Range() + switch typeAttr.Value().AsString() { + case "*": + builder.WithAllPrincipals(true, r.GetStartLine(), r.GetEndLine()) + case "AWS": + values := identifiersAttr.AsStringValues().AsStrings() + builder.WithAWSPrincipals(values, r.GetStartLine(), r.GetEndLine()) + case "Federated": + values := identifiersAttr.AsStringValues().AsStrings() + builder.WithFederatedPrincipals(values, r.GetStartLine(), r.GetEndLine()) + case "Service": + values := identifiersAttr.AsStringValues().AsStrings() + builder.WithServicePrincipals(values, r.GetStartLine(), r.GetEndLine()) + case "CanonicalUser": + values := identifiersAttr.AsStringValues().AsStrings() + builder.WithCanonicalUsersPrincipals(values, r.GetStartLine(), r.GetEndLine()) + } + } + + for _, conditionBlock := range statementBlock.GetBlocks("condition") { + testAttr := conditionBlock.GetAttribute("test") + if !testAttr.IsString() { + continue + } + variableAttr := conditionBlock.GetAttribute("variable") + if !variableAttr.IsString() { + continue + } + valuesAttr := conditionBlock.GetAttribute("values") + values := valuesAttr.AsStringValues().AsStrings() + if valuesAttr.IsNil() || len(values) == 0 { + continue + } + + r := conditionBlock.GetMetadata().Range() + + builder.WithCondition( + testAttr.Value().AsString(), + variableAttr.Value().AsString(), + values, + r.GetStartLine(), + r.GetEndLine(), + ) + + } + return builder.Build() +} + +func findAllPolicies(modules terraform.Modules, parentBlock *terraform.Block, attr *terraform.Attribute) []wrappedDocument { + var documents []wrappedDocument + for _, ref := range attr.AllReferences() { + for _, b := range modules.GetBlocks() { + if b.Type() != "data" || b.TypeLabel() != "aws_iam_policy_document" { + continue + } + if ref.RefersTo(b.Reference()) { + document, err := ConvertTerraformDocument(modules, b) + if err != nil { + continue + } + documents = append(documents, *document) + continue + } + kref := *ref + kref.SetKey(parentBlock.Reference().RawKey()) + if kref.RefersTo(b.Reference()) { + document, err := ConvertTerraformDocument(modules, b) + if err != nil { + continue + } + documents = append(documents, *document) + } + } + } + return documents +} diff --git a/internal/adapters/terraform/aws/iam/groups.go b/internal/adapters/terraform/aws/iam/groups.go new file mode 100644 index 000000000000..5004824673e1 --- /dev/null +++ b/internal/adapters/terraform/aws/iam/groups.go @@ -0,0 +1,32 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptGroups(modules terraform.Modules) []iam.Group { + var groups []iam.Group + + for _, groupBlock := range modules.GetResourcesByType("aws_iam_group") { + group := iam.Group{ + Metadata: groupBlock.GetMetadata(), + Name: groupBlock.GetAttribute("name").AsStringValueOrDefault("", groupBlock), + } + + if policy, ok := applyForDependentResource( + modules, groupBlock.ID(), "name", 
"aws_iam_group_policy", "group", findPolicy(modules), + ); ok && policy != nil { + group.Policies = append(group.Policies, *policy) + } + + if policy, ok := applyForDependentResource( + modules, groupBlock.ID(), "name", "aws_iam_group_policy_attachment", "group", findAttachmentPolicy(modules), + ); ok && policy != nil { + group.Policies = append(group.Policies, *policy) + } + + groups = append(groups, group) + } + return groups +} diff --git a/internal/adapters/terraform/aws/iam/groups_test.go b/internal/adapters/terraform/aws/iam/groups_test.go new file mode 100644 index 000000000000..e4d9af39277b --- /dev/null +++ b/internal/adapters/terraform/aws/iam/groups_test.go @@ -0,0 +1,115 @@ +package iam + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptGroups(t *testing.T) { + tests := []struct { + name string + terraform string + expected []iam.Group + }{ + { + name: "policy", + terraform: ` + resource "aws_iam_group_policy" "my_developer_policy" { + name = "my_developer_policy" + group = aws_iam_group.my_developers.name + + policy = < 0 { + orphanage := lambda.Function{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Tracing: lambda.Tracing{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Mode: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + Permissions: nil, + } + for _, permission := range orphanResources { + orphanage.Permissions = append(orphanage.Permissions, a.adaptPermission(permission)) + } + functions = append(functions, orphanage) + } + + return functions +} + +func (a *adapter) adaptFunction(function *terraform.Block, modules terraform.Modules, orphans terraform.ResourceIDResolutions) lambda.Function { + var permissions []lambda.Permission + for _, module := range modules { + for _, p := range module.GetResourcesByType("aws_lambda_permission") { + if referencedBlock, err := module.GetReferencedBlock(p.GetAttribute("function_name"), p); err == nil && referencedBlock == function { + permissions = append(permissions, a.adaptPermission(p)) + delete(orphans, p.ID()) + } + } + } + + return lambda.Function{ + Metadata: function.GetMetadata(), + Tracing: a.adaptTracing(function), + Permissions: permissions, + } +} + +func (a *adapter) adaptTracing(function *terraform.Block) lambda.Tracing { + if tracingConfig := function.GetBlock("tracing_config"); tracingConfig.IsNotNil() { + return lambda.Tracing{ + Metadata: tracingConfig.GetMetadata(), + Mode: tracingConfig.GetAttribute("mode").AsStringValueOrDefault("", tracingConfig), + } + } + + return lambda.Tracing{ + Metadata: function.GetMetadata(), + Mode: defsecTypes.StringDefault("", function.GetMetadata()), + } +} + +func (a *adapter) adaptPermission(permission *terraform.Block) lambda.Permission { + sourceARNAttr := permission.GetAttribute("source_arn") + sourceARN := sourceARNAttr.AsStringValueOrDefault("", permission) + + if len(sourceARNAttr.AllReferences()) > 0 { + sourceARN = defsecTypes.String(sourceARNAttr.AllReferences()[0].NameLabel(), sourceARNAttr.GetMetadata()) + } + + return lambda.Permission{ + Metadata: permission.GetMetadata(), + Principal: permission.GetAttribute("principal").AsStringValueOrDefault("", permission), + SourceARN: sourceARN, + } +} diff --git 
a/internal/adapters/terraform/aws/lambda/adapt_test.go b/internal/adapters/terraform/aws/lambda/adapt_test.go new file mode 100644 index 000000000000..7dd0afa43d46 --- /dev/null +++ b/internal/adapters/terraform/aws/lambda/adapt_test.go @@ -0,0 +1,155 @@ +package lambda + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/lambda" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected lambda.Lambda + }{ + { + name: "reference arn", + terraform: ` + resource "aws_lambda_function" "example" { + filename = "lambda_function_payload.zip" + function_name = "lambda_function_name" + role = aws_iam_role.iam_for_lambda.arn + runtime = "nodejs12.x" + + tracing_config { + mode = "Passthrough" + } + } + + resource "aws_lambda_permission" "example" { + statement_id = "AllowExecutionFromSNS" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.example.function_name + principal = "sns.amazonaws.com" + source_arn = aws_sns_topic.default.arn + } +`, + expected: lambda.Lambda{ + Functions: []lambda.Function{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Tracing: lambda.Tracing{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Mode: defsecTypes.String("Passthrough", defsecTypes.NewTestMisconfigMetadata()), + }, + Permissions: []lambda.Permission{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Principal: defsecTypes.String("sns.amazonaws.com", defsecTypes.NewTestMisconfigMetadata()), + SourceARN: defsecTypes.String("default", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + { + name: "defaults (with an orphan)", + terraform: ` + resource "aws_lambda_function" "example" { + tracing_config { + } + } + + resource "aws_lambda_permission" "example" { + } +`, + expected: lambda.Lambda{ + Functions: []lambda.Function{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Tracing: lambda.Tracing{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Mode: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Tracing: lambda.Tracing{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Mode: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + Permissions: []lambda.Permission{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Principal: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + SourceARN: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_lambda_function" "example" { + filename = "lambda_function_payload.zip" + function_name = "lambda_function_name" + role = aws_iam_role.iam_for_lambda.arn + runtime = "nodejs12.x" + + tracing_config { + mode = "Passthrough" + } + } + + resource "aws_lambda_permission" "example" { + statement_id = "AllowExecutionFromSNS" + action = "lambda:InvokeFunction" + 
function_name = aws_lambda_function.example.function_name + principal = "sns.amazonaws.com" + source_arn = "string arn" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Functions, 1) + function := adapted.Functions[0] + + assert.Equal(t, 2, function.Metadata.Range().GetStartLine()) + assert.Equal(t, 11, function.Metadata.Range().GetEndLine()) + + assert.Equal(t, 8, function.Tracing.Metadata.Range().GetStartLine()) + assert.Equal(t, 10, function.Tracing.Metadata.Range().GetEndLine()) + + assert.Equal(t, 9, function.Tracing.Mode.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 9, function.Tracing.Mode.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, function.Permissions[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 19, function.Permissions[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 17, function.Permissions[0].Principal.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, function.Permissions[0].Principal.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 18, function.Permissions[0].SourceARN.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 18, function.Permissions[0].SourceARN.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/mq/adapt.go b/internal/adapters/terraform/aws/mq/adapt.go new file mode 100644 index 000000000000..214fee302f58 --- /dev/null +++ b/internal/adapters/terraform/aws/mq/adapt.go @@ -0,0 +1,48 @@ +package mq + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/mq" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) mq.MQ { + return mq.MQ{ + Brokers: adaptBrokers(modules), + } +} + +func adaptBrokers(modules terraform.Modules) []mq.Broker { + var brokers []mq.Broker + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_mq_broker") { + brokers = append(brokers, adaptBroker(resource)) + } + } + return brokers +} + +func adaptBroker(resource *terraform.Block) mq.Broker { + + broker := mq.Broker{ + Metadata: resource.GetMetadata(), + PublicAccess: types.BoolDefault(false, resource.GetMetadata()), + Logging: mq.Logging{ + Metadata: resource.GetMetadata(), + General: types.BoolDefault(false, resource.GetMetadata()), + Audit: types.BoolDefault(false, resource.GetMetadata()), + }, + } + + publicAccessAttr := resource.GetAttribute("publicly_accessible") + broker.PublicAccess = publicAccessAttr.AsBoolValueOrDefault(false, resource) + if logsBlock := resource.GetBlock("logs"); logsBlock.IsNotNil() { + broker.Logging.Metadata = logsBlock.GetMetadata() + auditAttr := logsBlock.GetAttribute("audit") + broker.Logging.Audit = auditAttr.AsBoolValueOrDefault(false, logsBlock) + generalAttr := logsBlock.GetAttribute("general") + broker.Logging.General = generalAttr.AsBoolValueOrDefault(false, logsBlock) + } + + return broker +} diff --git a/internal/adapters/terraform/aws/mq/adapt_test.go b/internal/adapters/terraform/aws/mq/adapt_test.go new file mode 100644 index 000000000000..1aa7e872237f --- /dev/null +++ b/internal/adapters/terraform/aws/mq/adapt_test.go @@ -0,0 +1,119 @@ +package mq + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/mq" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + 
"github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptBroker(t *testing.T) { + tests := []struct { + name string + terraform string + expected mq.Broker + }{ + { + name: "audit logs", + terraform: ` + resource "aws_mq_broker" "example" { + logs { + audit = true + } + + publicly_accessible = false + } +`, + expected: mq.Broker{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Logging: mq.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + General: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Audit: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "general logs", + terraform: ` + resource "aws_mq_broker" "example" { + logs { + general = true + } + + publicly_accessible = true + } +`, + expected: mq.Broker{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + PublicAccess: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Logging: mq.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + General: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Audit: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_mq_broker" "example" { + } +`, + expected: mq.Broker{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Logging: mq.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + General: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Audit: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptBroker(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_mq_broker" "example" { + logs { + general = true + } + + publicly_accessible = true + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Brokers, 1) + broker := adapted.Brokers[0] + + assert.Equal(t, 2, broker.Metadata.Range().GetStartLine()) + assert.Equal(t, 8, broker.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, broker.Logging.Metadata.Range().GetStartLine()) + assert.Equal(t, 5, broker.Logging.Metadata.Range().GetEndLine()) + + assert.Equal(t, 4, broker.Logging.General.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, broker.Logging.General.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 7, broker.PublicAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, broker.PublicAccess.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/msk/adapt.go b/internal/adapters/terraform/aws/msk/adapt.go new file mode 100644 index 000000000000..71ae2707db57 --- /dev/null +++ b/internal/adapters/terraform/aws/msk/adapt.go @@ -0,0 +1,97 @@ +package msk + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/msk" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) msk.MSK { + return msk.MSK{ + Clusters: 
adaptClusters(modules), + } +} + +func adaptClusters(modules terraform.Modules) []msk.Cluster { + var clusters []msk.Cluster + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_msk_cluster") { + clusters = append(clusters, adaptCluster(resource)) + } + } + return clusters +} + +func adaptCluster(resource *terraform.Block) msk.Cluster { + cluster := msk.Cluster{ + Metadata: resource.GetMetadata(), + EncryptionInTransit: msk.EncryptionInTransit{ + Metadata: resource.GetMetadata(), + ClientBroker: defsecTypes.StringDefault("TLS_PLAINTEXT", resource.GetMetadata()), + }, + EncryptionAtRest: msk.EncryptionAtRest{ + Metadata: resource.GetMetadata(), + KMSKeyARN: defsecTypes.StringDefault("", resource.GetMetadata()), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + Logging: msk.Logging{ + Metadata: resource.GetMetadata(), + Broker: msk.BrokerLogging{ + Metadata: resource.GetMetadata(), + S3: msk.S3Logging{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + Cloudwatch: msk.CloudwatchLogging{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + Firehose: msk.FirehoseLogging{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + }, + }, + } + + if encryptBlock := resource.GetBlock("encryption_info"); encryptBlock.IsNotNil() { + if encryptionInTransitBlock := encryptBlock.GetBlock("encryption_in_transit"); encryptionInTransitBlock.IsNotNil() { + cluster.EncryptionInTransit.Metadata = encryptionInTransitBlock.GetMetadata() + if clientBrokerAttr := encryptionInTransitBlock.GetAttribute("client_broker"); clientBrokerAttr.IsNotNil() { + cluster.EncryptionInTransit.ClientBroker = clientBrokerAttr.AsStringValueOrDefault("TLS", encryptionInTransitBlock) + } + } + + if encryptionAtRestAttr := encryptBlock.GetAttribute("encryption_at_rest_kms_key_arn"); encryptionAtRestAttr.IsNotNil() { + cluster.EncryptionAtRest.Metadata = encryptionAtRestAttr.GetMetadata() + cluster.EncryptionAtRest.KMSKeyARN = encryptionAtRestAttr.AsStringValueOrDefault("", encryptBlock) + cluster.EncryptionAtRest.Enabled = defsecTypes.Bool(true, encryptionAtRestAttr.GetMetadata()) + } + } + + if logBlock := resource.GetBlock("logging_info"); logBlock.IsNotNil() { + cluster.Logging.Metadata = logBlock.GetMetadata() + if brokerLogsBlock := logBlock.GetBlock("broker_logs"); brokerLogsBlock.IsNotNil() { + cluster.Logging.Broker.Metadata = brokerLogsBlock.GetMetadata() + if brokerLogsBlock.HasChild("s3") { + if s3Block := brokerLogsBlock.GetBlock("s3"); s3Block.IsNotNil() { + s3enabledAttr := s3Block.GetAttribute("enabled") + cluster.Logging.Broker.S3.Metadata = s3Block.GetMetadata() + cluster.Logging.Broker.S3.Enabled = s3enabledAttr.AsBoolValueOrDefault(false, s3Block) + } + } + if cloudwatchBlock := brokerLogsBlock.GetBlock("cloudwatch_logs"); cloudwatchBlock.IsNotNil() { + cwEnabledAttr := cloudwatchBlock.GetAttribute("enabled") + cluster.Logging.Broker.Cloudwatch.Metadata = cloudwatchBlock.GetMetadata() + cluster.Logging.Broker.Cloudwatch.Enabled = cwEnabledAttr.AsBoolValueOrDefault(false, cloudwatchBlock) + } + if firehoseBlock := brokerLogsBlock.GetBlock("firehose"); firehoseBlock.IsNotNil() { + firehoseEnabledAttr := firehoseBlock.GetAttribute("enabled") + cluster.Logging.Broker.Firehose.Metadata = firehoseBlock.GetMetadata() + cluster.Logging.Broker.Firehose.Enabled = 
firehoseEnabledAttr.AsBoolValueOrDefault(false, firehoseBlock) + } + } + } + + return cluster +} diff --git a/internal/adapters/terraform/aws/msk/adapt_test.go b/internal/adapters/terraform/aws/msk/adapt_test.go new file mode 100644 index 000000000000..65ebaa2983f4 --- /dev/null +++ b/internal/adapters/terraform/aws/msk/adapt_test.go @@ -0,0 +1,200 @@ +package msk + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/msk" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptCluster(t *testing.T) { + tests := []struct { + name string + terraform string + expected msk.Cluster + }{ + { + name: "configured", + terraform: ` + resource "aws_msk_cluster" "example" { + cluster_name = "example" + + encryption_info { + encryption_in_transit { + client_broker = "TLS" + in_cluster = true + } + encryption_at_rest_kms_key_arn = "foo-bar-key" + } + + logging_info { + broker_logs { + cloudwatch_logs { + enabled = true + log_group = aws_cloudwatch_log_group.test.name + } + firehose { + enabled = true + delivery_stream = aws_kinesis_firehose_delivery_stream.test_stream.name + } + s3 { + enabled = true + bucket = aws_s3_bucket.bucket.id + prefix = "logs/msk-" + } + } + } + } +`, + expected: msk.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptionInTransit: msk.EncryptionInTransit{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ClientBroker: defsecTypes.String("TLS", defsecTypes.NewTestMisconfigMetadata()), + }, + EncryptionAtRest: msk.EncryptionAtRest{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + KMSKeyARN: defsecTypes.String("foo-bar-key", defsecTypes.NewTestMisconfigMetadata()), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Logging: msk.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Broker: msk.BrokerLogging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + S3: msk.S3Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Cloudwatch: msk.CloudwatchLogging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Firehose: msk.FirehoseLogging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_msk_cluster" "example" { + } +`, + expected: msk.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptionInTransit: msk.EncryptionInTransit{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ClientBroker: defsecTypes.String("TLS_PLAINTEXT", defsecTypes.NewTestMisconfigMetadata()), + }, + Logging: msk.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Broker: msk.BrokerLogging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + S3: msk.S3Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + Cloudwatch: msk.CloudwatchLogging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + Firehose: msk.FirehoseLogging{ + 
Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptCluster(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_msk_cluster" "example" { + cluster_name = "example" + + encryption_info { + encryption_in_transit { + client_broker = "TLS" + in_cluster = true + } + encryption_at_rest_kms_key_arn = "foo-bar-key" + } + + logging_info { + broker_logs { + cloudwatch_logs { + enabled = true + log_group = aws_cloudwatch_log_group.test.name + } + firehose { + enabled = true + delivery_stream = aws_kinesis_firehose_delivery_stream.test_stream.name + } + s3 { + enabled = true + bucket = aws_s3_bucket.bucket.id + prefix = "logs/msk-" + } + } + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Clusters, 1) + cluster := adapted.Clusters[0] + + assert.Equal(t, 2, cluster.Metadata.Range().GetStartLine()) + assert.Equal(t, 30, cluster.Metadata.Range().GetEndLine()) + + assert.Equal(t, 6, cluster.EncryptionInTransit.Metadata.Range().GetStartLine()) + assert.Equal(t, 9, cluster.EncryptionInTransit.Metadata.Range().GetEndLine()) + + assert.Equal(t, 10, cluster.EncryptionAtRest.Metadata.Range().GetStartLine()) + assert.Equal(t, 10, cluster.EncryptionAtRest.Metadata.Range().GetEndLine()) + + assert.Equal(t, 13, cluster.Logging.Metadata.Range().GetStartLine()) + assert.Equal(t, 29, cluster.Logging.Metadata.Range().GetEndLine()) + + assert.Equal(t, 14, cluster.Logging.Broker.Metadata.Range().GetStartLine()) + assert.Equal(t, 28, cluster.Logging.Broker.Metadata.Range().GetEndLine()) + + assert.Equal(t, 15, cluster.Logging.Broker.Cloudwatch.Metadata.Range().GetStartLine()) + assert.Equal(t, 18, cluster.Logging.Broker.Cloudwatch.Metadata.Range().GetEndLine()) + + assert.Equal(t, 16, cluster.Logging.Broker.Cloudwatch.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 16, cluster.Logging.Broker.Cloudwatch.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 19, cluster.Logging.Broker.Firehose.Metadata.Range().GetStartLine()) + assert.Equal(t, 22, cluster.Logging.Broker.Firehose.Metadata.Range().GetEndLine()) + + assert.Equal(t, 20, cluster.Logging.Broker.Firehose.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 20, cluster.Logging.Broker.Firehose.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 23, cluster.Logging.Broker.S3.Metadata.Range().GetStartLine()) + assert.Equal(t, 27, cluster.Logging.Broker.S3.Metadata.Range().GetEndLine()) + + assert.Equal(t, 24, cluster.Logging.Broker.S3.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 24, cluster.Logging.Broker.S3.Enabled.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/neptune/adapt.go b/internal/adapters/terraform/aws/neptune/adapt.go new file mode 100644 index 000000000000..1e26aebb93c6 --- /dev/null +++ b/internal/adapters/terraform/aws/neptune/adapt.go @@ -0,0 +1,50 @@ +package neptune + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/neptune" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) 
neptune.Neptune { + return neptune.Neptune{ + Clusters: adaptClusters(modules), + } +} + +func adaptClusters(modules terraform.Modules) []neptune.Cluster { + var clusters []neptune.Cluster + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_neptune_cluster") { + clusters = append(clusters, adaptCluster(resource)) + } + } + return clusters +} + +func adaptCluster(resource *terraform.Block) neptune.Cluster { + cluster := neptune.Cluster{ + Metadata: resource.GetMetadata(), + Logging: neptune.Logging{ + Metadata: resource.GetMetadata(), + Audit: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + StorageEncrypted: defsecTypes.BoolDefault(false, resource.GetMetadata()), + KMSKeyID: defsecTypes.StringDefault("", resource.GetMetadata()), + } + + if enableLogExportsAttr := resource.GetAttribute("enable_cloudwatch_logs_exports"); enableLogExportsAttr.IsNotNil() { + cluster.Logging.Metadata = enableLogExportsAttr.GetMetadata() + if enableLogExportsAttr.Contains("audit") { + cluster.Logging.Audit = defsecTypes.Bool(true, enableLogExportsAttr.GetMetadata()) + } + } + + storageEncryptedAttr := resource.GetAttribute("storage_encrypted") + cluster.StorageEncrypted = storageEncryptedAttr.AsBoolValueOrDefault(false, resource) + + KMSKeyAttr := resource.GetAttribute("kms_key_arn") + cluster.KMSKeyID = KMSKeyAttr.AsStringValueOrDefault("", resource) + + return cluster +} diff --git a/internal/adapters/terraform/aws/neptune/adapt_test.go b/internal/adapters/terraform/aws/neptune/adapt_test.go new file mode 100644 index 000000000000..6a710b9108e7 --- /dev/null +++ b/internal/adapters/terraform/aws/neptune/adapt_test.go @@ -0,0 +1,97 @@ +package neptune + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/neptune" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptCluster(t *testing.T) { + tests := []struct { + name string + terraform string + expected neptune.Cluster + }{ + { + name: "configured", + terraform: ` + resource "aws_neptune_cluster" "example" { + enable_cloudwatch_logs_exports = ["audit"] + storage_encrypted = true + kms_key_arn = "kms-key" + } +`, + expected: neptune.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Logging: neptune.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Audit: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + StorageEncrypted: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("kms-key", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_neptune_cluster" "example" { + } +`, + expected: neptune.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Logging: neptune.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Audit: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + StorageEncrypted: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptCluster(modules.GetBlocks()[0]) + 
testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_neptune_cluster" "example" { + enable_cloudwatch_logs_exports = ["audit"] + storage_encrypted = true + kms_key_arn = "kms-key" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Clusters, 1) + cluster := adapted.Clusters[0] + + assert.Equal(t, 2, cluster.Metadata.Range().GetStartLine()) + assert.Equal(t, 6, cluster.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, cluster.Logging.Metadata.Range().GetStartLine()) + assert.Equal(t, 3, cluster.Logging.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, cluster.Logging.Audit.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, cluster.Logging.Audit.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, cluster.StorageEncrypted.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, cluster.StorageEncrypted.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, cluster.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, cluster.KMSKeyID.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/provider/adapt.go b/internal/adapters/terraform/aws/provider/adapt.go new file mode 100644 index 000000000000..38f8b4859af5 --- /dev/null +++ b/internal/adapters/terraform/aws/provider/adapt.go @@ -0,0 +1,166 @@ +package provider + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +const ( + defaultMaxRetires = 25 + defaultSharedConfigFile = "~/.aws/config" + //#nosec G101 -- False positive + defaultSharedCredentialsFile = "~/.aws/credentials" +) + +func Adapt(modules terraform.Modules) []aws.TerraformProvider { + return adaptProviders(modules) +} + +func adaptProviders(modules terraform.Modules) []aws.TerraformProvider { + var providers []aws.TerraformProvider + for _, providerBlock := range modules.GetBlocks().OfType("provider") { + if providerBlock.Label() == "aws" { + providers = append(providers, adaptProvider(providerBlock)) + } + } + + return providers +} + +func adaptProvider(b *terraform.Block) aws.TerraformProvider { + return aws.TerraformProvider{ + Metadata: b.GetMetadata(), + Alias: getStringAttrValue("alias", b), + Version: getStringAttrValue("version", b), + AccessKey: getStringAttrValue("access_key", b), + AllowedAccountsIDs: b.GetAttribute("allowed_account_ids").AsStringValueSliceOrEmpty(), + AssumeRole: adaptAssumeRole(b), + AssumeRoleWithWebIdentity: adaptAssumeRoleWithWebIdentity(b), + CustomCABundle: getStringAttrValue("custom_ca_bundle", b), + DefaultTags: adaptDefaultTags(b), + EC2MetadataServiceEndpoint: getStringAttrValue("ec2_metadata_service_endpoint", b), + EC2MetadataServiceEndpointMode: getStringAttrValue("ec2_metadata_service_endpoint_mode", b), + Endpoints: adaptEndpoints(b), + ForbiddenAccountIDs: b.GetAttribute("forbidden_account_ids").AsStringValueSliceOrEmpty(), + HttpProxy: getStringAttrValue("http_proxy", b), + IgnoreTags: adaptIgnoreTags(b), + Insecure: b.GetAttribute("insecure").AsBoolValueOrDefault(false, b), + MaxRetries: b.GetAttribute("max_retries").AsIntValueOrDefault(defaultMaxRetires, b), + Profile: getStringAttrValue("profile", b), + Region: getStringAttrValue("region", b), + RetryMode: getStringAttrValue("retry_mode", b), + S3UsePathStyle: b.GetAttribute("s3_use_path_style").AsBoolValueOrDefault(false, b), + 
S3USEast1RegionalEndpoint: getStringAttrValue("s3_us_east_1_regional_endpoint", b), + SecretKey: getStringAttrValue("secret_key", b), + SharedConfigFiles: b.GetAttribute("shared_config_files").AsStringValuesOrDefault(b, defaultSharedConfigFile), + SharedCredentialsFiles: b.GetAttribute("shared_credentials_files").AsStringValuesOrDefault(b, defaultSharedCredentialsFile), + SkipCredentialsValidation: b.GetAttribute("skip_credentials_validation").AsBoolValueOrDefault(false, b), + SkipMetadataAPICheck: b.GetAttribute("skip_metadata_api_check").AsBoolValueOrDefault(false, b), + SkipRegionValidation: b.GetAttribute("skip_region_validation").AsBoolValueOrDefault(false, b), + SkipRequestingAccountID: b.GetAttribute("skip_requesting_account_id").AsBoolValueOrDefault(false, b), + STSRegion: getStringAttrValue("sts_region", b), + Token: getStringAttrValue("token", b), + UseDualstackEndpoint: b.GetAttribute("use_dualstack_endpoint").AsBoolValueOrDefault(false, b), + UseFIPSEndpoint: b.GetAttribute("use_fips_endpoint").AsBoolValueOrDefault(false, b), + } +} + +func adaptAssumeRole(p *terraform.Block) aws.AssumeRole { + assumeRoleBlock := p.GetBlock("assume_role") + + if assumeRoleBlock.IsNil() { + return aws.AssumeRole{ + Metadata: p.GetMetadata(), + Duration: types.StringDefault("", p.GetMetadata()), + ExternalID: types.StringDefault("", p.GetMetadata()), + Policy: types.StringDefault("", p.GetMetadata()), + RoleARN: types.StringDefault("", p.GetMetadata()), + SessionName: types.StringDefault("", p.GetMetadata()), + SourceIdentity: types.StringDefault("", p.GetMetadata()), + } + } + + return aws.AssumeRole{ + Metadata: assumeRoleBlock.GetMetadata(), + Duration: getStringAttrValue("duration", p), + ExternalID: getStringAttrValue("external_id", p), + Policy: getStringAttrValue("policy", p), + PolicyARNs: p.GetAttribute("policy_arns").AsStringValueSliceOrEmpty(), + RoleARN: getStringAttrValue("role_arn", p), + SessionName: getStringAttrValue("session_name", p), + SourceIdentity: getStringAttrValue("source_identity", p), + Tags: p.GetAttribute("tags").AsMapValue(), + TransitiveTagKeys: p.GetAttribute("transitive_tag_keys").AsStringValueSliceOrEmpty(), + } +} + +func adaptAssumeRoleWithWebIdentity(p *terraform.Block) aws.AssumeRoleWithWebIdentity { + block := p.GetBlock("assume_role_with_web_identity") + if block.IsNil() { + return aws.AssumeRoleWithWebIdentity{ + Metadata: p.GetMetadata(), + Duration: types.StringDefault("", p.GetMetadata()), + Policy: types.StringDefault("", p.GetMetadata()), + RoleARN: types.StringDefault("", p.GetMetadata()), + SessionName: types.StringDefault("", p.GetMetadata()), + WebIdentityToken: types.StringDefault("", p.GetMetadata()), + WebIdentityTokenFile: types.StringDefault("", p.GetMetadata()), + } + } + + return aws.AssumeRoleWithWebIdentity{ + Metadata: block.GetMetadata(), + Duration: getStringAttrValue("duration", p), + Policy: getStringAttrValue("policy", p), + PolicyARNs: p.GetAttribute("policy_arns").AsStringValueSliceOrEmpty(), + RoleARN: getStringAttrValue("role_arn", p), + SessionName: getStringAttrValue("session_name", p), + WebIdentityToken: getStringAttrValue("web_identity_token", p), + WebIdentityTokenFile: getStringAttrValue("web_identity_token_file", p), + } +} + +func adaptEndpoints(p *terraform.Block) types.MapValue { + block := p.GetBlock("endpoints") + if block.IsNil() { + return types.MapDefault(make(map[string]string), p.GetMetadata()) + } + + values := make(map[string]string) + + for name, attr := range block.Attributes() { + values[name] = 
attr.AsStringValueOrDefault("", block).Value() + } + + return types.Map(values, block.GetMetadata()) +} + +func adaptDefaultTags(p *terraform.Block) aws.DefaultTags { + attr, _ := p.GetNestedAttribute("default_tags.tags") + if attr.IsNil() { + return aws.DefaultTags{} + } + + return aws.DefaultTags{ + Metadata: attr.GetMetadata(), + Tags: attr.AsMapValue(), + } +} + +func adaptIgnoreTags(p *terraform.Block) aws.IgnoreTags { + block := p.GetBlock("ignore_tags") + if block.IsNil() { + return aws.IgnoreTags{} + } + + return aws.IgnoreTags{ + Metadata: block.GetMetadata(), + Keys: block.GetAttribute("keys").AsStringValueSliceOrEmpty(), + KeyPrefixes: block.GetAttribute("key_prefixes").AsStringValueSliceOrEmpty(), + } +} + +func getStringAttrValue(name string, parent *terraform.Block) types.StringValue { + return parent.GetAttribute(name).AsStringValueOrDefault("", parent) +} diff --git a/internal/adapters/terraform/aws/provider/adapt_test.go b/internal/adapters/terraform/aws/provider/adapt_test.go new file mode 100644 index 000000000000..6f93f230d937 --- /dev/null +++ b/internal/adapters/terraform/aws/provider/adapt_test.go @@ -0,0 +1,129 @@ +package provider + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/aws" + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func TestAdapt(t *testing.T) { + tests := []struct { + name string + source string + expected []aws.TerraformProvider + }{ + { + name: "happy", + source: ` +variable "s3_use_path_style" { + default = true +} + +provider "aws" { + version = "~> 5.0" + region = "us-east-1" + profile = "localstack" + + access_key = "fake" + secret_key = "fake" + skip_credentials_validation = true + skip_metadata_api_check = true + skip_requesting_account_id = true + s3_use_path_style = var.s3_use_path_style + + endpoints { + dynamodb = "http://localhost:4566" + s3 = "http://localhost:4566" + } + + default_tags { + tags = { + Environment = "Local" + Name = "LocalStack" + } + } +}`, + expected: []aws.TerraformProvider{ + { + Version: types.String("~> 5.0", types.NewTestMisconfigMetadata()), + Region: types.String("us-east-1", types.NewTestMisconfigMetadata()), + DefaultTags: aws.DefaultTags{ + Metadata: types.NewTestMisconfigMetadata(), + Tags: types.Map(map[string]string{ + "Environment": "Local", + "Name": "LocalStack", + }, types.NewTestMisconfigMetadata()), + }, + Endpoints: types.Map(map[string]string{ + "dynamodb": "http://localhost:4566", + "s3": "http://localhost:4566", + }, types.NewTestMisconfigMetadata()), + Profile: types.String("localstack", types.NewTestMisconfigMetadata()), + AccessKey: types.String("fake", types.NewTestMisconfigMetadata()), + SecretKey: types.String("fake", types.NewTestMisconfigMetadata()), + SkipCredentialsValidation: types.Bool(true, types.NewTestMisconfigMetadata()), + SkipMetadataAPICheck: types.Bool(true, types.NewTestMisconfigMetadata()), + SkipRequestingAccountID: types.Bool(true, types.NewTestMisconfigMetadata()), + S3UsePathStyle: types.Bool(true, types.NewTestMisconfigMetadata()), + MaxRetries: types.IntDefault(defaultMaxRetries, types.NewTestMisconfigMetadata()), + SharedConfigFiles: types.StringValueList{ + types.StringDefault(defaultSharedConfigFile, types.NewTestMisconfigMetadata()), + }, + SharedCredentialsFiles: types.StringValueList{ + types.StringDefault(defaultSharedCredentialsFile, types.NewTestMisconfigMetadata()), + }, + }, + 
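+			// Every expected field pairs a concrete value with
+			// types.NewTestMisconfigMetadata(), a placeholder metadata value
+			// used so the comparison exercises the adapted values rather than
+			// exact source ranges. Constructing one expected entry by hand
+			// looks roughly like this (sketch only, not an additional case):
+			//
+			//	aws.TerraformProvider{
+			//		Region:  types.String("us-east-1", types.NewTestMisconfigMetadata()),
+			//		Profile: types.String("localstack", types.NewTestMisconfigMetadata()),
+			//	}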
}, + }, + { + name: "multiple provider configurations", + source: ` + +provider "aws" { + region = "us-east-1" +} + +provider "aws" { + alias = "west" + region = "us-west-2" +} +`, + expected: []aws.TerraformProvider{ + { + Region: types.String("us-east-1", types.NewTestMisconfigMetadata()), + Endpoints: types.Map(make(map[string]string), types.NewTestMisconfigMetadata()), + MaxRetries: types.IntDefault(defaultMaxRetries, types.NewTestMisconfigMetadata()), + SharedConfigFiles: types.StringValueList{ + types.StringDefault(defaultSharedConfigFile, types.NewTestMisconfigMetadata()), + }, + SharedCredentialsFiles: types.StringValueList{ + types.StringDefault(defaultSharedCredentialsFile, types.NewTestMisconfigMetadata()), + }, + }, + { + Alias: types.String("west", types.NewTestMisconfigMetadata()), + Region: types.String("us-west-2", types.NewTestMisconfigMetadata()), + Endpoints: types.Map(make(map[string]string), types.NewTestMisconfigMetadata()), + MaxRetries: types.IntDefault(defaultMaxRetries, types.NewTestMisconfigMetadata()), + SharedConfigFiles: types.StringValueList{ + types.StringDefault(defaultSharedConfigFile, types.NewTestMisconfigMetadata()), + }, + SharedCredentialsFiles: types.StringValueList{ + types.StringDefault(defaultSharedCredentialsFile, types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.source, ".tf") + testutil.AssertDefsecEqual(t, test.expected, Adapt(modules)) + }) + } +} diff --git a/internal/adapters/terraform/aws/rds/adapt.go b/internal/adapters/terraform/aws/rds/adapt.go new file mode 100644 index 000000000000..c05cd9abbd02 --- /dev/null +++ b/internal/adapters/terraform/aws/rds/adapt.go @@ -0,0 +1,256 @@ +package rds + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/rds" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) rds.RDS { + return rds.RDS{ + Instances: getInstances(modules), + Clusters: getClusters(modules), + Classic: getClassic(modules), + Snapshots: getSnapshots(modules), + ParameterGroups: getParameterGroups(modules), + } +} + +func getInstances(modules terraform.Modules) (instances []rds.Instance) { + for _, resource := range modules.GetResourcesByType("aws_db_instance") { + instances = append(instances, adaptInstance(resource, modules)) + } + + return instances +} + +func getParameterGroups(modules terraform.Modules) (parametergroups []rds.ParameterGroups) { + for _, resource := range modules.GetResourcesByType("aws_db_parameter_group") { + parametergroups = append(parametergroups, adaptDBParameterGroups(resource, modules)) + } + + return parametergroups +} + +func getSnapshots(modules terraform.Modules) (snapshots []rds.Snapshots) { + for _, resource := range modules.GetResourcesByType("aws_db_snapshot") { + snapshots = append(snapshots, adaptDBSnapshots(resource, modules)) + } + + return snapshots +} + +func getClusters(modules terraform.Modules) (clusters []rds.Cluster) { + + rdsInstanceMaps := modules.GetChildResourceIDMapByType("aws_rds_cluster_instance") + for _, resource := range modules.GetResourcesByType("aws_rds_cluster") { + cluster, instanceIDs := adaptCluster(resource, modules) + for _, id := range instanceIDs { + rdsInstanceMaps.Resolve(id) + } + clusters = append(clusters, cluster) + } + + orphanResources := modules.GetResourceByIDs(rdsInstanceMaps.Orphans()...) 
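+	// Every instance ID resolved in the loop above belongs to a managed
+	// aws_rds_cluster; whatever remains in the map references a cluster that
+	// is not defined in the scanned modules. Those leftovers are grouped
+	// under a single synthetic, unmanaged cluster below so checks still run
+	// against them. Roughly (illustrative resource IDs only):
+	//
+	//	map before resolving: {instance.a, instance.b}
+	//	cluster "x" claims instance.a -> Resolve("instance.a")
+	//	Orphans() -> ["instance.b"]   -> adopted by the orphanage cluster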
+ + if len(orphanResources) > 0 { + orphanage := rds.Cluster{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.IntDefault(1, defsecTypes.NewUnmanagedMisconfigMetadata()), + ReplicationSourceARN: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Enabled: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + KMSKeyID: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + Instances: nil, + Encryption: rds.Encryption{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EncryptStorage: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + KMSKeyID: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + PublicAccess: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Engine: defsecTypes.StringUnresolvable(defsecTypes.NewUnmanagedMisconfigMetadata()), + LatestRestorableTime: defsecTypes.TimeUnresolvable(defsecTypes.NewUnmanagedMisconfigMetadata()), + DeletionProtection: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + } + for _, orphan := range orphanResources { + orphanage.Instances = append(orphanage.Instances, adaptClusterInstance(orphan, modules)) + } + clusters = append(clusters, orphanage) + } + + return clusters +} + +func getClassic(modules terraform.Modules) rds.Classic { + classic := rds.Classic{ + DBSecurityGroups: nil, + } + for _, resource := range modules.GetResourcesByType("aws_db_security_group", "aws_redshift_security_group", "aws_elasticache_security_group") { + classic.DBSecurityGroups = append(classic.DBSecurityGroups, adaptClassicDBSecurityGroup(resource)) + } + return classic +} + +func adaptClusterInstance(resource *terraform.Block, modules terraform.Modules) rds.ClusterInstance { + clusterIdAttr := resource.GetAttribute("cluster_identifier") + clusterId := clusterIdAttr.AsStringValueOrDefault("", resource) + + if clusterIdAttr.IsResourceBlockReference("aws_rds_cluster") { + if referenced, err := modules.GetReferencedBlock(clusterIdAttr, resource); err == nil { + clusterId = defsecTypes.String(referenced.FullName(), referenced.GetMetadata()) + } + } + + return rds.ClusterInstance{ + ClusterIdentifier: clusterId, + Instance: adaptInstance(resource, modules), + } +} + +func adaptClassicDBSecurityGroup(resource *terraform.Block) rds.DBSecurityGroup { + return rds.DBSecurityGroup{ + Metadata: resource.GetMetadata(), + } +} + +func adaptInstance(resource *terraform.Block, modules terraform.Modules) rds.Instance { + + var ReadReplicaDBInstanceIdentifiers []defsecTypes.StringValue + rrdiAttr := resource.GetAttribute("replicate_source_db") + for _, rrdi := range rrdiAttr.AsStringValues() { + ReadReplicaDBInstanceIdentifiers = append(ReadReplicaDBInstanceIdentifiers, rrdi) + } + + var TagList []rds.TagList + tagres := resource.GetBlocks("tags") + for _, tagres := range tagres { + + TagList = append(TagList, rds.TagList{ + Metadata: tagres.GetMetadata(), + }) + } + + var EnabledCloudwatchLogsExports []defsecTypes.StringValue + ecweAttr := resource.GetAttribute("enabled_cloudwatch_logs_exports") + for _, ecwe := range ecweAttr.AsStringValues() { + EnabledCloudwatchLogsExports = append(EnabledCloudwatchLogsExports, ecwe) + } + + replicaSource := resource.GetAttribute("replicate_source_db") + replicaSourceValue := "" + if 
replicaSource.IsNotNil() { + if referenced, err := modules.GetReferencedBlock(replicaSource, resource); err == nil { + replicaSourceValue = referenced.ID() + } + } + return rds.Instance{ + Metadata: resource.GetMetadata(), + BackupRetentionPeriodDays: resource.GetAttribute("backup_retention_period").AsIntValueOrDefault(0, resource), + ReplicationSourceARN: defsecTypes.StringExplicit(replicaSourceValue, resource.GetMetadata()), + PerformanceInsights: adaptPerformanceInsights(resource), + Encryption: adaptEncryption(resource), + PublicAccess: resource.GetAttribute("publicly_accessible").AsBoolValueOrDefault(false, resource), + Engine: resource.GetAttribute("engine").AsStringValueOrDefault(rds.EngineAurora, resource), + IAMAuthEnabled: resource.GetAttribute("iam_database_authentication_enabled").AsBoolValueOrDefault(false, resource), + DeletionProtection: resource.GetAttribute("deletion_protection").AsBoolValueOrDefault(false, resource), + DBInstanceArn: resource.GetAttribute("arn").AsStringValueOrDefault("", resource), + StorageEncrypted: resource.GetAttribute("storage_encrypted").AsBoolValueOrDefault(true, resource), + DBInstanceIdentifier: resource.GetAttribute("identifier").AsStringValueOrDefault("", resource), + EngineVersion: resource.GetAttribute("engine_version").AsStringValueOrDefault("", resource), + AutoMinorVersionUpgrade: resource.GetAttribute("auto_minor_version_upgrade").AsBoolValueOrDefault(false, resource), + MultiAZ: resource.GetAttribute("multi_az").AsBoolValueOrDefault(false, resource), + PubliclyAccessible: resource.GetAttribute("publicly_accessible").AsBoolValueOrDefault(false, resource), + LatestRestorableTime: defsecTypes.TimeUnresolvable(resource.GetMetadata()), + ReadReplicaDBInstanceIdentifiers: ReadReplicaDBInstanceIdentifiers, + TagList: TagList, + EnabledCloudwatchLogsExports: EnabledCloudwatchLogsExports, + } +} + +func adaptDBParameterGroups(resource *terraform.Block, modules terraform.Modules) rds.ParameterGroups { + + var Parameters []rds.Parameters + paramres := resource.GetBlocks("parameter") + for _, paramres := range paramres { + + Parameters = append(Parameters, rds.Parameters{ + Metadata: paramres.GetMetadata(), + ParameterName: defsecTypes.StringDefault("", paramres.GetMetadata()), + ParameterValue: defsecTypes.StringDefault("", paramres.GetMetadata()), + }) + } + + return rds.ParameterGroups{ + Metadata: resource.GetMetadata(), + DBParameterGroupName: resource.GetAttribute("name").AsStringValueOrDefault("", resource), + DBParameterGroupFamily: resource.GetAttribute("family").AsStringValueOrDefault("", resource), + Parameters: Parameters, + } +} + +func adaptDBSnapshots(resource *terraform.Block, modules terraform.Modules) rds.Snapshots { + + return rds.Snapshots{ + Metadata: resource.GetMetadata(), + DBSnapshotIdentifier: resource.GetAttribute("db_snapshot_identifier").AsStringValueOrDefault("", resource), + DBSnapshotArn: resource.GetAttribute("db_snapshot_arn").AsStringValueOrDefault("", resource), + Encrypted: resource.GetAttribute("encrypted").AsBoolValueOrDefault(true, resource), + KmsKeyId: resource.GetAttribute("kms_key_id").AsStringValueOrDefault("", resource), + SnapshotAttributes: nil, + } +} + +func adaptCluster(resource *terraform.Block, modules terraform.Modules) (rds.Cluster, []string) { + + clusterInstances, ids := getClusterInstances(resource, modules) + + var public bool + for _, instance := range clusterInstances { + if instance.PublicAccess.IsTrue() { + public = true + break + } + } + + return rds.Cluster{ + Metadata: 
resource.GetMetadata(), + BackupRetentionPeriodDays: resource.GetAttribute("backup_retention_period").AsIntValueOrDefault(1, resource), + ReplicationSourceARN: resource.GetAttribute("replication_source_identifier").AsStringValueOrDefault("", resource), + PerformanceInsights: adaptPerformanceInsights(resource), + Instances: clusterInstances, + Encryption: adaptEncryption(resource), + PublicAccess: defsecTypes.Bool(public, resource.GetMetadata()), + Engine: resource.GetAttribute("engine").AsStringValueOrDefault(rds.EngineAurora, resource), + LatestRestorableTime: defsecTypes.TimeUnresolvable(resource.GetMetadata()), + AvailabilityZones: resource.GetAttribute("availability_zones").AsStringValueSliceOrEmpty(), + DeletionProtection: resource.GetAttribute("deletion_protection").AsBoolValueOrDefault(false, resource), + }, ids +} + +func getClusterInstances(resource *terraform.Block, modules terraform.Modules) (clusterInstances []rds.ClusterInstance, instanceIDs []string) { + clusterInstanceResources := modules.GetReferencingResources(resource, "aws_rds_cluster_instance", "cluster_identifier") + + for _, ciResource := range clusterInstanceResources { + instanceIDs = append(instanceIDs, ciResource.ID()) + clusterInstances = append(clusterInstances, adaptClusterInstance(ciResource, modules)) + } + return clusterInstances, instanceIDs +} + +func adaptPerformanceInsights(resource *terraform.Block) rds.PerformanceInsights { + return rds.PerformanceInsights{ + Metadata: resource.GetMetadata(), + Enabled: resource.GetAttribute("performance_insights_enabled").AsBoolValueOrDefault(false, resource), + KMSKeyID: resource.GetAttribute("performance_insights_kms_key_id").AsStringValueOrDefault("", resource), + } +} + +func adaptEncryption(resource *terraform.Block) rds.Encryption { + return rds.Encryption{ + Metadata: resource.GetMetadata(), + EncryptStorage: resource.GetAttribute("storage_encrypted").AsBoolValueOrDefault(false, resource), + KMSKeyID: resource.GetAttribute("kms_key_id").AsStringValueOrDefault("", resource), + } +} diff --git a/internal/adapters/terraform/aws/rds/adapt_test.go b/internal/adapters/terraform/aws/rds/adapt_test.go new file mode 100644 index 000000000000..bf55a98536d9 --- /dev/null +++ b/internal/adapters/terraform/aws/rds/adapt_test.go @@ -0,0 +1,332 @@ +package rds + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/pkg/providers/aws/rds" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected rds.RDS + }{ + { + name: "defined", + terraform: ` + + resource "aws_rds_cluster" "example" { + engine = "aurora-mysql" + availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] + backup_retention_period = 7 + kms_key_id = "kms_key_1" + storage_encrypted = true + replication_source_identifier = "arn-of-a-source-db-cluster" + deletion_protection = true + } + + resource "aws_rds_cluster_instance" "example" { + cluster_identifier = aws_rds_cluster.example.id + name = "bar" + performance_insights_enabled = true + performance_insights_kms_key_id = "performance_key_0" + kms_key_id = "kms_key_0" + storage_encrypted = true + } + + resource "aws_db_security_group" "example" { + # ... 
+ } + + resource "aws_db_instance" "example" { + publicly_accessible = false + backup_retention_period = 5 + skip_final_snapshot = true + performance_insights_enabled = true + performance_insights_kms_key_id = "performance_key_1" + storage_encrypted = true + kms_key_id = "kms_key_2" + } +`, + expected: rds.RDS{ + Instances: []rds.Instance{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.Int(5, defsecTypes.NewTestMisconfigMetadata()), + ReplicationSourceARN: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("performance_key_1", defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: rds.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptStorage: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("kms_key_2", defsecTypes.NewTestMisconfigMetadata()), + }, + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Engine: defsecTypes.String(rds.EngineAurora, defsecTypes.NewTestMisconfigMetadata()), + StorageEncrypted: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Clusters: []rds.Cluster{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.Int(7, defsecTypes.NewTestMisconfigMetadata()), + ReplicationSourceARN: defsecTypes.String("arn-of-a-source-db-cluster", defsecTypes.NewTestMisconfigMetadata()), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: rds.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptStorage: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("kms_key_1", defsecTypes.NewTestMisconfigMetadata()), + }, + Instances: []rds.ClusterInstance{ + { + Instance: rds.Instance{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + ReplicationSourceARN: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("performance_key_0", defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: rds.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptStorage: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("kms_key_0", defsecTypes.NewTestMisconfigMetadata()), + }, + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Engine: defsecTypes.String(rds.EngineAurora, defsecTypes.NewTestMisconfigMetadata()), + StorageEncrypted: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + ClusterIdentifier: defsecTypes.String("aws_rds_cluster.example", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Engine: defsecTypes.String(rds.EngineAuroraMysql, defsecTypes.NewTestMisconfigMetadata()), + AvailabilityZones: 
defsecTypes.StringValueList{ + defsecTypes.String("us-west-2a", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("us-west-2b", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("us-west-2c", defsecTypes.NewTestMisconfigMetadata()), + }, + DeletionProtection: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Classic: rds.Classic{ + DBSecurityGroups: []rds.DBSecurityGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptInstance(t *testing.T) { + tests := []struct { + name string + terraform string + expected rds.Instance + }{ + { + name: "instance defaults", + terraform: ` + resource "aws_db_instance" "example" { + } +`, + expected: rds.Instance{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + ReplicationSourceARN: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: rds.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptStorage: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Engine: defsecTypes.String(rds.EngineAurora, defsecTypes.NewTestMisconfigMetadata()), + StorageEncrypted: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + IAMAuthEnabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptInstance(modules.GetBlocks()[0], modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptCluster(t *testing.T) { + tests := []struct { + name string + terraform string + expected rds.Cluster + }{ + { + name: "cluster defaults", + terraform: ` + resource "aws_rds_cluster" "example" { + } +`, + expected: rds.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.Int(1, defsecTypes.NewTestMisconfigMetadata()), + ReplicationSourceARN: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + PerformanceInsights: rds.PerformanceInsights{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: rds.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EncryptStorage: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Engine: defsecTypes.String(rds.EngineAurora, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { 
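+		// Each case runs the same in-memory harness: parse the HCL source
+		// into terraform.Modules, adapt it, and diff the result against the
+		// expected defsec structs; no real Terraform state or AWS access is
+		// involved.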
+ t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted, _ := adaptCluster(modules.GetBlocks()[0], modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_rds_cluster" "example" { + backup_retention_period = 7 + kms_key_id = "kms_key_1" + storage_encrypted = true + replication_source_identifier = "arn-of-a-source-db-cluster" + } + + resource "aws_rds_cluster_instance" "example" { + cluster_identifier = aws_rds_cluster.example.id + backup_retention_period = 7 + performance_insights_enabled = true + performance_insights_kms_key_id = "performance_key" + storage_encrypted = true + kms_key_id = "kms_key_0" + } + + resource "aws_db_security_group" "example" { + } + + resource "aws_db_instance" "example" { + publicly_accessible = false + backup_retention_period = 7 + performance_insights_enabled = true + performance_insights_kms_key_id = "performance_key" + storage_encrypted = true + kms_key_id = "kms_key_0" + } +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Clusters, 1) + require.Len(t, adapted.Instances, 1) + + cluster := adapted.Clusters[0] + instance := adapted.Instances[0] + classic := adapted.Classic + + assert.Equal(t, 2, cluster.Metadata.Range().GetStartLine()) + assert.Equal(t, 7, cluster.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, cluster.BackupRetentionPeriodDays.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, cluster.BackupRetentionPeriodDays.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, cluster.Encryption.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, cluster.Encryption.KMSKeyID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, cluster.Encryption.EncryptStorage.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, cluster.Encryption.EncryptStorage.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, cluster.ReplicationSourceARN.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, cluster.ReplicationSourceARN.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 9, cluster.Instances[0].Instance.Metadata.Range().GetStartLine()) + assert.Equal(t, 16, cluster.Instances[0].Instance.Metadata.Range().GetEndLine()) + + assert.Equal(t, 2, cluster.Instances[0].ClusterIdentifier.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, cluster.Instances[0].ClusterIdentifier.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, cluster.Instances[0].Instance.BackupRetentionPeriodDays.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, cluster.Instances[0].Instance.BackupRetentionPeriodDays.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 12, cluster.Instances[0].Instance.PerformanceInsights.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 12, cluster.Instances[0].Instance.PerformanceInsights.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, cluster.Instances[0].Instance.PerformanceInsights.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 13, cluster.Instances[0].Instance.PerformanceInsights.KMSKeyID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 14, cluster.Instances[0].Instance.Encryption.EncryptStorage.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 14, cluster.Instances[0].Instance.Encryption.EncryptStorage.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 15, 
cluster.Instances[0].Instance.Encryption.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 15, cluster.Instances[0].Instance.Encryption.KMSKeyID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 18, classic.DBSecurityGroups[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 19, classic.DBSecurityGroups[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 21, instance.Metadata.Range().GetStartLine()) + assert.Equal(t, 28, instance.Metadata.Range().GetEndLine()) + + assert.Equal(t, 22, instance.PublicAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 22, instance.PublicAccess.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 23, instance.BackupRetentionPeriodDays.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 23, instance.BackupRetentionPeriodDays.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 24, instance.PerformanceInsights.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 24, instance.PerformanceInsights.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 25, instance.PerformanceInsights.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 25, instance.PerformanceInsights.KMSKeyID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 26, instance.Encryption.EncryptStorage.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 26, instance.Encryption.EncryptStorage.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 27, instance.Encryption.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 27, instance.Encryption.KMSKeyID.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/redshift/adapt.go b/internal/adapters/terraform/aws/redshift/adapt.go new file mode 100644 index 000000000000..41567c6f3a37 --- /dev/null +++ b/internal/adapters/terraform/aws/redshift/adapt.go @@ -0,0 +1,117 @@ +package redshift + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/redshift" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) redshift.Redshift { + return redshift.Redshift{ + Clusters: adaptClusters(modules), + SecurityGroups: adaptSecurityGroups(modules), + ClusterParameters: adaptParameters(modules), + ReservedNodes: nil, + } +} + +func adaptClusters(modules terraform.Modules) []redshift.Cluster { + var clusters []redshift.Cluster + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_redshift_cluster") { + clusters = append(clusters, adaptCluster(resource, module)) + } + } + return clusters +} + +func adaptSecurityGroups(modules terraform.Modules) []redshift.SecurityGroup { + var securityGroups []redshift.SecurityGroup + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_redshift_security_group") { + securityGroups = append(securityGroups, adaptSecurityGroup(resource)) + } + } + return securityGroups +} + +func adaptParameters(modules terraform.Modules) []redshift.ClusterParameter { + var Parameters []redshift.ClusterParameter + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_redshift_parameter_group") { + for _, r := range resource.GetBlocks("parameter") { + Parameters = append(Parameters, adaptParameter(r)) + } + } + } + return Parameters +} + +func adaptCluster(resource *terraform.Block, module *terraform.Module) redshift.Cluster { + cluster := redshift.Cluster{ + Metadata: resource.GetMetadata(), + 
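+	// The literal below seeds conservative defaults (logging disabled, no
+	// snapshot retention, encryption off, standard port); the statements
+	// that follow it overwrite individual fields whenever the matching
+	// attribute or nested block is actually present on the resource.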
ClusterIdentifier: resource.GetAttribute("cluster_identifier").AsStringValueOrDefault("", resource), + NodeType: resource.GetAttribute("node_type").AsStringValueOrDefault("", resource), + MasterUsername: resource.GetAttribute("master_username").AsStringValueOrDefault("", resource), + NumberOfNodes: resource.GetAttribute("number_of_nodes").AsIntValueOrDefault(1, resource), + PubliclyAccessible: resource.GetAttribute("publicly_accessible").AsBoolValueOrDefault(true, resource), + LoggingEnabled: defsecTypes.Bool(false, resource.GetMetadata()), + AutomatedSnapshotRetentionPeriod: defsecTypes.Int(0, resource.GetMetadata()), + AllowVersionUpgrade: resource.GetAttribute("allow_version_upgrade").AsBoolValueOrDefault(true, resource), + VpcId: defsecTypes.String("", resource.GetMetadata()), + Encryption: redshift.Encryption{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + KMSKeyID: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + EndPoint: redshift.EndPoint{ + Metadata: resource.GetMetadata(), + Port: resource.GetAttribute("port").AsIntValueOrDefault(5439, resource), + }, + SubnetGroupName: defsecTypes.StringDefault("", resource.GetMetadata()), + } + + encryptedAttr := resource.GetAttribute("encrypted") + cluster.Encryption.Enabled = encryptedAttr.AsBoolValueOrDefault(false, resource) + + if logBlock := resource.GetBlock("logging"); logBlock.IsNotNil() { + cluster.LoggingEnabled = logBlock.GetAttribute("enable").AsBoolValueOrDefault(false, logBlock) + } + + if snapBlock := resource.GetBlock("snapshot_copy"); snapBlock.IsNotNil() { + snapAttr := snapBlock.GetAttribute("retention_period") + cluster.AutomatedSnapshotRetentionPeriod = snapAttr.AsIntValueOrDefault(7, snapBlock) + } + + KMSKeyIDAttr := resource.GetAttribute("kms_key_id") + cluster.Encryption.KMSKeyID = KMSKeyIDAttr.AsStringValueOrDefault("", resource) + if KMSKeyIDAttr.IsResourceBlockReference("aws_kms_key") { + if kmsKeyBlock, err := module.GetReferencedBlock(KMSKeyIDAttr, resource); err == nil { + cluster.Encryption.KMSKeyID = defsecTypes.String(kmsKeyBlock.FullName(), kmsKeyBlock.GetMetadata()) + } + } + + subnetGroupNameAttr := resource.GetAttribute("cluster_subnet_group_name") + cluster.SubnetGroupName = subnetGroupNameAttr.AsStringValueOrDefault("", resource) + + return cluster +} + +func adaptSecurityGroup(resource *terraform.Block) redshift.SecurityGroup { + descriptionAttr := resource.GetAttribute("description") + descriptionVal := descriptionAttr.AsStringValueOrDefault("Managed by Terraform", resource) + + return redshift.SecurityGroup{ + Metadata: resource.GetMetadata(), + Description: descriptionVal, + } +} + +func adaptParameter(resource *terraform.Block) redshift.ClusterParameter { + + return redshift.ClusterParameter{ + Metadata: resource.GetMetadata(), + ParameterName: resource.GetAttribute("name").AsStringValueOrDefault("", resource), + ParameterValue: resource.GetAttribute("value").AsStringValueOrDefault("", resource), + } +} diff --git a/internal/adapters/terraform/aws/redshift/adapt_test.go b/internal/adapters/terraform/aws/redshift/adapt_test.go new file mode 100644 index 000000000000..29b909093c41 --- /dev/null +++ b/internal/adapters/terraform/aws/redshift/adapt_test.go @@ -0,0 +1,230 @@ +package redshift + +import ( + "fmt" + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/redshift" + "github.com/stretchr/testify/assert" + 
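+	// assert records a failure and lets the test continue, while require
+	// aborts the test immediately; the tests below lean on require for
+	// slice-length preconditions before indexing and on assert for the
+	// individual field checks.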
"github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected redshift.Redshift + }{ + { + name: "reference key id", + terraform: ` + resource "aws_kms_key" "redshift" { + enable_key_rotation = true + } + + resource "aws_redshift_cluster" "example" { + cluster_identifier = "tf-redshift-cluster" + publicly_accessible = false + number_of_nodes = 1 + allow_version_upgrade = false + port = 5440 + encrypted = true + kms_key_id = aws_kms_key.redshift.key_id + cluster_subnet_group_name = "redshift_subnet" + } + + resource "aws_redshift_security_group" "default" { + name = "redshift-sg" + description = "some description" + } +`, + expected: redshift.Redshift{ + Clusters: []redshift.Cluster{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ClusterIdentifier: defsecTypes.String("tf-redshift-cluster", defsecTypes.NewTestMisconfigMetadata()), + PubliclyAccessible: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + NumberOfNodes: defsecTypes.Int(1, defsecTypes.NewTestMisconfigMetadata()), + AllowVersionUpgrade: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + EndPoint: redshift.EndPoint{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Port: defsecTypes.Int(5440, defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: redshift.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("aws_kms_key.redshift", defsecTypes.NewTestMisconfigMetadata()), + }, + SubnetGroupName: defsecTypes.String("redshift_subnet", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + SecurityGroups: []redshift.SecurityGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("some description", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + fmt.Println(adapted.SecurityGroups[0].Description.Value()) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptCluster(t *testing.T) { + tests := []struct { + name string + terraform string + expected redshift.Cluster + }{ + { + name: "key as string", + terraform: ` + resource "aws_redshift_cluster" "example" { + cluster_identifier = "tf-redshift-cluster" + publicly_accessible = false + number_of_nodes = 1 + allow_version_upgrade = false + port = 5440 + encrypted = true + kms_key_id = "key-id" + cluster_subnet_group_name = "redshift_subnet" + } +`, + expected: redshift.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ClusterIdentifier: defsecTypes.String("tf-redshift-cluster", defsecTypes.NewTestMisconfigMetadata()), + PubliclyAccessible: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + NumberOfNodes: defsecTypes.Int(1, defsecTypes.NewTestMisconfigMetadata()), + AllowVersionUpgrade: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + EndPoint: redshift.EndPoint{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Port: defsecTypes.Int(5440, defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: redshift.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: 
defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("key-id", defsecTypes.NewTestMisconfigMetadata()), + }, + SubnetGroupName: defsecTypes.String("redshift_subnet", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "aws_redshift_cluster" "example" { + } +`, + expected: redshift.Cluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ClusterIdentifier: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + PubliclyAccessible: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + NumberOfNodes: defsecTypes.Int(1, defsecTypes.NewTestMisconfigMetadata()), + AllowVersionUpgrade: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + EndPoint: redshift.EndPoint{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Port: defsecTypes.Int(5439, defsecTypes.NewTestMisconfigMetadata()), + }, + Encryption: redshift.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + SubnetGroupName: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptCluster(modules.GetBlocks()[0], modules[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptSecurityGroup(t *testing.T) { + tests := []struct { + name string + terraform string + expected redshift.SecurityGroup + }{ + { + name: "defaults", + terraform: ` +resource "" "example" { +} +`, + expected: redshift.SecurityGroup{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("Managed by Terraform", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptSecurityGroup(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_kms_key" "redshift" { + enable_key_rotation = true + } + + resource "aws_redshift_cluster" "example" { + cluster_identifier = "tf-redshift-cluster" + encrypted = true + kms_key_id = aws_kms_key.redshift.key_id + cluster_subnet_group_name = "subnet name" + } + + resource "aws_redshift_security_group" "default" { + name = "redshift-sg" + description = "some description" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Clusters, 1) + require.Len(t, adapted.SecurityGroups, 1) + cluster := adapted.Clusters[0] + securityGroup := adapted.SecurityGroups[0] + + assert.Equal(t, 6, cluster.Metadata.Range().GetStartLine()) + assert.Equal(t, 11, cluster.Metadata.Range().GetEndLine()) + + assert.Equal(t, 8, cluster.Encryption.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 8, cluster.Encryption.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 2, cluster.Encryption.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, cluster.Encryption.KMSKeyID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 10, cluster.SubnetGroupName.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, 
cluster.SubnetGroupName.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, securityGroup.Metadata.Range().GetStartLine()) + assert.Equal(t, 16, securityGroup.Metadata.Range().GetEndLine()) + + assert.Equal(t, 15, securityGroup.Description.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 15, securityGroup.Description.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/s3/adapt.go b/internal/adapters/terraform/aws/s3/adapt.go new file mode 100644 index 000000000000..62a52ad8e63d --- /dev/null +++ b/internal/adapters/terraform/aws/s3/adapt.go @@ -0,0 +1,18 @@ +package s3 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/s3" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) s3.S3 { + + a := &adapter{ + modules: modules, + bucketMap: make(map[string]*s3.Bucket), + } + + return s3.S3{ + Buckets: a.adaptBuckets(), + } +} diff --git a/internal/adapters/terraform/aws/s3/adapt_test.go b/internal/adapters/terraform/aws/s3/adapt_test.go new file mode 100644 index 000000000000..fc793d897418 --- /dev/null +++ b/internal/adapters/terraform/aws/s3/adapt_test.go @@ -0,0 +1,385 @@ +package s3 + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/s3" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/liamg/iamgo" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_PublicAccessBlock(t *testing.T) { + testCases := []struct { + desc string + source string + expectedBuckets int + hasPublicAccess bool + }{ + { + desc: "public access block is found when using the bucket name as the lookup", + source: ` +resource "aws_s3_bucket" "example" { + bucket = "bucketname" +} + +resource "aws_s3_bucket_public_access_block" "example_access_block"{ + bucket = "bucketname" +} +`, + expectedBuckets: 1, + hasPublicAccess: true, + }, + { + desc: "public access block is found when using the bucket id as the lookup", + source: ` +resource "aws_s3_bucket" "example" { + bucket = "bucketname" +} + +resource "aws_s3_bucket_public_access_block" "example_access_block"{ + bucket = aws_s3_bucket.example.id +} +`, + expectedBuckets: 1, + hasPublicAccess: true, + }, + } + for _, tC := range testCases { + t.Run(tC.desc, func(t *testing.T) { + + modules := tftestutil.CreateModulesFromSource(t, tC.source, ".tf") + s3Ctx := Adapt(modules) + + assert.Equal(t, tC.expectedBuckets, len(s3Ctx.Buckets)) + + for _, bucket := range s3Ctx.Buckets { + if tC.hasPublicAccess { + assert.NotNil(t, bucket.PublicAccessBlock) + } else { + assert.Nil(t, bucket.PublicAccessBlock) + } + } + + bucket := s3Ctx.Buckets[0] + assert.NotNil(t, bucket.PublicAccessBlock) + + }) + } + +} + +func Test_PublicAccessDoesNotReference(t *testing.T) { + testCases := []struct { + desc string + source string + }{ + { + desc: "just a bucket, no public access block", + source: ` +resource "aws_s3_bucket" "example" { + bucket = "bucketname" +} + `, + }, + { + desc: "bucket with unrelated public access block", + source: ` +resource "aws_s3_bucket" "example" { + bucket = "bucketname" +} + +resource "aws_s3_bucket_public_access_block" "example_access_block"{ + bucket = aws_s3_bucket.other.id +} + `, + }, + { + desc: "bucket with unrelated public 
access block via name", + source: ` +resource "aws_s3_bucket" "example" { + bucket = "bucketname" +} + +resource "aws_s3_bucket_public_access_block" "example_access_block"{ + bucket = "something" +} + `, + }, + } + for _, tC := range testCases { + t.Run(tC.desc, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, tC.source, ".tf") + s3Ctx := Adapt(modules) + require.Len(t, s3Ctx.Buckets, 1) + assert.Nil(t, s3Ctx.Buckets[0].PublicAccessBlock) + + }) + } +} + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected s3.S3 + }{ + { + name: "basic", + terraform: ` + resource "aws_s3_bucket" "example" { + bucket = "bucket" + } + + resource "aws_s3_bucket_public_access_block" "example" { + bucket = aws_s3_bucket.example.id + + restrict_public_buckets = true + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + + } + + resource "aws_s3_bucket_acl" "example" { + bucket = aws_s3_bucket.example.id + acl = "private" + } + + resource "aws_s3_bucket_server_side_encryption_configuration" "example" { + bucket = aws_s3_bucket.example.bucket + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = "string-key" + sse_algorithm = "aws:kms" + } + } + } + + resource "aws_s3_bucket_logging" "example" { + bucket = aws_s3_bucket.example.id + + target_bucket = aws_s3_bucket.example.id + target_prefix = "log/" + } + + resource "aws_s3_bucket_versioning" "versioning_example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Enabled" + mfa_delete = "Enabled" + } + } + + resource "aws_s3_bucket_policy" "allow_access_from_another_account" { + bucket = aws_s3_bucket.example.bucket + policy = data.aws_iam_policy_document.allow_access_from_another_account.json + } + + data "aws_iam_policy_document" "allow_access_from_another_account" { + statement { + + actions = [ + "s3:GetObject", + "s3:ListBucket", + ] + + resources = [ + "arn:aws:s3:::*", + ] + } + } + `, + expected: s3.S3{ + Buckets: []s3.Bucket{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("bucket", defsecTypes.NewTestMisconfigMetadata()), + PublicAccessBlock: &s3.PublicAccessBlock{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BlockPublicACLs: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + BlockPublicPolicy: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + IgnorePublicACLs: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + RestrictPublicBuckets: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + BucketPolicies: []iam.Policy{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Document: func() iam.Document { + + builder := iamgo.NewPolicyBuilder() + + sb := iamgo.NewStatementBuilder() + sb.WithEffect(iamgo.EffectAllow) + sb.WithActions([]string{"s3:GetObject", "s3:ListBucket"}) + sb.WithResources([]string{"arn:aws:s3:::*"}) + + builder.WithStatement(sb.Build()) + + return iam.Document{ + Parsed: builder.Build(), + Metadata: defsecTypes.NewTestMisconfigMetadata(), + IsOffset: true, + HasRefs: false, + } + }(), + Builtin: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Encryption: s3.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Algorithm: defsecTypes.String("aws:kms", defsecTypes.NewTestMisconfigMetadata()), + 
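+					// Enabled, Algorithm and KMSKeyId are all derived from the
+					// nested rule.apply_server_side_encryption_by_default block
+					// of the standalone encryption resource; the presence of an
+					// sse_algorithm value is what marks Enabled as true.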
KMSKeyId: defsecTypes.String("string-key", defsecTypes.NewTestMisconfigMetadata()), + }, + Versioning: s3.Versioning{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + MFADelete: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Logging: s3.Logging{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + TargetBucket: defsecTypes.String("aws_s3_bucket.example", defsecTypes.NewTestMisconfigMetadata()), + }, + ACL: defsecTypes.String("private", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_s3_bucket" "example" { + bucket = "bucket" + } + + resource "aws_s3_bucket_public_access_block" "example" { + bucket = aws_s3_bucket.example.id + + restrict_public_buckets = true + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + } + + resource "aws_s3_bucket_acl" "example" { + bucket = aws_s3_bucket.example.id + acl = "private" + } + + resource "aws_s3_bucket_server_side_encryption_configuration" "example" { + bucket = aws_s3_bucket.example.bucket + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = "string-key" + sse_algorithm = "aws:kms" + } + } + } + + resource "aws_s3_bucket_logging" "example" { + bucket = aws_s3_bucket.example.id + + target_bucket = aws_s3_bucket.example.id + target_prefix = "log/" + } + + resource "aws_s3_bucket_versioning" "versioning_example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Enabled" + } + } + + resource "aws_s3_bucket_policy" "allow_access_from_another_account" { + bucket = aws_s3_bucket.example.bucket + policy = data.aws_iam_policy_document.allow_access_from_another_account.json + } + + data "aws_iam_policy_document" "allow_access_from_another_account" { + statement { + + actions = [ + "s3:GetObject", + "s3:ListBucket", + ] + + resources = [ + "arn:aws:s3:::*", + ] + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Buckets, 1) + bucket := adapted.Buckets[0] + + assert.Equal(t, 2, bucket.Metadata.Range().GetStartLine()) + assert.Equal(t, 4, bucket.Metadata.Range().GetEndLine()) + + assert.Equal(t, 6, bucket.PublicAccessBlock.Metadata.Range().GetStartLine()) + assert.Equal(t, 13, bucket.PublicAccessBlock.Metadata.Range().GetEndLine()) + + assert.Equal(t, 9, bucket.PublicAccessBlock.RestrictPublicBuckets.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 9, bucket.PublicAccessBlock.RestrictPublicBuckets.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 10, bucket.PublicAccessBlock.BlockPublicACLs.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, bucket.PublicAccessBlock.BlockPublicACLs.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, bucket.PublicAccessBlock.BlockPublicPolicy.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, bucket.PublicAccessBlock.BlockPublicPolicy.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 12, bucket.PublicAccessBlock.IgnorePublicACLs.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 12, 
bucket.PublicAccessBlock.IgnorePublicACLs.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 17, bucket.ACL.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, bucket.ACL.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 20, bucket.Encryption.Metadata.Range().GetStartLine()) + assert.Equal(t, 29, bucket.Encryption.Metadata.Range().GetEndLine()) + + assert.Equal(t, 25, bucket.Encryption.KMSKeyId.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 25, bucket.Encryption.KMSKeyId.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 26, bucket.Encryption.Algorithm.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 26, bucket.Encryption.Algorithm.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 31, bucket.Logging.Metadata.Range().GetStartLine()) + assert.Equal(t, 36, bucket.Logging.Metadata.Range().GetEndLine()) + + assert.Equal(t, 34, bucket.Logging.TargetBucket.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 34, bucket.Logging.TargetBucket.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 38, bucket.Versioning.Metadata.Range().GetStartLine()) + assert.Equal(t, 43, bucket.Versioning.Metadata.Range().GetEndLine()) + + assert.Equal(t, 41, bucket.Versioning.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 41, bucket.Versioning.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 47, bucket.BucketPolicies[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 47, bucket.BucketPolicies[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 50, bucket.BucketPolicies[0].Document.Metadata.Range().GetStartLine()) + assert.Equal(t, 62, bucket.BucketPolicies[0].Document.Metadata.Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/s3/bucket.go b/internal/adapters/terraform/aws/s3/bucket.go new file mode 100644 index 000000000000..27a8e4665af8 --- /dev/null +++ b/internal/adapters/terraform/aws/s3/bucket.go @@ -0,0 +1,283 @@ +package s3 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/s3" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type adapter struct { + modules terraform.Modules + bucketMap map[string]*s3.Bucket +} + +func (a *adapter) adaptBuckets() []s3.Bucket { + for _, block := range a.modules.GetResourcesByType("aws_s3_bucket") { + bucket := &s3.Bucket{ + Metadata: block.GetMetadata(), + Name: block.GetAttribute("bucket").AsStringValueOrDefault("", block), + PublicAccessBlock: nil, + BucketPolicies: nil, + Encryption: getEncryption(block, a), + Versioning: getVersioning(block, a), + Logging: getLogging(block, a), + ACL: getBucketAcl(block, a), + AccelerateConfigurationStatus: getAccelerateStatus(block, a), + BucketLocation: block.GetAttribute("region").AsStringValueOrDefault("", block), + LifecycleConfiguration: getLifecycle(block, a), + Website: getWebsite(block, a), + Objects: getObject(block, a), + } + a.bucketMap[block.ID()] = bucket + } + + a.adaptBucketPolicies() + a.adaptPublicAccessBlocks() + + var buckets []s3.Bucket + for _, bucket := range a.bucketMap { + buckets = append(buckets, *bucket) + } + + return buckets +} + +func getEncryption(block *terraform.Block, a *adapter) s3.Encryption { + if sseConfiguration := block.GetBlock("server_side_encryption_configuration"); sseConfiguration != nil { + return newS3Encryption(block, sseConfiguration) + } + if val, ok := applyForBucketRelatedResource(a, block, "aws_s3_bucket_server_side_encryption_configuration", func(resource *terraform.Block) 
s3.Encryption { + return newS3Encryption(resource, resource) + }); ok { + return val + } + return s3.Encryption{ + Metadata: block.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, block.GetMetadata()), + KMSKeyId: defsecTypes.StringDefault("", block.GetMetadata()), + Algorithm: defsecTypes.StringDefault("", block.GetMetadata()), + } +} + +func newS3Encryption(root *terraform.Block, sseConfiguration *terraform.Block) s3.Encryption { + return s3.Encryption{ + Metadata: root.GetMetadata(), + Enabled: isEncrypted(sseConfiguration), + Algorithm: terraform.MapNestedAttribute( + sseConfiguration, + "rule.apply_server_side_encryption_by_default.sse_algorithm", + func(attr *terraform.Attribute, parent *terraform.Block) defsecTypes.StringValue { + return attr.AsStringValueOrDefault("", parent) + }, + ), + KMSKeyId: terraform.MapNestedAttribute( + sseConfiguration, + "rule.apply_server_side_encryption_by_default.kms_master_key_id", + func(attr *terraform.Attribute, parent *terraform.Block) defsecTypes.StringValue { + return attr.AsStringValueOrDefault("", parent) + }, + ), + } +} + +func getVersioning(block *terraform.Block, a *adapter) s3.Versioning { + versioning := s3.Versioning{ + Metadata: block.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, block.GetMetadata()), + MFADelete: defsecTypes.BoolDefault(false, block.GetMetadata()), + } + if lockBlock := block.GetBlock("object_lock_configuration"); lockBlock != nil { + if enabled := isObjectLockEnabled(lockBlock); enabled != nil { + versioning.Enabled = *enabled + } + } + if vBlock := block.GetBlock("versioning"); vBlock != nil { + versioning.Enabled = vBlock.GetAttribute("enabled").AsBoolValueOrDefault(true, vBlock) + versioning.MFADelete = vBlock.GetAttribute("mfa_delete").AsBoolValueOrDefault(false, vBlock) + } + + if enabled, ok := applyForBucketRelatedResource(a, block, "aws_s3_bucket_object_lock_configuration", func(resource *terraform.Block) *defsecTypes.BoolValue { + if block.GetAttribute("object_lock_enabled").IsTrue() { + return isObjectLockEnabled(resource) + } + return nil + }); ok && enabled != nil { + versioning.Enabled = *enabled + } + + if val, ok := applyForBucketRelatedResource(a, block, "aws_s3_bucket_versioning", getVersioningFromResource); ok { + return val + } + return versioning +} + +func isObjectLockEnabled(resource *terraform.Block) *defsecTypes.BoolValue { + var val defsecTypes.BoolValue + attr := resource.GetAttribute("object_lock_enabled") + switch { + case attr.IsNil(): // enabled by default + val = defsecTypes.BoolDefault(true, resource.GetMetadata()) + case attr.Equals("Enabled"): + val = defsecTypes.Bool(true, attr.GetMetadata()) + } + return &val +} + +// from aws_s3_bucket_versioning +func getVersioningFromResource(block *terraform.Block) s3.Versioning { + versioning := s3.Versioning{ + Metadata: block.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, block.GetMetadata()), + MFADelete: defsecTypes.BoolDefault(false, block.GetMetadata()), + } + if config := block.GetBlock("versioning_configuration"); config != nil { + if status := config.GetAttribute("status"); status.IsNotNil() { + versioning.Enabled = defsecTypes.Bool(status.Equals("Enabled", terraform.IgnoreCase), status.GetMetadata()) + } + if mfa := config.GetAttribute("mfa_delete"); mfa.IsNotNil() { + versioning.MFADelete = defsecTypes.Bool(mfa.Equals("Enabled", terraform.IgnoreCase), mfa.GetMetadata()) + } + } + return versioning +} + +func getLogging(block *terraform.Block, a *adapter) s3.Logging { + if loggingBlock := 
block.GetBlock("logging"); loggingBlock.IsNotNil() { + targetBucket := loggingBlock.GetAttribute("target_bucket").AsStringValueOrDefault("", loggingBlock) + if referencedBlock, err := a.modules.GetReferencedBlock(loggingBlock.GetAttribute("target_bucket"), loggingBlock); err == nil { + targetBucket = defsecTypes.String(referencedBlock.FullName(), loggingBlock.GetAttribute("target_bucket").GetMetadata()) + } + return s3.Logging{ + Metadata: loggingBlock.GetMetadata(), + Enabled: defsecTypes.Bool(true, loggingBlock.GetMetadata()), + TargetBucket: targetBucket, + } + } + + if val, ok := applyForBucketRelatedResource(a, block, "aws_s3_bucket_logging", func(resource *terraform.Block) s3.Logging { + targetBucket := resource.GetAttribute("target-bucket").AsStringValueOrDefault("", resource) + if referencedBlock, err := a.modules.GetReferencedBlock(resource.GetAttribute("target_bucket"), resource); err == nil { + targetBucket = defsecTypes.String(referencedBlock.FullName(), resource.GetAttribute("target_bucket").GetMetadata()) + } + return s3.Logging{ + Metadata: resource.GetMetadata(), + Enabled: hasLogging(resource), + TargetBucket: targetBucket, + } + }); ok { + return val + } + + return s3.Logging{ + Metadata: block.GetMetadata(), + Enabled: defsecTypes.Bool(false, block.GetMetadata()), + TargetBucket: defsecTypes.StringDefault("", block.GetMetadata()), + } +} + +func getBucketAcl(block *terraform.Block, a *adapter) defsecTypes.StringValue { + aclAttr := block.GetAttribute("acl") + if aclAttr.IsString() { + return aclAttr.AsStringValueOrDefault("private", block) + } + + if val, ok := applyForBucketRelatedResource(a, block, "aws_s3_bucket_acl", func(resource *terraform.Block) defsecTypes.StringValue { + return resource.GetAttribute("acl").AsStringValueOrDefault("private", resource) + }); ok { + return val + } + return defsecTypes.StringDefault("private", block.GetMetadata()) +} + +func isEncrypted(sseConfgihuration *terraform.Block) defsecTypes.BoolValue { + return terraform.MapNestedAttribute( + sseConfgihuration, + "rule.apply_server_side_encryption_by_default.sse_algorithm", + func(attr *terraform.Attribute, parent *terraform.Block) defsecTypes.BoolValue { + if attr.IsNil() { + return defsecTypes.BoolDefault(false, parent.GetMetadata()) + } + return defsecTypes.Bool( + true, + attr.GetMetadata(), + ) + }, + ) +} + +func hasLogging(b *terraform.Block) defsecTypes.BoolValue { + if loggingBlock := b.GetBlock("logging"); loggingBlock.IsNotNil() { + if targetAttr := loggingBlock.GetAttribute("target_bucket"); targetAttr.IsNotNil() && targetAttr.IsNotEmpty() { + return defsecTypes.Bool(true, targetAttr.GetMetadata()) + } + return defsecTypes.BoolDefault(false, loggingBlock.GetMetadata()) + } + if targetBucket := b.GetAttribute("target_bucket"); targetBucket.IsNotNil() { + return defsecTypes.Bool(true, targetBucket.GetMetadata()) + } + return defsecTypes.BoolDefault(false, b.GetMetadata()) +} + +func getLifecycle(b *terraform.Block, a *adapter) []s3.Rules { + + var rules []s3.Rules + for _, r := range a.modules.GetReferencingResources(b, "aws_s3_bucket_lifecycle_configuration", "bucket") { + ruleblock := r.GetBlocks("rule") + for _, rule := range ruleblock { + rules = append(rules, s3.Rules{ + Metadata: rule.GetMetadata(), + Status: rule.GetAttribute("status").AsStringValueOrDefault("Enabled", rule), + }) + } + } + return rules +} + +func getWebsite(b *terraform.Block, a *adapter) (website *s3.Website) { + for _, r := range a.modules.GetReferencingResources(b, "aws_s3_bucket_website_configuration", 
"bucket") { + website = &s3.Website{ + Metadata: r.GetMetadata(), + } + } + return website +} + +func getObject(b *terraform.Block, a *adapter) []s3.Contents { + var object []s3.Contents + for _, r := range a.modules.GetReferencingResources(b, "aws_s3_object", "bucket") { + object = append(object, s3.Contents{ + Metadata: r.GetMetadata(), + }) + } + return object +} + +func getAccelerateStatus(b *terraform.Block, a *adapter) defsecTypes.StringValue { + var status defsecTypes.StringValue + for _, r := range a.modules.GetReferencingResources(b, " aws_s3_bucket_accelerate_configuration", "bucket") { + status = r.GetAttribute("status").AsStringValueOrDefault("Enabled", r) + } + return status +} + +func applyForBucketRelatedResource[T any](a *adapter, block *terraform.Block, resType string, fn func(resource *terraform.Block) T) (T, bool) { + for _, resource := range a.modules.GetResourcesByType(resType) { + bucketAttr := resource.GetAttribute("bucket") + if bucketAttr.IsNotNil() { + if bucketAttr.IsString() { + actualBucketName := block.GetAttribute("bucket").AsStringValueOrDefault("", block).Value() + if bucketAttr.Equals(block.ID()) || bucketAttr.Equals(actualBucketName) { + return fn(resource), true + } + } + if referencedBlock, err := a.modules.GetReferencedBlock(bucketAttr, resource); err == nil { + if referencedBlock.ID() == block.ID() { + return fn(resource), true + } + } + } + + } + var res T + return res, false +} diff --git a/internal/adapters/terraform/aws/s3/bucket_test.go b/internal/adapters/terraform/aws/s3/bucket_test.go new file mode 100644 index 000000000000..4fcdc2e50ef9 --- /dev/null +++ b/internal/adapters/terraform/aws/s3/bucket_test.go @@ -0,0 +1,331 @@ +package s3 + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/stretchr/testify/assert" +) + +func Test_GetBuckets(t *testing.T) { + + source := ` +resource "aws_s3_bucket" "bucket1" { + + +} +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + +} + +func Test_BucketGetACL(t *testing.T) { + + source := ` +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + acl = "authenticated-read" + + # ... other configuration ... +}` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.Equal(t, "authenticated-read", s3.Buckets[0].ACL.Value()) + +} + +func Test_V4BucketGetACL(t *testing.T) { + + source := ` +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" +} + +resource "aws_s3_bucket_acl" "example" { + bucket = aws_s3_bucket.example.id + acl = "authenticated-read" +}` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.Equal(t, "authenticated-read", s3.Buckets[0].ACL.Value()) + +} + +func Test_BucketGetLogging(t *testing.T) { + + source := ` +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... 
+ logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +} +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Logging.Enabled.Value()) + +} + +func Test_V4BucketGetLogging(t *testing.T) { + + source := ` +resource "aws_s3_bucket" "log_bucket" { + bucket = "example-log-bucket" + + # ... other configuration ... +} + +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_logging" "example" { + bucket = aws_s3_bucket.example.id + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 2, len(s3.Buckets)) + for _, bucket := range s3.Buckets { + switch bucket.Name.Value() { + case "yournamehere": + assert.True(t, bucket.Logging.Enabled.Value()) + case "example-log-bucket": + assert.False(t, bucket.Logging.Enabled.Value()) + } + } +} + +func Test_BucketGetVersioning(t *testing.T) { + source := ` +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + versioning { + enabled = true + } +}` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Versioning.Enabled.Value()) +} + +func Test_V4BucketGetVersioning(t *testing.T) { + source := ` +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_versioning" "example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Enabled" + } +}` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Versioning.Enabled.Value()) +} + +func Test_BucketGetVersioningWithLockDeprecated(t *testing.T) { + source := ` +resource "aws_s3_bucket" "example" { + bucket = "mybucket" + object_lock_configuration { + object_lock_enabled = "Enabled" + } +} +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Versioning.Enabled.Value()) + +} + +func Test_BucketGetVersioningWithLockForNewBucket(t *testing.T) { + source := ` +resource "aws_s3_bucket" "example" { + bucket = "mybucket" + object_lock_enabled = true +} + +resource "aws_s3_bucket_object_lock_configuration" "example" { + bucket = aws_s3_bucket.example.id +} +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Versioning.Enabled.Value()) + +} + +func Test_BucketGetVersioningWhenLockDisabledButVersioningEnabled(t *testing.T) { + source := ` +resource "aws_s3_bucket" "example" { + bucket = "mybucket" +} + +resource "aws_s3_bucket_object_lock_configuration" "example" { + bucket = aws_s3_bucket.example.id +} + +resource "aws_s3_bucket_versioning" "example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Enabled" + } +} +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Versioning.Enabled.Value()) + +} + +func Test_BucketGetEncryption(t *testing.T) { + + source := ` + 
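+ # legacy inline server_side_encryption_configuration syntax; the v4-style standalone resource form is exercised by Test_V4BucketGetEncryption below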
resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.mykey.arn + sse_algorithm = "aws:kms" + } + } + } +}` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Encryption.Enabled.Value()) +} + +func Test_V4BucketGetEncryption(t *testing.T) { + + source := ` +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "example" { + bucket = aws_s3_bucket.example.id + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.mykey.arn + sse_algorithm = "aws:kms" + } + } +} +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + assert.Equal(t, 1, len(s3.Buckets)) + assert.True(t, s3.Buckets[0].Encryption.Enabled.Value()) +} + +func Test_BucketWithPolicy(t *testing.T) { + + source := ` +resource "aws_s3_bucket" "bucket1" { + bucket = "lol" +} + +resource "aws_s3_bucket_policy" "allow_access_from_another_account" { + bucket = aws_s3_bucket.bucket1.id + policy = data.aws_iam_policy_document.allow_access_from_another_account.json +} + +data "aws_iam_policy_document" "allow_access_from_another_account" { + statement { + principals { + type = "AWS" + identifiers = ["123456789012"] + } + + actions = [ + "s3:GetObject", + "s3:ListBucket", + ] + + resources = [ + aws_s3_bucket.bucket1.arn, + ] + } +} + +` + modules := tftestutil.CreateModulesFromSource(t, source, ".tf") + + s3 := Adapt(modules) + + require.Equal(t, 1, len(s3.Buckets)) + require.Equal(t, 1, len(s3.Buckets[0].BucketPolicies)) + + policy := s3.Buckets[0].BucketPolicies[0] + + statements, _ := policy.Document.Parsed.Statements() + require.Equal(t, 1, len(statements)) + + principals, _ := statements[0].Principals() + actions, _ := statements[0].Actions() + + awsPrincipals, _ := principals.AWS() + require.Equal(t, 1, len(awsPrincipals)) + require.Equal(t, 2, len(actions)) + +} diff --git a/internal/adapters/terraform/aws/s3/policies.go b/internal/adapters/terraform/aws/s3/policies.go new file mode 100644 index 000000000000..8ce46054b8bc --- /dev/null +++ b/internal/adapters/terraform/aws/s3/policies.go @@ -0,0 +1,53 @@ +package s3 + +import ( + iamAdapter "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func (a *adapter) adaptBucketPolicies() { + + for _, b := range a.modules.GetResourcesByType("aws_s3_bucket_policy") { + + policyAttr := b.GetAttribute("policy") + if policyAttr.IsNil() { + continue + } + doc, err := iamAdapter.ParsePolicyFromAttr(policyAttr, b, a.modules) + if err != nil { + continue + } + + policy := iam.Policy{ + Metadata: policyAttr.GetMetadata(), + Name: defsecTypes.StringDefault("", b.GetMetadata()), + Document: *doc, + Builtin: defsecTypes.Bool(false, b.GetMetadata()), + } + + var bucketName string + bucketAttr := b.GetAttribute("bucket") + + if bucketAttr.IsNotNil() { + if referencedBlock, err := a.modules.GetReferencedBlock(bucketAttr, b); err == nil { + if bucket, ok := a.bucketMap[referencedBlock.ID()]; ok { + bucket.BucketPolicies = append(bucket.BucketPolicies, policy) + a.bucketMap[referencedBlock.ID()] 
= bucket + continue + } + } + } + + if bucketAttr.IsString() { + bucketName = bucketAttr.Value().AsString() + for id, bucket := range a.bucketMap { + if bucket.Name.EqualTo(bucketName) { + bucket.BucketPolicies = append(bucket.BucketPolicies, policy) + a.bucketMap[id] = bucket + break + } + } + } + } +} diff --git a/internal/adapters/terraform/aws/s3/public_access_block.go b/internal/adapters/terraform/aws/s3/public_access_block.go new file mode 100644 index 000000000000..66592a2aff84 --- /dev/null +++ b/internal/adapters/terraform/aws/s3/public_access_block.go @@ -0,0 +1,41 @@ +package s3 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/s3" +) + +func (a *adapter) adaptPublicAccessBlocks() { + + for _, b := range a.modules.GetResourcesByType("aws_s3_bucket_public_access_block") { + + pba := s3.PublicAccessBlock{ + Metadata: b.GetMetadata(), + BlockPublicACLs: b.GetAttribute("block_public_acls").AsBoolValueOrDefault(false, b), + BlockPublicPolicy: b.GetAttribute("block_public_policy").AsBoolValueOrDefault(false, b), + IgnorePublicACLs: b.GetAttribute("ignore_public_acls").AsBoolValueOrDefault(false, b), + RestrictPublicBuckets: b.GetAttribute("restrict_public_buckets").AsBoolValueOrDefault(false, b), + } + + var bucketName string + bucketAttr := b.GetAttribute("bucket") + if bucketAttr.IsNotNil() { + if referencedBlock, err := a.modules.GetReferencedBlock(bucketAttr, b); err == nil { + if bucket, ok := a.bucketMap[referencedBlock.ID()]; ok { + bucket.PublicAccessBlock = &pba + a.bucketMap[referencedBlock.ID()] = bucket + continue + } + } + } + if bucketAttr.IsString() { + bucketName = bucketAttr.Value().AsString() + for id, bucket := range a.bucketMap { + if bucketAttr.Equals(id) || bucket.Name.EqualTo(bucketName) { + bucket.PublicAccessBlock = &pba + a.bucketMap[id] = bucket + continue + } + } + } + } +} diff --git a/internal/adapters/terraform/aws/sns/adapt.go b/internal/adapters/terraform/aws/sns/adapt.go new file mode 100644 index 000000000000..e173bcc909c1 --- /dev/null +++ b/internal/adapters/terraform/aws/sns/adapt.go @@ -0,0 +1,38 @@ +package sns + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/sns" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) sns.SNS { + return sns.SNS{ + Topics: adaptTopics(modules), + } +} + +func adaptTopics(modules terraform.Modules) []sns.Topic { + var topics []sns.Topic + for _, module := range modules { + for _, resource := range module.GetResourcesByType("aws_sns_topic") { + topics = append(topics, adaptTopic(resource)) + } + } + return topics +} + +func adaptTopic(resourceBlock *terraform.Block) sns.Topic { + return sns.Topic{ + Metadata: resourceBlock.GetMetadata(), + ARN: types.StringDefault("", resourceBlock.GetMetadata()), + Encryption: adaptEncryption(resourceBlock), + } +} + +func adaptEncryption(resourceBlock *terraform.Block) sns.Encryption { + return sns.Encryption{ + Metadata: resourceBlock.GetMetadata(), + KMSKeyID: resourceBlock.GetAttribute("kms_master_key_id").AsStringValueOrDefault("", resourceBlock), + } +} diff --git a/internal/adapters/terraform/aws/sns/adapt_test.go b/internal/adapters/terraform/aws/sns/adapt_test.go new file mode 100644 index 000000000000..9f9a8a975ecc --- /dev/null +++ b/internal/adapters/terraform/aws/sns/adapt_test.go @@ -0,0 +1,82 @@ +package sns + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + 
"github.com/aquasecurity/trivy/pkg/providers/aws/sns" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptTopic(t *testing.T) { + tests := []struct { + name string + terraform string + expected sns.Topic + }{ + { + name: "defined", + terraform: ` + resource "aws_sns_topic" "good_example" { + kms_master_key_id = "/blah" + } +`, + expected: sns.Topic{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ARN: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Encryption: sns.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + KMSKeyID: defsecTypes.String("/blah", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "default", + terraform: ` + resource "aws_sns_topic" "good_example" { + } +`, + expected: sns.Topic{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ARN: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Encryption: sns.Encryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + KMSKeyID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptTopic(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "aws_sns_topic" "good_example" { + kms_master_key_id = "/blah" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Topics, 1) + topic := adapted.Topics[0] + + assert.Equal(t, 2, topic.Metadata.Range().GetStartLine()) + assert.Equal(t, 4, topic.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, topic.Encryption.KMSKeyID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, topic.Encryption.KMSKeyID.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/aws/sqs/adapt.go b/internal/adapters/terraform/aws/sqs/adapt.go new file mode 100644 index 000000000000..71c6b541477e --- /dev/null +++ b/internal/adapters/terraform/aws/sqs/adapt.go @@ -0,0 +1,167 @@ +package sqs + +import ( + "github.com/aquasecurity/trivy/internal/adapters/terraform/aws/iam" + iamp "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/sqs" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + "github.com/liamg/iamgo" + + "github.com/google/uuid" +) + +func Adapt(modules terraform.Modules) sqs.SQS { + return sqs.SQS{ + Queues: (&adapter{ + modules: modules, + queues: make(map[string]sqs.Queue), + }).adaptQueues(), + } +} + +type adapter struct { + modules terraform.Modules + queues map[string]sqs.Queue +} + +func (a *adapter) adaptQueues() []sqs.Queue { + for _, resource := range a.modules.GetResourcesByType("aws_sqs_queue") { + a.adaptQueue(resource) + } + + for _, policyBlock := range a.modules.GetResourcesByType("aws_sqs_queue_policy") { + + policy := iamp.Policy{ + Metadata: policyBlock.GetMetadata(), + Name: defsecTypes.StringDefault("", policyBlock.GetMetadata()), + Document: iamp.Document{ + Metadata: policyBlock.GetMetadata(), + }, + Builtin: defsecTypes.Bool(false, 
policyBlock.GetMetadata()), + } + if attr := policyBlock.GetAttribute("policy"); attr.IsString() { + dataBlock, err := a.modules.GetBlockById(attr.Value().AsString()) + if err != nil { + parsed, err := iamgo.ParseString(attr.Value().AsString()) + if err != nil { + continue + } + policy.Document.Parsed = *parsed + policy.Document.Metadata = attr.GetMetadata() + } else if dataBlock.Type() == "data" && dataBlock.TypeLabel() == "aws_iam_policy_document" { + if doc, err := iam.ConvertTerraformDocument(a.modules, dataBlock); err == nil { + policy.Document.Parsed = doc.Document + policy.Document.Metadata = doc.Source.GetMetadata() + policy.Document.IsOffset = true + } + } + } else if refBlock, err := a.modules.GetReferencedBlock(attr, policyBlock); err == nil { + if refBlock.Type() == "data" && refBlock.TypeLabel() == "aws_iam_policy_document" { + if doc, err := iam.ConvertTerraformDocument(a.modules, refBlock); err == nil { + policy.Document.Parsed = doc.Document + policy.Document.Metadata = doc.Source.GetMetadata() + } + } + } + + if urlAttr := policyBlock.GetAttribute("queue_url"); urlAttr.IsNotNil() { + if refBlock, err := a.modules.GetReferencedBlock(urlAttr, policyBlock); err == nil { + if queue, ok := a.queues[refBlock.ID()]; ok { + queue.Policies = append(queue.Policies, policy) + a.queues[refBlock.ID()] = queue + continue + } + } + } + + a.queues[uuid.NewString()] = sqs.Queue{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + QueueURL: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + Encryption: sqs.Encryption{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + ManagedEncryption: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + KMSKeyID: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + Policies: []iamp.Policy{policy}, + } + } + + var queues []sqs.Queue + for _, queue := range a.queues { + queues = append(queues, queue) + } + return queues +} + +func (a *adapter) adaptQueue(resource *terraform.Block) { + + kmsKeyIdAttr := resource.GetAttribute("kms_master_key_id") + kmsKeyIdVal := kmsKeyIdAttr.AsStringValueOrDefault("", resource) + managedEncryption := resource.GetAttribute("sqs_managed_sse_enabled") + + var policies []iamp.Policy + if attr := resource.GetAttribute("policy"); attr.IsString() { + + dataBlock, err := a.modules.GetBlockById(attr.Value().AsString()) + if err != nil { + policy := iamp.Policy{ + Metadata: attr.GetMetadata(), + Name: defsecTypes.StringDefault("", attr.GetMetadata()), + Document: iamp.Document{ + Metadata: attr.GetMetadata(), + }, + Builtin: defsecTypes.Bool(false, attr.GetMetadata()), + } + parsed, err := iamgo.ParseString(attr.Value().AsString()) + if err == nil { + policy.Document.Parsed = *parsed + policy.Document.Metadata = attr.GetMetadata() + policy.Metadata = attr.GetMetadata() + policies = append(policies, policy) + } + } else if dataBlock.Type() == "data" && dataBlock.TypeLabel() == "aws_iam_policy_document" { + if doc, err := iam.ConvertTerraformDocument(a.modules, dataBlock); err == nil { + policy := iamp.Policy{ + Metadata: attr.GetMetadata(), + Name: defsecTypes.StringDefault("", attr.GetMetadata()), + Document: iamp.Document{ + Metadata: doc.Source.GetMetadata(), + Parsed: doc.Document, + IsOffset: true, + HasRefs: false, + }, + Builtin: defsecTypes.Bool(false, attr.GetMetadata()), + } + policies = append(policies, policy) + } + } + + } else if refBlock, err := a.modules.GetReferencedBlock(attr, resource); err == nil { + if refBlock.Type() 
== "data" && refBlock.TypeLabel() == "aws_iam_policy_document" { + if doc, err := iam.ConvertTerraformDocument(a.modules, refBlock); err == nil { + policy := iamp.Policy{ + Metadata: doc.Source.GetMetadata(), + Name: defsecTypes.StringDefault("", doc.Source.GetMetadata()), + Document: iamp.Document{ + Metadata: doc.Source.GetMetadata(), + Parsed: doc.Document, + }, + Builtin: defsecTypes.Bool(false, refBlock.GetMetadata()), + } + policies = append(policies, policy) + } + } + } + + a.queues[resource.ID()] = sqs.Queue{ + Metadata: resource.GetMetadata(), + QueueURL: defsecTypes.StringDefault("", resource.GetMetadata()), + Encryption: sqs.Encryption{ + Metadata: resource.GetMetadata(), + ManagedEncryption: managedEncryption.AsBoolValueOrDefault(false, resource), + KMSKeyID: kmsKeyIdVal, + }, + Policies: policies, + } +} diff --git a/internal/adapters/terraform/aws/sqs/adapt_test.go b/internal/adapters/terraform/aws/sqs/adapt_test.go new file mode 100644 index 000000000000..e7daeb76dbf9 --- /dev/null +++ b/internal/adapters/terraform/aws/sqs/adapt_test.go @@ -0,0 +1,140 @@ +package sqs + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/sqs" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/liamg/iamgo" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected sqs.SQS + }{ + { + name: "np kms key", + terraform: ` + resource "aws_sqs_queue" "good_example" { + + policy = <= azurerm 2.97.0 + if omsAgentBlock := resource.GetBlock("oms_agent"); omsAgentBlock.IsNotNil() { + cluster.AddonProfile.OMSAgent.Metadata = omsAgentBlock.GetMetadata() + cluster.AddonProfile.OMSAgent.Enabled = defsecTypes.Bool(true, omsAgentBlock.GetMetadata()) + } + + // azurerm < 2.99.0 + if resource.HasChild("role_based_access_control") { + roleBasedAccessControlBlock := resource.GetBlock("role_based_access_control") + rbEnabledAttr := roleBasedAccessControlBlock.GetAttribute("enabled") + cluster.RoleBasedAccessControl.Metadata = roleBasedAccessControlBlock.GetMetadata() + cluster.RoleBasedAccessControl.Enabled = rbEnabledAttr.AsBoolValueOrDefault(false, roleBasedAccessControlBlock) + } + if resource.HasChild("role_based_access_control_enabled") { + // azurerm >= 2.99.0 + roleBasedAccessControlEnabledAttr := resource.GetAttribute("role_based_access_control_enabled") + cluster.RoleBasedAccessControl.Metadata = roleBasedAccessControlEnabledAttr.GetMetadata() + cluster.RoleBasedAccessControl.Enabled = roleBasedAccessControlEnabledAttr.AsBoolValueOrDefault(false, resource) + } + + if resource.HasChild("azure_active_directory_role_based_access_control") { + azureRoleBasedAccessControl := resource.GetBlock("azure_active_directory_role_based_access_control") + if azureRoleBasedAccessControl.IsNotNil() { + enabledAttr := azureRoleBasedAccessControl.GetAttribute("azure_rbac_enabled") + if !cluster.RoleBasedAccessControl.Enabled.IsTrue() { + cluster.RoleBasedAccessControl.Metadata = azureRoleBasedAccessControl.GetMetadata() + cluster.RoleBasedAccessControl.Enabled = enabledAttr.AsBoolValueOrDefault(false, azureRoleBasedAccessControl) + } + } + } + return cluster +} diff --git 
a/internal/adapters/terraform/azure/container/adapt_test.go b/internal/adapters/terraform/azure/container/adapt_test.go new file mode 100644 index 000000000000..92873310c1f7 --- /dev/null +++ b/internal/adapters/terraform/azure/container/adapt_test.go @@ -0,0 +1,262 @@ +package container + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/container" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptCluster(t *testing.T) { + tests := []struct { + name string + terraform string + expected container.KubernetesCluster + }{ + { + name: "defined", + terraform: ` + resource "azurerm_kubernetes_cluster" "example" { + private_cluster_enabled = true + + network_profile { + network_policy = "calico" + } + + api_server_access_profile { + + authorized_ip_ranges = [ + "1.2.3.4/32" + ] + + } + + addon_profile { + oms_agent { + enabled = true + } + } + + role_based_access_control { + enabled = true + } + } +`, + expected: container.KubernetesCluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkProfile: container.NetworkProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkPolicy: defsecTypes.String("calico", defsecTypes.NewTestMisconfigMetadata()), + }, + EnablePrivateCluster: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + APIServerAuthorizedIPRanges: []defsecTypes.StringValue{ + defsecTypes.String("1.2.3.4/32", defsecTypes.NewTestMisconfigMetadata()), + }, + AddonProfile: container.AddonProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + OMSAgent: container.OMSAgent{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + RoleBasedAccessControl: container.RoleBasedAccessControl{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "rbac with a new syntax", + terraform: ` + resource "azurerm_kubernetes_cluster" "example" { + role_based_access_control_enabled = true + } +`, + expected: container.KubernetesCluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkProfile: container.NetworkProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkPolicy: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + EnablePrivateCluster: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + AddonProfile: container.AddonProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + OMSAgent: container.OMSAgent{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + RoleBasedAccessControl: container.RoleBasedAccessControl{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "azurerm_kubernetes_cluster" "example" { + } +`, + expected: container.KubernetesCluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkProfile: container.NetworkProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkPolicy: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + 
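+ // an empty cluster resource should surface the adapter defaults below: private cluster, OMS agent and RBAC all disabled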
EnablePrivateCluster: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + AddonProfile: container.AddonProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + OMSAgent: container.OMSAgent{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + RoleBasedAccessControl: container.RoleBasedAccessControl{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "rbac off with k8s rbac on", + terraform: ` +resource "azurerm_kubernetes_cluster" "misreporting_example" { + role_based_access_control_enabled = true # Enable k8s RBAC + azure_active_directory_role_based_access_control { + managed = true # Enable AKS-managed Azure AAD integration + azure_rbac_enabled = false # Explicitly disable Azure RBAC for Kubernetes Authorization + } + } +`, + expected: container.KubernetesCluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkProfile: container.NetworkProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkPolicy: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + EnablePrivateCluster: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + AddonProfile: container.AddonProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + OMSAgent: container.OMSAgent{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + RoleBasedAccessControl: container.RoleBasedAccessControl{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptCluster(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_kubernetes_cluster" "example" { + private_cluster_enabled = true + + network_profile { + network_policy = "calico" + } + + api_server_access_profile { + + authorized_ip_ranges = [ + "1.2.3.4/32" + ] + + } + + addon_profile { + oms_agent { + enabled = true + } + } + + role_based_access_control { + enabled = true + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.KubernetesClusters, 1) + cluster := adapted.KubernetesClusters[0] + + assert.Equal(t, 3, cluster.EnablePrivateCluster.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, cluster.EnablePrivateCluster.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, cluster.NetworkProfile.Metadata.Range().GetStartLine()) + assert.Equal(t, 7, cluster.NetworkProfile.Metadata.Range().GetEndLine()) + + assert.Equal(t, 6, cluster.NetworkProfile.NetworkPolicy.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, cluster.NetworkProfile.NetworkPolicy.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, cluster.APIServerAuthorizedIPRanges[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 13, cluster.APIServerAuthorizedIPRanges[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 17, cluster.AddonProfile.Metadata.Range().GetStartLine()) + assert.Equal(t, 21, cluster.AddonProfile.Metadata.Range().GetEndLine()) + + assert.Equal(t, 18, 
cluster.AddonProfile.OMSAgent.Metadata.Range().GetStartLine()) + assert.Equal(t, 20, cluster.AddonProfile.OMSAgent.Metadata.Range().GetEndLine()) + + assert.Equal(t, 19, cluster.AddonProfile.OMSAgent.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 19, cluster.AddonProfile.OMSAgent.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 23, cluster.RoleBasedAccessControl.Metadata.Range().GetStartLine()) + assert.Equal(t, 25, cluster.RoleBasedAccessControl.Metadata.Range().GetEndLine()) + + assert.Equal(t, 24, cluster.RoleBasedAccessControl.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 24, cluster.RoleBasedAccessControl.Enabled.GetMetadata().Range().GetEndLine()) +} + +func TestWithLocals(t *testing.T) { + src := ` + variable "ip_whitelist" { + description = "IP Ranges with allowed access." + type = list(string) + default = ["1.2.3.4"] +} + +locals { + ip_whitelist = concat(var.ip_whitelist, split(",", data.azurerm_public_ip.build_agents.ip_address)) +} + +resource "azurerm_kubernetes_cluster" "aks" { + # not working + api_server_access_profile { + authorized_ip_ranges = local.ip_whitelist + } + # working + api_server_access_profile { + authorized_ip_ranges = concat(var.ip_whitelist, split(",", data.azurerm_public_ip.example.ip_address)) + } +}` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.KubernetesClusters, 1) + cluster := adapted.KubernetesClusters[0] + require.Len(t, cluster.APIServerAuthorizedIPRanges, 1) + assert.False(t, cluster.APIServerAuthorizedIPRanges[0].GetMetadata().IsResolvable()) +} diff --git a/internal/adapters/terraform/azure/database/adapt.go b/internal/adapters/terraform/azure/database/adapt.go new file mode 100644 index 000000000000..1a8ff729b2d7 --- /dev/null +++ b/internal/adapters/terraform/azure/database/adapt.go @@ -0,0 +1,439 @@ +package database + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/database" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) database.Database { + + mssqlAdapter := mssqlAdapter{ + alertPolicyIDs: modules.GetChildResourceIDMapByType("azurerm_mssql_server_security_alert_policy"), + auditingPolicyIDs: modules.GetChildResourceIDMapByType("azurerm_mssql_server_extended_auditing_policy", "azurerm_mssql_database_extended_auditing_policy"), + firewallIDs: modules.GetChildResourceIDMapByType("azurerm_sql_firewall_rule", "azurerm_mssql_firewall_rule"), + } + + mysqlAdapter := mysqlAdapter{ + firewallIDs: modules.GetChildResourceIDMapByType("azurerm_mysql_firewall_rule"), + } + + mariaDBAdapter := mariaDBAdapter{ + firewallIDs: modules.GetChildResourceIDMapByType("azurerm_mariadb_firewall_rule"), + } + + postgresqlAdapter := postgresqlAdapter{ + firewallIDs: modules.GetChildResourceIDMapByType("azurerm_postgresql_firewall_rule"), + } + + return database.Database{ + MSSQLServers: mssqlAdapter.adaptMSSQLServers(modules), + MariaDBServers: mariaDBAdapter.adaptMariaDBServers(modules), + MySQLServers: mysqlAdapter.adaptMySQLServers(modules), + PostgreSQLServers: postgresqlAdapter.adaptPostgreSQLServers(modules), + } +} + +type mssqlAdapter struct { + alertPolicyIDs terraform.ResourceIDResolutions + auditingPolicyIDs terraform.ResourceIDResolutions + firewallIDs terraform.ResourceIDResolutions +} + +type mysqlAdapter struct { + firewallIDs terraform.ResourceIDResolutions +} + +type mariaDBAdapter struct 
{ + firewallIDs terraform.ResourceIDResolutions +} + +type postgresqlAdapter struct { + firewallIDs terraform.ResourceIDResolutions +} + +func (a *mssqlAdapter) adaptMSSQLServers(modules terraform.Modules) []database.MSSQLServer { + var mssqlServers []database.MSSQLServer + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_sql_server") { + mssqlServers = append(mssqlServers, a.adaptMSSQLServer(resource, module)) + } + for _, resource := range module.GetResourcesByType("azurerm_mssql_server") { + mssqlServers = append(mssqlServers, a.adaptMSSQLServer(resource, module)) + } + } + + orphanResources := modules.GetResourceByIDs(a.alertPolicyIDs.Orphans()...) + + if len(orphanResources) > 0 { + orphanage := database.MSSQLServer{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + FirewallRules: nil, + }, + ExtendedAuditingPolicies: nil, + SecurityAlertPolicies: nil, + } + for _, policy := range orphanResources { + orphanage.SecurityAlertPolicies = append(orphanage.SecurityAlertPolicies, adaptMSSQLSecurityAlertPolicy(policy)) + } + mssqlServers = append(mssqlServers, orphanage) + + } + + orphanResources = modules.GetResourceByIDs(a.auditingPolicyIDs.Orphans()...) + + if len(orphanResources) > 0 { + orphanage := database.MSSQLServer{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + FirewallRules: nil, + }, + } + for _, policy := range orphanResources { + orphanage.ExtendedAuditingPolicies = append(orphanage.ExtendedAuditingPolicies, adaptMSSQLExtendedAuditingPolicy(policy)) + } + mssqlServers = append(mssqlServers, orphanage) + + } + + orphanResources = modules.GetResourceByIDs(a.firewallIDs.Orphans()...) + + if len(orphanResources) > 0 { + orphanage := database.MSSQLServer{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + } + for _, policy := range orphanResources { + orphanage.FirewallRules = append(orphanage.FirewallRules, adaptFirewallRule(policy)) + } + mssqlServers = append(mssqlServers, orphanage) + + } + + return mssqlServers +} +func (a *mysqlAdapter) adaptMySQLServers(modules terraform.Modules) []database.MySQLServer { + var mySQLServers []database.MySQLServer + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_mysql_server") { + mySQLServers = append(mySQLServers, a.adaptMySQLServer(resource, module)) + } + } + + orphanResources := modules.GetResourceByIDs(a.firewallIDs.Orphans()...) 
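+ // firewall rules whose server reference never resolved are not dropped: they are attached below to a synthetic "unmanaged" server placeholder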
+ + if len(orphanResources) > 0 { + orphanage := database.MySQLServer{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + FirewallRules: nil, + }, + } + for _, policy := range orphanResources { + orphanage.FirewallRules = append(orphanage.FirewallRules, adaptFirewallRule(policy)) + } + mySQLServers = append(mySQLServers, orphanage) + + } + + return mySQLServers +} + +func (a *mariaDBAdapter) adaptMariaDBServers(modules terraform.Modules) []database.MariaDBServer { + var mariaDBServers []database.MariaDBServer + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_mariadb_server") { + mariaDBServers = append(mariaDBServers, a.adaptMariaDBServer(resource, module)) + } + } + + orphanResources := modules.GetResourceByIDs(a.firewallIDs.Orphans()...) + + if len(orphanResources) > 0 { + orphanage := database.MariaDBServer{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + FirewallRules: nil, + }, + } + for _, policy := range orphanResources { + orphanage.FirewallRules = append(orphanage.FirewallRules, adaptFirewallRule(policy)) + } + mariaDBServers = append(mariaDBServers, orphanage) + + } + + return mariaDBServers +} + +func (a *postgresqlAdapter) adaptPostgreSQLServers(modules terraform.Modules) []database.PostgreSQLServer { + var postgreSQLServers []database.PostgreSQLServer + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_postgresql_server") { + postgreSQLServers = append(postgreSQLServers, a.adaptPostgreSQLServer(resource, module)) + } + } + + orphanResources := modules.GetResourceByIDs(a.firewallIDs.Orphans()...) 
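+ // as with MySQL above, orphaned firewall rules land on an unmanaged placeholder server; for PostgreSQL the placeholder also carries a Config with every flag defaulted to false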
+ + if len(orphanResources) > 0 { + orphanage := database.PostgreSQLServer{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + FirewallRules: nil, + }, + Config: database.PostgresSQLConfig{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + LogCheckpoints: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + ConnectionThrottling: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + LogConnections: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + } + for _, policy := range orphanResources { + orphanage.FirewallRules = append(orphanage.FirewallRules, adaptFirewallRule(policy)) + } + postgreSQLServers = append(postgreSQLServers, orphanage) + + } + + return postgreSQLServers +} + +func (a *mssqlAdapter) adaptMSSQLServer(resource *terraform.Block, module *terraform.Module) database.MSSQLServer { + minTLSVersionVal := defsecTypes.StringDefault("", resource.GetMetadata()) + publicAccessVal := defsecTypes.BoolDefault(true, resource.GetMetadata()) + enableSSLEnforcementVal := defsecTypes.BoolDefault(false, resource.GetMetadata()) + + var auditingPolicies []database.ExtendedAuditingPolicy + var alertPolicies []database.SecurityAlertPolicy + var firewallRules []database.FirewallRule + + if resource.TypeLabel() == "azurerm_mssql_server" { + minTLSVersionAttr := resource.GetAttribute("minimum_tls_version") + minTLSVersionVal = minTLSVersionAttr.AsStringValueOrDefault("", resource) + + publicAccessAttr := resource.GetAttribute("public_network_access_enabled") + publicAccessVal = publicAccessAttr.AsBoolValueOrDefault(true, resource) + + } + + alertPolicyBlocks := module.GetReferencingResources(resource, "azurerm_mssql_server_security_alert_policy", "server_name") + for _, alertBlock := range alertPolicyBlocks { + a.alertPolicyIDs.Resolve(alertBlock.ID()) + alertPolicies = append(alertPolicies, adaptMSSQLSecurityAlertPolicy(alertBlock)) + } + + auditingPoliciesBlocks := module.GetReferencingResources(resource, "azurerm_mssql_server_extended_auditing_policy", "server_id") + if resource.HasChild("extended_auditing_policy") { + auditingPoliciesBlocks = append(auditingPoliciesBlocks, resource.GetBlocks("extended_auditing_policy")...) + } + + databasesRes := module.GetReferencingResources(resource, "azurerm_mssql_database", "server_id") + for _, databaseRes := range databasesRes { + dbAuditingBlocks := module.GetReferencingResources(databaseRes, "azurerm_mssql_database_extended_auditing_policy", "database_id") + auditingPoliciesBlocks = append(auditingPoliciesBlocks, dbAuditingBlocks...) + } + + for _, auditBlock := range auditingPoliciesBlocks { + a.auditingPolicyIDs.Resolve(auditBlock.ID()) + auditingPolicies = append(auditingPolicies, adaptMSSQLExtendedAuditingPolicy(auditBlock)) + } + + firewallRuleBlocks := module.GetReferencingResources(resource, "azurerm_sql_firewall_rule", "server_name") + firewallRuleBlocks = append(firewallRuleBlocks, module.GetReferencingResources(resource, "azurerm_mssql_firewall_rule", "server_id")...) 
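+ // both generations of the firewall resource are collected above: the legacy azurerm_sql_firewall_rule (matched by server_name) and the newer azurerm_mssql_firewall_rule (matched by server_id)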
+ for _, firewallBlock := range firewallRuleBlocks { + a.firewallIDs.Resolve(firewallBlock.ID()) + firewallRules = append(firewallRules, adaptFirewallRule(firewallBlock)) + } + + return database.MSSQLServer{ + Metadata: resource.GetMetadata(), + Server: database.Server{ + Metadata: resource.GetMetadata(), + EnableSSLEnforcement: enableSSLEnforcementVal, + MinimumTLSVersion: minTLSVersionVal, + EnablePublicNetworkAccess: publicAccessVal, + FirewallRules: firewallRules, + }, + ExtendedAuditingPolicies: auditingPolicies, + SecurityAlertPolicies: alertPolicies, + } +} + +func (a *mysqlAdapter) adaptMySQLServer(resource *terraform.Block, module *terraform.Module) database.MySQLServer { + var firewallRules []database.FirewallRule + + enableSSLEnforcementAttr := resource.GetAttribute("ssl_enforcement_enabled") + enableSSLEnforcementVal := enableSSLEnforcementAttr.AsBoolValueOrDefault(false, resource) + + minTLSVersionAttr := resource.GetAttribute("ssl_minimal_tls_version_enforced") + minTLSVersionVal := minTLSVersionAttr.AsStringValueOrDefault("TLSEnforcementDisabled", resource) + + publicAccessAttr := resource.GetAttribute("public_network_access_enabled") + publicAccessVal := publicAccessAttr.AsBoolValueOrDefault(true, resource) + + firewallRuleBlocks := module.GetReferencingResources(resource, "azurerm_mysql_firewall_rule", "server_name") + for _, firewallBlock := range firewallRuleBlocks { + a.firewallIDs.Resolve(firewallBlock.ID()) + firewallRules = append(firewallRules, adaptFirewallRule(firewallBlock)) + } + + return database.MySQLServer{ + Metadata: resource.GetMetadata(), + Server: database.Server{ + Metadata: resource.GetMetadata(), + EnableSSLEnforcement: enableSSLEnforcementVal, + MinimumTLSVersion: minTLSVersionVal, + EnablePublicNetworkAccess: publicAccessVal, + FirewallRules: firewallRules, + }, + } +} + +func (a *mariaDBAdapter) adaptMariaDBServer(resource *terraform.Block, module *terraform.Module) database.MariaDBServer { + var firewallRules []database.FirewallRule + + enableSSLEnforcementAttr := resource.GetAttribute("ssl_enforcement_enabled") + enableSSLEnforcementVal := enableSSLEnforcementAttr.AsBoolValueOrDefault(false, resource) + + publicAccessAttr := resource.GetAttribute("public_network_access_enabled") + publicAccessVal := publicAccessAttr.AsBoolValueOrDefault(true, resource) + + firewallRuleBlocks := module.GetReferencingResources(resource, "azurerm_mariadb_firewall_rule", "server_name") + for _, firewallBlock := range firewallRuleBlocks { + a.firewallIDs.Resolve(firewallBlock.ID()) + firewallRules = append(firewallRules, adaptFirewallRule(firewallBlock)) + } + + return database.MariaDBServer{ + Metadata: resource.GetMetadata(), + Server: database.Server{ + Metadata: resource.GetMetadata(), + EnableSSLEnforcement: enableSSLEnforcementVal, + MinimumTLSVersion: defsecTypes.StringDefault("", resource.GetMetadata()), + EnablePublicNetworkAccess: publicAccessVal, + FirewallRules: firewallRules, + }, + } +} + +func (a *postgresqlAdapter) adaptPostgreSQLServer(resource *terraform.Block, module *terraform.Module) database.PostgreSQLServer { + var firewallRules []database.FirewallRule + + enableSSLEnforcementAttr := resource.GetAttribute("ssl_enforcement_enabled") + enableSSLEnforcementVal := enableSSLEnforcementAttr.AsBoolValueOrDefault(false, resource) + + minTLSVersionAttr := resource.GetAttribute("ssl_minimal_tls_version_enforced") + minTLSVersionVal := minTLSVersionAttr.AsStringValueOrDefault("TLSEnforcementDisabled", resource) + + publicAccessAttr := 
resource.GetAttribute("public_network_access_enabled") + publicAccessVal := publicAccessAttr.AsBoolValueOrDefault(true, resource) + + firewallRuleBlocks := module.GetReferencingResources(resource, "azurerm_postgresql_firewall_rule", "server_name") + for _, firewallBlock := range firewallRuleBlocks { + a.firewallIDs.Resolve(firewallBlock.ID()) + firewallRules = append(firewallRules, adaptFirewallRule(firewallBlock)) + } + + configBlocks := module.GetReferencingResources(resource, "azurerm_postgresql_configuration", "server_name") + config := adaptPostgreSQLConfig(resource, configBlocks) + + return database.PostgreSQLServer{ + Metadata: resource.GetMetadata(), + Server: database.Server{ + Metadata: resource.GetMetadata(), + EnableSSLEnforcement: enableSSLEnforcementVal, + MinimumTLSVersion: minTLSVersionVal, + EnablePublicNetworkAccess: publicAccessVal, + FirewallRules: firewallRules, + }, + Config: config, + } +} + +func adaptPostgreSQLConfig(resource *terraform.Block, configBlocks []*terraform.Block) database.PostgresSQLConfig { + config := database.PostgresSQLConfig{ + Metadata: resource.GetMetadata(), + LogCheckpoints: defsecTypes.BoolDefault(false, resource.GetMetadata()), + ConnectionThrottling: defsecTypes.BoolDefault(false, resource.GetMetadata()), + LogConnections: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } + + for _, configBlock := range configBlocks { + + nameAttr := configBlock.GetAttribute("name") + valAttr := configBlock.GetAttribute("value") + + if nameAttr.Equals("log_checkpoints") { + config.LogCheckpoints = defsecTypes.Bool(valAttr.Equals("on"), valAttr.GetMetadata()) + } + if nameAttr.Equals("connection_throttling") { + config.ConnectionThrottling = defsecTypes.Bool(valAttr.Equals("on"), valAttr.GetMetadata()) + } + if nameAttr.Equals("log_connections") { + config.LogConnections = defsecTypes.Bool(valAttr.Equals("on"), valAttr.GetMetadata()) + } + } + + return config +} + +func adaptMSSQLSecurityAlertPolicy(resource *terraform.Block) database.SecurityAlertPolicy { + + emailAddressesAttr := resource.GetAttribute("email_addresses") + disabledAlertsAttr := resource.GetAttribute("disabled_alerts") + + emailAccountAdminsAttr := resource.GetAttribute("email_account_admins") + emailAccountAdminsVal := emailAccountAdminsAttr.AsBoolValueOrDefault(false, resource) + + return database.SecurityAlertPolicy{ + Metadata: resource.GetMetadata(), + EmailAddresses: emailAddressesAttr.AsStringValues(), + DisabledAlerts: disabledAlertsAttr.AsStringValues(), + EmailAccountAdmins: emailAccountAdminsVal, + } +} + +func adaptFirewallRule(resource *terraform.Block) database.FirewallRule { + startIPAttr := resource.GetAttribute("start_ip_address") + startIPVal := startIPAttr.AsStringValueOrDefault("", resource) + + endIPAttr := resource.GetAttribute("end_ip_address") + endIPVal := endIPAttr.AsStringValueOrDefault("", resource) + + return database.FirewallRule{ + Metadata: resource.GetMetadata(), + StartIP: startIPVal, + EndIP: endIPVal, + } +} + +func adaptMSSQLExtendedAuditingPolicy(resource *terraform.Block) database.ExtendedAuditingPolicy { + retentionInDaysAttr := resource.GetAttribute("retention_in_days") + retentionInDaysVal := retentionInDaysAttr.AsIntValueOrDefault(0, resource) + + return database.ExtendedAuditingPolicy{ + Metadata: resource.GetMetadata(), + RetentionInDays: retentionInDaysVal, + } +} diff --git a/internal/adapters/terraform/azure/database/adapt_test.go b/internal/adapters/terraform/azure/database/adapt_test.go new file mode 100644 index 
000000000000..4cf2440892fe --- /dev/null +++ b/internal/adapters/terraform/azure/database/adapt_test.go @@ -0,0 +1,454 @@ +package database + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/pkg/providers/azure/database" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected database.Database + }{ + { + name: "postgresql", + terraform: ` + resource "azurerm_postgresql_server" "example" { + name = "example" + + public_network_access_enabled = true + ssl_enforcement_enabled = true + ssl_minimal_tls_version_enforced = "TLS1_2" + } + + resource "azurerm_postgresql_configuration" "example" { + name = "log_connections" + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_postgresql_server.example.name + value = "on" + } + + resource "azurerm_postgresql_configuration" "example" { + name = "log_checkpoints" + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_postgresql_server.example.name + value = "on" + } + + resource "azurerm_postgresql_configuration" "example" { + name = "connection_throttling" + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_postgresql_server.example.name + value = "on" + } + + resource "azurerm_postgresql_firewall_rule" "example" { + name = "office" + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_postgresql_server.example.name + start_ip_address = "40.112.8.12" + end_ip_address = "40.112.8.12" + } +`, + expected: database.Database{ + PostgreSQLServers: []database.PostgreSQLServer{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.String("TLS1_2", defsecTypes.NewTestMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + FirewallRules: []database.FirewallRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + StartIP: defsecTypes.String("40.112.8.12", defsecTypes.NewTestMisconfigMetadata()), + EndIP: defsecTypes.String("40.112.8.12", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + Config: database.PostgresSQLConfig{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + LogConnections: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + LogCheckpoints: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + ConnectionThrottling: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + { + name: "mariadb", + terraform: ` + resource "azurerm_mariadb_server" "example" { + name = "example-mariadb-server" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + + public_network_access_enabled = false + ssl_enforcement_enabled = true + } + + resource "azurerm_mariadb_firewall_rule" "example" { + name = "test-rule" + server_name = azurerm_mariadb_server.example.name + start_ip_address = "40.112.0.0" + end_ip_address = "40.112.255.255" + } +`, + expected: database.Database{ + MariaDBServers: 
[]database.MariaDBServer{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + FirewallRules: []database.FirewallRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + StartIP: defsecTypes.String("40.112.0.0", defsecTypes.NewTestMisconfigMetadata()), + EndIP: defsecTypes.String("40.112.255.255", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + }, + { + name: "mysql", + terraform: ` + resource "azurerm_mysql_server" "example" { + public_network_access_enabled = true + ssl_enforcement_enabled = true + ssl_minimal_tls_version_enforced = "TLS1_2" + } + + resource "azurerm_mysql_firewall_rule" "example" { + server_name = azurerm_mysql_server.example.name + start_ip_address = "40.112.8.12" + end_ip_address = "40.112.8.12" + } + `, + expected: database.Database{ + MySQLServers: []database.MySQLServer{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableSSLEnforcement: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.String("TLS1_2", defsecTypes.NewTestMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + FirewallRules: []database.FirewallRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + StartIP: defsecTypes.String("40.112.8.12", defsecTypes.NewTestMisconfigMetadata()), + EndIP: defsecTypes.String("40.112.8.12", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + }, + { + name: "ms sql", + terraform: ` + resource "azurerm_mssql_server" "example" { + name = "mssqlserver" + minimum_tls_version = "1.2" + public_network_access_enabled = false + } + + resource "azurerm_mssql_firewall_rule" "example" { + name = "FirewallRule1" + server_id = azurerm_mssql_server.example.id + start_ip_address = "10.0.17.62" + end_ip_address = "10.0.17.62" + } + + resource "azurerm_mssql_server_security_alert_policy" "example" { + resource_group_name = azurerm_resource_group.example.name + server_name = azurerm_mssql_server.example.name + disabled_alerts = [ + "Sql_Injection", + "Data_Exfiltration" + ] + email_account_admins = true + email_addresses = [ + "example@example.com" + ] + } + + resource "azurerm_mssql_server_extended_auditing_policy" "example" { + server_id = azurerm_mssql_server.example.id + retention_in_days = 6 + } + `, + expected: database.Database{ + MSSQLServers: []database.MSSQLServer{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Server: database.Server{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + MinimumTLSVersion: defsecTypes.String("1.2", defsecTypes.NewTestMisconfigMetadata()), + EnablePublicNetworkAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + EnableSSLEnforcement: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + FirewallRules: []database.FirewallRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + StartIP: defsecTypes.String("10.0.17.62", defsecTypes.NewTestMisconfigMetadata()), + EndIP: defsecTypes.String("10.0.17.62", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + ExtendedAuditingPolicies: 
[]database.ExtendedAuditingPolicy{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RetentionInDays: defsecTypes.Int(6, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + SecurityAlertPolicies: []database.SecurityAlertPolicy{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EmailAddresses: []defsecTypes.StringValue{ + defsecTypes.String("example@example.com", defsecTypes.NewTestMisconfigMetadata()), + }, + DisabledAlerts: []defsecTypes.StringValue{ + defsecTypes.String("Sql_Injection", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("Data_Exfiltration", defsecTypes.NewTestMisconfigMetadata()), + }, + EmailAccountAdmins: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_postgresql_server" "example" { + public_network_access_enabled = true + ssl_enforcement_enabled = true + ssl_minimal_tls_version_enforced = "TLS1_2" + } + + resource "azurerm_postgresql_configuration" "example" { + name = "log_connections" + server_name = azurerm_postgresql_server.example.name + value = "on" + } + + resource "azurerm_postgresql_configuration" "example" { + name = "log_checkpoints" + server_name = azurerm_postgresql_server.example.name + value = "on" + } + + resource "azurerm_postgresql_configuration" "example" { + name = "connection_throttling" + server_name = azurerm_postgresql_server.example.name + value = "on" + } + + resource "azurerm_postgresql_firewall_rule" "example" { + name = "office" + server_name = azurerm_postgresql_server.example.name + start_ip_address = "40.112.8.12" + end_ip_address = "40.112.8.12" + } + + resource "azurerm_mariadb_server" "example" { + public_network_access_enabled = false + ssl_enforcement_enabled = true + } + + resource "azurerm_mariadb_firewall_rule" "example" { + name = "test-rule" + server_name = azurerm_mariadb_server.example.name + start_ip_address = "40.112.0.0" + end_ip_address = "40.112.255.255" + } + + resource "azurerm_mysql_server" "example" { + public_network_access_enabled = true + ssl_enforcement_enabled = true + ssl_minimal_tls_version_enforced = "TLS1_2" + } + + resource "azurerm_mysql_firewall_rule" "example" { + server_name = azurerm_mysql_server.example.name + start_ip_address = "40.112.8.12" + end_ip_address = "40.112.8.12" + } + + resource "azurerm_mssql_server" "example" { + name = "mssqlserver" + public_network_access_enabled = false + minimum_tls_version = "1.2" + } + + resource "azurerm_mssql_firewall_rule" "example" { + name = "FirewallRule1" + server_id = azurerm_mssql_server.example.id + start_ip_address = "10.0.17.62" + end_ip_address = "10.0.17.62" + } + + resource "azurerm_mssql_server_security_alert_policy" "example" { + server_name = azurerm_mssql_server.example.name + disabled_alerts = [ + "Sql_Injection", + "Data_Exfiltration" + ] + email_account_admins = true + email_addresses = [ + "example@example.com" + ] + } + + resource "azurerm_mssql_server_extended_auditing_policy" "example" { + server_id = azurerm_mssql_server.example.id + retention_in_days = 6 + } + ` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.PostgreSQLServers, 1) + require.Len(t, adapted.MariaDBServers, 1) + 
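// the fixture above declares exactly one server per engine, so each adapter bucket must hold a single entry +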
require.Len(t, adapted.MySQLServers, 1) + require.Len(t, adapted.MSSQLServers, 1) + + postgres := adapted.PostgreSQLServers[0] + mariadb := adapted.MariaDBServers[0] + mysql := adapted.MySQLServers[0] + mssql := adapted.MSSQLServers[0] + + assert.Equal(t, 2, postgres.Metadata.Range().GetStartLine()) + assert.Equal(t, 6, postgres.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, postgres.EnablePublicNetworkAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, postgres.EnablePublicNetworkAccess.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, postgres.EnableSSLEnforcement.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, postgres.EnableSSLEnforcement.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, postgres.MinimumTLSVersion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, postgres.MinimumTLSVersion.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, postgres.Config.LogConnections.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, postgres.Config.LogConnections.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 17, postgres.Config.LogCheckpoints.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, postgres.Config.LogCheckpoints.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 23, postgres.Config.ConnectionThrottling.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 23, postgres.Config.ConnectionThrottling.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 26, postgres.FirewallRules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 31, postgres.FirewallRules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 29, postgres.FirewallRules[0].StartIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 29, postgres.FirewallRules[0].StartIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 30, postgres.FirewallRules[0].EndIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 30, postgres.FirewallRules[0].EndIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 33, mariadb.Metadata.Range().GetStartLine()) + assert.Equal(t, 36, mariadb.Metadata.Range().GetEndLine()) + + assert.Equal(t, 34, mariadb.EnablePublicNetworkAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 34, mariadb.EnablePublicNetworkAccess.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 35, mariadb.EnableSSLEnforcement.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 35, mariadb.EnableSSLEnforcement.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 38, mariadb.FirewallRules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 43, mariadb.FirewallRules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 41, mariadb.FirewallRules[0].StartIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 41, mariadb.FirewallRules[0].StartIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 42, mariadb.FirewallRules[0].EndIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 42, mariadb.FirewallRules[0].EndIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 45, mysql.Metadata.Range().GetStartLine()) + assert.Equal(t, 49, mysql.Metadata.Range().GetEndLine()) + + assert.Equal(t, 46, mysql.EnablePublicNetworkAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 46, mysql.EnablePublicNetworkAccess.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 47, mysql.EnableSSLEnforcement.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 47, mysql.EnableSSLEnforcement.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 48, 
mysql.MinimumTLSVersion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 48, mysql.MinimumTLSVersion.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 51, mysql.FirewallRules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 55, mysql.FirewallRules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 53, mysql.FirewallRules[0].StartIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 53, mysql.FirewallRules[0].StartIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 54, mysql.FirewallRules[0].EndIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 54, mysql.FirewallRules[0].EndIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 57, mssql.Metadata.Range().GetStartLine()) + assert.Equal(t, 61, mssql.Metadata.Range().GetEndLine()) + + assert.Equal(t, 59, mssql.EnablePublicNetworkAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 59, mssql.EnablePublicNetworkAccess.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 60, mssql.MinimumTLSVersion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 60, mssql.MinimumTLSVersion.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 63, mssql.FirewallRules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 68, mssql.FirewallRules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 66, mssql.FirewallRules[0].StartIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 66, mssql.FirewallRules[0].StartIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 67, mssql.FirewallRules[0].EndIP.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 67, mssql.FirewallRules[0].EndIP.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 70, mssql.SecurityAlertPolicies[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 80, mssql.SecurityAlertPolicies[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 72, mssql.SecurityAlertPolicies[0].DisabledAlerts[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 75, mssql.SecurityAlertPolicies[0].DisabledAlerts[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 76, mssql.SecurityAlertPolicies[0].EmailAccountAdmins.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 76, mssql.SecurityAlertPolicies[0].EmailAccountAdmins.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 77, mssql.SecurityAlertPolicies[0].EmailAddresses[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 79, mssql.SecurityAlertPolicies[0].EmailAddresses[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 82, mssql.ExtendedAuditingPolicies[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 85, mssql.ExtendedAuditingPolicies[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 84, mssql.ExtendedAuditingPolicies[0].RetentionInDays.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 84, mssql.ExtendedAuditingPolicies[0].RetentionInDays.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/azure/datafactory/adapt.go b/internal/adapters/terraform/azure/datafactory/adapt.go new file mode 100644 index 000000000000..332a8aaff6de --- /dev/null +++ b/internal/adapters/terraform/azure/datafactory/adapt.go @@ -0,0 +1,33 @@ +package datafactory + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/datafactory" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) datafactory.DataFactory { + return datafactory.DataFactory{ + DataFactories: adaptFactories(modules), + } +} + +func adaptFactories(modules terraform.Modules) []datafactory.Factory { + var 
factories []datafactory.Factory + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_data_factory") { + factories = append(factories, adaptFactory(resource)) + } + } + return factories +} + +func adaptFactory(resource *terraform.Block) datafactory.Factory { + enablePublicNetworkAttr := resource.GetAttribute("public_network_enabled") + enablePublicNetworkVal := enablePublicNetworkAttr.AsBoolValueOrDefault(true, resource) + + return datafactory.Factory{ + Metadata: resource.GetMetadata(), + EnablePublicNetwork: enablePublicNetworkVal, + } +} diff --git a/internal/adapters/terraform/azure/datafactory/adapt_test.go b/internal/adapters/terraform/azure/datafactory/adapt_test.go new file mode 100644 index 000000000000..a6fd6a02cfc0 --- /dev/null +++ b/internal/adapters/terraform/azure/datafactory/adapt_test.go @@ -0,0 +1,79 @@ +package datafactory + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/datafactory" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptFactory(t *testing.T) { + tests := []struct { + name string + terraform string + expected datafactory.Factory + }{ + { + name: "defined", + terraform: ` + resource "azurerm_data_factory" "example" { + name = "example" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + public_network_enabled = false + } +`, + expected: datafactory.Factory{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnablePublicNetwork: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "default", + terraform: ` + resource "azurerm_data_factory" "example" { + name = "example" + } +`, + expected: datafactory.Factory{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnablePublicNetwork: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptFactory(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_data_factory" "example" { + name = "example" + location = azurerm_resource_group.example.location + resource_group_name = azurerm_resource_group.example.name + public_network_enabled = false + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.DataFactories, 1) + dataFactory := adapted.DataFactories[0] + + assert.Equal(t, 6, dataFactory.EnablePublicNetwork.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, dataFactory.EnablePublicNetwork.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/azure/datalake/adapt.go b/internal/adapters/terraform/azure/datalake/adapt.go new file mode 100644 index 000000000000..beee218e24a0 --- /dev/null +++ b/internal/adapters/terraform/azure/datalake/adapt.go @@ -0,0 +1,38 @@ +package datalake + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/datalake" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func 
Adapt(modules terraform.Modules) datalake.DataLake { + return datalake.DataLake{ + Stores: adaptStores(modules), + } +} + +func adaptStores(modules terraform.Modules) []datalake.Store { + var stores []datalake.Store + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_data_lake_store") { + stores = append(stores, adaptStore(resource)) + } + } + return stores +} + +func adaptStore(resource *terraform.Block) datalake.Store { + store := datalake.Store{ + Metadata: resource.GetMetadata(), + EnableEncryption: types.BoolDefault(true, resource.GetMetadata()), + } + encryptionStateAttr := resource.GetAttribute("encryption_state") + if encryptionStateAttr.Equals("Disabled") { + store.EnableEncryption = types.Bool(false, encryptionStateAttr.GetMetadata()) + } else if encryptionStateAttr.Equals("Enabled") { + store.EnableEncryption = types.Bool(true, encryptionStateAttr.GetMetadata()) + } + return store +} diff --git a/internal/adapters/terraform/azure/datalake/adapt_test.go b/internal/adapters/terraform/azure/datalake/adapt_test.go new file mode 100644 index 000000000000..d130a8543e6e --- /dev/null +++ b/internal/adapters/terraform/azure/datalake/adapt_test.go @@ -0,0 +1,83 @@ +package datalake + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/datalake" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptStore(t *testing.T) { + tests := []struct { + name string + terraform string + expected datalake.Store + }{ + { + name: "enabled", + terraform: ` + resource "azurerm_data_lake_store" "good_example" { + encryption_state = "Enabled" + } +`, + expected: datalake.Store{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableEncryption: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "disabled", + terraform: ` + resource "azurerm_data_lake_store" "good_example" { + encryption_state = "Disabled" + } +`, + expected: datalake.Store{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableEncryption: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "enabled by default", + terraform: ` + resource "azurerm_data_lake_store" "good_example" { + } +`, + expected: datalake.Store{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableEncryption: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptStore(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_data_lake_store" "good_example" { + encryption_state = "Disabled" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Stores, 1) + store := adapted.Stores[0] + + assert.Equal(t, 3, store.EnableEncryption.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, store.EnableEncryption.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/azure/keyvault/adapt.go b/internal/adapters/terraform/azure/keyvault/adapt.go new file mode 100644 index 
000000000000..30e34f124775 --- /dev/null +++ b/internal/adapters/terraform/azure/keyvault/adapt.go @@ -0,0 +1,159 @@ +package keyvault + +import ( + "time" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/terraform" + + "github.com/aquasecurity/trivy/pkg/providers/azure/keyvault" +) + +func Adapt(modules terraform.Modules) keyvault.KeyVault { + adapter := adapter{ + vaultSecretIDs: modules.GetChildResourceIDMapByType("azurerm_key_vault_secret"), + vaultKeyIDs: modules.GetChildResourceIDMapByType("azurerm_key_vault_key"), + } + + return keyvault.KeyVault{ + Vaults: adapter.adaptVaults(modules), + } +} + +type adapter struct { + vaultSecretIDs terraform.ResourceIDResolutions + vaultKeyIDs terraform.ResourceIDResolutions +} + +func (a *adapter) adaptVaults(modules terraform.Modules) []keyvault.Vault { + + var vaults []keyvault.Vault + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_key_vault") { + vaults = append(vaults, a.adaptVault(resource, module)) + + } + } + + orphanResources := modules.GetResourceByIDs(a.vaultSecretIDs.Orphans()...) + + if len(orphanResources) > 0 { + orphanage := keyvault.Vault{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Secrets: nil, + Keys: nil, + EnablePurgeProtection: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + SoftDeleteRetentionDays: defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMisconfigMetadata()), + NetworkACLs: keyvault.NetworkACLs{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + DefaultAction: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + } + for _, secretResource := range orphanResources { + orphanage.Secrets = append(orphanage.Secrets, adaptSecret(secretResource)) + } + vaults = append(vaults, orphanage) + } + + orphanResources = modules.GetResourceByIDs(a.vaultKeyIDs.Orphans()...) 
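+ // key blocks that were never resolved against a managed vault are collected into a synthetic orphanage vault below, mirroring the secret handling above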
+ + if len(orphanResources) > 0 { + orphanage := keyvault.Vault{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Secrets: nil, + Keys: nil, + EnablePurgeProtection: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + SoftDeleteRetentionDays: defsecTypes.IntDefault(0, defsecTypes.NewUnmanagedMisconfigMetadata()), + NetworkACLs: keyvault.NetworkACLs{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + DefaultAction: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + } + for _, secretResource := range orphanResources { + orphanage.Keys = append(orphanage.Keys, adaptKey(secretResource)) + } + vaults = append(vaults, orphanage) + } + + return vaults +} + +func (a *adapter) adaptVault(resource *terraform.Block, module *terraform.Module) keyvault.Vault { + var keys []keyvault.Key + var secrets []keyvault.Secret + + defaultActionVal := defsecTypes.StringDefault("", resource.GetMetadata()) + + secretBlocks := module.GetReferencingResources(resource, "azurerm_key_vault_secret", "key_vault_id") + for _, secretBlock := range secretBlocks { + a.vaultSecretIDs.Resolve(secretBlock.ID()) + secrets = append(secrets, adaptSecret(secretBlock)) + } + + keyBlocks := module.GetReferencingResources(resource, "azurerm_key_vault_key", "key_vault_id") + for _, keyBlock := range keyBlocks { + a.vaultKeyIDs.Resolve(keyBlock.ID()) + keys = append(keys, adaptKey(keyBlock)) + } + + purgeProtectionAttr := resource.GetAttribute("purge_protection_enabled") + purgeProtectionVal := purgeProtectionAttr.AsBoolValueOrDefault(false, resource) + + softDeleteRetentionDaysAttr := resource.GetAttribute("soft_delete_retention_days") + softDeleteRetentionDaysVal := softDeleteRetentionDaysAttr.AsIntValueOrDefault(0, resource) + + aclMetadata := defsecTypes.NewUnmanagedMisconfigMetadata() + if aclBlock := resource.GetBlock("network_acls"); aclBlock.IsNotNil() { + aclMetadata = aclBlock.GetMetadata() + defaultActionAttr := aclBlock.GetAttribute("default_action") + defaultActionVal = defaultActionAttr.AsStringValueOrDefault("", resource.GetBlock("network_acls")) + } + + return keyvault.Vault{ + Metadata: resource.GetMetadata(), + Secrets: secrets, + Keys: keys, + EnablePurgeProtection: purgeProtectionVal, + SoftDeleteRetentionDays: softDeleteRetentionDaysVal, + NetworkACLs: keyvault.NetworkACLs{ + Metadata: aclMetadata, + DefaultAction: defaultActionVal, + }, + } +} + +func adaptSecret(resource *terraform.Block) keyvault.Secret { + contentTypeAttr := resource.GetAttribute("content_type") + contentTypeVal := contentTypeAttr.AsStringValueOrDefault("", resource) + + return keyvault.Secret{ + Metadata: resource.GetMetadata(), + ContentType: contentTypeVal, + ExpiryDate: resolveExpiryDate(resource), + } +} + +func adaptKey(resource *terraform.Block) keyvault.Key { + + return keyvault.Key{ + Metadata: resource.GetMetadata(), + ExpiryDate: resolveExpiryDate(resource), + } +} + +func resolveExpiryDate(resource *terraform.Block) defsecTypes.TimeValue { + expiryDateAttr := resource.GetAttribute("expiration_date") + expiryDateVal := defsecTypes.TimeDefault(time.Time{}, resource.GetMetadata()) + + if expiryDateAttr.IsString() { + expiryDateString := expiryDateAttr.Value().AsString() + if expiryDate, err := time.Parse(time.RFC3339, expiryDateString); err == nil { + expiryDateVal = defsecTypes.Time(expiryDate, expiryDateAttr.GetMetadata()) + } + } else if expiryDateAttr.IsNotNil() { + expiryDateVal = defsecTypes.TimeUnresolvable(expiryDateAttr.GetMetadata()) + } + + return 
expiryDateVal +} diff --git a/internal/adapters/terraform/azure/keyvault/adapt_test.go b/internal/adapters/terraform/azure/keyvault/adapt_test.go new file mode 100644 index 000000000000..fbf07a5432ce --- /dev/null +++ b/internal/adapters/terraform/azure/keyvault/adapt_test.go @@ -0,0 +1,271 @@ +package keyvault + +import ( + "testing" + "time" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/keyvault" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected keyvault.KeyVault + }{ + { + name: "defined", + terraform: ` + resource "azurerm_key_vault" "example" { + name = "examplekeyvault" + enabled_for_disk_encryption = true + soft_delete_retention_days = 7 + purge_protection_enabled = true + + network_acls { + bypass = "AzureServices" + default_action = "Deny" + } + } +`, + expected: keyvault.KeyVault{ + Vaults: []keyvault.Vault{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnablePurgeProtection: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + SoftDeleteRetentionDays: defsecTypes.Int(7, defsecTypes.NewTestMisconfigMetadata()), + NetworkACLs: keyvault.NetworkACLs{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + DefaultAction: defsecTypes.String("Deny", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "azurerm_key_vault" "example" { + } +`, + expected: keyvault.KeyVault{ + Vaults: []keyvault.Vault{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnablePurgeProtection: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + SoftDeleteRetentionDays: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + NetworkACLs: keyvault.NetworkACLs{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + DefaultAction: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptSecret(t *testing.T) { + tests := []struct { + name string + terraform string + expected keyvault.Secret + }{ + { + name: "defaults", + terraform: ` + resource "azurerm_key_vault_secret" "example" { + } +`, + expected: keyvault.Secret{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ContentType: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + ExpiryDate: defsecTypes.Time(time.Time{}, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defined", + terraform: ` + resource "azurerm_key_vault_secret" "example" { + content_type = "password" + expiration_date = "1982-12-31T00:00:00Z" + } +`, + expected: keyvault.Secret{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ContentType: defsecTypes.String("password", defsecTypes.NewTestMisconfigMetadata()), + ExpiryDate: defsecTypes.Time(func(timeVal string) time.Time { + parsed, _ := time.Parse(time.RFC3339, timeVal) + return parsed + }("1982-12-31T00:00:00Z"), defsecTypes.NewTestMisconfigMetadata())}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { 
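// each case compiles the inline HCL into modules and adapts the first (and only) block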
+ modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptSecret(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptKey(t *testing.T) { + tests := []struct { + name string + terraform string + expected keyvault.Key + }{ + { + name: "defined", + terraform: ` + resource "azurerm_key_vault_key" "example" { + name = "generated-certificate" + expiration_date = "1982-12-31T00:00:00Z" + } +`, + expected: keyvault.Key{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ExpiryDate: defsecTypes.Time(func(timeVal string) time.Time { + parsed, _ := time.Parse(time.RFC3339, timeVal) + return parsed + }("1982-12-31T00:00:00Z"), defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "azurerm_key_vault_key" "example" { + } +`, + expected: keyvault.Key{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ExpiryDate: defsecTypes.Time(time.Time{}, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "expiration date refers to the resource", + terraform: ` +terraform { + required_version = ">=1.3.0" + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = ">=3.0.0" + } + time = { + source = "hashicorp/time" + version = ">=0.9.0" + } + } +} + +resource "azurerm_key_vault" "this" { + name = "keyvault" + location = "us-west" + resource_group_name = "resource-group" + tenant_id = "tenant-id" + sku_name = "Standard" +} + +resource "time_offset" "expiry" { + offset_years = 1 + base_rfc3339 = "YYYY-MM-DDTHH:MM:SSZ" +} + +resource "azurerm_key_vault_key" "this" { + name = "key" + key_vault_id = azurerm_key_vault.this.id + key_type = "RSA" + key_size = 2048 + key_opts = ["decrypt", "encrypt", "sign", "unwrapKey", "verify", "wrapKey"] + expiration_date = time_offset.expiry.rfc3339 +} +`, + expected: keyvault.Key{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ExpiryDate: defsecTypes.TimeUnresolvable(defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptKey(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_key_vault" "example" { + name = "examplekeyvault" + enabled_for_disk_encryption = true + soft_delete_retention_days = 7 + purge_protection_enabled = true + + network_acls { + bypass = "AzureServices" + default_action = "Deny" + } + } + + resource "azurerm_key_vault_key" "example" { + key_vault_id = azurerm_key_vault.example.id + name = "generated-certificate" + expiration_date = "1982-12-31T00:00:00Z" + } + + resource "azurerm_key_vault_secret" "example" { + key_vault_id = azurerm_key_vault.example.id + content_type = "password" + expiration_date = "1982-12-31T00:00:00Z" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Vaults, 1) + require.Len(t, adapted.Vaults[0].Keys, 1) + require.Len(t, adapted.Vaults[0].Secrets, 1) + + vault := adapted.Vaults[0] + key := vault.Keys[0] + secret := vault.Secrets[0] + + assert.Equal(t, 5, vault.SoftDeleteRetentionDays.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, vault.SoftDeleteRetentionDays.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, vault.EnablePurgeProtection.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, 
vault.EnablePurgeProtection.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 10, vault.NetworkACLs.DefaultAction.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, vault.NetworkACLs.DefaultAction.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 17, key.ExpiryDate.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, key.ExpiryDate.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 22, secret.ContentType.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 22, secret.ContentType.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 23, secret.ExpiryDate.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 23, secret.ExpiryDate.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/azure/monitor/adapt.go b/internal/adapters/terraform/azure/monitor/adapt.go new file mode 100644 index 000000000000..7bbd86abc721 --- /dev/null +++ b/internal/adapters/terraform/azure/monitor/adapt.go @@ -0,0 +1,56 @@ +package monitor + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/monitor" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) monitor.Monitor { + return monitor.Monitor{ + LogProfiles: adaptLogProfiles(modules), + } +} + +func adaptLogProfiles(modules terraform.Modules) []monitor.LogProfile { + var logProfiles []monitor.LogProfile + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_monitor_log_profile") { + logProfiles = append(logProfiles, adaptLogProfile(resource)) + } + } + return logProfiles +} + +func adaptLogProfile(resource *terraform.Block) monitor.LogProfile { + + logProfile := monitor.LogProfile{ + Metadata: resource.GetMetadata(), + RetentionPolicy: monitor.RetentionPolicy{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + Days: defsecTypes.IntDefault(0, resource.GetMetadata()), + }, + Categories: nil, + Locations: nil, + } + + if retentionPolicyBlock := resource.GetBlock("retention_policy"); retentionPolicyBlock.IsNotNil() { + logProfile.RetentionPolicy.Metadata = retentionPolicyBlock.GetMetadata() + enabledAttr := retentionPolicyBlock.GetAttribute("enabled") + logProfile.RetentionPolicy.Enabled = enabledAttr.AsBoolValueOrDefault(false, resource) + daysAttr := retentionPolicyBlock.GetAttribute("days") + logProfile.RetentionPolicy.Days = daysAttr.AsIntValueOrDefault(0, resource) + } + + if categoriesAttr := resource.GetAttribute("categories"); categoriesAttr.IsNotNil() { + logProfile.Categories = categoriesAttr.AsStringValues() + } + + if locationsAttr := resource.GetAttribute("locations"); locationsAttr.IsNotNil() { + logProfile.Locations = locationsAttr.AsStringValues() + } + + return logProfile +} diff --git a/internal/adapters/terraform/azure/monitor/adapt_test.go b/internal/adapters/terraform/azure/monitor/adapt_test.go new file mode 100644 index 000000000000..3af741edaf9c --- /dev/null +++ b/internal/adapters/terraform/azure/monitor/adapt_test.go @@ -0,0 +1,128 @@ +package monitor + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/monitor" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func 
Test_adaptLogProfile(t *testing.T) { + tests := []struct { + name string + terraform string + expected monitor.LogProfile + }{ + { + name: "defined", + terraform: ` + resource "azurerm_monitor_log_profile" "example" { + categories = [ + "Action", + "Delete", + "Write", + ] + + retention_policy { + enabled = true + days = 365 + } + + locations = [ + "eastus", + "eastus2", + "southcentralus" + ] + } +`, + expected: monitor.LogProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Categories: []defsecTypes.StringValue{ + defsecTypes.String("Action", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("Delete", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("Write", defsecTypes.NewTestMisconfigMetadata()), + }, + RetentionPolicy: monitor.RetentionPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Days: defsecTypes.Int(365, defsecTypes.NewTestMisconfigMetadata()), + }, + Locations: []defsecTypes.StringValue{ + defsecTypes.String("eastus", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("eastus2", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("southcentralus", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "default", + terraform: ` + resource "azurerm_monitor_log_profile" "example" { + } +`, + expected: monitor.LogProfile{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RetentionPolicy: monitor.RetentionPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Days: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptLogProfile(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_monitor_log_profile" "example" { + categories = [ + "Action", + "Delete", + "Write", + ] + + retention_policy { + enabled = true + days = 365 + } + + locations = [ + "eastus", + "eastus2", + "southcentralus" + ] + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.LogProfiles, 1) + logProfile := adapted.LogProfiles[0] + + assert.Equal(t, 3, logProfile.Categories[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, logProfile.Categories[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 10, logProfile.RetentionPolicy.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, logProfile.RetentionPolicy.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, logProfile.RetentionPolicy.Days.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, logProfile.RetentionPolicy.Days.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 14, logProfile.Locations[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 18, logProfile.Locations[0].GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/azure/network/adapt.go b/internal/adapters/terraform/azure/network/adapt.go new file mode 100644 index 000000000000..f48e951fb71e --- /dev/null +++ b/internal/adapters/terraform/azure/network/adapt.go @@ -0,0 +1,220 @@ +package network + +import ( + "strconv" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + 
"github.com/aquasecurity/trivy/pkg/terraform" + + "github.com/aquasecurity/trivy/pkg/providers/azure/network" + + "github.com/google/uuid" +) + +func Adapt(modules terraform.Modules) network.Network { + return network.Network{ + SecurityGroups: (&adapter{ + modules: modules, + groups: make(map[string]network.SecurityGroup), + }).adaptSecurityGroups(), + NetworkWatcherFlowLogs: adaptWatcherLogs(modules), + } +} + +type adapter struct { + modules terraform.Modules + groups map[string]network.SecurityGroup +} + +func (a *adapter) adaptSecurityGroups() []network.SecurityGroup { + + for _, module := range a.modules { + for _, resource := range module.GetResourcesByType("azurerm_network_security_group") { + a.adaptSecurityGroup(resource) + } + } + + for _, ruleBlock := range a.modules.GetResourcesByType("azurerm_network_security_rule") { + rule := a.adaptSGRule(ruleBlock) + + groupAttr := ruleBlock.GetAttribute("network_security_group_name") + if groupAttr.IsNotNil() { + if referencedBlock, err := a.modules.GetReferencedBlock(groupAttr, ruleBlock); err == nil { + if group, ok := a.groups[referencedBlock.ID()]; ok { + group.Rules = append(group.Rules, rule) + a.groups[referencedBlock.ID()] = group + continue + } + } + + } + + a.groups[uuid.NewString()] = network.SecurityGroup{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Rules: []network.SecurityGroupRule{rule}, + } + } + + var securityGroups []network.SecurityGroup + for _, group := range a.groups { + securityGroups = append(securityGroups, group) + } + + return securityGroups +} + +func adaptWatcherLogs(modules terraform.Modules) []network.NetworkWatcherFlowLog { + var watcherLogs []network.NetworkWatcherFlowLog + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_network_watcher_flow_log") { + watcherLogs = append(watcherLogs, adaptWatcherLog(resource)) + } + } + return watcherLogs +} + +func (a *adapter) adaptSecurityGroup(resource *terraform.Block) { + var rules []network.SecurityGroupRule + for _, ruleBlock := range resource.GetBlocks("security_rule") { + rules = append(rules, a.adaptSGRule(ruleBlock)) + } + a.groups[resource.ID()] = network.SecurityGroup{ + Metadata: resource.GetMetadata(), + Rules: rules, + } +} + +func (a *adapter) adaptSGRule(ruleBlock *terraform.Block) network.SecurityGroupRule { + + rule := network.SecurityGroupRule{ + Metadata: ruleBlock.GetMetadata(), + Outbound: defsecTypes.BoolDefault(false, ruleBlock.GetMetadata()), + Allow: defsecTypes.BoolDefault(true, ruleBlock.GetMetadata()), + SourceAddresses: nil, + SourcePorts: nil, + DestinationAddresses: nil, + DestinationPorts: nil, + Protocol: ruleBlock.GetAttribute("protocol").AsStringValueOrDefault("", ruleBlock), + } + + accessAttr := ruleBlock.GetAttribute("access") + if accessAttr.Equals("Allow") { + rule.Allow = defsecTypes.Bool(true, accessAttr.GetMetadata()) + } else if accessAttr.Equals("Deny") { + rule.Allow = defsecTypes.Bool(false, accessAttr.GetMetadata()) + } + + directionAttr := ruleBlock.GetAttribute("direction") + if directionAttr.Equals("Inbound") { + rule.Outbound = defsecTypes.Bool(false, directionAttr.GetMetadata()) + } else if directionAttr.Equals("Outbound") { + rule.Outbound = defsecTypes.Bool(true, directionAttr.GetMetadata()) + } + + a.adaptSource(ruleBlock, &rule) + a.adaptDestination(ruleBlock, &rule) + + return rule +} + +func (a *adapter) adaptSource(ruleBlock *terraform.Block, rule *network.SecurityGroupRule) { + if sourceAddressAttr := 
ruleBlock.GetAttribute("source_address_prefix"); sourceAddressAttr.IsString() { + rule.SourceAddresses = append(rule.SourceAddresses, sourceAddressAttr.AsStringValueOrDefault("", ruleBlock)) + } else if sourceAddressPrefixesAttr := ruleBlock.GetAttribute("source_address_prefixes"); sourceAddressPrefixesAttr.IsNotNil() { + rule.SourceAddresses = append(rule.SourceAddresses, sourceAddressPrefixesAttr.AsStringValues()...) + } + + if sourcePortRangesAttr := ruleBlock.GetAttribute("source_port_ranges"); sourcePortRangesAttr.IsNotNil() { + ports := sourcePortRangesAttr.AsStringValues() + for _, value := range ports { + rule.SourcePorts = append(rule.SourcePorts, expandRange(value.Value(), value.GetMetadata())) + } + } else if sourcePortRangeAttr := ruleBlock.GetAttribute("source_port_range"); sourcePortRangeAttr.IsString() { + rule.SourcePorts = append(rule.SourcePorts, expandRange(sourcePortRangeAttr.Value().AsString(), sourcePortRangeAttr.GetMetadata())) + } else if sourcePortRangeAttr := ruleBlock.GetAttribute("source_port_range"); sourcePortRangeAttr.IsNumber() { + f := sourcePortRangeAttr.AsNumber() + rule.SourcePorts = append(rule.SourcePorts, network.PortRange{ + Metadata: sourcePortRangeAttr.GetMetadata(), + Start: int(f), + End: int(f), + }) + } +} + +func (a *adapter) adaptDestination(ruleBlock *terraform.Block, rule *network.SecurityGroupRule) { + if destAddressAttr := ruleBlock.GetAttribute("destination_address_prefix"); destAddressAttr.IsString() { + rule.DestinationAddresses = append(rule.DestinationAddresses, destAddressAttr.AsStringValueOrDefault("", ruleBlock)) + } else if destAddressPrefixesAttr := ruleBlock.GetAttribute("destination_address_prefixes"); destAddressPrefixesAttr.IsNotNil() { + rule.DestinationAddresses = append(rule.DestinationAddresses, destAddressPrefixesAttr.AsStringValues()...) 
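+ // both the single destination_address_prefix form and the destination_address_prefixes list feed the same DestinationAddresses slice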
+ } + + if destPortRangesAttr := ruleBlock.GetAttribute("destination_port_ranges"); destPortRangesAttr.IsNotNil() { + ports := destPortRangesAttr.AsStringValues() + for _, value := range ports { + rule.DestinationPorts = append(rule.DestinationPorts, expandRange(value.Value(), destPortRangesAttr.GetMetadata())) + } + } else if destPortRangeAttr := ruleBlock.GetAttribute("destination_port_range"); destPortRangeAttr.IsString() { + rule.DestinationPorts = append(rule.DestinationPorts, expandRange(destPortRangeAttr.Value().AsString(), destPortRangeAttr.GetMetadata())) + } else if destPortRangeAttr := ruleBlock.GetAttribute("destination_port_range"); destPortRangeAttr.IsNumber() { + f := destPortRangeAttr.AsNumber() + rule.DestinationPorts = append(rule.DestinationPorts, network.PortRange{ + Metadata: destPortRangeAttr.GetMetadata(), + Start: int(f), + End: int(f), + }) + } +} + +func expandRange(r string, m defsecTypes.MisconfigMetadata) network.PortRange { + start := 0 + end := 65535 + switch { + case r == "*": + case strings.Contains(r, "-"): + if parts := strings.Split(r, "-"); len(parts) == 2 { + if p1, err := strconv.ParseInt(parts[0], 10, 32); err == nil { + start = int(p1) + } + if p2, err := strconv.ParseInt(parts[1], 10, 32); err == nil { + end = int(p2) + } + } + default: + if val, err := strconv.ParseInt(r, 10, 32); err == nil { + start = int(val) + end = int(val) + } + } + + return network.PortRange{ + Metadata: m, + Start: start, + End: end, + } +} + +func adaptWatcherLog(resource *terraform.Block) network.NetworkWatcherFlowLog { + flowLog := network.NetworkWatcherFlowLog{ + Metadata: resource.GetMetadata(), + RetentionPolicy: network.RetentionPolicy{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + Days: defsecTypes.IntDefault(0, resource.GetMetadata()), + }, + } + + if retentionPolicyBlock := resource.GetBlock("retention_policy"); retentionPolicyBlock.IsNotNil() { + flowLog.RetentionPolicy.Metadata = retentionPolicyBlock.GetMetadata() + + enabledAttr := retentionPolicyBlock.GetAttribute("enabled") + flowLog.RetentionPolicy.Enabled = enabledAttr.AsBoolValueOrDefault(false, retentionPolicyBlock) + + daysAttr := retentionPolicyBlock.GetAttribute("days") + flowLog.RetentionPolicy.Days = daysAttr.AsIntValueOrDefault(0, retentionPolicyBlock) + } + + return flowLog +} diff --git a/internal/adapters/terraform/azure/network/adapt_test.go b/internal/adapters/terraform/azure/network/adapt_test.go new file mode 100644 index 000000000000..fed2d2bc0c06 --- /dev/null +++ b/internal/adapters/terraform/azure/network/adapt_test.go @@ -0,0 +1,262 @@ +package network + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/network" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected network.Network + }{ + { + name: "defined", + terraform: ` + resource "azurerm_network_security_rule" "example" { + name = "example_security_rule" + network_security_group_name = azurerm_network_security_group.example.name + direction = "Inbound" + access = "Allow" + protocol = "TCP" + source_port_range = "*" + destination_port_ranges = ["3389"] + source_address_prefix = "4.53.160.75" + 
destination_address_prefix = "*" + } + + resource "azurerm_network_security_group" "example" { + name = "tf-appsecuritygroup" + } + + resource "azurerm_network_watcher_flow_log" "example" { + resource_group_name = azurerm_resource_group.example.name + name = "example-log" + + retention_policy { + enabled = true + days = 7 + } + } +`, + expected: network.Network{ + SecurityGroups: []network.SecurityGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Rules: []network.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Outbound: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Allow: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + SourceAddresses: []defsecTypes.StringValue{ + defsecTypes.String("4.53.160.75", defsecTypes.NewTestMisconfigMetadata()), + }, + DestinationAddresses: []defsecTypes.StringValue{ + defsecTypes.String("*", defsecTypes.NewTestMisconfigMetadata()), + }, + SourcePorts: []network.PortRange{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Start: 0, + End: 65535, + }, + }, + DestinationPorts: []network.PortRange{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Start: 3389, + End: 3389, + }, + }, + Protocol: defsecTypes.String("TCP", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + NetworkWatcherFlowLogs: []network.NetworkWatcherFlowLog{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RetentionPolicy: network.RetentionPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Days: defsecTypes.Int(7, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "azurerm_network_security_group" "example" { + name = "tf-appsecuritygroup" + security_rule { + } + } +`, + expected: network.Network{ + SecurityGroups: []network.SecurityGroup{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Rules: []network.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Outbound: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Allow: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Protocol: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptWatcherLog(t *testing.T) { + tests := []struct { + name string + terraform string + expected network.NetworkWatcherFlowLog + }{ + { + name: "defined", + terraform: ` + resource "azurerm_network_watcher_flow_log" "watcher" { + retention_policy { + enabled = true + days = 90 + } + } +`, + expected: network.NetworkWatcherFlowLog{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RetentionPolicy: network.RetentionPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Days: defsecTypes.Int(90, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "azurerm_network_watcher_flow_log" "watcher" { + retention_policy { + } + } +`, + expected: network.NetworkWatcherFlowLog{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RetentionPolicy: network.RetentionPolicy{ + Metadata: 
defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Days: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptWatcherLog(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_network_security_group" "example" { + name = "tf-appsecuritygroup" + } + + resource "azurerm_network_security_rule" "example" { + name = "example_security_rule" + network_security_group_name = azurerm_network_security_group.example.name + direction = "Inbound" + access = "Allow" + protocol = "TCP" + source_port_range = "*" + destination_port_ranges = ["3389"] + source_address_prefix = "4.53.160.75" + destination_address_prefix = "*" + } + + resource "azurerm_network_watcher_flow_log" "example" { + resource_group_name = azurerm_resource_group.example.name + name = "example-log" + + retention_policy { + enabled = true + days = 7 + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.SecurityGroups, 1) + require.Len(t, adapted.NetworkWatcherFlowLogs, 1) + + securityGroup := adapted.SecurityGroups[0] + rule := securityGroup.Rules[0] + watcher := adapted.NetworkWatcherFlowLogs[0] + + assert.Equal(t, 2, securityGroup.Metadata.Range().GetStartLine()) + assert.Equal(t, 4, securityGroup.Metadata.Range().GetEndLine()) + + assert.Equal(t, 6, rule.Metadata.Range().GetStartLine()) + assert.Equal(t, 16, rule.Metadata.Range().GetEndLine()) + + assert.Equal(t, 9, rule.Outbound.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 9, rule.Outbound.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 10, rule.Allow.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, rule.Allow.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, rule.Protocol.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, rule.Protocol.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 12, rule.SourcePorts[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 12, rule.SourcePorts[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 13, rule.DestinationPorts[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 13, rule.DestinationPorts[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 14, rule.SourceAddresses[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 14, rule.SourceAddresses[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 15, rule.DestinationAddresses[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 15, rule.DestinationAddresses[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 18, watcher.Metadata.Range().GetStartLine()) + assert.Equal(t, 26, watcher.Metadata.Range().GetEndLine()) + + assert.Equal(t, 22, watcher.RetentionPolicy.Metadata.Range().GetStartLine()) + assert.Equal(t, 25, watcher.RetentionPolicy.Metadata.Range().GetEndLine()) + + assert.Equal(t, 23, watcher.RetentionPolicy.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 23, watcher.RetentionPolicy.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 24, watcher.RetentionPolicy.Days.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 24, watcher.RetentionPolicy.Days.GetMetadata().Range().GetEndLine()) +} diff --git 
a/internal/adapters/terraform/azure/securitycenter/adapt.go b/internal/adapters/terraform/azure/securitycenter/adapt.go new file mode 100644 index 000000000000..0ec0811ef03d --- /dev/null +++ b/internal/adapters/terraform/azure/securitycenter/adapt.go @@ -0,0 +1,59 @@ +package securitycenter + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/securitycenter" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) securitycenter.SecurityCenter { + return securitycenter.SecurityCenter{ + Contacts: adaptContacts(modules), + Subscriptions: adaptSubscriptions(modules), + } +} + +func adaptContacts(modules terraform.Modules) []securitycenter.Contact { + var contacts []securitycenter.Contact + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_security_center_contact") { + contacts = append(contacts, adaptContact(resource)) + } + } + return contacts +} + +func adaptSubscriptions(modules terraform.Modules) []securitycenter.SubscriptionPricing { + var subscriptions []securitycenter.SubscriptionPricing + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_security_center_subscription_pricing") { + subscriptions = append(subscriptions, adaptSubscription(resource)) + } + } + return subscriptions +} + +func adaptContact(resource *terraform.Block) securitycenter.Contact { + enableAlertNotifAttr := resource.GetAttribute("alert_notifications") + enableAlertNotifVal := enableAlertNotifAttr.AsBoolValueOrDefault(false, resource) + + phoneAttr := resource.GetAttribute("phone") + phoneVal := phoneAttr.AsStringValueOrDefault("", resource) + + return securitycenter.Contact{ + Metadata: resource.GetMetadata(), + EnableAlertNotifications: enableAlertNotifVal, + Phone: phoneVal, + } +} + +func adaptSubscription(resource *terraform.Block) securitycenter.SubscriptionPricing { + tierAttr := resource.GetAttribute("tier") + tierVal := tierAttr.AsStringValueOrDefault("Free", resource) + + return securitycenter.SubscriptionPricing{ + Metadata: resource.GetMetadata(), + Tier: tierVal, + } +} diff --git a/internal/adapters/terraform/azure/securitycenter/adapt_test.go b/internal/adapters/terraform/azure/securitycenter/adapt_test.go new file mode 100644 index 000000000000..0e11eb808e21 --- /dev/null +++ b/internal/adapters/terraform/azure/securitycenter/adapt_test.go @@ -0,0 +1,137 @@ +package securitycenter + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/securitycenter" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptContact(t *testing.T) { + tests := []struct { + name string + terraform string + expected securitycenter.Contact + }{ + { + name: "defined", + terraform: ` + resource "azurerm_security_center_contact" "example" { + phone = "+1-555-555-5555" + alert_notifications = true + } +`, + expected: securitycenter.Contact{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableAlertNotifications: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Phone: defsecTypes.String("+1-555-555-5555", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "azurerm_security_center_contact" "example" { + } +`, 
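+ // with no attributes set, adaptContact falls back to the AsBoolValueOrDefault / AsStringValueOrDefault defaults from adapt.go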
+ expected: securitycenter.Contact{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableAlertNotifications: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Phone: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptContact(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptSubscription(t *testing.T) { + tests := []struct { + name string + terraform string + expected securitycenter.SubscriptionPricing + }{ + { + name: "free tier", + terraform: ` + resource "azurerm_security_center_subscription_pricing" "example" { + tier = "Free" + }`, + expected: securitycenter.SubscriptionPricing{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Tier: defsecTypes.String("Free", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "default - free tier", + terraform: ` + resource "azurerm_security_center_subscription_pricing" "example" { + }`, + expected: securitycenter.SubscriptionPricing{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Tier: defsecTypes.String("Free", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "standard tier", + terraform: ` + resource "azurerm_security_center_subscription_pricing" "example" { + tier = "Standard" + }`, + expected: securitycenter.SubscriptionPricing{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Tier: defsecTypes.String("Standard", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptSubscription(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_security_center_contact" "example" { + phone = "+1-555-555-5555" + alert_notifications = true + } + + resource "azurerm_security_center_subscription_pricing" "example" { + tier = "Standard" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Contacts, 1) + require.Len(t, adapted.Subscriptions, 1) + + contact := adapted.Contacts[0] + sub := adapted.Subscriptions[0] + + assert.Equal(t, 3, contact.Phone.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, contact.Phone.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, contact.EnableAlertNotifications.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, contact.EnableAlertNotifications.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 8, sub.Tier.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 8, sub.Tier.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/azure/storage/adapt.go b/internal/adapters/terraform/azure/storage/adapt.go new file mode 100644 index 000000000000..365b0a493794 --- /dev/null +++ b/internal/adapters/terraform/azure/storage/adapt.go @@ -0,0 +1,173 @@ +package storage + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/storage" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) storage.Storage { + accounts, containers, networkRules := adaptAccounts(modules) + + orphanAccount := storage.Account{ + Metadata: 
defsecTypes.NewUnmanagedMisconfigMetadata(), + NetworkRules: adaptOrphanNetworkRules(modules, networkRules), + EnforceHTTPS: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Containers: adaptOrphanContainers(modules, containers), + QueueProperties: storage.QueueProperties{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableLogging: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + } + + accounts = append(accounts, orphanAccount) + + return storage.Storage{ + Accounts: accounts, + } +} + +func adaptOrphanContainers(modules terraform.Modules, containers []string) (orphans []storage.Container) { + accountedFor := make(map[string]bool) + for _, container := range containers { + accountedFor[container] = true + } + for _, module := range modules { + for _, containerResource := range module.GetResourcesByType("azurerm_storage_container") { + if _, ok := accountedFor[containerResource.ID()]; ok { + continue + } + orphans = append(orphans, adaptContainer(containerResource)) + } + } + + return orphans +} + +func adaptOrphanNetworkRules(modules terraform.Modules, networkRules []string) (orphans []storage.NetworkRule) { + accountedFor := make(map[string]bool) + for _, networkRule := range networkRules { + accountedFor[networkRule] = true + } + + for _, module := range modules { + for _, networkRuleResource := range module.GetResourcesByType("azurerm_storage_account_network_rules") { + if _, ok := accountedFor[networkRuleResource.ID()]; ok { + continue + } + + orphans = append(orphans, adaptNetworkRule(networkRuleResource)) + } + } + + return orphans +} + +func adaptAccounts(modules terraform.Modules) ([]storage.Account, []string, []string) { + var accounts []storage.Account + var accountedForContainers []string + var accountedForNetworkRules []string + + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_storage_account") { + account := adaptAccount(resource) + containerResource := module.GetReferencingResources(resource, "azurerm_storage_container", "storage_account_name") + for _, containerBlock := range containerResource { + accountedForContainers = append(accountedForContainers, containerBlock.ID()) + account.Containers = append(account.Containers, adaptContainer(containerBlock)) + } + networkRulesResource := module.GetReferencingResources(resource, "azurerm_storage_account_network_rules", "storage_account_name") + for _, networkRuleBlock := range networkRulesResource { + accountedForNetworkRules = append(accountedForNetworkRules, networkRuleBlock.ID()) + account.NetworkRules = append(account.NetworkRules, adaptNetworkRule(networkRuleBlock)) + } + for _, queueBlock := range module.GetReferencingResources(resource, "azurerm_storage_queue", "storage_account_name") { + queue := storage.Queue{ + Metadata: queueBlock.GetMetadata(), + Name: queueBlock.GetAttribute("name").AsStringValueOrDefault("", queueBlock), + } + account.Queues = append(account.Queues, queue) + } + accounts = append(accounts, account) + } + } + + return accounts, accountedForContainers, accountedForNetworkRules +} + +func adaptAccount(resource *terraform.Block) storage.Account { + account := storage.Account{ + Metadata: resource.GetMetadata(), + NetworkRules: nil, + EnforceHTTPS: defsecTypes.BoolDefault(true, resource.GetMetadata()), + Containers: nil, + QueueProperties: storage.QueueProperties{ + Metadata: 
resource.GetMetadata(), + EnableLogging: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + MinimumTLSVersion: defsecTypes.StringDefault("TLS1_2", resource.GetMetadata()), + } + + networkRulesBlocks := resource.GetBlocks("network_rules") + for _, networkBlock := range networkRulesBlocks { + account.NetworkRules = append(account.NetworkRules, adaptNetworkRule(networkBlock)) + } + + httpsOnlyAttr := resource.GetAttribute("enable_https_traffic_only") + account.EnforceHTTPS = httpsOnlyAttr.AsBoolValueOrDefault(true, resource) + + queuePropertiesBlock := resource.GetBlock("queue_properties") + if queuePropertiesBlock.IsNotNil() { + account.QueueProperties.Metadata = queuePropertiesBlock.GetMetadata() + loggingBlock := queuePropertiesBlock.GetBlock("logging") + if loggingBlock.IsNotNil() { + account.QueueProperties.EnableLogging = defsecTypes.Bool(true, loggingBlock.GetMetadata()) + } + } + + minTLSVersionAttr := resource.GetAttribute("min_tls_version") + account.MinimumTLSVersion = minTLSVersionAttr.AsStringValueOrDefault("TLS1_0", resource) + return account +} + +func adaptContainer(resource *terraform.Block) storage.Container { + accessTypeAttr := resource.GetAttribute("container_access_type") + publicAccess := defsecTypes.StringDefault(storage.PublicAccessOff, resource.GetMetadata()) + + if accessTypeAttr.Equals("blob") { + publicAccess = defsecTypes.String(storage.PublicAccessBlob, accessTypeAttr.GetMetadata()) + } else if accessTypeAttr.Equals("container") { + publicAccess = defsecTypes.String(storage.PublicAccessContainer, accessTypeAttr.GetMetadata()) + } + + return storage.Container{ + Metadata: resource.GetMetadata(), + PublicAccess: publicAccess, + } +} + +func adaptNetworkRule(resource *terraform.Block) storage.NetworkRule { + var allowByDefault defsecTypes.BoolValue + var bypass []defsecTypes.StringValue + + defaultActionAttr := resource.GetAttribute("default_action") + + if defaultActionAttr.IsNotNil() { + allowByDefault = defsecTypes.Bool(defaultActionAttr.Equals("Allow", terraform.IgnoreCase), defaultActionAttr.GetMetadata()) + } else { + allowByDefault = defsecTypes.BoolDefault(false, resource.GetMetadata()) + } + + if resource.HasChild("bypass") { + bypassAttr := resource.GetAttribute("bypass") + bypass = bypassAttr.AsStringValues() + } + + return storage.NetworkRule{ + Metadata: resource.GetMetadata(), + Bypass: bypass, + AllowByDefault: allowByDefault, + } +} diff --git a/internal/adapters/terraform/azure/storage/adapt_test.go b/internal/adapters/terraform/azure/storage/adapt_test.go new file mode 100644 index 000000000000..bdb61f8690cf --- /dev/null +++ b/internal/adapters/terraform/azure/storage/adapt_test.go @@ -0,0 +1,252 @@ +package storage + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected storage.Storage + }{ + { + name: "defined", + terraform: ` + resource "azurerm_resource_group" "example" { + name = "example" + } + + resource "azurerm_storage_account" "example" { + name = "storageaccountname" + resource_group_name = azurerm_resource_group.example.name + + network_rules { + default_action = "Deny" + bypass = 
["Metrics", "AzureServices"] + } + + enable_https_traffic_only = true + queue_properties { + logging { + delete = true + read = true + write = true + version = "1.0" + retention_policy_days = 10 + } + } + min_tls_version = "TLS1_2" + } + + resource "azurerm_storage_account_network_rules" "test" { + resource_group_name = azurerm_resource_group.example.name + storage_account_name = azurerm_storage_account.example.name + + default_action = "Allow" + bypass = ["Metrics"] + } + + resource "azurerm_storage_container" "example" { + storage_account_name = azurerm_storage_account.example.name + resource_group_name = azurerm_resource_group.example.name + container_access_type = "blob" + } +`, + expected: storage.Storage{ + Accounts: []storage.Account{ + + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnforceHTTPS: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + MinimumTLSVersion: defsecTypes.String("TLS1_2", defsecTypes.NewTestMisconfigMetadata()), + NetworkRules: []storage.NetworkRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Bypass: []defsecTypes.StringValue{ + defsecTypes.String("Metrics", defsecTypes.NewTestMisconfigMetadata()), + defsecTypes.String("AzureServices", defsecTypes.NewTestMisconfigMetadata()), + }, + AllowByDefault: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Bypass: []defsecTypes.StringValue{ + defsecTypes.String("Metrics", defsecTypes.NewTestMisconfigMetadata()), + }, + AllowByDefault: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + QueueProperties: storage.QueueProperties{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableLogging: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Containers: []storage.Container{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + PublicAccess: defsecTypes.String("blob", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + { + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnforceHTTPS: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + QueueProperties: storage.QueueProperties{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableLogging: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + }, + }, + }, + { + name: "orphans", + terraform: ` + resource "azurerm_storage_account_network_rules" "test" { + default_action = "Allow" + bypass = ["Metrics"] + } + + resource "azurerm_storage_container" "example" { + container_access_type = "blob" + } +`, + expected: storage.Storage{ + Accounts: []storage.Account{ + { + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnforceHTTPS: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + NetworkRules: []storage.NetworkRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Bypass: []defsecTypes.StringValue{ + defsecTypes.String("Metrics", defsecTypes.NewTestMisconfigMetadata()), + }, + AllowByDefault: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + QueueProperties: storage.QueueProperties{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnableLogging: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + MinimumTLSVersion: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + Containers: 
[]storage.Container{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + PublicAccess: defsecTypes.String("blob", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_resource_group" "example" { + name = "example" + location = "West Europe" + } + + resource "azurerm_storage_account" "example" { + resource_group_name = azurerm_resource_group.example.name + + enable_https_traffic_only = true + min_tls_version = "TLS1_2" + + queue_properties { + logging { + delete = true + read = true + write = true + version = "1.0" + retention_policy_days = 10 + } + } + + network_rules { + default_action = "Deny" + bypass = ["Metrics", "AzureServices"] + } + } + + resource "azurerm_storage_account_network_rules" "test" { + resource_group_name = azurerm_resource_group.example.name + storage_account_name = azurerm_storage_account.example.name + + default_action = "Allow" + bypass = ["Metrics"] + } + + resource "azurerm_storage_container" "example" { + storage_account_name = azurerm_storage_account.example.name + resource_group_name = azurerm_resource_group.example.name + container_access_type = "blob" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Accounts, 2) // the account defined in the source plus the holder account for orphaned resources + account := adapted.Accounts[0] + + assert.Equal(t, 7, account.Metadata.Range().GetStartLine()) + assert.Equal(t, 27, account.Metadata.Range().GetEndLine()) + + assert.Equal(t, 10, account.EnforceHTTPS.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, account.EnforceHTTPS.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, account.MinimumTLSVersion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, account.MinimumTLSVersion.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, account.QueueProperties.Metadata.Range().GetStartLine()) + assert.Equal(t, 21, account.QueueProperties.Metadata.Range().GetEndLine()) + + assert.Equal(t, 14, account.QueueProperties.EnableLogging.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 20, account.QueueProperties.EnableLogging.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 23, account.NetworkRules[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 26, account.NetworkRules[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 24, account.NetworkRules[0].AllowByDefault.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 24, account.NetworkRules[0].AllowByDefault.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 25, account.NetworkRules[0].Bypass[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 25, account.NetworkRules[0].Bypass[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 29, account.NetworkRules[1].Metadata.Range().GetStartLine()) + assert.Equal(t, 35, account.NetworkRules[1].Metadata.Range().GetEndLine()) + + assert.Equal(t, 33, account.NetworkRules[1].AllowByDefault.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 33, account.NetworkRules[1].AllowByDefault.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 34, account.NetworkRules[1].Bypass[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 34, account.NetworkRules[1].Bypass[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 37, 
account.Containers[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 41, account.Containers[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 40, account.Containers[0].PublicAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 40, account.Containers[0].PublicAccess.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/azure/synapse/adapt.go b/internal/adapters/terraform/azure/synapse/adapt.go new file mode 100644 index 000000000000..952b7b0a5921 --- /dev/null +++ b/internal/adapters/terraform/azure/synapse/adapt.go @@ -0,0 +1,32 @@ +package synapse + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/synapse" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) synapse.Synapse { + return synapse.Synapse{ + Workspaces: adaptWorkspaces(modules), + } +} + +func adaptWorkspaces(modules terraform.Modules) []synapse.Workspace { + var workspaces []synapse.Workspace + for _, module := range modules { + for _, resource := range module.GetResourcesByType("azurerm_synapse_workspace") { + workspaces = append(workspaces, adaptWorkspace(resource)) + } + } + return workspaces +} + +func adaptWorkspace(resource *terraform.Block) synapse.Workspace { + enableManagedVNAttr := resource.GetAttribute("managed_virtual_network_enabled") + enableManagedVNVal := enableManagedVNAttr.AsBoolValueOrDefault(false, resource) + + return synapse.Workspace{ + Metadata: resource.GetMetadata(), + EnableManagedVirtualNetwork: enableManagedVNVal, + } +} diff --git a/internal/adapters/terraform/azure/synapse/adapt_test.go b/internal/adapters/terraform/azure/synapse/adapt_test.go new file mode 100644 index 000000000000..c46a7815c4ac --- /dev/null +++ b/internal/adapters/terraform/azure/synapse/adapt_test.go @@ -0,0 +1,83 @@ +package synapse + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/azure/synapse" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptWorkspace(t *testing.T) { + tests := []struct { + name string + terraform string + expected synapse.Workspace + }{ + { + name: "enabled", + terraform: ` + resource "azurerm_synapse_workspace" "example" { + managed_virtual_network_enabled = true + } +`, + expected: synapse.Workspace{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableManagedVirtualNetwork: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "disabled", + terraform: ` + resource "azurerm_synapse_workspace" "example" { + managed_virtual_network_enabled = false + } +`, + expected: synapse.Workspace{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableManagedVirtualNetwork: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "default", + terraform: ` + resource "azurerm_synapse_workspace" "example" { + } +`, + expected: synapse.Workspace{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableManagedVirtualNetwork: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptWorkspace(modules.GetBlocks()[0]) + testutil.AssertDefsecEqual(t, 
test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "azurerm_synapse_workspace" "example" { + managed_virtual_network_enabled = true + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Workspaces, 1) + workspace := adapted.Workspaces[0] + + assert.Equal(t, 3, workspace.EnableManagedVirtualNetwork.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, workspace.EnableManagedVirtualNetwork.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/cloudstack/adapt.go b/internal/adapters/terraform/cloudstack/adapt.go new file mode 100644 index 000000000000..545846f85c0d --- /dev/null +++ b/internal/adapters/terraform/cloudstack/adapt.go @@ -0,0 +1,13 @@ +package cloudstack + +import ( + "github.com/aquasecurity/trivy/internal/adapters/terraform/cloudstack/compute" + "github.com/aquasecurity/trivy/pkg/providers/cloudstack" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) cloudstack.CloudStack { + return cloudstack.CloudStack{ + Compute: compute.Adapt(modules), + } +} diff --git a/internal/adapters/terraform/cloudstack/compute/adapt.go b/internal/adapters/terraform/cloudstack/compute/adapt.go new file mode 100644 index 000000000000..3f268faea1b8 --- /dev/null +++ b/internal/adapters/terraform/cloudstack/compute/adapt.go @@ -0,0 +1,49 @@ +package compute + +import ( + "encoding/base64" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/terraform" + + "github.com/aquasecurity/trivy/pkg/providers/cloudstack/compute" +) + +func Adapt(modules terraform.Modules) compute.Compute { + return compute.Compute{ + Instances: adaptInstances(modules), + } +} + +func adaptInstances(modules terraform.Modules) []compute.Instance { + var instances []compute.Instance + for _, module := range modules { + for _, resource := range module.GetResourcesByType("cloudstack_instance") { + instances = append(instances, adaptInstance(resource)) + } + } + return instances +} + +func adaptInstance(resource *terraform.Block) compute.Instance { + userDataAttr := resource.GetAttribute("user_data") + var encoded []byte + var err error + + if userDataAttr.IsNotNil() && userDataAttr.IsString() { + encoded, err = base64.StdEncoding.DecodeString(userDataAttr.Value().AsString()) + if err != nil { + encoded = []byte(userDataAttr.Value().AsString()) + } + return compute.Instance{ + Metadata: resource.GetMetadata(), + UserData: types.String(string(encoded), userDataAttr.GetMetadata()), + } + } + + return compute.Instance{ + Metadata: resource.GetMetadata(), + UserData: types.StringDefault("", resource.GetMetadata()), + } +} diff --git a/internal/adapters/terraform/cloudstack/compute/adapt_test.go b/internal/adapters/terraform/cloudstack/compute/adapt_test.go new file mode 100644 index 000000000000..577446f1f15a --- /dev/null +++ b/internal/adapters/terraform/cloudstack/compute/adapt_test.go @@ -0,0 +1,91 @@ +package compute + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/cloudstack/compute" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptInstance(t *testing.T) { + tests := 
[]struct { + name string + terraform string + expected compute.Instance + }{ + { + name: "sensitive user data", + terraform: ` + resource "cloudstack_instance" "web" { + name = "server-1" + user_data = < 0 { + cluster.NodeConfig = cluster.NodePools[0].NodeConfig + a.clusterMap[id] = cluster + } + } + + var clusters []gke.Cluster + for _, cluster := range a.clusterMap { + clusters = append(clusters, cluster) + } + return clusters +} + +func (a *adapter) adaptCluster(resource *terraform.Block, module *terraform.Module) { + + cluster := gke.Cluster{ + Metadata: resource.GetMetadata(), + NodePools: nil, + IPAllocationPolicy: gke.IPAllocationPolicy{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + MasterAuthorizedNetworks: gke.MasterAuthorizedNetworks{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + CIDRs: []defsecTypes.StringValue{}, + }, + NetworkPolicy: gke.NetworkPolicy{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + DatapathProvider: resource.GetAttribute("datapath_provider"). + AsStringValueOrDefault("DATAPATH_PROVIDER_UNSPECIFIED", resource), + PrivateCluster: gke.PrivateCluster{ + Metadata: resource.GetMetadata(), + EnablePrivateNodes: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + LoggingService: defsecTypes.StringDefault("logging.googleapis.com/kubernetes", resource.GetMetadata()), + MonitoringService: defsecTypes.StringDefault("monitoring.googleapis.com/kubernetes", resource.GetMetadata()), + MasterAuth: gke.MasterAuth{ + Metadata: resource.GetMetadata(), + ClientCertificate: gke.ClientCertificate{ + Metadata: resource.GetMetadata(), + IssueCertificate: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + Username: defsecTypes.StringDefault("", resource.GetMetadata()), + Password: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + NodeConfig: gke.NodeConfig{ + Metadata: resource.GetMetadata(), + ImageType: defsecTypes.StringDefault("", resource.GetMetadata()), + WorkloadMetadataConfig: gke.WorkloadMetadataConfig{ + Metadata: resource.GetMetadata(), + NodeMetadata: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + ServiceAccount: defsecTypes.StringDefault("", resource.GetMetadata()), + EnableLegacyEndpoints: defsecTypes.BoolDefault(true, resource.GetMetadata()), + }, + EnableShieldedNodes: defsecTypes.BoolDefault(true, resource.GetMetadata()), + EnableLegacyABAC: defsecTypes.BoolDefault(false, resource.GetMetadata()), + ResourceLabels: defsecTypes.MapDefault(make(map[string]string), resource.GetMetadata()), + RemoveDefaultNodePool: defsecTypes.BoolDefault(false, resource.GetMetadata()), + EnableAutpilot: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } + + if allocBlock := resource.GetBlock("ip_allocation_policy"); allocBlock.IsNotNil() { + cluster.IPAllocationPolicy.Metadata = allocBlock.GetMetadata() + cluster.IPAllocationPolicy.Enabled = defsecTypes.Bool(true, allocBlock.GetMetadata()) + } + + if blocks := resource.GetBlocks("master_authorized_networks_config"); len(blocks) > 0 { + cluster.MasterAuthorizedNetworks = adaptMasterAuthNetworksAsBlocks(resource, blocks) + } + + if policyBlock := resource.GetBlock("network_policy"); policyBlock.IsNotNil() { + enabledAttr := policyBlock.GetAttribute("enabled") + cluster.NetworkPolicy.Metadata = policyBlock.GetMetadata() + cluster.NetworkPolicy.Enabled = enabledAttr.AsBoolValueOrDefault(false, 
policyBlock) + } + + if privBlock := resource.GetBlock("private_cluster_config"); privBlock.IsNotNil() { + privateNodesEnabledAttr := privBlock.GetAttribute("enable_private_nodes") + cluster.PrivateCluster.Metadata = privBlock.GetMetadata() + cluster.PrivateCluster.EnablePrivateNodes = privateNodesEnabledAttr.AsBoolValueOrDefault(false, privBlock) + } + + loggingAttr := resource.GetAttribute("logging_service") + cluster.LoggingService = loggingAttr.AsStringValueOrDefault("logging.googleapis.com/kubernetes", resource) + monitoringServiceAttr := resource.GetAttribute("monitoring_service") + cluster.MonitoringService = monitoringServiceAttr.AsStringValueOrDefault("monitoring.googleapis.com/kubernetes", resource) + + if masterBlock := resource.GetBlock("master_auth"); masterBlock.IsNotNil() { + cluster.MasterAuth = adaptMasterAuth(masterBlock) + } + + if configBlock := resource.GetBlock("node_config"); configBlock.IsNotNil() { + cluster.NodeConfig = adaptNodeConfig(configBlock) + } + + cluster.EnableShieldedNodes = resource.GetAttribute("enable_shielded_nodes").AsBoolValueOrDefault(true, resource) + + enableLegacyABACAttr := resource.GetAttribute("enable_legacy_abac") + cluster.EnableLegacyABAC = enableLegacyABACAttr.AsBoolValueOrDefault(false, resource) + + cluster.EnableAutpilot = resource.GetAttribute("enable_autopilot").AsBoolValueOrDefault(false, resource) + + resourceLabelsAttr := resource.GetAttribute("resource_labels") + if resourceLabelsAttr.IsNotNil() { + cluster.ResourceLabels = resourceLabelsAttr.AsMapValue() + } + + cluster.RemoveDefaultNodePool = resource.GetAttribute("remove_default_node_pool").AsBoolValueOrDefault(false, resource) + + a.clusterMap[resource.ID()] = cluster +} + +func (a *adapter) adaptNodePools() { + for _, nodePoolBlock := range a.modules.GetResourcesByType("google_container_node_pool") { + a.adaptNodePool(nodePoolBlock) + } +} + +func (a *adapter) adaptNodePool(resource *terraform.Block) { + nodeConfig := gke.NodeConfig{ + Metadata: resource.GetMetadata(), + ImageType: defsecTypes.StringDefault("", resource.GetMetadata()), + WorkloadMetadataConfig: gke.WorkloadMetadataConfig{ + Metadata: resource.GetMetadata(), + NodeMetadata: defsecTypes.StringDefault("", resource.GetMetadata()), + }, + ServiceAccount: defsecTypes.StringDefault("", resource.GetMetadata()), + EnableLegacyEndpoints: defsecTypes.BoolDefault(true, resource.GetMetadata()), + } + + management := gke.Management{ + Metadata: resource.GetMetadata(), + EnableAutoRepair: defsecTypes.BoolDefault(false, resource.GetMetadata()), + EnableAutoUpgrade: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } + + if resource.HasChild("management") { + management.Metadata = resource.GetBlock("management").GetMetadata() + + autoRepairAttr := resource.GetBlock("management").GetAttribute("auto_repair") + management.EnableAutoRepair = autoRepairAttr.AsBoolValueOrDefault(false, resource.GetBlock("management")) + + autoUpgradeAttr := resource.GetBlock("management").GetAttribute("auto_upgrade") + management.EnableAutoUpgrade = autoUpgradeAttr.AsBoolValueOrDefault(false, resource.GetBlock("management")) + } + + if resource.HasChild("node_config") { + nodeConfig = adaptNodeConfig(resource.GetBlock("node_config")) + } + + nodePool := gke.NodePool{ + Metadata: resource.GetMetadata(), + Management: management, + NodeConfig: nodeConfig, + } + + clusterAttr := 
resource.GetAttribute("cluster") + if referencedCluster, err := a.modules.GetReferencedBlock(clusterAttr, resource); err == nil { + if referencedCluster.TypeLabel() == "google_container_cluster" { + if cluster, ok := a.clusterMap[referencedCluster.ID()]; ok { + cluster.NodePools = append(cluster.NodePools, nodePool) + a.clusterMap[referencedCluster.ID()] = cluster + return + } + } + } + + // we didn't find a cluster to put the nodepool in, so create a placeholder + a.clusterMap[uuid.NewString()] = gke.Cluster{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + NodePools: []gke.NodePool{nodePool}, + IPAllocationPolicy: gke.IPAllocationPolicy{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Enabled: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + MasterAuthorizedNetworks: gke.MasterAuthorizedNetworks{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Enabled: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + CIDRs: nil, + }, + NetworkPolicy: gke.NetworkPolicy{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Enabled: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + PrivateCluster: gke.PrivateCluster{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + EnablePrivateNodes: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + LoggingService: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + MonitoringService: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + MasterAuth: gke.MasterAuth{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + ClientCertificate: gke.ClientCertificate{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + IssueCertificate: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + Username: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + Password: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + NodeConfig: gke.NodeConfig{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + ImageType: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + WorkloadMetadataConfig: gke.WorkloadMetadataConfig{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + NodeMetadata: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + ServiceAccount: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + EnableLegacyEndpoints: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + }, + EnableShieldedNodes: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + EnableLegacyABAC: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + ResourceLabels: defsecTypes.MapDefault(nil, defsecTypes.NewUnmanagedMisconfigMetadata()), + RemoveDefaultNodePool: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + EnableAutpilot: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + } +} + +func adaptNodeConfig(resource *terraform.Block) gke.NodeConfig { + + config := gke.NodeConfig{ + Metadata: resource.GetMetadata(), + ImageType: resource.GetAttribute("image_type").AsStringValueOrDefault("", resource), + WorkloadMetadataConfig: gke.WorkloadMetadataConfig{ + Metadata: resource.GetMetadata(), + NodeMetadata: defsecTypes.StringDefault("UNSPECIFIED", resource.GetMetadata()), + }, + 
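// NodeMetadata defaults to "UNSPECIFIED" here and is overridden below when a workload_metadata_config block is present +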
ServiceAccount: resource.GetAttribute("service_account").AsStringValueOrDefault("", resource), + EnableLegacyEndpoints: defsecTypes.BoolDefault(true, resource.GetMetadata()), + } + + if metadata := resource.GetAttribute("metadata"); metadata.IsNotNil() { + legacyMetadata := metadata.MapValue("disable-legacy-endpoints") + if legacyMetadata.IsWhollyKnown() && legacyMetadata.Type() == cty.Bool { + config.EnableLegacyEndpoints = defsecTypes.Bool(legacyMetadata.False(), metadata.GetMetadata()) + } + } + + workloadBlock := resource.GetBlock("workload_metadata_config") + if workloadBlock.IsNotNil() { + config.WorkloadMetadataConfig.Metadata = workloadBlock.GetMetadata() + modeAttr := workloadBlock.GetAttribute("node_metadata") + if modeAttr.IsNil() { + modeAttr = workloadBlock.GetAttribute("mode") // try newest version + } + config.WorkloadMetadataConfig.NodeMetadata = modeAttr.AsStringValueOrDefault("UNSPECIFIED", workloadBlock) + } + + return config +} + +func adaptMasterAuth(resource *terraform.Block) gke.MasterAuth { + clientCert := gke.ClientCertificate{ + Metadata: resource.GetMetadata(), + IssueCertificate: defsecTypes.BoolDefault(false, resource.GetMetadata()), + } + + if resource.HasChild("client_certificate_config") { + clientCertAttr := resource.GetBlock("client_certificate_config").GetAttribute("issue_client_certificate") + clientCert.IssueCertificate = clientCertAttr.AsBoolValueOrDefault(false, resource.GetBlock("client_certificate_config")) + clientCert.Metadata = resource.GetBlock("client_certificate_config").GetMetadata() + } + + username := resource.GetAttribute("username").AsStringValueOrDefault("", resource) + password := resource.GetAttribute("password").AsStringValueOrDefault("", resource) + + return gke.MasterAuth{ + Metadata: resource.GetMetadata(), + ClientCertificate: clientCert, + Username: username, + Password: password, + } +} + +func adaptMasterAuthNetworksAsBlocks(parent *terraform.Block, blocks terraform.Blocks) gke.MasterAuthorizedNetworks { + var cidrs []defsecTypes.StringValue + for _, block := range blocks { + for _, cidrBlock := range block.GetBlocks("cidr_blocks") { + if cidrAttr := cidrBlock.GetAttribute("cidr_block"); cidrAttr.IsNotNil() { + cidrs = append(cidrs, cidrAttr.AsStringValues()...) 
+ } + } + } + enabled := defsecTypes.Bool(true, blocks[0].GetMetadata()) + return gke.MasterAuthorizedNetworks{ + Metadata: blocks[0].GetMetadata(), + Enabled: enabled, + CIDRs: cidrs, + } +} diff --git a/internal/adapters/terraform/google/gke/adapt_test.go b/internal/adapters/terraform/google/gke/adapt_test.go new file mode 100644 index 000000000000..043e0148e6c0 --- /dev/null +++ b/internal/adapters/terraform/google/gke/adapt_test.go @@ -0,0 +1,416 @@ +package gke + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/google/gke" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected gke.GKE + }{ + { + name: "separately defined pool", + terraform: ` +resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" +} + +resource "google_container_cluster" "example" { + name = "my-gke-cluster" + + node_config { + metadata = { + disable-legacy-endpoints = true + } + } + + pod_security_policy_config { + enabled = "true" + } + + enable_legacy_abac = "true" + enable_shielded_nodes = "true" + + remove_default_node_pool = true + initial_node_count = 1 + monitoring_service = "monitoring.googleapis.com/kubernetes" + logging_service = "logging.googleapis.com/kubernetes" + + master_auth { + client_certificate_config { + issue_client_certificate = true + } + } + + master_authorized_networks_config { + cidr_blocks { + cidr_block = "10.10.128.0/24" + display_name = "internal" + } + } + + resource_labels = { + "env" = "staging" + } + + private_cluster_config { + enable_private_nodes = true + } + + network_policy { + enabled = true + } + + ip_allocation_policy {} + + enable_autopilot = true + + datapath_provider = "ADVANCED_DATAPATH" +} + +resource "google_container_node_pool" "primary_preemptible_nodes" { + cluster = google_container_cluster.example.name + node_count = 1 + + node_config { + service_account = google_service_account.default.email + metadata = { + disable-legacy-endpoints = true + } + image_type = "COS_CONTAINERD" + workload_metadata_config { + mode = "GCE_METADATA" + } + } + management { + auto_repair = true + auto_upgrade = true + } +} +`, + expected: gke.GKE{ + Clusters: []gke.Cluster{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NodeConfig: gke.NodeConfig{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ImageType: defsecTypes.String("COS_CONTAINERD", defsecTypes.NewTestMisconfigMetadata()), + WorkloadMetadataConfig: gke.WorkloadMetadataConfig{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NodeMetadata: defsecTypes.String("GCE_METADATA", defsecTypes.NewTestMisconfigMetadata()), + }, + ServiceAccount: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + EnableLegacyEndpoints: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + NodePools: []gke.NodePool{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Management: gke.Management{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnableAutoRepair: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + EnableAutoUpgrade: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + NodeConfig: gke.NodeConfig{ + Metadata: 
defsecTypes.NewTestMisconfigMetadata(), + ImageType: defsecTypes.String("COS_CONTAINERD", defsecTypes.NewTestMisconfigMetadata()), + WorkloadMetadataConfig: gke.WorkloadMetadataConfig{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NodeMetadata: defsecTypes.String("GCE_METADATA", defsecTypes.NewTestMisconfigMetadata()), + }, + ServiceAccount: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + EnableLegacyEndpoints: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + IPAllocationPolicy: gke.IPAllocationPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + MasterAuthorizedNetworks: gke.MasterAuthorizedNetworks{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("10.10.128.0/24", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + NetworkPolicy: gke.NetworkPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + DatapathProvider: defsecTypes.String("ADVANCED_DATAPATH", defsecTypes.NewTestMisconfigMetadata()), + PrivateCluster: gke.PrivateCluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnablePrivateNodes: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + LoggingService: defsecTypes.String("logging.googleapis.com/kubernetes", defsecTypes.NewTestMisconfigMetadata()), + MonitoringService: defsecTypes.String("monitoring.googleapis.com/kubernetes", defsecTypes.NewTestMisconfigMetadata()), + MasterAuth: gke.MasterAuth{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ClientCertificate: gke.ClientCertificate{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + IssueCertificate: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Username: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Password: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + EnableShieldedNodes: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + EnableLegacyABAC: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + ResourceLabels: defsecTypes.Map(map[string]string{ + "env": "staging", + }, defsecTypes.NewTestMisconfigMetadata()), + RemoveDefaultNodePool: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + EnableAutpilot: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + { + name: "default node pool", + terraform: ` +resource "google_container_cluster" "example" { + node_config { + service_account = "service-account" + metadata = { + disable-legacy-endpoints = true + } + image_type = "COS" + workload_metadata_config { + mode = "GCE_METADATA" + } + } +} +`, + expected: gke.GKE{ + Clusters: []gke.Cluster{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NodeConfig: gke.NodeConfig{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ImageType: defsecTypes.String("COS", defsecTypes.NewTestMisconfigMetadata()), + WorkloadMetadataConfig: gke.WorkloadMetadataConfig{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NodeMetadata: defsecTypes.String("GCE_METADATA", defsecTypes.NewTestMisconfigMetadata()), + }, + ServiceAccount: defsecTypes.String("service-account", defsecTypes.NewTestMisconfigMetadata()), + EnableLegacyEndpoints: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, 
+ + IPAllocationPolicy: gke.IPAllocationPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + MasterAuthorizedNetworks: gke.MasterAuthorizedNetworks{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{}, + }, + NetworkPolicy: gke.NetworkPolicy{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + DatapathProvider: defsecTypes.StringDefault("DATAPATH_PROVIDER_UNSPECIFIED", defsecTypes.NewTestMisconfigMetadata()), + PrivateCluster: gke.PrivateCluster{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + EnablePrivateNodes: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + LoggingService: defsecTypes.String("logging.googleapis.com/kubernetes", defsecTypes.NewTestMisconfigMetadata()), + MonitoringService: defsecTypes.String("monitoring.googleapis.com/kubernetes", defsecTypes.NewTestMisconfigMetadata()), + MasterAuth: gke.MasterAuth{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + ClientCertificate: gke.ClientCertificate{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + IssueCertificate: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + Username: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Password: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + EnableShieldedNodes: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + EnableLegacyABAC: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + ResourceLabels: defsecTypes.Map(map[string]string{}, defsecTypes.NewTestMisconfigMetadata()), + RemoveDefaultNodePool: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` +resource "google_container_cluster" "example" { + + node_config { + metadata = { + disable-legacy-endpoints = true + } + } + pod_security_policy_config { + enabled = "true" + } + + enable_legacy_abac = "true" + enable_shielded_nodes = "true" + + remove_default_node_pool = true + monitoring_service = "monitoring.googleapis.com/kubernetes" + logging_service = "logging.googleapis.com/kubernetes" + + master_auth { + client_certificate_config { + issue_client_certificate = true + } + } + + master_authorized_networks_config { + cidr_blocks { + cidr_block = "10.10.128.0/24" + } + } + + resource_labels = { + "env" = "staging" + } + + private_cluster_config { + enable_private_nodes = true + } + + network_policy { + enabled = true + } + ip_allocation_policy {} +} + +resource "google_container_node_pool" "primary_preemptible_nodes" { + cluster = google_container_cluster.example.name + + node_config { + metadata = { + disable-legacy-endpoints = true + } + service_account = google_service_account.default.email + image_type = "COS_CONTAINERD" + + workload_metadata_config { + mode = "GCE_METADATA" + } + } + management { + auto_repair = true + auto_upgrade = true + } +} +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Clusters, 1) + cluster := adapted.Clusters[0] 
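+ // the node pool below was attached to this cluster by adaptNodePool via the pool's cluster attribute reference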
+ nodePool := cluster.NodePools[0] + + assert.Equal(t, 2, cluster.Metadata.Range().GetStartLine()) + assert.Equal(t, 44, cluster.Metadata.Range().GetEndLine()) + + assert.Equal(t, 49, cluster.NodeConfig.Metadata.Range().GetStartLine()) + assert.Equal(t, 59, cluster.NodeConfig.Metadata.Range().GetEndLine()) + + assert.Equal(t, 50, cluster.NodeConfig.EnableLegacyEndpoints.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 52, cluster.NodeConfig.EnableLegacyEndpoints.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, cluster.EnableLegacyABAC.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 13, cluster.EnableLegacyABAC.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 14, cluster.EnableShieldedNodes.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 14, cluster.EnableShieldedNodes.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 16, cluster.RemoveDefaultNodePool.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 16, cluster.RemoveDefaultNodePool.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 17, cluster.MonitoringService.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, cluster.MonitoringService.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 18, cluster.LoggingService.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 18, cluster.LoggingService.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 20, cluster.MasterAuth.Metadata.Range().GetStartLine()) + assert.Equal(t, 24, cluster.MasterAuth.Metadata.Range().GetEndLine()) + + assert.Equal(t, 21, cluster.MasterAuth.ClientCertificate.Metadata.Range().GetStartLine()) + assert.Equal(t, 23, cluster.MasterAuth.ClientCertificate.Metadata.Range().GetEndLine()) + + assert.Equal(t, 22, cluster.MasterAuth.ClientCertificate.IssueCertificate.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 22, cluster.MasterAuth.ClientCertificate.IssueCertificate.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 26, cluster.MasterAuthorizedNetworks.Metadata.Range().GetStartLine()) + assert.Equal(t, 30, cluster.MasterAuthorizedNetworks.Metadata.Range().GetEndLine()) + + assert.Equal(t, 28, cluster.MasterAuthorizedNetworks.CIDRs[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 28, cluster.MasterAuthorizedNetworks.CIDRs[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 32, cluster.ResourceLabels.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 34, cluster.ResourceLabels.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 36, cluster.PrivateCluster.Metadata.Range().GetStartLine()) + assert.Equal(t, 38, cluster.PrivateCluster.Metadata.Range().GetEndLine()) + + assert.Equal(t, 37, cluster.PrivateCluster.EnablePrivateNodes.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 37, cluster.PrivateCluster.EnablePrivateNodes.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 40, cluster.NetworkPolicy.Metadata.Range().GetStartLine()) + assert.Equal(t, 42, cluster.NetworkPolicy.Metadata.Range().GetEndLine()) + + assert.Equal(t, 41, cluster.NetworkPolicy.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 41, cluster.NetworkPolicy.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 43, cluster.IPAllocationPolicy.Metadata.Range().GetStartLine()) + assert.Equal(t, 43, cluster.IPAllocationPolicy.Metadata.Range().GetEndLine()) + + assert.Equal(t, 46, nodePool.Metadata.Range().GetStartLine()) + assert.Equal(t, 64, nodePool.Metadata.Range().GetEndLine()) + + assert.Equal(t, 49, nodePool.NodeConfig.Metadata.Range().GetStartLine()) + 
assert.Equal(t, 59, nodePool.NodeConfig.Metadata.Range().GetEndLine()) + + assert.Equal(t, 53, nodePool.NodeConfig.ServiceAccount.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 53, nodePool.NodeConfig.ServiceAccount.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 54, nodePool.NodeConfig.ImageType.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 54, nodePool.NodeConfig.ImageType.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 56, nodePool.NodeConfig.WorkloadMetadataConfig.Metadata.Range().GetStartLine()) + assert.Equal(t, 58, nodePool.NodeConfig.WorkloadMetadataConfig.Metadata.Range().GetEndLine()) + + assert.Equal(t, 57, nodePool.NodeConfig.WorkloadMetadataConfig.NodeMetadata.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 57, nodePool.NodeConfig.WorkloadMetadataConfig.NodeMetadata.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 60, nodePool.Management.Metadata.Range().GetStartLine()) + assert.Equal(t, 63, nodePool.Management.Metadata.Range().GetEndLine()) + + assert.Equal(t, 61, nodePool.Management.EnableAutoRepair.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 61, nodePool.Management.EnableAutoRepair.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 62, nodePool.Management.EnableAutoUpgrade.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 62, nodePool.Management.EnableAutoUpgrade.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/google/iam/adapt.go b/internal/adapters/terraform/google/iam/adapt.go new file mode 100644 index 000000000000..2ea775bf64a7 --- /dev/null +++ b/internal/adapters/terraform/google/iam/adapt.go @@ -0,0 +1,108 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/google/uuid" +) + +func Adapt(modules terraform.Modules) iam.IAM { + return (&adapter{ + orgs: make(map[string]iam.Organization), + modules: modules, + }).Adapt() +} + +type adapter struct { + modules terraform.Modules + orgs map[string]iam.Organization + folders []parentedFolder + projects []parentedProject + workloadIdentityPoolProviders []iam.WorkloadIdentityPoolProvider +} + +func (a *adapter) Adapt() iam.IAM { + a.adaptOrganizationIAM() + a.adaptFolders() + a.adaptFolderIAM() + a.adaptProjects() + a.adaptProjectIAM() + a.adaptWorkloadIdentityPoolProviders() + return a.merge() +} + +func (a *adapter) addOrg(blockID string) { + if _, ok := a.orgs[blockID]; !ok { + a.orgs[blockID] = iam.Organization{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + } + } +} + +func (a *adapter) merge() iam.IAM { + + // add projects to folders, orgs +PROJECT: + for _, project := range a.projects { + for i, folder := range a.folders { + if project.folderBlockID != "" && project.folderBlockID == folder.blockID { + folder.folder.Projects = append(folder.folder.Projects, project.project) + a.folders[i] = folder + continue PROJECT + } + } + if project.orgBlockID != "" { + if org, ok := a.orgs[project.orgBlockID]; ok { + org.Projects = append(org.Projects, project.project) + a.orgs[project.orgBlockID] = org + continue PROJECT + } + } + + org := iam.Organization{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Projects: []iam.Project{project.project}, + } + a.orgs[uuid.NewString()] = org + } + + // add folders to folders, orgs +FOLDER_NESTED: + for _, folder := range a.folders { + for i, existing := range a.folders { + if folder.parentBlockID != "" && 
folder.parentBlockID == existing.blockID { + existing.folder.Folders = append(existing.folder.Folders, folder.folder) + a.folders[i] = existing + continue FOLDER_NESTED + } + + } + } +FOLDER_ORG: + for _, folder := range a.folders { + if folder.parentBlockID != "" { + if org, ok := a.orgs[folder.parentBlockID]; ok { + org.Folders = append(org.Folders, folder.folder) + a.orgs[folder.parentBlockID] = org + continue FOLDER_ORG + } + } else { + // add to placeholder? + org := iam.Organization{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Folders: []iam.Folder{folder.folder}, + } + a.orgs[uuid.NewString()] = org + } + } + + output := iam.IAM{ + Organizations: nil, + WorkloadIdentityPoolProviders: a.workloadIdentityPoolProviders, + } + for _, org := range a.orgs { + output.Organizations = append(output.Organizations, org) + } + return output +} diff --git a/internal/adapters/terraform/google/iam/adapt_test.go b/internal/adapters/terraform/google/iam/adapt_test.go new file mode 100644 index 000000000000..1dc4a8679b28 --- /dev/null +++ b/internal/adapters/terraform/google/iam/adapt_test.go @@ -0,0 +1,266 @@ +package iam + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected iam.IAM + }{ + { + name: "basic", + terraform: ` + data "google_organization" "org" { + domain = "example.com" + } + + resource "google_project" "my_project" { + name = "My Project" + project_id = "your-project-id" + org_id = data.google_organization.org.id + auto_create_network = true + } + + resource "google_folder" "department1" { + display_name = "Department 1" + parent = data.google_organization.org.id + } + + resource "google_folder_iam_member" "admin" { + folder = google_folder.department1.name + role = "roles/editor" + member = "user:alice@gmail.com" + } + + resource "google_folder_iam_binding" "folder-123" { + folder = google_folder.department1.name + role = "roles/nothing" + members = [ + "user:not-alice@gmail.com", + ] + } + + resource "google_organization_iam_member" "org-123" { + org_id = data.google_organization.org.id + role = "roles/whatever" + member = "user:member@gmail.com" + } + + resource "google_organization_iam_binding" "binding" { + org_id = data.google_organization.org.id + role = "roles/browser" + + members = [ + "user:member_2@gmail.com", + ] + } + + resource "google_iam_workload_identity_pool_provider" "example" { + workload_identity_pool_id = "example-pool" + workload_identity_pool_provider_id = "example-provider" + attribute_condition = "assertion.repository_owner=='your-github-organization'" + } +`, + expected: iam.IAM{ + Organizations: []iam.Organization{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + + Projects: []iam.Project{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + AutoCreateNetwork: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + + Folders: []iam.Folder{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Members: []iam.Member{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Member: defsecTypes.String("user:alice@gmail.com", defsecTypes.NewTestMisconfigMetadata()), + Role: 
defsecTypes.String("roles/editor", defsecTypes.NewTestMisconfigMetadata()), + DefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Bindings: []iam.Binding{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Members: []defsecTypes.StringValue{ + defsecTypes.String("user:not-alice@gmail.com", defsecTypes.NewTestMisconfigMetadata()), + }, + Role: defsecTypes.String("roles/nothing", defsecTypes.NewTestMisconfigMetadata()), + IncludesDefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + Members: []iam.Member{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Member: defsecTypes.String("user:member@gmail.com", defsecTypes.NewTestMisconfigMetadata()), + Role: defsecTypes.String("roles/whatever", defsecTypes.NewTestMisconfigMetadata()), + DefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Bindings: []iam.Binding{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Members: []defsecTypes.StringValue{ + defsecTypes.String("user:member_2@gmail.com", defsecTypes.NewTestMisconfigMetadata())}, + Role: defsecTypes.String("roles/browser", defsecTypes.NewTestMisconfigMetadata()), + IncludesDefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + WorkloadIdentityPoolProviders: []iam.WorkloadIdentityPoolProvider{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + + WorkloadIdentityPoolId: defsecTypes.String("example-pool", defsecTypes.NewTestMisconfigMetadata()), + WorkloadIdentityPoolProviderId: defsecTypes.String("example-provider", defsecTypes.NewTestMisconfigMetadata()), + AttributeCondition: defsecTypes.String("assertion.repository_owner=='your-github-organization'", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + data "google_organization" "org" { + domain = "example.com" + } + + resource "google_project" "my_project" { + name = "My Project" + project_id = "your-project-id" + org_id = data.google_organization.org.id + auto_create_network = true + } + + resource "google_folder" "department1" { + display_name = "Department 1" + parent = data.google_organization.org.id + } + + resource "google_folder_iam_binding" "folder-123" { + folder = google_folder.department1.name + role = "roles/nothing" + members = [ + "user:not-alice@gmail.com", + ] + } + + resource "google_folder_iam_member" "admin" { + folder = google_folder.department1.name + role = "roles/editor" + member = "user:alice@gmail.com" + } + + resource "google_organization_iam_member" "org-123" { + org_id = data.google_organization.org.id + role = "roles/whatever" + member = "user:member@gmail.com" + } + + resource "google_organization_iam_binding" "binding" { + org_id = data.google_organization.org.id + role = "roles/browser" + + members = [ + "user:member_2@gmail.com", + ] + } + + resource "google_iam_workload_identity_pool_provider" "example" { + workload_identity_pool_id = "example-pool" + workload_identity_pool_provider_id = "example-provider" + attribute_condition = "assertion.repository_owner=='your-github-organization'" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := 
Adapt(modules) + + require.Len(t, adapted.Organizations, 1) + require.Len(t, adapted.Organizations[0].Projects, 1) + require.Len(t, adapted.Organizations[0].Folders, 1) + require.Len(t, adapted.Organizations[0].Bindings, 1) + require.Len(t, adapted.Organizations[0].Members, 1) + require.Len(t, adapted.WorkloadIdentityPoolProviders, 1) + + project := adapted.Organizations[0].Projects[0] + folder := adapted.Organizations[0].Folders[0] + binding := adapted.Organizations[0].Bindings[0] + member := adapted.Organizations[0].Members[0] + pool := adapted.WorkloadIdentityPoolProviders[0] + + assert.Equal(t, 6, project.Metadata.Range().GetStartLine()) + assert.Equal(t, 11, project.Metadata.Range().GetEndLine()) + + assert.Equal(t, 10, project.AutoCreateNetwork.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, project.AutoCreateNetwork.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, folder.Metadata.Range().GetStartLine()) + assert.Equal(t, 16, folder.Metadata.Range().GetEndLine()) + + assert.Equal(t, 18, folder.Bindings[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 24, folder.Bindings[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 20, folder.Bindings[0].Role.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 20, folder.Bindings[0].Role.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 21, folder.Bindings[0].Members[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 23, folder.Bindings[0].Members[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 26, folder.Members[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 30, folder.Members[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 29, folder.Members[0].Member.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 29, folder.Members[0].Member.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 28, folder.Members[0].Role.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 28, folder.Members[0].Role.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 32, member.Metadata.Range().GetStartLine()) + assert.Equal(t, 36, member.Metadata.Range().GetEndLine()) + + assert.Equal(t, 34, member.Role.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 34, member.Role.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 35, member.Member.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 35, member.Member.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 38, binding.Metadata.Range().GetStartLine()) + assert.Equal(t, 45, binding.Metadata.Range().GetEndLine()) + + assert.Equal(t, 40, binding.Role.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 40, binding.Role.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 42, binding.Members[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 44, binding.Members[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 51, pool.Metadata.Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/google/iam/convert.go b/internal/adapters/terraform/google/iam/convert.go new file mode 100644 index 000000000000..379c368c8d41 --- /dev/null +++ b/internal/adapters/terraform/google/iam/convert.go @@ -0,0 +1,26 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func ParsePolicyBlock(block *terraform.Block) []iam.Binding { + var bindings []iam.Binding + for _, bindingBlock := range block.GetBlocks("binding") { + binding := iam.Binding{ + 
Metadata: bindingBlock.GetMetadata(), + Members: nil, + Role: bindingBlock.GetAttribute("role").AsStringValueOrDefault("", bindingBlock), + IncludesDefaultServiceAccount: defsecTypes.BoolDefault(false, bindingBlock.GetMetadata()), + } + membersAttr := bindingBlock.GetAttribute("members") + members := membersAttr.AsStringValues().AsStrings() + for _, member := range members { + binding.Members = append(binding.Members, defsecTypes.String(member, membersAttr.GetMetadata())) + } + bindings = append(bindings, binding) + } + return bindings +} diff --git a/internal/adapters/terraform/google/iam/folder_iam.go b/internal/adapters/terraform/google/iam/folder_iam.go new file mode 100644 index 000000000000..2ecc02df1557 --- /dev/null +++ b/internal/adapters/terraform/google/iam/folder_iam.go @@ -0,0 +1,117 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + "github.com/aquasecurity/trivy/pkg/types" +) + +// see https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_folder_iam + +func (a *adapter) adaptFolderIAM() { + a.adaptFolderMembers() + a.adaptFolderBindings() +} + +func (a *adapter) adaptFolderMembers() { + for _, iamBlock := range a.modules.GetResourcesByType("google_folder_iam_member") { + member := a.adaptMember(iamBlock) + folderAttr := iamBlock.GetAttribute("folder") + if refBlock, err := a.modules.GetReferencedBlock(folderAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_folder" { + var foundFolder bool + for i, folder := range a.folders { + if folder.blockID == refBlock.ID() { + folder.folder.Members = append(folder.folder.Members, member) + a.folders[i] = folder + foundFolder = true + break + } + } + if foundFolder { + continue + } + } + } + + // we didn't find the folder - add an unmanaged one + a.folders = append(a.folders, parentedFolder{ + folder: iam.Folder{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Members: []iam.Member{member}, + }, + }) + } +} + +func (a *adapter) adaptFolderBindings() { + + for _, iamBlock := range a.modules.GetResourcesByType("google_folder_iam_policy") { + + policyAttr := iamBlock.GetAttribute("policy_data") + if policyAttr.IsNil() { + continue + } + policyBlock, err := a.modules.GetReferencedBlock(policyAttr, iamBlock) + if err != nil { + continue + } + bindings := ParsePolicyBlock(policyBlock) + folderAttr := iamBlock.GetAttribute("folder") + + if refBlock, err := a.modules.GetReferencedBlock(folderAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_folder" { + var foundFolder bool + for i, folder := range a.folders { + if folder.blockID == refBlock.ID() { + folder.folder.Bindings = append(folder.folder.Bindings, bindings...) 
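+ // write the updated copy back into the slice - range yields a copy of each element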
+ a.folders[i] = folder + foundFolder = true + break + } + } + if foundFolder { + continue + } + + } + } + + // we didn't find the folder - add an unmanaged one + a.folders = append(a.folders, parentedFolder{ + folder: iam.Folder{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Bindings: bindings, + }, + }) + } + + for _, iamBlock := range a.modules.GetResourcesByType("google_folder_iam_binding") { + binding := a.adaptBinding(iamBlock) + folderAttr := iamBlock.GetAttribute("folder") + if refBlock, err := a.modules.GetReferencedBlock(folderAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_folder" { + var foundFolder bool + for i, folder := range a.folders { + if folder.blockID == refBlock.ID() { + folder.folder.Bindings = append(folder.folder.Bindings, binding) + a.folders[i] = folder + foundFolder = true + break + } + } + if foundFolder { + continue + } + + } + } + + // we didn't find the folder - add an unmanaged one + a.folders = append(a.folders, parentedFolder{ + folder: iam.Folder{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Bindings: []iam.Binding{binding}, + }, + }) + } +}
diff --git a/internal/adapters/terraform/google/iam/folders.go b/internal/adapters/terraform/google/iam/folders.go new file mode 100644 index 000000000000..c63f2c62b5b9 --- /dev/null +++ b/internal/adapters/terraform/google/iam/folders.go @@ -0,0 +1,40 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/iam" +) + +type parentedFolder struct { + blockID string + parentBlockID string + parentRef string + folder iam.Folder +} + +func (a *adapter) adaptFolders() { + for _, folderBlock := range a.modules.GetResourcesByType("google_folder") { + var folder parentedFolder + parentAttr := folderBlock.GetAttribute("parent") + if parentAttr.IsNil() { + continue + } + + folder.folder.Metadata = folderBlock.GetMetadata() + folder.blockID = folderBlock.ID() + if parentAttr.IsString() { + folder.parentRef = parentAttr.Value().AsString() + } + + if referencedBlock, err := a.modules.GetReferencedBlock(parentAttr, folderBlock); err == nil { + if referencedBlock.TypeLabel() == "google_folder" { + folder.parentBlockID = referencedBlock.ID() + } + if referencedBlock.TypeLabel() == "google_organization" { + folder.parentBlockID = referencedBlock.ID() + a.addOrg(folder.parentBlockID) + } + } + + a.folders = append(a.folders, folder) + } +}
diff --git a/internal/adapters/terraform/google/iam/org_iam.go b/internal/adapters/terraform/google/iam/org_iam.go new file mode 100644 index 000000000000..b7202ef3de77 --- /dev/null +++ b/internal/adapters/terraform/google/iam/org_iam.go @@ -0,0 +1,113 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/google/uuid" +) + +// see https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_organization_iam + +func (a *adapter) adaptOrganizationIAM() { + a.adaptOrganizationMembers() + a.adaptOrganizationBindings() +} + +func (a *adapter) adaptOrganizationMembers() { + for _, iamBlock := range a.modules.GetResourcesByType("google_organization_iam_member") { + member := a.adaptMember(iamBlock) + organizationAttr := iamBlock.GetAttribute("organization") + if organizationAttr.IsNil() { + organizationAttr = iamBlock.GetAttribute("org_id") + } + + if refBlock, err := a.modules.GetReferencedBlock(organizationAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_organization" { +
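// addOrg is a no-op when the org is already tracked, so the lookup below always succeeds +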
a.addOrg(refBlock.ID()) + org := a.orgs[refBlock.ID()] + org.Members = append(org.Members, member) + a.orgs[refBlock.ID()] = org + continue + } + } + + // we didn't find the organization - add an unmanaged one + placeholderID := uuid.NewString() + org := iam.Organization{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Members: []iam.Member{member}, + } + a.orgs[placeholderID] = org + + } +} + +func (a *adapter) adaptOrganizationBindings() { + + for _, iamBlock := range a.modules.GetResourcesByType("google_organization_iam_policy") { + + policyAttr := iamBlock.GetAttribute("policy_data") + if policyAttr.IsNil() { + continue + } + policyBlock, err := a.modules.GetReferencedBlock(policyAttr, iamBlock) + if err != nil { + continue + } + bindings := ParsePolicyBlock(policyBlock) + orgAttr := iamBlock.GetAttribute("organization") + + if refBlock, err := a.modules.GetReferencedBlock(orgAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_organization" { + if org, ok := a.orgs[refBlock.ID()]; ok { + org.Bindings = append(org.Bindings, bindings...) + a.orgs[refBlock.ID()] = org + continue + } + } + } + + // we didn't find the organization - add an unmanaged one + placeholderID := uuid.NewString() + org := iam.Organization{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Bindings: bindings, + } + a.orgs[placeholderID] = org + } + + for _, iamBlock := range a.modules.GetResourcesByType("google_organization_iam_binding") { + binding := a.adaptBinding(iamBlock) + organizationAttr := iamBlock.GetAttribute("organization") + if organizationAttr.IsNil() { + organizationAttr = iamBlock.GetAttribute("org_id") + } + + if refBlock, err := a.modules.GetReferencedBlock(organizationAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_organization" { + a.addOrg(refBlock.ID()) + org := a.orgs[refBlock.ID()] + org.Bindings = append(org.Bindings, binding) + a.orgs[refBlock.ID()] = org + continue + } + } + + // we didn't find the organization - add an unmanaged one + placeholderID := uuid.NewString() + org := iam.Organization{ + Metadata: types.NewUnmanagedMisconfigMetadata(), + Bindings: []iam.Binding{binding}, + } + a.orgs[placeholderID] = org + } +}
diff --git a/internal/adapters/terraform/google/iam/project_iam.go b/internal/adapters/terraform/google/iam/project_iam.go new file mode 100644 index 000000000000..36555f2a8e13 --- /dev/null +++ b/internal/adapters/terraform/google/iam/project_iam.go @@ -0,0 +1,287 @@ +package iam + +import ( + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/terraform" + + "github.com/aquasecurity/trivy/pkg/providers/google/iam" +) + +// see https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/google_project_iam + +func (a *adapter) adaptProjectIAM() { + a.adaptProjectMembers() + a.adaptProjectBindings() +} + +func (a *adapter) adaptMember(iamBlock *terraform.Block) iam.Member { + return AdaptMember(iamBlock, a.modules) +} + +func AdaptMember(iamBlock *terraform.Block, modules terraform.Modules) iam.Member { + member := iam.Member{ + Metadata: iamBlock.GetMetadata(), + Member: iamBlock.GetAttribute("member").AsStringValueOrDefault("", iamBlock), + Role: iamBlock.GetAttribute("role").AsStringValueOrDefault("", iamBlock), + DefaultServiceAccount: defsecTypes.BoolDefault(false, 
iamBlock.GetMetadata()), + } + + memberAttr := iamBlock.GetAttribute("member") + if referencedBlock, err := modules.GetReferencedBlock(memberAttr, iamBlock); err == nil { + if strings.HasSuffix(referencedBlock.TypeLabel(), "_default_service_account") { + member.DefaultServiceAccount = defsecTypes.Bool(true, memberAttr.GetMetadata()) + } + } + + return member +} + +var projectMemberResources = []string{ + "google_project_iam_member", + "google_cloud_run_service_iam_member", + "google_compute_instance_iam_member", + "google_compute_subnetwork_iam_member", + "google_data_catalog_entry_group_iam_member", + "google_folder_iam_member", + "google_pubsub_subscription_iam_member", + "google_pubsub_topic_iam_member", + "google_sourcerepo_repository_iam_member", + "google_spanner_database_iam_member", + "google_spanner_instance_iam_member", + "google_storage_bucket_iam_member", +} + +func (a *adapter) adaptProjectMembers() { + + for _, memberType := range projectMemberResources { + for _, iamBlock := range a.modules.GetResourcesByType(memberType) { + member := a.adaptMember(iamBlock) + projectAttr := iamBlock.GetAttribute("project") + if projectAttr.IsString() { + var foundProject bool + projectID := projectAttr.Value().AsString() + for i, project := range a.projects { + if project.id == projectID { + project.project.Members = append(project.project.Members, member) + a.projects[i] = project + foundProject = true + break + } + } + if foundProject { + continue + } + } + + if refBlock, err := a.modules.GetReferencedBlock(projectAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_project" { + var foundProject bool + for i, project := range a.projects { + if project.blockID == refBlock.ID() { + project.project.Members = append(project.project.Members, member) + a.projects[i] = project + foundProject = true + break + } + } + if foundProject { + continue + } + + } + } + + // we didn't find the project - add an unmanaged one + // unless it already belongs to an existing folder + var foundFolder bool + if refBlock, err := a.modules.GetReferencedBlock(iamBlock.GetAttribute("folder"), iamBlock); err == nil { + for _, folder := range a.folders { + if folder.blockID == refBlock.ID() { + foundFolder = true + } + } + } + if foundFolder { + continue + } + + a.projects = append(a.projects, parentedProject{ + project: iam.Project{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + AutoCreateNetwork: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Members: []iam.Member{member}, + Bindings: nil, + }, + }) + } + } +} + +func (a *adapter) adaptBinding(iamBlock *terraform.Block) iam.Binding { + return AdaptBinding(iamBlock, a.modules) +} + +func AdaptBinding(iamBlock *terraform.Block, modules terraform.Modules) iam.Binding { + binding := iam.Binding{ + Metadata: iamBlock.GetMetadata(), + Members: nil, + Role: iamBlock.GetAttribute("role").AsStringValueOrDefault("", iamBlock), + IncludesDefaultServiceAccount: defsecTypes.BoolDefault(false, iamBlock.GetMetadata()), + } + membersAttr := iamBlock.GetAttribute("members") + members := membersAttr.AsStringValues().AsStrings() + for _, member := range members { + binding.Members = append(binding.Members, defsecTypes.String(member, membersAttr.GetMetadata())) + } + if referencedBlock, err := modules.GetReferencedBlock(membersAttr, iamBlock); err == nil { + if strings.HasSuffix(referencedBlock.TypeLabel(), "_default_service_account") { + binding.IncludesDefaultServiceAccount = defsecTypes.Bool(true, membersAttr.GetMetadata()) + 
} + } + return binding +} + +var projectBindingResources = []string{ + "google_project_iam_binding", + "google_cloud_run_service_iam_binding", + "google_compute_instance_iam_binding", + "google_compute_subnetwork_iam_binding", + "google_data_catalog_entry_group_iam_binding", + "google_folder_iam_binding", + "google_pubsub_subscription_iam_binding", + "google_pubsub_topic_iam_binding", + "google_sourcerepo_repository_iam_binding", + "google_spanner_database_iam_binding", + "google_spanner_instance_iam_binding", + "google_storage_bucket_iam_binding", +} + +func (a *adapter) adaptProjectDataBindings() { + for _, iamBlock := range a.modules.GetResourcesByType("google_project_iam_policy") { + + policyAttr := iamBlock.GetAttribute("policy_data") + if policyAttr.IsNil() { + continue + } + policyBlock, err := a.modules.GetReferencedBlock(policyAttr, iamBlock) + if err != nil { + continue + } + bindings := ParsePolicyBlock(policyBlock) + projectAttr := iamBlock.GetAttribute("project") + if projectAttr.IsString() { + var foundProject bool + projectID := projectAttr.Value().AsString() + for i, project := range a.projects { + if project.id == projectID { + project.project.Bindings = append(project.project.Bindings, bindings...) + a.projects[i] = project + foundProject = true + break + } + } + if foundProject { + continue + } + } + + if refBlock, err := a.modules.GetReferencedBlock(projectAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_project" { + var foundProject bool + for i, project := range a.projects { + if project.blockID == refBlock.ID() { + project.project.Bindings = append(project.project.Bindings, bindings...) + a.projects[i] = project + foundProject = true + break + } + } + if foundProject { + continue + } + + } + } + + // we didn't find the project - add an unmanaged one + a.projects = append(a.projects, parentedProject{ + project: iam.Project{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + AutoCreateNetwork: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Members: nil, + Bindings: bindings, + }, + }) + } + +} + +func (a *adapter) adaptProjectBindings() { + + a.adaptProjectDataBindings() + + for _, bindingType := range projectBindingResources { + for _, iamBlock := range a.modules.GetResourcesByType(bindingType) { + binding := a.adaptBinding(iamBlock) + projectAttr := iamBlock.GetAttribute("project") + if projectAttr.IsString() { + var foundProject bool + projectID := projectAttr.Value().AsString() + for i, project := range a.projects { + if project.id == projectID { + project.project.Bindings = append(project.project.Bindings, binding) + a.projects[i] = project + foundProject = true + break + } + } + if foundProject { + continue + } + } + + if refBlock, err := a.modules.GetReferencedBlock(projectAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_project" { + var foundProject bool + for i, project := range a.projects { + if project.blockID == refBlock.ID() { + project.project.Bindings = append(project.project.Bindings, binding) + a.projects[i] = project + foundProject = true + break + } + } + if foundProject { + continue + } + + } + } + + // we didn't find the project - add an unmanaged one + // unless it already belongs to an existing folder + var foundFolder bool + if refBlock, err := a.modules.GetReferencedBlock(iamBlock.GetAttribute("folder"), iamBlock); err == nil { + for _, folder := range a.folders { + if folder.blockID == refBlock.ID() { + foundFolder = true + } + } + } + if foundFolder { + continue + } + 
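// bindings whose folder resolves to a known google_folder are picked up by the folder adapter, so only genuinely unmatched bindings fall through to a placeholder project +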
a.projects = append(a.projects, parentedProject{ + project: iam.Project{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + AutoCreateNetwork: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Members: nil, + Bindings: []iam.Binding{binding}, + }, + }) + } + } +} diff --git a/internal/adapters/terraform/google/iam/project_iam_test.go b/internal/adapters/terraform/google/iam/project_iam_test.go new file mode 100644 index 000000000000..052090925aa9 --- /dev/null +++ b/internal/adapters/terraform/google/iam/project_iam_test.go @@ -0,0 +1,59 @@ +package iam + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_AdaptBinding(t *testing.T) { + tests := []struct { + name string + terraform string + expected iam.Binding + }{ + { + name: "defined", + terraform: ` + resource "google_organization_iam_binding" "binding" { + org_id = data.google_organization.org.id + role = "roles/browser" + + members = [ + "user:alice@gmail.com", + ] + }`, + expected: iam.Binding{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Members: []defsecTypes.StringValue{ + defsecTypes.String("user:alice@gmail.com", defsecTypes.NewTestMisconfigMetadata())}, + Role: defsecTypes.String("roles/browser", defsecTypes.NewTestMisconfigMetadata()), + IncludesDefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + { + name: "defaults", + terraform: ` + resource "google_organization_iam_binding" "binding" { + }`, + expected: iam.Binding{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Role: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + IncludesDefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := AdaptBinding(modules.GetBlocks()[0], modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/google/iam/projects.go b/internal/adapters/terraform/google/iam/projects.go new file mode 100644 index 000000000000..487b25653ffd --- /dev/null +++ b/internal/adapters/terraform/google/iam/projects.go @@ -0,0 +1,58 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/iam" +) + +type parentedProject struct { + blockID string + orgBlockID string + folderBlockID string + id string + orgID string + folderID string + project iam.Project +} + +func (a *adapter) adaptProjects() { + for _, projectBlock := range a.modules.GetResourcesByType("google_project") { + var project parentedProject + project.project.Metadata = projectBlock.GetMetadata() + idAttr := projectBlock.GetAttribute("project_id") + if !idAttr.IsString() { + continue + } + project.id = idAttr.Value().AsString() + + project.blockID = projectBlock.ID() + + orgAttr := projectBlock.GetAttribute("org_id") + if orgAttr.IsString() { + project.orgID = orgAttr.Value().AsString() + } + folderAttr := projectBlock.GetAttribute("folder_id") + if folderAttr.IsString() { + project.folderID = folderAttr.Value().AsString() + } + + autoCreateNetworkAttr := projectBlock.GetAttribute("auto_create_network") + project.project.AutoCreateNetwork = 
autoCreateNetworkAttr.AsBoolValueOrDefault(true, projectBlock) + + if orgAttr.IsNotNil() { + if referencedBlock, err := a.modules.GetReferencedBlock(orgAttr, projectBlock); err == nil { + if referencedBlock.TypeLabel() == "google_organization" { + project.orgBlockID = referencedBlock.ID() + a.addOrg(project.orgBlockID) + } + } + } + if folderAttr.IsNotNil() { + if referencedBlock, err := a.modules.GetReferencedBlock(folderAttr, projectBlock); err == nil { + if referencedBlock.TypeLabel() == "google_folder" { + project.folderBlockID = referencedBlock.ID() + } + } + } + a.projects = append(a.projects, project) + } +}
diff --git a/internal/adapters/terraform/google/iam/workload_identity_pool_providers.go b/internal/adapters/terraform/google/iam/workload_identity_pool_providers.go new file mode 100644 index 000000000000..4f6349fd014d --- /dev/null +++ b/internal/adapters/terraform/google/iam/workload_identity_pool_providers.go @@ -0,0 +1,18 @@ +package iam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/iam" +) + +// See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/iam_workload_identity_pool_provider + +func (a *adapter) adaptWorkloadIdentityPoolProviders() { + for _, resource := range a.modules.GetResourcesByType("google_iam_workload_identity_pool_provider") { + a.workloadIdentityPoolProviders = append(a.workloadIdentityPoolProviders, iam.WorkloadIdentityPoolProvider{ + Metadata: resource.GetMetadata(), + WorkloadIdentityPoolId: resource.GetAttribute("workload_identity_pool_id").AsStringValueOrDefault("", resource), + WorkloadIdentityPoolProviderId: resource.GetAttribute("workload_identity_pool_provider_id").AsStringValueOrDefault("", resource), + AttributeCondition: resource.GetAttribute("attribute_condition").AsStringValueOrDefault("", resource), + }) + } +}
diff --git a/internal/adapters/terraform/google/kms/adapt.go b/internal/adapters/terraform/google/kms/adapt.go new file mode 100644 index 000000000000..181600ce7de3 --- /dev/null +++ b/internal/adapters/terraform/google/kms/adapt.go @@ -0,0 +1,60 @@ +package kms + +import ( + "strconv" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/terraform" + + "github.com/aquasecurity/trivy/pkg/providers/google/kms" +) + +func Adapt(modules terraform.Modules) kms.KMS { + return kms.KMS{ + KeyRings: adaptKeyRings(modules), + } +} + +func adaptKeyRings(modules terraform.Modules) []kms.KeyRing { + var keyRings []kms.KeyRing + for _, module := range modules { + for _, resource := range module.GetResourcesByType("google_kms_key_ring") { + var keys []kms.Key + + keyBlocks := module.GetReferencingResources(resource, "google_kms_crypto_key", "key_ring") + for _, keyBlock := range keyBlocks { + keys = append(keys, adaptKey(keyBlock)) + } + keyRings = append(keyRings, kms.KeyRing{ + Metadata: resource.GetMetadata(), + Keys: keys, + }) + } + } + return keyRings +} + +func adaptKey(resource *terraform.Block) kms.Key { + + key := kms.Key{ + Metadata: resource.GetMetadata(), + RotationPeriodSeconds: types.IntDefault(-1, resource.GetMetadata()), + } + + rotationPeriodAttr := resource.GetAttribute("rotation_period") + if !rotationPeriodAttr.IsString() { + return key + } + rotationStr := rotationPeriodAttr.Value().AsString() + // an empty value would panic on the slice below, so treat it as unset + if len(rotationStr) == 0 || rotationStr[len(rotationStr)-1:] != "s" { + return key + } + seconds, err := strconv.Atoi(rotationStr[:len(rotationStr)-1]) + if err != nil { + return key + } + + key.RotationPeriodSeconds = types.Int(seconds, 
rotationPeriodAttr.GetMetadata()) + return key +} diff --git a/internal/adapters/terraform/google/kms/adapt_test.go b/internal/adapters/terraform/google/kms/adapt_test.go new file mode 100644 index 000000000000..8e5350e5b57d --- /dev/null +++ b/internal/adapters/terraform/google/kms/adapt_test.go @@ -0,0 +1,126 @@ +package kms + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/google/kms" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_adaptKeyRings(t *testing.T) { + tests := []struct { + name string + terraform string + expected []kms.KeyRing + }{ + { + name: "configured", + terraform: ` + resource "google_kms_key_ring" "keyring" { + name = "keyring-example" + } + + resource "google_kms_crypto_key" "example-key" { + name = "crypto-key-example" + key_ring = google_kms_key_ring.keyring.id + rotation_period = "7776000s" + } +`, + expected: []kms.KeyRing{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Keys: []kms.Key{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RotationPeriodSeconds: defsecTypes.Int(7776000, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + { + name: "no keys", + terraform: ` + resource "google_kms_key_ring" "keyring" { + name = "keyring-example" + } + +`, + expected: []kms.KeyRing{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + }, + }, + }, + { + name: "default rotation period", + terraform: ` + resource "google_kms_key_ring" "keyring" { + name = "keyring-example" + } + + resource "google_kms_crypto_key" "example-key" { + name = "crypto-key-example" + key_ring = google_kms_key_ring.keyring.id + } +`, + expected: []kms.KeyRing{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Keys: []kms.Key{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RotationPeriodSeconds: defsecTypes.Int(-1, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptKeyRings(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "google_kms_key_ring" "keyring" { + name = "keyring-example" + } + + resource "google_kms_crypto_key" "example-key" { + name = "crypto-key-example" + key_ring = google_kms_key_ring.keyring.id + rotation_period = "7776000s" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.KeyRings, 1) + require.Len(t, adapted.KeyRings[0].Keys, 1) + + key := adapted.KeyRings[0].Keys[0] + + assert.Equal(t, 2, adapted.KeyRings[0].Metadata.Range().GetStartLine()) + assert.Equal(t, 4, adapted.KeyRings[0].Metadata.Range().GetEndLine()) + + assert.Equal(t, 6, key.Metadata.Range().GetStartLine()) + assert.Equal(t, 10, key.Metadata.Range().GetEndLine()) + + assert.Equal(t, 9, key.RotationPeriodSeconds.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 9, key.RotationPeriodSeconds.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/google/sql/adapt.go b/internal/adapters/terraform/google/sql/adapt.go new file mode 100644 index 000000000000..57850a4669de --- /dev/null +++ 
b/internal/adapters/terraform/google/sql/adapt.go @@ -0,0 +1,156 @@ +package sql + +import ( + "strconv" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/terraform" + + "github.com/aquasecurity/trivy/pkg/providers/google/sql" +) + +func Adapt(modules terraform.Modules) sql.SQL { + return sql.SQL{ + Instances: adaptInstances(modules), + } +} + +func adaptInstances(modules terraform.Modules) []sql.DatabaseInstance { + var instances []sql.DatabaseInstance + for _, module := range modules { + for _, resource := range module.GetResourcesByType("google_sql_database_instance") { + instances = append(instances, adaptInstance(resource)) + } + } + return instances +} + +func adaptInstance(resource *terraform.Block) sql.DatabaseInstance { + + instance := sql.DatabaseInstance{ + Metadata: resource.GetMetadata(), + DatabaseVersion: resource.GetAttribute("database_version").AsStringValueOrDefault("", resource), + IsReplica: defsecTypes.BoolDefault(false, resource.GetMetadata()), + Settings: sql.Settings{ + Metadata: resource.GetMetadata(), + Flags: sql.Flags{ + Metadata: resource.GetMetadata(), + LogTempFileSize: defsecTypes.IntDefault(-1, resource.GetMetadata()), + LocalInFile: defsecTypes.BoolDefault(false, resource.GetMetadata()), + ContainedDatabaseAuthentication: defsecTypes.BoolDefault(true, resource.GetMetadata()), + CrossDBOwnershipChaining: defsecTypes.BoolDefault(true, resource.GetMetadata()), + LogCheckpoints: defsecTypes.BoolDefault(false, resource.GetMetadata()), + LogConnections: defsecTypes.BoolDefault(false, resource.GetMetadata()), + LogDisconnections: defsecTypes.BoolDefault(false, resource.GetMetadata()), + LogLockWaits: defsecTypes.BoolDefault(false, resource.GetMetadata()), + LogMinMessages: defsecTypes.StringDefault("", resource.GetMetadata()), + LogMinDurationStatement: defsecTypes.IntDefault(-1, resource.GetMetadata()), + }, + Backups: sql.Backups{ + Metadata: resource.GetMetadata(), + Enabled: defsecTypes.BoolDefault(false, resource.GetMetadata()), + }, + IPConfiguration: sql.IPConfiguration{ + Metadata: resource.GetMetadata(), + RequireTLS: defsecTypes.BoolDefault(false, resource.GetMetadata()), + EnableIPv4: defsecTypes.BoolDefault(true, resource.GetMetadata()), + AuthorizedNetworks: nil, + }, + }, + } + + if attr := resource.GetAttribute("master_instance_name"); attr.IsNotNil() { + instance.IsReplica = defsecTypes.Bool(true, attr.GetMetadata()) + } + + if settingsBlock := resource.GetBlock("settings"); settingsBlock.IsNotNil() { + instance.Settings.Metadata = settingsBlock.GetMetadata() + if blocks := settingsBlock.GetBlocks("database_flags"); len(blocks) > 0 { + adaptFlags(blocks, &instance.Settings.Flags) + } + if backupBlock := settingsBlock.GetBlock("backup_configuration"); backupBlock.IsNotNil() { + instance.Settings.Backups.Metadata = backupBlock.GetMetadata() + backupConfigEnabledAttr := backupBlock.GetAttribute("enabled") + instance.Settings.Backups.Enabled = backupConfigEnabledAttr.AsBoolValueOrDefault(false, backupBlock) + } + if settingsBlock.HasChild("ip_configuration") { + instance.Settings.IPConfiguration = adaptIPConfig(settingsBlock.GetBlock("ip_configuration")) + } + } + return instance +} + +// nolint +func adaptFlags(resources terraform.Blocks, flags *sql.Flags) { + for _, resource := range resources { + + nameAttr := resource.GetAttribute("name") + valueAttr := resource.GetAttribute("value") + + if !nameAttr.IsString() || valueAttr.IsNil() { + continue + } + + switch 
nameAttr.Value().AsString() { + case "log_temp_files": + if logTempInt, err := strconv.Atoi(valueAttr.Value().AsString()); err == nil { + flags.LogTempFileSize = defsecTypes.Int(logTempInt, nameAttr.GetMetadata()) + } + case "log_min_messages": + flags.LogMinMessages = valueAttr.AsStringValueOrDefault("", resource) + case "log_min_duration_statement": + if logMinDS, err := strconv.Atoi(valueAttr.Value().AsString()); err == nil { + flags.LogMinDurationStatement = defsecTypes.Int(logMinDS, nameAttr.GetMetadata()) + } + case "local_infile": + flags.LocalInFile = defsecTypes.Bool(valueAttr.Equals("on"), valueAttr.GetMetadata()) + case "log_checkpoints": + flags.LogCheckpoints = defsecTypes.Bool(valueAttr.Equals("on"), valueAttr.GetMetadata()) + case "log_connections": + flags.LogConnections = defsecTypes.Bool(valueAttr.Equals("on"), valueAttr.GetMetadata()) + case "log_disconnections": + flags.LogDisconnections = defsecTypes.Bool(valueAttr.Equals("on"), valueAttr.GetMetadata()) + case "log_lock_waits": + flags.LogLockWaits = defsecTypes.Bool(valueAttr.Equals("on"), valueAttr.GetMetadata()) + case "contained database authentication": + flags.ContainedDatabaseAuthentication = defsecTypes.Bool(valueAttr.Equals("on"), valueAttr.GetMetadata()) + case "cross db ownership chaining": + flags.CrossDBOwnershipChaining = defsecTypes.Bool(valueAttr.Equals("on"), valueAttr.GetMetadata()) + } + } +} + +func adaptIPConfig(resource *terraform.Block) sql.IPConfiguration { + var authorizedNetworks []struct { + Name defsecTypes.StringValue + CIDR defsecTypes.StringValue + } + + tlsRequiredAttr := resource.GetAttribute("require_ssl") + tlsRequiredVal := tlsRequiredAttr.AsBoolValueOrDefault(false, resource) + + ipv4enabledAttr := resource.GetAttribute("ipv4_enabled") + ipv4enabledVal := ipv4enabledAttr.AsBoolValueOrDefault(true, resource) + + authNetworksBlocks := resource.GetBlocks("authorized_networks") + for _, authBlock := range authNetworksBlocks { + nameVal := authBlock.GetAttribute("name").AsStringValueOrDefault("", authBlock) + cidrVal := authBlock.GetAttribute("value").AsStringValueOrDefault("", authBlock) + + authorizedNetworks = append(authorizedNetworks, struct { + Name defsecTypes.StringValue + CIDR defsecTypes.StringValue + }{ + Name: nameVal, + CIDR: cidrVal, + }) + } + + return sql.IPConfiguration{ + Metadata: resource.GetMetadata(), + RequireTLS: tlsRequiredVal, + EnableIPv4: ipv4enabledVal, + AuthorizedNetworks: authorizedNetworks, + } +} diff --git a/internal/adapters/terraform/google/sql/adapt_test.go b/internal/adapters/terraform/google/sql/adapt_test.go new file mode 100644 index 000000000000..d136d1c65cd6 --- /dev/null +++ b/internal/adapters/terraform/google/sql/adapt_test.go @@ -0,0 +1,278 @@ +package sql + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/google/sql" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected sql.SQL + }{ + { + name: "default flags", + terraform: ` + resource "google_sql_database_instance" "db" { + database_version = "POSTGRES_12" + settings { + backup_configuration { + enabled = true + } + ip_configuration { + ipv4_enabled = false + authorized_networks { + value = "108.12.12.0/24" + 
name = "internal" + } + require_ssl = true + } + } + } +`, + expected: sql.SQL{ + Instances: []sql.DatabaseInstance{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + IsReplica: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + DatabaseVersion: defsecTypes.String("POSTGRES_12", defsecTypes.NewTestMisconfigMetadata()), + Settings: sql.Settings{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Backups: sql.Backups{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Enabled: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Flags: sql.Flags{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + LogMinDurationStatement: defsecTypes.Int(-1, defsecTypes.NewTestMisconfigMetadata()), + ContainedDatabaseAuthentication: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + CrossDBOwnershipChaining: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + LocalInFile: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + LogCheckpoints: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + LogConnections: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + LogDisconnections: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + LogLockWaits: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + LogMinMessages: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + LogTempFileSize: defsecTypes.Int(-1, defsecTypes.NewTestMisconfigMetadata()), + }, + IPConfiguration: sql.IPConfiguration{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + RequireTLS: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + EnableIPv4: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + AuthorizedNetworks: []struct { + Name defsecTypes.StringValue + CIDR defsecTypes.StringValue + }{ + { + Name: defsecTypes.String("internal", defsecTypes.NewTestMisconfigMetadata()), + CIDR: defsecTypes.String("108.12.12.0/24", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func Test_adaptInstances(t *testing.T) { + tests := []struct { + name string + terraform string + expected []sql.DatabaseInstance + }{ + { + name: "all flags", + terraform: ` +resource "google_sql_database_instance" "backup_source_instance" { + name = "test-instance" + database_version = "POSTGRES_11" + + project = "test-project" + region = "europe-west6" + deletion_protection = false + settings { + tier = "db-f1-micro" + backup_configuration { + enabled = true + } + ip_configuration { + ipv4_enabled = false + private_network = "test-network" + require_ssl = true + } + database_flags { + name = "log_connections" + value = "on" + } + database_flags { + name = "log_temp_files" + value = "0" + } + database_flags { + name = "log_checkpoints" + value = "on" + } + database_flags { + name = "log_disconnections" + value = "on" + } + database_flags { + name = "log_lock_waits" + value = "on" + } + } +} + `, + expected: []sql.DatabaseInstance{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + DatabaseVersion: defsecTypes.String("POSTGRES_11", defsecTypes.NewTestMisconfigMetadata()), + IsReplica: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Settings: sql.Settings{ + Backups: sql.Backups{ + Enabled: 
defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + Flags: sql.Flags{ + LogConnections: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + LogTempFileSize: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + LogCheckpoints: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + LogDisconnections: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + LogLockWaits: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + ContainedDatabaseAuthentication: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + CrossDBOwnershipChaining: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + LocalInFile: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + LogMinDurationStatement: defsecTypes.Int(-1, defsecTypes.NewTestMisconfigMetadata()), + LogMinMessages: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + IPConfiguration: sql.IPConfiguration{ + EnableIPv4: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + RequireTLS: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptInstances(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "google_sql_database_instance" "backup_source_instance" { + name = "test-instance" + database_version = "POSTGRES_11" + + settings { + backup_configuration { + enabled = true + } + + ip_configuration { + ipv4_enabled = false + require_ssl = true + authorized_networks { + name = "internal" + value = "108.12.12.0/24" + } + } + + database_flags { + name = "log_connections" + value = "on" + } + database_flags { + name = "log_temp_files" + value = "0" + } + database_flags { + name = "log_checkpoints" + value = "on" + } + database_flags { + name = "log_disconnections" + value = "on" + } + database_flags { + name = "log_lock_waits" + value = "on" + } + } + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Instances, 1) + instance := adapted.Instances[0] + + assert.Equal(t, 2, instance.Metadata.Range().GetStartLine()) + assert.Equal(t, 41, instance.Metadata.Range().GetEndLine()) + + assert.Equal(t, 4, instance.DatabaseVersion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, instance.DatabaseVersion.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, instance.Settings.Metadata.Range().GetStartLine()) + assert.Equal(t, 40, instance.Settings.Metadata.Range().GetEndLine()) + + assert.Equal(t, 7, instance.Settings.Backups.Metadata.Range().GetStartLine()) + assert.Equal(t, 9, instance.Settings.Backups.Metadata.Range().GetEndLine()) + + assert.Equal(t, 8, instance.Settings.Backups.Enabled.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 8, instance.Settings.Backups.Enabled.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, instance.Settings.IPConfiguration.Metadata.Range().GetStartLine()) + assert.Equal(t, 18, instance.Settings.IPConfiguration.Metadata.Range().GetEndLine()) + + assert.Equal(t, 12, instance.Settings.IPConfiguration.EnableIPv4.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 12, instance.Settings.IPConfiguration.EnableIPv4.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, 
instance.Settings.IPConfiguration.RequireTLS.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 13, instance.Settings.IPConfiguration.RequireTLS.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 15, instance.Settings.IPConfiguration.AuthorizedNetworks[0].Name.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 15, instance.Settings.IPConfiguration.AuthorizedNetworks[0].Name.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 16, instance.Settings.IPConfiguration.AuthorizedNetworks[0].CIDR.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 16, instance.Settings.IPConfiguration.AuthorizedNetworks[0].CIDR.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 22, instance.Settings.Flags.LogConnections.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 22, instance.Settings.Flags.LogConnections.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 25, instance.Settings.Flags.LogTempFileSize.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 25, instance.Settings.Flags.LogTempFileSize.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 34, instance.Settings.Flags.LogDisconnections.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 34, instance.Settings.Flags.LogDisconnections.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 38, instance.Settings.Flags.LogLockWaits.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 38, instance.Settings.Flags.LogLockWaits.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/google/storage/adapt.go b/internal/adapters/terraform/google/storage/adapt.go new file mode 100644 index 000000000000..38994d29f838 --- /dev/null +++ b/internal/adapters/terraform/google/storage/adapt.go @@ -0,0 +1,129 @@ +package storage + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/storage" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func Adapt(modules terraform.Modules) storage.Storage { + return storage.Storage{ + Buckets: (&adapter{modules: modules}).adaptBuckets(), + } +} + +type adapter struct { + modules terraform.Modules + bindings []parentedBinding + members []parentedMember + bindingMap terraform.ResourceIDResolutions + memberMap terraform.ResourceIDResolutions +} + +func (a *adapter) adaptBuckets() []storage.Bucket { + + a.bindingMap = a.modules.GetChildResourceIDMapByType("google_storage_bucket_iam_binding", "google_storage_bucket_iam_policy") + a.memberMap = a.modules.GetChildResourceIDMapByType("google_storage_bucket_iam_member") + + a.adaptMembers() + a.adaptBindings() + + var buckets []storage.Bucket + for _, module := range a.modules { + for _, resource := range module.GetResourcesByType("google_storage_bucket") { + buckets = append(buckets, a.adaptBucketResource(resource)) + } + } + + orphanage := storage.Bucket{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Name: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + Location: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + EnableUniformBucketLevelAccess: defsecTypes.BoolDefault(false, defsecTypes.NewUnmanagedMisconfigMetadata()), + Members: nil, + Bindings: nil, + } + for _, orphanedBindingID := range a.bindingMap.Orphans() { + for _, binding := range a.bindings { + if binding.blockID == orphanedBindingID { + orphanage.Bindings = append(orphanage.Bindings, binding.bindings...) 
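+ // block IDs are unique, so stop at the first match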
+ break + } + } + } + for _, orphanedMemberID := range a.memberMap.Orphans() { + for _, member := range a.members { + if member.blockID == orphanedMemberID { + orphanage.Members = append(orphanage.Members, member.member) + break + } + } + } + if len(orphanage.Bindings) > 0 || len(orphanage.Members) > 0 { + buckets = append(buckets, orphanage) + } + + return buckets +} + +func (a *adapter) adaptBucketResource(resourceBlock *terraform.Block) storage.Bucket { + + nameAttr := resourceBlock.GetAttribute("name") + nameValue := nameAttr.AsStringValueOrDefault("", resourceBlock) + + locationAttr := resourceBlock.GetAttribute("location") + locationValue := locationAttr.AsStringValueOrDefault("", resourceBlock) + + // See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/storage_bucket#uniform_bucket_level_access + ublaAttr := resourceBlock.GetAttribute("uniform_bucket_level_access") + ublaValue := ublaAttr.AsBoolValueOrDefault(false, resourceBlock) + + bucket := storage.Bucket{ + Metadata: resourceBlock.GetMetadata(), + Name: nameValue, + Location: locationValue, + EnableUniformBucketLevelAccess: ublaValue, + Members: nil, + Bindings: nil, + Encryption: storage.BucketEncryption{ + Metadata: resourceBlock.GetMetadata(), + DefaultKMSKeyName: defsecTypes.StringDefault("", resourceBlock.GetMetadata()), + }, + } + + if encBlock := resourceBlock.GetBlock("encryption"); encBlock.IsNotNil() { + bucket.Encryption.Metadata = encBlock.GetMetadata() + kmsKeyNameAttr := encBlock.GetAttribute("default_kms_key_name") + bucket.Encryption.DefaultKMSKeyName = kmsKeyNameAttr.AsStringValueOrDefault("", encBlock) + } + + var name string + if nameAttr.IsString() { + name = nameAttr.Value().AsString() + } + + for _, member := range a.members { + if member.bucketBlockID == resourceBlock.ID() { + bucket.Members = append(bucket.Members, member.member) + a.memberMap.Resolve(member.blockID) + continue + } + if name != "" && name == member.bucketID { + bucket.Members = append(bucket.Members, member.member) + a.memberMap.Resolve(member.blockID) + } + } + for _, binding := range a.bindings { + if binding.bucketBlockID == resourceBlock.ID() { + bucket.Bindings = append(bucket.Bindings, binding.bindings...) + a.bindingMap.Resolve(binding.blockID) + continue + } + if name != "" && name == binding.bucketID { + bucket.Bindings = append(bucket.Bindings, binding.bindings...) 
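+ // mark the binding as resolved so it is not collected into the orphanage bucket later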
+ a.bindingMap.Resolve(binding.blockID) + } + } + + return bucket +} diff --git a/internal/adapters/terraform/google/storage/adapt_test.go b/internal/adapters/terraform/google/storage/adapt_test.go new file mode 100644 index 000000000000..4711ccf4c065 --- /dev/null +++ b/internal/adapters/terraform/google/storage/adapt_test.go @@ -0,0 +1,198 @@ +package storage + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + "github.com/aquasecurity/trivy/pkg/providers/google/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_Adapt(t *testing.T) { + tests := []struct { + name string + terraform string + expected storage.Storage + }{ + { + name: "defined", + terraform: ` + resource "google_storage_bucket" "static-site" { + name = "image-store.com" + location = "EU" + uniform_bucket_level_access = true + + encryption { + default_kms_key_name = "default-kms-key-name" + } + } + + resource "google_storage_bucket_iam_binding" "binding" { + bucket = google_storage_bucket.static-site.name + role = "roles/storage.admin #1" + members = [ + "group:test@example.com", + ] + } + + resource "google_storage_bucket_iam_member" "example" { + member = "serviceAccount:test@example.com" + bucket = google_storage_bucket.static-site.name + role = "roles/storage.admin #2" + }`, + expected: storage.Storage{ + Buckets: []storage.Bucket{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("image-store.com", defsecTypes.NewTestMisconfigMetadata()), + Location: defsecTypes.String("EU", defsecTypes.NewTestMisconfigMetadata()), + EnableUniformBucketLevelAccess: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + Bindings: []iam.Binding{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Members: []defsecTypes.StringValue{ + defsecTypes.String("group:test@example.com", defsecTypes.NewTestMisconfigMetadata()), + }, + Role: defsecTypes.String("roles/storage.admin #1", defsecTypes.NewTestMisconfigMetadata()), + IncludesDefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Members: []iam.Member{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Member: defsecTypes.String("serviceAccount:test@example.com", defsecTypes.NewTestMisconfigMetadata()), + Role: defsecTypes.String("roles/storage.admin #2", defsecTypes.NewTestMisconfigMetadata()), + DefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Encryption: storage.BucketEncryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + DefaultKMSKeyName: defsecTypes.String("default-kms-key-name", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + { + name: "defaults", + terraform: ` + resource "google_storage_bucket" "static-site" { + } + + resource "google_storage_bucket_iam_binding" "binding" { + bucket = google_storage_bucket.static-site.name + } + + resource "google_storage_bucket_iam_member" "example" { + bucket = google_storage_bucket.static-site.name + }`, + expected: storage.Storage{ + Buckets: []storage.Bucket{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Name: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Location: defsecTypes.String("", 
defsecTypes.NewTestMisconfigMetadata()), + EnableUniformBucketLevelAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + Bindings: []iam.Binding{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Role: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + IncludesDefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Members: []iam.Member{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Member: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Role: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + DefaultServiceAccount: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Encryption: storage.BucketEncryption{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + DefaultKMSKeyName: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := Adapt(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} + +func TestLines(t *testing.T) { + src := ` + resource "google_storage_bucket" "static-site" { + name = "image-store.com" + location = "EU" + uniform_bucket_level_access = true + } + + resource "google_storage_bucket_iam_binding" "binding" { + bucket = google_storage_bucket.static-site.name + role = "roles/storage.admin #1" + members = [ + "group:test@example.com", + ] + } + + resource "google_storage_bucket_iam_member" "example" { + member = "serviceAccount:test@example.com" + bucket = google_storage_bucket.static-site.name + role = "roles/storage.admin #2" + }` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Buckets, 1) + require.Len(t, adapted.Buckets[0].Bindings, 1) + require.Len(t, adapted.Buckets[0].Members, 1) + + bucket := adapted.Buckets[0] + binding := adapted.Buckets[0].Bindings[0] + member := adapted.Buckets[0].Members[0] + + assert.Equal(t, 2, bucket.Metadata.Range().GetStartLine()) + assert.Equal(t, 6, bucket.Metadata.Range().GetEndLine()) + + assert.Equal(t, 3, bucket.Name.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, bucket.Name.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, bucket.Location.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, bucket.Location.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, bucket.EnableUniformBucketLevelAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, bucket.EnableUniformBucketLevelAccess.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 8, binding.Metadata.Range().GetStartLine()) + assert.Equal(t, 14, binding.Metadata.Range().GetEndLine()) + + assert.Equal(t, 10, binding.Role.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, binding.Role.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, binding.Members[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 13, binding.Members[0].GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 16, member.Metadata.Range().GetStartLine()) + assert.Equal(t, 20, member.Metadata.Range().GetEndLine()) + + assert.Equal(t, 17, member.Member.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, member.Member.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 19, member.Role.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 19, member.Role.GetMetadata().Range().GetEndLine()) +} 
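The storage adapter above attaches IAM bindings and members to a bucket in two ways: by Terraform block reference (bucketBlockID) when the bucket argument points at a google_storage_bucket block, or by literal string comparison (bucketID) when it is a plain name. The following is a minimal, self-contained sketch of that matching rule; the attachment type and function names are hypothetical stand-ins for the adapter's parentedBinding/parentedMember records, not its actual API:

package main

import "fmt"

// attachment is a simplified stand-in for the adapter's parented* records.
type attachment struct {
	bucketBlockID string // set when "bucket" references a google_storage_bucket block
	bucketID      string // set when "bucket" is a literal string
}

// belongsTo mirrors the matching order used in adaptBucketResource:
// a block reference wins; otherwise fall back to comparing literal names,
// and an empty bucket name never matches.
func belongsTo(att attachment, blockID, name string) bool {
	if att.bucketBlockID == blockID {
		return true
	}
	return name != "" && name == att.bucketID
}

func main() {
	byRef := attachment{bucketBlockID: "google_storage_bucket.static-site"}
	byName := attachment{bucketID: "image-store.com"}
	fmt.Println(belongsTo(byRef, "google_storage_bucket.static-site", "image-store.com")) // true: matched by reference
	fmt.Println(belongsTo(byName, "some-other-block", "image-store.com"))                 // true: matched by name
	fmt.Println(belongsTo(byName, "some-other-block", ""))                                // false: unnamed buckets never match by name
}

Attachments that match neither way end up in the synthetic "orphanage" bucket, so misconfigured IAM rules are still scanned rather than silently dropped.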
diff --git a/internal/adapters/terraform/google/storage/iam.go b/internal/adapters/terraform/google/storage/iam.go new file mode 100644 index 000000000000..5eac61d55e53 --- /dev/null +++ b/internal/adapters/terraform/google/storage/iam.go @@ -0,0 +1,96 @@ +package storage + +import ( + "github.com/aquasecurity/trivy/internal/adapters/terraform/google/iam" + iamTypes "github.com/aquasecurity/trivy/pkg/providers/google/iam" +) + +type parentedBinding struct { + blockID string + bucketID string + bucketBlockID string + bindings []iamTypes.Binding +} + +type parentedMember struct { + blockID string + bucketID string + bucketBlockID string + member iamTypes.Member +} + +func (a *adapter) adaptBindings() { + + for _, iamBlock := range a.modules.GetResourcesByType("google_storage_bucket_iam_policy") { + var parented parentedBinding + parented.blockID = iamBlock.ID() + + bucketAttr := iamBlock.GetAttribute("bucket") + if bucketAttr.IsString() { + parented.bucketID = bucketAttr.Value().AsString() + } + + if refBlock, err := a.modules.GetReferencedBlock(bucketAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_storage_bucket" { + parented.bucketBlockID = refBlock.ID() + } + } + + policyAttr := iamBlock.GetAttribute("policy_data") + if policyAttr.IsNil() { + continue + } + + policyBlock, err := a.modules.GetReferencedBlock(policyAttr, iamBlock) + if err != nil { + continue + } + + parented.bindings = iam.ParsePolicyBlock(policyBlock) + a.bindings = append(a.bindings, parented) + } + + for _, iamBlock := range a.modules.GetResourcesByType("google_storage_bucket_iam_binding") { + + var parented parentedBinding + parented.blockID = iamBlock.ID() + parented.bindings = []iamTypes.Binding{iam.AdaptBinding(iamBlock, a.modules)} + + bucketAttr := iamBlock.GetAttribute("bucket") + if bucketAttr.IsString() { + parented.bucketID = bucketAttr.Value().AsString() + } + + if refBlock, err := a.modules.GetReferencedBlock(bucketAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_storage_bucket" { + parented.bucketBlockID = refBlock.ID() + } + } + + a.bindings = append(a.bindings, parented) + } +} + +func (a *adapter) adaptMembers() { + + for _, iamBlock := range a.modules.GetResourcesByType("google_storage_bucket_iam_member") { + + var parented parentedMember + parented.blockID = iamBlock.ID() + parented.member = iam.AdaptMember(iamBlock, a.modules) + + bucketAttr := iamBlock.GetAttribute("bucket") + if bucketAttr.IsString() { + parented.bucketID = bucketAttr.Value().AsString() + } + + if refBlock, err := a.modules.GetReferencedBlock(bucketAttr, iamBlock); err == nil { + if refBlock.TypeLabel() == "google_storage_bucket" { + parented.bucketBlockID = refBlock.ID() + } + } + + a.members = append(a.members, parented) + } + +} diff --git a/internal/adapters/terraform/kubernetes/adapt.go b/internal/adapters/terraform/kubernetes/adapt.go new file mode 100644 index 000000000000..e74ae63e5c93 --- /dev/null +++ b/internal/adapters/terraform/kubernetes/adapt.go @@ -0,0 +1,123 @@ +package kubernetes + +import ( + "regexp" + "strings" + + "github.com/aquasecurity/trivy/pkg/providers/kubernetes" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +var versionRegex = regexp.MustCompile(`^v\d+(beta\d+)?$`) + +func Adapt(modules terraform.Modules) kubernetes.Kubernetes { + return kubernetes.Kubernetes{ + NetworkPolicies: adaptNetworkPolicies(modules), + } +} + +func adaptNetworkPolicies(modules terraform.Modules) []kubernetes.NetworkPolicy { + var networkPolicies 
[]kubernetes.NetworkPolicy + for _, module := range modules { + for _, resource := range getBlocksIgnoreVersion(module, "resource", "kubernetes_network_policy") { + networkPolicies = append(networkPolicies, adaptNetworkPolicy(resource)) + } + } + return networkPolicies +} + +func adaptNetworkPolicy(resourceBlock *terraform.Block) kubernetes.NetworkPolicy { + + policy := kubernetes.NetworkPolicy{ + Metadata: resourceBlock.GetMetadata(), + Spec: kubernetes.NetworkPolicySpec{ + Metadata: resourceBlock.GetMetadata(), + Egress: kubernetes.Egress{ + Metadata: resourceBlock.GetMetadata(), + Ports: nil, + DestinationCIDRs: nil, + }, + Ingress: kubernetes.Ingress{ + Metadata: resourceBlock.GetMetadata(), + Ports: nil, + SourceCIDRs: nil, + }, + }, + } + + if specBlock := resourceBlock.GetBlock("spec"); specBlock.IsNotNil() { + if egressBlock := specBlock.GetBlock("egress"); egressBlock.IsNotNil() { + policy.Spec.Egress.Metadata = egressBlock.GetMetadata() + for _, port := range egressBlock.GetBlocks("ports") { + numberAttr := port.GetAttribute("number") + numberVal := numberAttr.AsStringValueOrDefault("", port) + + protocolAttr := port.GetAttribute("protocol") + protocolVal := protocolAttr.AsStringValueOrDefault("", port) + + policy.Spec.Egress.Ports = append(policy.Spec.Egress.Ports, kubernetes.Port{ + Metadata: port.GetMetadata(), + Number: numberVal, + Protocol: protocolVal, + }) + } + + for _, to := range egressBlock.GetBlocks("to") { + cidrAttr := to.GetBlock("ip_block").GetAttribute("cidr") + cidrVal := cidrAttr.AsStringValueOrDefault("", to) + + policy.Spec.Egress.DestinationCIDRs = append(policy.Spec.Egress.DestinationCIDRs, cidrVal) + } + } + + if ingressBlock := specBlock.GetBlock("ingress"); ingressBlock.IsNotNil() { + policy.Spec.Ingress.Metadata = ingressBlock.GetMetadata() + for _, port := range ingressBlock.GetBlocks("ports") { + numberAttr := port.GetAttribute("number") + numberVal := numberAttr.AsStringValueOrDefault("", port) + + protocolAttr := port.GetAttribute("protocol") + protocolVal := protocolAttr.AsStringValueOrDefault("", port) + + policy.Spec.Ingress.Ports = append(policy.Spec.Ingress.Ports, kubernetes.Port{ + Metadata: port.GetMetadata(), + Number: numberVal, + Protocol: protocolVal, + }) + } + + for _, from := range ingressBlock.GetBlocks("from") { + cidrAttr := from.GetBlock("ip_block").GetAttribute("cidr") + cidrVal := cidrAttr.AsStringValueOrDefault("", from) + + policy.Spec.Ingress.SourceCIDRs = append(policy.Spec.Ingress.SourceCIDRs, cidrVal) + } + } + } + + return policy +} + +// https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/guides/versioned-resources +func getBlocksIgnoreVersion(module *terraform.Module, blockType string, resourceType string) terraform.Blocks { + var res terraform.Blocks + for _, block := range module.GetBlocks().OfType(blockType) { + if isMatchingTypeLabel(block.TypeLabel(), resourceType) { + res = append(res, block) + } + } + return res +} + +func isMatchingTypeLabel(typeLabel string, resourceType string) bool { + if typeLabel == resourceType { + return true + } + + versionPart, found := strings.CutPrefix(typeLabel, resourceType+"_") + if !found { + return false + } + + return versionRegex.MatchString(versionPart) +} diff --git a/internal/adapters/terraform/kubernetes/adapt_test.go b/internal/adapters/terraform/kubernetes/adapt_test.go new file mode 100644 index 000000000000..eea390bd2e01 --- /dev/null +++ b/internal/adapters/terraform/kubernetes/adapt_test.go @@ -0,0 +1,60 @@ +package kubernetes + +import ( 
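+ // Note on the matching logic above: the kubernetes provider publishes versioned aliases
+ // of its resources (see the guide linked from adapt.go), so getBlocksIgnoreVersion accepts
+ // an exact type label or the label plus a "_v<N>" / "_v<N>beta<M>" suffix, per versionRegex.
+ // For example "kubernetes_network_policy_v1" and "kubernetes_horizontal_pod_autoscaler_v2beta2"
+ // match their base types, while "kubernetes_network_policy_test_v1" does not.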
"testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsMatchingTypeLabel(t *testing.T) { + tests := []struct { + name string + typeLabel string + resourceType string + expected bool + }{ + { + name: "without version", + typeLabel: "kubernetes_network_policy", + resourceType: "kubernetes_network_policy", + expected: true, + }, + { + name: "v1", + typeLabel: "kubernetes_network_policy_v1", + resourceType: "kubernetes_network_policy", + expected: true, + }, + { + name: "beta version", + typeLabel: "kubernetes_horizontal_pod_autoscaler_v2beta2", + resourceType: "kubernetes_horizontal_pod_autoscaler", + expected: true, + }, + { + name: "another type of resource", + typeLabel: "kubernetes_network_policy", + resourceType: "kubernetes_horizontal_pod_autoscaler", + expected: false, + }, + { + name: "similar resource type", + typeLabel: "kubernetes_network_policy_test_v1", + resourceType: "kubernetes_network_policy", + expected: false, + }, + { + name: "empty resource type", + typeLabel: "kubernetes_network_policy_test_v1", + resourceType: "", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isMatchingTypeLabel(tt.typeLabel, tt.resourceType) + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/computing/adapt.go b/internal/adapters/terraform/nifcloud/computing/adapt.go new file mode 100644 index 000000000000..5d2cec7b130f --- /dev/null +++ b/internal/adapters/terraform/nifcloud/computing/adapt.go @@ -0,0 +1,16 @@ +package computing + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/computing" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) computing.Computing { + + sgAdapter := sgAdapter{sgRuleIDs: modules.GetChildResourceIDMapByType("nifcloud_security_group_rule")} + + return computing.Computing{ + SecurityGroups: sgAdapter.adaptSecurityGroups(modules), + Instances: adaptInstances(modules), + } +} diff --git a/internal/adapters/terraform/nifcloud/computing/adapt_test.go b/internal/adapters/terraform/nifcloud/computing/adapt_test.go new file mode 100644 index 000000000000..d92848402304 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/computing/adapt_test.go @@ -0,0 +1,61 @@ +package computing + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLines(t *testing.T) { + src := ` +resource "nifcloud_instance" "example" { + security_group = nifcloud_security_group.example.group_name + + network_interface { + network_id = "net-COMMON_PRIVATE" + } +} + +resource "nifcloud_security_group" "example" { + group_name = "example" + description = "memo" +} + +resource "nifcloud_security_group_rule" "example" { + type = "IN" + security_group_names = [nifcloud_security_group.example.group_name] + from_port = 22 + to_port = 22 + protocol = "TCP" + description = "memo" + cidr_ip = "1.2.3.4/32" +} +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Instances, 1) + require.Len(t, adapted.SecurityGroups, 1) + + instance := adapted.Instances[0] + sg := adapted.SecurityGroups[0] + + assert.Equal(t, 3, instance.SecurityGroup.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, instance.SecurityGroup.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, 
instance.NetworkInterfaces[0].NetworkID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, instance.NetworkInterfaces[0].NetworkID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 12, sg.Description.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 12, sg.Description.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 21, sg.IngressRules[0].Description.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 21, sg.IngressRules[0].Description.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 22, sg.IngressRules[0].CIDR.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 22, sg.IngressRules[0].CIDR.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/nifcloud/computing/instance.go b/internal/adapters/terraform/nifcloud/computing/instance.go new file mode 100644 index 000000000000..8137f954656a --- /dev/null +++ b/internal/adapters/terraform/nifcloud/computing/instance.go @@ -0,0 +1,35 @@ +package computing + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/computing" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptInstances(modules terraform.Modules) []computing.Instance { + var instances []computing.Instance + + for _, resource := range modules.GetResourcesByType("nifcloud_instance") { + instances = append(instances, adaptInstance(resource)) + } + return instances +} + +func adaptInstance(resource *terraform.Block) computing.Instance { + var networkInterfaces []computing.NetworkInterface + networkInterfaceBlocks := resource.GetBlocks("network_interface") + for _, networkInterfaceBlock := range networkInterfaceBlocks { + networkInterfaces = append( + networkInterfaces, + computing.NetworkInterface{ + Metadata: networkInterfaceBlock.GetMetadata(), + NetworkID: networkInterfaceBlock.GetAttribute("network_id").AsStringValueOrDefault("", resource), + }, + ) + } + + return computing.Instance{ + Metadata: resource.GetMetadata(), + SecurityGroup: resource.GetAttribute("security_group").AsStringValueOrDefault("", resource), + NetworkInterfaces: networkInterfaces, + } +} diff --git a/internal/adapters/terraform/nifcloud/computing/instance_test.go b/internal/adapters/terraform/nifcloud/computing/instance_test.go new file mode 100644 index 000000000000..252e98013e90 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/computing/instance_test.go @@ -0,0 +1,71 @@ +package computing + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/computing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptInstances(t *testing.T) { + tests := []struct { + name string + terraform string + expected []computing.Instance + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_instance" "my_example" { + security_group = "example-security-group" + network_interface { + network_id = "net-COMMON_PRIVATE" + } + } +`, + expected: []computing.Instance{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SecurityGroup: defsecTypes.String("example-security-group", defsecTypes.NewTestMisconfigMetadata()), + NetworkInterfaces: []computing.NetworkInterface{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkID: defsecTypes.String("net-COMMON_PRIVATE", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_instance" 
"my_example" { + network_interface { + } + } +`, + + expected: []computing.Instance{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SecurityGroup: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + NetworkInterfaces: []computing.NetworkInterface{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptInstances(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/computing/security_group.go b/internal/adapters/terraform/nifcloud/computing/security_group.go new file mode 100644 index 000000000000..e88324a91d3c --- /dev/null +++ b/internal/adapters/terraform/nifcloud/computing/security_group.go @@ -0,0 +1,76 @@ +package computing + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/computing" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type sgAdapter struct { + sgRuleIDs terraform.ResourceIDResolutions +} + +func (a *sgAdapter) adaptSecurityGroups(modules terraform.Modules) []computing.SecurityGroup { + var securityGroups []computing.SecurityGroup + for _, resource := range modules.GetResourcesByType("nifcloud_security_group") { + securityGroups = append(securityGroups, a.adaptSecurityGroup(resource, modules)) + } + orphanResources := modules.GetResourceByIDs(a.sgRuleIDs.Orphans()...) + if len(orphanResources) > 0 { + orphanage := computing.SecurityGroup{ + Metadata: defsecTypes.NewUnmanagedMisconfigMetadata(), + Description: defsecTypes.StringDefault("", defsecTypes.NewUnmanagedMisconfigMetadata()), + IngressRules: nil, + } + for _, sgRule := range orphanResources { + if sgRule.GetAttribute("type").Equals("IN") { + orphanage.IngressRules = append(orphanage.IngressRules, adaptSGRule(sgRule, modules)) + } + if sgRule.GetAttribute("type").Equals("OUT") { + orphanage.EgressRules = append(orphanage.EgressRules, adaptSGRule(sgRule, modules)) + } + } + securityGroups = append(securityGroups, orphanage) + } + + return securityGroups +} + +func (a *sgAdapter) adaptSecurityGroup(resource *terraform.Block, module terraform.Modules) computing.SecurityGroup { + var ingressRules, egressRules []computing.SecurityGroupRule + + descriptionAttr := resource.GetAttribute("description") + descriptionVal := descriptionAttr.AsStringValueOrDefault("", resource) + + rulesBlocks := module.GetReferencingResources(resource, "nifcloud_security_group_rule", "security_group_names") + for _, ruleBlock := range rulesBlocks { + a.sgRuleIDs.Resolve(ruleBlock.ID()) + if ruleBlock.GetAttribute("type").Equals("IN") { + ingressRules = append(ingressRules, adaptSGRule(ruleBlock, module)) + } + if ruleBlock.GetAttribute("type").Equals("OUT") { + egressRules = append(egressRules, adaptSGRule(ruleBlock, module)) + } + } + + return computing.SecurityGroup{ + Metadata: resource.GetMetadata(), + Description: descriptionVal, + IngressRules: ingressRules, + EgressRules: egressRules, + } +} + +func adaptSGRule(resource *terraform.Block, modules terraform.Modules) computing.SecurityGroupRule { + ruleDescAttr := resource.GetAttribute("description") + ruleDescVal := ruleDescAttr.AsStringValueOrDefault("", resource) + + cidrAttr := resource.GetAttribute("cidr_ip") + cidrVal := 
cidrAttr.AsStringValueOrDefault("", resource) + + return computing.SecurityGroupRule{ + Metadata: resource.GetMetadata(), + Description: ruleDescVal, + CIDR: cidrVal, + } +} diff --git a/internal/adapters/terraform/nifcloud/computing/security_group_test.go b/internal/adapters/terraform/nifcloud/computing/security_group_test.go new file mode 100644 index 000000000000..96b42fc9cebc --- /dev/null +++ b/internal/adapters/terraform/nifcloud/computing/security_group_test.go @@ -0,0 +1,86 @@ +package computing + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/computing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptSecurityGroups(t *testing.T) { + tests := []struct { + name string + terraform string + expected []computing.SecurityGroup + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_security_group" "example" { + group_name = "example" + description = "memo" + } + + resource "nifcloud_security_group_rule" "example" { + type = "IN" + security_group_names = [nifcloud_security_group.example.group_name] + from_port = 22 + to_port = 22 + protocol = "TCP" + description = "memo" + cidr_ip = "1.2.3.4/32" + } +`, + expected: []computing.SecurityGroup{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("memo", defsecTypes.NewTestMisconfigMetadata()), + IngressRules: []computing.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + CIDR: defsecTypes.String("1.2.3.4/32", defsecTypes.NewTestMisconfigMetadata()), + Description: defsecTypes.String("memo", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_security_group" "example" { + } + + resource "nifcloud_security_group_rule" "example" { + type = "IN" + security_group_names = [nifcloud_security_group.example.group_name] + } + +`, + + expected: []computing.SecurityGroup{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + IngressRules: []computing.SecurityGroupRule{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + CIDR: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Description: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + sgAdapter := sgAdapter{sgRuleIDs: modules.GetChildResourceIDMapByType("nifcloud_security_group_rule")} + adapted := sgAdapter.adaptSecurityGroups(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/dns/adapt.go b/internal/adapters/terraform/nifcloud/dns/adapt.go new file mode 100644 index 000000000000..7ce0982eb6a1 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/dns/adapt.go @@ -0,0 +1,12 @@ +package dns + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/dns" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) dns.DNS { + return dns.DNS{ + Records: adaptRecords(modules), + } +} diff --git a/internal/adapters/terraform/nifcloud/dns/adapt_test.go b/internal/adapters/terraform/nifcloud/dns/adapt_test.go new file mode 100644 
index 000000000000..e5e60e9d9853 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/dns/adapt_test.go @@ -0,0 +1,32 @@ +package dns + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLines(t *testing.T) { + src := ` +resource "nifcloud_dns_record" "example" { + type = "A" + record = "example-record" +} +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.Records, 1) + + record := adapted.Records[0] + + assert.Equal(t, 3, record.Type.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, record.Type.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, record.Record.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, record.Record.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/nifcloud/dns/record.go b/internal/adapters/terraform/nifcloud/dns/record.go new file mode 100644 index 000000000000..a2b5dc332a03 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/dns/record.go @@ -0,0 +1,23 @@ +package dns + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/dns" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptRecords(modules terraform.Modules) []dns.Record { + var records []dns.Record + + for _, resource := range modules.GetResourcesByType("nifcloud_dns_record") { + records = append(records, adaptRecord(resource)) + } + return records +} + +func adaptRecord(resource *terraform.Block) dns.Record { + return dns.Record{ + Metadata: resource.GetMetadata(), + Record: resource.GetAttribute("record").AsStringValueOrDefault("", resource), + Type: resource.GetAttribute("type").AsStringValueOrDefault("", resource), + } +} diff --git a/internal/adapters/terraform/nifcloud/dns/record_test.go b/internal/adapters/terraform/nifcloud/dns/record_test.go new file mode 100644 index 000000000000..8843cd3b7732 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/dns/record_test.go @@ -0,0 +1,56 @@ +package dns + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/dns" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptRecords(t *testing.T) { + tests := []struct { + name string + terraform string + expected []dns.Record + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_dns_record" "example" { + type = "A" + record = "example-record" + } +`, + expected: []dns.Record{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String("A", defsecTypes.NewTestMisconfigMetadata()), + Record: defsecTypes.String("example-record", defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_dns_record" "example" { + } +`, + + expected: []dns.Record{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Type: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + Record: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptRecords(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) 
+ } +} diff --git a/internal/adapters/terraform/nifcloud/nas/adapt.go b/internal/adapters/terraform/nifcloud/nas/adapt.go new file mode 100644 index 000000000000..e93e46400835 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/nas/adapt.go @@ -0,0 +1,13 @@ +package nas + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/nas" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) nas.NAS { + return nas.NAS{ + NASSecurityGroups: adaptNASSecurityGroups(modules), + NASInstances: adaptNASInstances(modules), + } +} diff --git a/internal/adapters/terraform/nifcloud/nas/adapt_test.go b/internal/adapters/terraform/nifcloud/nas/adapt_test.go new file mode 100644 index 000000000000..0998303dbed6 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/nas/adapt_test.go @@ -0,0 +1,44 @@ +package nas + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLines(t *testing.T) { + src := ` +resource "nifcloud_nas_instance" "example" { + network_id = "example-network" +} + +resource "nifcloud_nas_security_group" "example" { + description = "memo" + + rule { + cidr_ip = "0.0.0.0/0" + } +} +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.NASInstances, 1) + require.Len(t, adapted.NASSecurityGroups, 1) + + nasInstance := adapted.NASInstances[0] + nasSecurityGroup := adapted.NASSecurityGroups[0] + + assert.Equal(t, 3, nasInstance.NetworkID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, nasInstance.NetworkID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 7, nasSecurityGroup.Description.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, nasSecurityGroup.Description.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 10, nasSecurityGroup.CIDRs[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 10, nasSecurityGroup.CIDRs[0].GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/nifcloud/nas/nas_instance.go b/internal/adapters/terraform/nifcloud/nas/nas_instance.go new file mode 100644 index 000000000000..49de292b229f --- /dev/null +++ b/internal/adapters/terraform/nifcloud/nas/nas_instance.go @@ -0,0 +1,22 @@ +package nas + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/nas" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptNASInstances(modules terraform.Modules) []nas.NASInstance { + var nasInstances []nas.NASInstance + + for _, resource := range modules.GetResourcesByType("nifcloud_nas_instance") { + nasInstances = append(nasInstances, adaptNASInstance(resource)) + } + return nasInstances +} + +func adaptNASInstance(resource *terraform.Block) nas.NASInstance { + return nas.NASInstance{ + Metadata: resource.GetMetadata(), + NetworkID: resource.GetAttribute("network_id").AsStringValueOrDefault("net-COMMON_PRIVATE", resource), + } +} diff --git a/internal/adapters/terraform/nifcloud/nas/nas_instance_test.go b/internal/adapters/terraform/nifcloud/nas/nas_instance_test.go new file mode 100644 index 000000000000..a13253b11c20 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/nas/nas_instance_test.go @@ -0,0 +1,54 @@ +package nas + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/nas" + + 
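+ // The "defaults" case below relies on adaptNASInstance falling back to
+ // "net-COMMON_PRIVATE" (the provider's common private network) when network_id is unset.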
"github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptNASInstances(t *testing.T) { + tests := []struct { + name string + terraform string + expected []nas.NASInstance + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_nas_instance" "example" { + network_id = "example-network" + } +`, + expected: []nas.NASInstance{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkID: defsecTypes.String("example-network", defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_nas_instance" "example" { + } +`, + + expected: []nas.NASInstance{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkID: defsecTypes.String("net-COMMON_PRIVATE", defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptNASInstances(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/nas/nas_security_group.go b/internal/adapters/terraform/nifcloud/nas/nas_security_group.go new file mode 100644 index 000000000000..d8b51e004d20 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/nas/nas_security_group.go @@ -0,0 +1,30 @@ +package nas + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/nas" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptNASSecurityGroups(modules terraform.Modules) []nas.NASSecurityGroup { + var nasSecurityGroups []nas.NASSecurityGroup + + for _, resource := range modules.GetResourcesByType("nifcloud_nas_security_group") { + nasSecurityGroups = append(nasSecurityGroups, adaptNASSecurityGroup(resource)) + } + return nasSecurityGroups +} + +func adaptNASSecurityGroup(resource *terraform.Block) nas.NASSecurityGroup { + var cidrs []defsecTypes.StringValue + + for _, rule := range resource.GetBlocks("rule") { + cidrs = append(cidrs, rule.GetAttribute("cidr_ip").AsStringValueOrDefault("", resource)) + } + + return nas.NASSecurityGroup{ + Metadata: resource.GetMetadata(), + Description: resource.GetAttribute("description").AsStringValueOrDefault("", resource), + CIDRs: cidrs, + } +} diff --git a/internal/adapters/terraform/nifcloud/nas/nas_security_group_test.go b/internal/adapters/terraform/nifcloud/nas/nas_security_group_test.go new file mode 100644 index 000000000000..9d1417e24c5b --- /dev/null +++ b/internal/adapters/terraform/nifcloud/nas/nas_security_group_test.go @@ -0,0 +1,66 @@ +package nas + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/nas" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptNASSecurityGroups(t *testing.T) { + tests := []struct { + name string + terraform string + expected []nas.NASSecurityGroup + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_nas_security_group" "example" { + description = "memo" + + rule { + cidr_ip = "0.0.0.0/0" + } + } +`, + expected: []nas.NASSecurityGroup{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("memo", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: 
[]defsecTypes.StringValue{ + defsecTypes.String("0.0.0.0/0", defsecTypes.NewTestMisconfigMetadata()), + }, + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_nas_security_group" "example" { + rule { + } + } +`, + + expected: []nas.NASSecurityGroup{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptNASSecurityGroups(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/network/adapt.go b/internal/adapters/terraform/nifcloud/network/adapt.go new file mode 100644 index 000000000000..f69213eab0d8 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/adapt.go @@ -0,0 +1,16 @@ +package network + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) network.Network { + + return network.Network{ + ElasticLoadBalancers: adaptElasticLoadBalancers(modules), + LoadBalancers: adaptLoadBalancers(modules), + Routers: adaptRouters(modules), + VpnGateways: adaptVpnGateways(modules), + } +} diff --git a/internal/adapters/terraform/nifcloud/network/adapt_test.go b/internal/adapters/terraform/nifcloud/network/adapt_test.go new file mode 100644 index 000000000000..9255e7e16d3b --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/adapt_test.go @@ -0,0 +1,83 @@ +package network + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLines(t *testing.T) { + src := ` +resource "nifcloud_elb" "example" { + protocol = "HTTP" + + network_interface { + network_id = "net-COMMON_PRIVATE" + is_vip_network = false + } +} + +resource "nifcloud_load_balancer" "example" { + ssl_policy_id = "example-ssl-policy-id" + load_balancer_port = 8080 +} + +resource "nifcloud_router" "example" { + security_group = nifcloud_security_group.example.group_name + + network_interface { + network_id = "net-COMMON_PRIVATE" + } +} + +resource "nifcloud_security_group" "example" { + group_name = "example" + description = "memo" +} + +resource "nifcloud_vpn_gateway" "example" { + security_group = nifcloud_security_group.example.group_name +} +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.ElasticLoadBalancers, 1) + require.Len(t, adapted.LoadBalancers, 1) + require.Len(t, adapted.Routers, 1) + require.Len(t, adapted.VpnGateways, 1) + + elb := adapted.ElasticLoadBalancers[0] + lb := adapted.LoadBalancers[0] + router := adapted.Routers[0] + vpngw := adapted.VpnGateways[0] + + assert.Equal(t, 3, elb.Listeners[0].Protocol.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, elb.Listeners[0].Protocol.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, elb.NetworkInterfaces[0].NetworkID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, elb.NetworkInterfaces[0].NetworkID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 7, 
elb.NetworkInterfaces[0].IsVipNetwork.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, elb.NetworkInterfaces[0].IsVipNetwork.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 12, lb.Listeners[0].TLSPolicy.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 12, lb.Listeners[0].TLSPolicy.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 13, lb.Listeners[0].Protocol.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 13, lb.Listeners[0].Protocol.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 17, router.SecurityGroup.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 17, router.SecurityGroup.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 20, router.NetworkInterfaces[0].NetworkID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 20, router.NetworkInterfaces[0].NetworkID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 30, vpngw.SecurityGroup.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 30, vpngw.SecurityGroup.GetMetadata().Range().GetEndLine()) + +} diff --git a/internal/adapters/terraform/nifcloud/network/elastic_load_balancer.go b/internal/adapters/terraform/nifcloud/network/elastic_load_balancer.go new file mode 100644 index 000000000000..e8095117f9ea --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/elastic_load_balancer.go @@ -0,0 +1,50 @@ +package network + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptElasticLoadBalancers(modules terraform.Modules) []network.ElasticLoadBalancer { + var elasticLoadBalancers []network.ElasticLoadBalancer + + for _, resource := range modules.GetResourcesByType("nifcloud_elb") { + elasticLoadBalancers = append(elasticLoadBalancers, adaptElasticLoadBalancer(resource, modules)) + } + return elasticLoadBalancers +} + +func adaptElasticLoadBalancer(resource *terraform.Block, modules terraform.Modules) network.ElasticLoadBalancer { + var listeners []network.ElasticLoadBalancerListener + var networkInterfaces []network.NetworkInterface + + networkInterfaceBlocks := resource.GetBlocks("network_interface") + for _, networkInterfaceBlock := range networkInterfaceBlocks { + networkInterfaces = append( + networkInterfaces, + network.NetworkInterface{ + Metadata: networkInterfaceBlock.GetMetadata(), + NetworkID: networkInterfaceBlock.GetAttribute("network_id").AsStringValueOrDefault("", resource), + IsVipNetwork: networkInterfaceBlock.GetAttribute("is_vip_network").AsBoolValueOrDefault(true, resource), + }, + ) + } + + listeners = append(listeners, adaptElasticLoadBalancerListener(resource)) + for _, listenerBlock := range modules.GetReferencingResources(resource, "nifcloud_elb_listener", "elb_id") { + listeners = append(listeners, adaptElasticLoadBalancerListener(listenerBlock)) + } + + return network.ElasticLoadBalancer{ + Metadata: resource.GetMetadata(), + NetworkInterfaces: networkInterfaces, + Listeners: listeners, + } +} + +func adaptElasticLoadBalancerListener(resource *terraform.Block) network.ElasticLoadBalancerListener { + return network.ElasticLoadBalancerListener{ + Metadata: resource.GetMetadata(), + Protocol: resource.GetAttribute("protocol").AsStringValueOrDefault("", resource), + } +} diff --git a/internal/adapters/terraform/nifcloud/network/elastic_load_balancer_test.go b/internal/adapters/terraform/nifcloud/network/elastic_load_balancer_test.go new file mode 100644 index 000000000000..cb28ce4edc65 --- /dev/null +++ 
b/internal/adapters/terraform/nifcloud/network/elastic_load_balancer_test.go @@ -0,0 +1,90 @@ +package network + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptElasticLoadBalancers(t *testing.T) { + tests := []struct { + name string + terraform string + expected []network.ElasticLoadBalancer + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_elb" "example" { + protocol = "HTTP" + + network_interface { + network_id = "net-COMMON_PRIVATE" + is_vip_network = false + } + } + + resource "nifcloud_elb_listener" "example" { + elb_id = nifcloud_elb.example.id + protocol = "HTTPS" + } +`, + expected: []network.ElasticLoadBalancer{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkInterfaces: []network.NetworkInterface{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkID: defsecTypes.String("net-COMMON_PRIVATE", defsecTypes.NewTestMisconfigMetadata()), + IsVipNetwork: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Listeners: []network.ElasticLoadBalancerListener{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Protocol: defsecTypes.String("HTTP", defsecTypes.NewTestMisconfigMetadata()), + }, + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Protocol: defsecTypes.String("HTTPS", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_elb" "example" { + network_interface { + } + } +`, + + expected: []network.ElasticLoadBalancer{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkInterfaces: []network.NetworkInterface{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + IsVipNetwork: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }, + }, + Listeners: []network.ElasticLoadBalancerListener{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + }}, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptElasticLoadBalancers(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/network/load_balancer.go b/internal/adapters/terraform/nifcloud/network/load_balancer.go new file mode 100644 index 000000000000..4f7b8f7a00b2 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/load_balancer.go @@ -0,0 +1,67 @@ +package network + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptLoadBalancers(modules terraform.Modules) []network.LoadBalancer { + var loadBalancers []network.LoadBalancer + + for _, resource := range modules.GetResourcesByType("nifcloud_load_balancer") { + loadBalancers = append(loadBalancers, adaptLoadBalancer(resource, modules)) + } + + return loadBalancers +} + +func adaptLoadBalancer(resource *terraform.Block, modules terraform.Modules) network.LoadBalancer { + var listeners []network.LoadBalancerListener + + listeners = append(listeners, adaptListener(resource)) + for _, listenerBlock := 
range modules.GetReferencingResources(resource, "nifcloud_load_balancer_listener", "load_balancer_name") { + listeners = append(listeners, adaptListener(listenerBlock)) + } + + return network.LoadBalancer{ + Metadata: resource.GetMetadata(), + Listeners: listeners, + } +} + +func adaptListener(resource *terraform.Block) network.LoadBalancerListener { + protocolVal := defsecTypes.String("", resource.GetMetadata()) + policyVal := defsecTypes.String("", resource.GetMetadata()) + + portAttr := resource.GetAttribute("load_balancer_port") + if portAttr.IsNotNil() && portAttr.IsNumber() { + port := portAttr.AsNumber() + switch port { + case 21: + protocolVal = defsecTypes.String("FTP", portAttr.GetMetadata()) + case 80: + protocolVal = defsecTypes.String("HTTP", portAttr.GetMetadata()) + case 443: + protocolVal = defsecTypes.String("HTTPS", portAttr.GetMetadata()) + default: + protocolVal = defsecTypes.String("custom", portAttr.GetMetadata()) + } + } + + policyIDAttr := resource.GetAttribute("ssl_policy_id") + if policyIDAttr.IsNotNil() && policyIDAttr.IsString() { + policyVal = policyIDAttr.AsStringValueOrDefault("", resource) + } + + policyNameAttr := resource.GetAttribute("ssl_policy_name") + if policyNameAttr.IsNotNil() && policyNameAttr.IsString() { + policyVal = policyNameAttr.AsStringValueOrDefault("", resource) + } + + return network.LoadBalancerListener{ + Metadata: resource.GetMetadata(), + Protocol: protocolVal, + TLSPolicy: policyVal, + } +} diff --git a/internal/adapters/terraform/nifcloud/network/load_balancer_test.go b/internal/adapters/terraform/nifcloud/network/load_balancer_test.go new file mode 100644 index 000000000000..83049ff4c59a --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/load_balancer_test.go @@ -0,0 +1,75 @@ +package network + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptLoadBalancers(t *testing.T) { + tests := []struct { + name string + terraform string + expected []network.LoadBalancer + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_load_balancer" "example" { + load_balancer_name = "example" + load_balancer_port = 80 + ssl_policy_id = "example-ssl-policy-id" + } + + resource "nifcloud_load_balancer_listener" "example" { + load_balancer_name = nifcloud_load_balancer.example.load_balancer_name + load_balancer_port = 443 + ssl_policy_name = "example-ssl-policy-name" + } + +`, + expected: []network.LoadBalancer{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Listeners: []network.LoadBalancerListener{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + TLSPolicy: defsecTypes.String("example-ssl-policy-id", defsecTypes.NewTestMisconfigMetadata()), + Protocol: defsecTypes.String("HTTP", defsecTypes.NewTestMisconfigMetadata()), + }, + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + TLSPolicy: defsecTypes.String("example-ssl-policy-name", defsecTypes.NewTestMisconfigMetadata()), + Protocol: defsecTypes.String("HTTPS", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_load_balancer" "example" { + } +`, + + expected: []network.LoadBalancer{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Listeners: []network.LoadBalancerListener{{ + Metadata: 
defsecTypes.NewTestMisconfigMetadata(), + }}, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptLoadBalancers(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/network/router.go b/internal/adapters/terraform/nifcloud/network/router.go new file mode 100644 index 000000000000..7ecb05dbb29e --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/router.go @@ -0,0 +1,37 @@ +package network + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptRouters(modules terraform.Modules) []network.Router { + var routers []network.Router + + for _, resource := range modules.GetResourcesByType("nifcloud_router") { + routers = append(routers, adaptRouter(resource)) + } + return routers +} + +func adaptRouter(resource *terraform.Block) network.Router { + var networkInterfaces []network.NetworkInterface + networkInterfaceBlocks := resource.GetBlocks("network_interface") + for _, networkInterfaceBlock := range networkInterfaceBlocks { + networkInterfaces = append( + networkInterfaces, + network.NetworkInterface{ + Metadata: networkInterfaceBlock.GetMetadata(), + NetworkID: networkInterfaceBlock.GetAttribute("network_id").AsStringValueOrDefault("", resource), + IsVipNetwork: types.Bool(false, networkInterfaceBlock.GetMetadata()), + }, + ) + } + + return network.Router{ + Metadata: resource.GetMetadata(), + SecurityGroup: resource.GetAttribute("security_group").AsStringValueOrDefault("", resource), + NetworkInterfaces: networkInterfaces, + } +} diff --git a/internal/adapters/terraform/nifcloud/network/router_test.go b/internal/adapters/terraform/nifcloud/network/router_test.go new file mode 100644 index 000000000000..62f1de2bd6fa --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/router_test.go @@ -0,0 +1,70 @@ +package network + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptRouters(t *testing.T) { + tests := []struct { + name string + terraform string + expected []network.Router + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_router" "example" { + security_group = "example-security-group" + network_interface { + network_id = "net-COMMON_PRIVATE" + } + } +`, + expected: []network.Router{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SecurityGroup: defsecTypes.String("example-security-group", defsecTypes.NewTestMisconfigMetadata()), + NetworkInterfaces: []network.NetworkInterface{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + NetworkID: defsecTypes.String("net-COMMON_PRIVATE", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_router" "example" { + network_interface { + } + } +`, + + expected: []network.Router{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SecurityGroup: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + NetworkInterfaces: []network.NetworkInterface{ + { + Metadata: defsecTypes.NewTestMisconfigMetadata(), + 
NetworkID: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptRouters(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/network/vpn_gateway.go b/internal/adapters/terraform/nifcloud/network/vpn_gateway.go new file mode 100644 index 000000000000..2026c06f9c46 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/vpn_gateway.go @@ -0,0 +1,22 @@ +package network + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptVpnGateways(modules terraform.Modules) []network.VpnGateway { + var vpnGateways []network.VpnGateway + + for _, resource := range modules.GetResourcesByType("nifcloud_vpn_gateway") { + vpnGateways = append(vpnGateways, adaptVpnGateway(resource)) + } + return vpnGateways +} + +func adaptVpnGateway(resource *terraform.Block) network.VpnGateway { + return network.VpnGateway{ + Metadata: resource.GetMetadata(), + SecurityGroup: resource.GetAttribute("security_group").AsStringValueOrDefault("", resource), + } +} diff --git a/internal/adapters/terraform/nifcloud/network/vpn_gateway_test.go b/internal/adapters/terraform/nifcloud/network/vpn_gateway_test.go new file mode 100644 index 000000000000..d3ca295a9775 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/network/vpn_gateway_test.go @@ -0,0 +1,53 @@ +package network + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptVpnGateways(t *testing.T) { + tests := []struct { + name string + terraform string + expected []network.VpnGateway + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_vpn_gateway" "example" { + security_group = "example-security-group" + } +`, + expected: []network.VpnGateway{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SecurityGroup: defsecTypes.String("example-security-group", defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_vpn_gateway" "example" { + } +`, + + expected: []network.VpnGateway{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + SecurityGroup: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptVpnGateways(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/nifcloud.go b/internal/adapters/terraform/nifcloud/nifcloud.go new file mode 100644 index 000000000000..43da291809dc --- /dev/null +++ b/internal/adapters/terraform/nifcloud/nifcloud.go @@ -0,0 +1,23 @@ +package nifcloud + +import ( + "github.com/aquasecurity/trivy/internal/adapters/terraform/nifcloud/computing" + "github.com/aquasecurity/trivy/internal/adapters/terraform/nifcloud/dns" + "github.com/aquasecurity/trivy/internal/adapters/terraform/nifcloud/nas" + 
"github.com/aquasecurity/trivy/internal/adapters/terraform/nifcloud/network" + "github.com/aquasecurity/trivy/internal/adapters/terraform/nifcloud/rdb" + "github.com/aquasecurity/trivy/internal/adapters/terraform/nifcloud/sslcertificate" + "github.com/aquasecurity/trivy/pkg/providers/nifcloud" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) nifcloud.Nifcloud { + return nifcloud.Nifcloud{ + Computing: computing.Adapt(modules), + DNS: dns.Adapt(modules), + NAS: nas.Adapt(modules), + Network: network.Adapt(modules), + RDB: rdb.Adapt(modules), + SSLCertificate: sslcertificate.Adapt(modules), + } +} diff --git a/internal/adapters/terraform/nifcloud/rdb/adapt.go b/internal/adapters/terraform/nifcloud/rdb/adapt.go new file mode 100644 index 000000000000..608eebfb5cd2 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/rdb/adapt.go @@ -0,0 +1,13 @@ +package rdb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/rdb" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) rdb.RDB { + return rdb.RDB{ + DBSecurityGroups: adaptDBSecurityGroups(modules), + DBInstances: adaptDBInstances(modules), + } +} diff --git a/internal/adapters/terraform/nifcloud/rdb/adapt_test.go b/internal/adapters/terraform/nifcloud/rdb/adapt_test.go new file mode 100644 index 000000000000..ab15a2f10747 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/rdb/adapt_test.go @@ -0,0 +1,60 @@ +package rdb + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLines(t *testing.T) { + src := ` +resource "nifcloud_db_instance" "example" { + publicly_accessible = false + engine = "MySQL" + engine_version = "5.7.15" + backup_retention_period = 2 + network_id = "example-network" +} + +resource "nifcloud_db_security_group" "example" { + description = "memo" + + rule { + cidr_ip = "0.0.0.0/0" + } +} +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.DBInstances, 1) + require.Len(t, adapted.DBSecurityGroups, 1) + + dbInstance := adapted.DBInstances[0] + dbSecurityGroup := adapted.DBSecurityGroups[0] + + assert.Equal(t, 3, dbInstance.PublicAccess.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, dbInstance.PublicAccess.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 4, dbInstance.Engine.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 4, dbInstance.Engine.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 5, dbInstance.EngineVersion.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 5, dbInstance.EngineVersion.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 6, dbInstance.BackupRetentionPeriodDays.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 6, dbInstance.BackupRetentionPeriodDays.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 7, dbInstance.NetworkID.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 7, dbInstance.NetworkID.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 11, dbSecurityGroup.Description.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 11, dbSecurityGroup.Description.GetMetadata().Range().GetEndLine()) + + assert.Equal(t, 14, dbSecurityGroup.CIDRs[0].GetMetadata().Range().GetStartLine()) + assert.Equal(t, 14, 
dbSecurityGroup.CIDRs[0].GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/nifcloud/rdb/db_instance.go b/internal/adapters/terraform/nifcloud/rdb/db_instance.go new file mode 100644 index 000000000000..1d0a620f7f91 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/rdb/db_instance.go @@ -0,0 +1,26 @@ +package rdb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/rdb" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func adaptDBInstances(modules terraform.Modules) []rdb.DBInstance { + var dbInstances []rdb.DBInstance + + for _, resource := range modules.GetResourcesByType("nifcloud_db_instance") { + dbInstances = append(dbInstances, adaptDBInstance(resource)) + } + return dbInstances +} + +func adaptDBInstance(resource *terraform.Block) rdb.DBInstance { + return rdb.DBInstance{ + Metadata: resource.GetMetadata(), + BackupRetentionPeriodDays: resource.GetAttribute("backup_retention_period").AsIntValueOrDefault(0, resource), + Engine: resource.GetAttribute("engine").AsStringValueOrDefault("", resource), + EngineVersion: resource.GetAttribute("engine_version").AsStringValueOrDefault("", resource), + NetworkID: resource.GetAttribute("network_id").AsStringValueOrDefault("net-COMMON_PRIVATE", resource), + PublicAccess: resource.GetAttribute("publicly_accessible").AsBoolValueOrDefault(true, resource), + } +} diff --git a/internal/adapters/terraform/nifcloud/rdb/db_instance_test.go b/internal/adapters/terraform/nifcloud/rdb/db_instance_test.go new file mode 100644 index 000000000000..5e45a18ac811 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/rdb/db_instance_test.go @@ -0,0 +1,66 @@ +package rdb + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/rdb" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptDBInstances(t *testing.T) { + tests := []struct { + name string + terraform string + expected []rdb.DBInstance + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_db_instance" "example" { + backup_retention_period = 2 + engine = "MySQL" + engine_version = "5.7.15" + publicly_accessible = false + network_id = "example-network" + } +`, + expected: []rdb.DBInstance{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.Int(2, defsecTypes.NewTestMisconfigMetadata()), + Engine: defsecTypes.String("MySQL", defsecTypes.NewTestMisconfigMetadata()), + EngineVersion: defsecTypes.String("5.7.15", defsecTypes.NewTestMisconfigMetadata()), + NetworkID: defsecTypes.String("example-network", defsecTypes.NewTestMisconfigMetadata()), + PublicAccess: defsecTypes.Bool(false, defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_db_instance" "example" { + } +`, + + expected: []rdb.DBInstance{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + BackupRetentionPeriodDays: defsecTypes.Int(0, defsecTypes.NewTestMisconfigMetadata()), + Engine: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + EngineVersion: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + NetworkID: defsecTypes.String("net-COMMON_PRIVATE", defsecTypes.NewTestMisconfigMetadata()), + PublicAccess: defsecTypes.Bool(true, defsecTypes.NewTestMisconfigMetadata()), + }}, + }, + } + + for _, test := range tests { + 
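+		// Each case adapts the HCL snippet in isolation and compares
+		// the resulting rdb.DBInstance structs field by field.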
t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptDBInstances(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/rdb/db_security_group.go b/internal/adapters/terraform/nifcloud/rdb/db_security_group.go new file mode 100644 index 000000000000..a44b01b38b91 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/rdb/db_security_group.go @@ -0,0 +1,30 @@ +package rdb + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/rdb" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptDBSecurityGroups(modules terraform.Modules) []rdb.DBSecurityGroup { + var dbSecurityGroups []rdb.DBSecurityGroup + + for _, resource := range modules.GetResourcesByType("nifcloud_db_security_group") { + dbSecurityGroups = append(dbSecurityGroups, adaptDBSecurityGroup(resource)) + } + return dbSecurityGroups +} + +func adaptDBSecurityGroup(resource *terraform.Block) rdb.DBSecurityGroup { + var cidrs []defsecTypes.StringValue + + for _, rule := range resource.GetBlocks("rule") { + cidrs = append(cidrs, rule.GetAttribute("cidr_ip").AsStringValueOrDefault("", resource)) + } + + return rdb.DBSecurityGroup{ + Metadata: resource.GetMetadata(), + Description: resource.GetAttribute("description").AsStringValueOrDefault("", resource), + CIDRs: cidrs, + } +} diff --git a/internal/adapters/terraform/nifcloud/rdb/db_security_group_test.go b/internal/adapters/terraform/nifcloud/rdb/db_security_group_test.go new file mode 100644 index 000000000000..025962ea22a8 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/rdb/db_security_group_test.go @@ -0,0 +1,66 @@ +package rdb + +import ( + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/rdb" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_adaptDBSecurityGroups(t *testing.T) { + tests := []struct { + name string + terraform string + expected []rdb.DBSecurityGroup + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_db_security_group" "example" { + description = "memo" + + rule { + cidr_ip = "0.0.0.0/0" + } + } +`, + expected: []rdb.DBSecurityGroup{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("memo", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("0.0.0.0/0", defsecTypes.NewTestMisconfigMetadata()), + }, + }}, + }, + { + name: "defaults", + terraform: ` + resource "nifcloud_db_security_group" "example" { + rule { + } + } +`, + + expected: []rdb.DBSecurityGroup{{ + Metadata: defsecTypes.NewTestMisconfigMetadata(), + Description: defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + CIDRs: []defsecTypes.StringValue{ + defsecTypes.String("", defsecTypes.NewTestMisconfigMetadata()), + }, + }}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := tftestutil.CreateModulesFromSource(t, test.terraform, ".tf") + adapted := adaptDBSecurityGroups(modules) + testutil.AssertDefsecEqual(t, test.expected, adapted) + }) + } +} diff --git a/internal/adapters/terraform/nifcloud/sslcertificate/adapt.go b/internal/adapters/terraform/nifcloud/sslcertificate/adapt.go new file mode 100644 index 
000000000000..cb3c993c2067 --- /dev/null +++ b/internal/adapters/terraform/nifcloud/sslcertificate/adapt.go @@ -0,0 +1,12 @@ +package sslcertificate + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/sslcertificate" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func Adapt(modules terraform.Modules) sslcertificate.SSLCertificate { + return sslcertificate.SSLCertificate{ + ServerCertificates: adaptServerCertificates(modules), + } +} diff --git a/internal/adapters/terraform/nifcloud/sslcertificate/adapt_test.go b/internal/adapters/terraform/nifcloud/sslcertificate/adapt_test.go new file mode 100644 index 000000000000..9483467e47cc --- /dev/null +++ b/internal/adapters/terraform/nifcloud/sslcertificate/adapt_test.go @@ -0,0 +1,28 @@ +package sslcertificate + +import ( + "testing" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLines(t *testing.T) { + src := ` +resource "nifcloud_ssl_certificate" "example" { + certificate = "generated-certificate" +} +` + + modules := tftestutil.CreateModulesFromSource(t, src, ".tf") + adapted := Adapt(modules) + + require.Len(t, adapted.ServerCertificates, 1) + + serverCertificate := adapted.ServerCertificates[0] + + assert.Equal(t, 3, serverCertificate.Expiration.GetMetadata().Range().GetStartLine()) + assert.Equal(t, 3, serverCertificate.Expiration.GetMetadata().Range().GetEndLine()) +} diff --git a/internal/adapters/terraform/nifcloud/sslcertificate/server_certificate.go b/internal/adapters/terraform/nifcloud/sslcertificate/server_certificate.go new file mode 100644 index 000000000000..c03ffe8ca2ca --- /dev/null +++ b/internal/adapters/terraform/nifcloud/sslcertificate/server_certificate.go @@ -0,0 +1,41 @@ +package sslcertificate + +import ( + "crypto/x509" + "encoding/pem" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/sslcertificate" + "github.com/aquasecurity/trivy/pkg/terraform" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +func adaptServerCertificates(modules terraform.Modules) []sslcertificate.ServerCertificate { + var serverCertificates []sslcertificate.ServerCertificate + + for _, resource := range modules.GetResourcesByType("nifcloud_ssl_certificate") { + serverCertificates = append(serverCertificates, adaptServerCertificate(resource)) + } + return serverCertificates +} + +func adaptServerCertificate(resource *terraform.Block) sslcertificate.ServerCertificate { + certificateAttr := resource.GetAttribute("certificate") + expiryDateVal := defsecTypes.TimeUnresolvable(resource.GetMetadata()) + + if certificateAttr.IsNotNil() { + expiryDateVal = defsecTypes.TimeUnresolvable(certificateAttr.GetMetadata()) + if certificateAttr.IsString() { + certificateString := certificateAttr.Value().AsString() + if block, _ := pem.Decode([]byte(certificateString)); block != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + expiryDateVal = defsecTypes.Time(cert.NotAfter, certificateAttr.GetMetadata()) + } + } + } + } + + return sslcertificate.ServerCertificate{ + Metadata: resource.GetMetadata(), + Expiration: expiryDateVal, + } +} diff --git a/internal/adapters/terraform/nifcloud/sslcertificate/server_certificate_test.go b/internal/adapters/terraform/nifcloud/sslcertificate/server_certificate_test.go new file mode 100644 index 000000000000..132f466f814c --- /dev/null +++ 
b/internal/adapters/terraform/nifcloud/sslcertificate/server_certificate_test.go @@ -0,0 +1,72 @@ +package sslcertificate + +import ( + "testing" + "time" + + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/sslcertificate" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/terraform/tftestutil" + + "github.com/aquasecurity/trivy/test/testutil" +) + +const certificate = ` +-----BEGIN CERTIFICATE----- +MIIB0zCCAX2gAwIBAgIJAI/M7BYjwB+uMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQwHhcNMTIwOTEyMjE1MjAyWhcNMTUwOTEyMjE1MjAyWjBF +MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 +ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANLJ +hPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wok/4xIA+ui35/MmNa +rtNuC+BdZ1tMuVCPFZcCAwEAAaNQME4wHQYDVR0OBBYEFJvKs8RfJaXTH08W+SGv +zQyKn0H8MB8GA1UdIwQYMBaAFJvKs8RfJaXTH08W+SGvzQyKn0H8MAwGA1UdEwQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADQQBJlffJHybjDGxRMqaRmDhX0+6v02TUKZsW +r5QuVbpQhH6u+0UgcW0jp9QwpxoPTLTWGXEWBBBurxFwiCBhkQ+V +-----END CERTIFICATE----- +` + +func Test_adaptServerCertificates(t *testing.T) { + tests := []struct { + name string + terraform string + expected []sslcertificate.ServerCertificate + }{ + { + name: "configured", + terraform: ` + resource "nifcloud_ssl_certificate" "example" { + certificate = < 0 { + for _, c := range csRule.Checks { + if rule.GetRule().AVDID == c.ID { + specRules = append(specRules, rule) + } + } + } + } + } + + return specRules +} + +func (r *registry) Reset() { + r.Lock() + defer r.Unlock() + r.frameworks = make(map[framework.Framework][]ruleTypes.RegisteredRule) +} + +func GetFrameworkRules(fw ...framework.Framework) []ruleTypes.RegisteredRule { + return coreRegistry.getFrameworkRules(fw...) 
+} + +func GetSpecRules(spec string) []ruleTypes.RegisteredRule { + if len(spec) > 0 { + return coreRegistry.getSpecRules(spec) + } + + return GetFrameworkRules() +} diff --git a/internal/rules/register_test.go b/internal/rules/register_test.go new file mode 100644 index 000000000000..b44a92ae983f --- /dev/null +++ b/internal/rules/register_test.go @@ -0,0 +1,138 @@ +package rules + +import ( + "fmt" + "testing" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Reset(t *testing.T) { + rule := scan.Rule{} + _ = Register(rule) + assert.Equal(t, 1, len(GetFrameworkRules())) + Reset() + assert.Equal(t, 0, len(GetFrameworkRules())) +} + +func Test_Registration(t *testing.T) { + var tests = []struct { + name string + registeredFrameworks map[framework.Framework][]string + inputFrameworks []framework.Framework + expected bool + }{ + { + name: "rule without framework specified should be returned when no frameworks are requested", + expected: true, + }, + { + name: "rule without framework specified should not be returned when a specific framework is requested", + inputFrameworks: []framework.Framework{framework.CIS_AWS_1_2}, + expected: false, + }, + { + name: "rule without framework specified should be returned when the default framework is requested", + inputFrameworks: []framework.Framework{framework.Default}, + expected: true, + }, + { + name: "rule with default framework specified should be returned when the default framework is requested", + registeredFrameworks: map[framework.Framework][]string{framework.Default: {"1.1"}}, + inputFrameworks: []framework.Framework{framework.Default}, + expected: true, + }, + { + name: "rule with default framework specified should not be returned when a specific framework is requested", + registeredFrameworks: map[framework.Framework][]string{framework.Default: {"1.1"}}, + inputFrameworks: []framework.Framework{framework.CIS_AWS_1_2}, + expected: false, + }, + { + name: "rule with specific framework specified should not be returned when a default framework is requested", + registeredFrameworks: map[framework.Framework][]string{framework.CIS_AWS_1_2: {"1.1"}}, + inputFrameworks: []framework.Framework{framework.Default}, + expected: false, + }, + { + name: "rule with specific framework specified should be returned when the specific framework is requested", + registeredFrameworks: map[framework.Framework][]string{framework.CIS_AWS_1_2: {"1.1"}}, + inputFrameworks: []framework.Framework{framework.CIS_AWS_1_2}, + expected: true, + }, + { + name: "rule with multiple frameworks specified should be returned when the specific framework is requested", + registeredFrameworks: map[framework.Framework][]string{framework.CIS_AWS_1_2: {"1.1"}, "blah": {"1.2"}}, + inputFrameworks: []framework.Framework{framework.CIS_AWS_1_2}, + expected: true, + }, + { + name: "rule with multiple frameworks specified should be returned only once when multiple matching frameworks are requested", + registeredFrameworks: map[framework.Framework][]string{framework.CIS_AWS_1_2: {"1.1"}, "blah": {"1.2"}, "something": {"1.3"}}, + inputFrameworks: []framework.Framework{framework.CIS_AWS_1_2, "blah", "other"}, + expected: true, + }, + } + + for i, test := range tests { + t.Run(test.name, func(t *testing.T) { + Reset() + rule := scan.Rule{ + AVDID: fmt.Sprintf("%d-%s", i, test.name), + Frameworks: test.registeredFrameworks, + } + _ = 
Register(rule) + var found bool + for _, matchedRule := range GetFrameworkRules(test.inputFrameworks...) { + if matchedRule.GetRule().AVDID == rule.AVDID { + assert.False(t, found, "rule should not be returned more than once") + found = true + } + } + assert.Equal(t, test.expected, found, "rule should be returned if it matches any of the input frameworks") + }) + } +} + +func Test_Deregistration(t *testing.T) { + Reset() + registrationA := Register(scan.Rule{ + AVDID: "A", + }) + registrationB := Register(scan.Rule{ + AVDID: "B", + }) + assert.Equal(t, 2, len(GetFrameworkRules())) + Deregister(registrationA) + actual := GetFrameworkRules() + require.Equal(t, 1, len(actual)) + assert.Equal(t, "B", actual[0].GetRule().AVDID) + Deregister(registrationB) + assert.Equal(t, 0, len(GetFrameworkRules())) +} + +func Test_DeregistrationMultipleFrameworks(t *testing.T) { + Reset() + registrationA := Register(scan.Rule{ + AVDID: "A", + }) + registrationB := Register(scan.Rule{ + AVDID: "B", + Frameworks: map[framework.Framework][]string{ + "a": nil, + "b": nil, + "c": nil, + framework.Default: nil, + }, + }) + assert.Equal(t, 2, len(GetFrameworkRules())) + Deregister(registrationA) + actual := GetFrameworkRules() + require.Equal(t, 1, len(actual)) + assert.Equal(t, "B", actual[0].GetRule().AVDID) + Deregister(registrationB) + assert.Equal(t, 0, len(GetFrameworkRules())) +} diff --git a/pkg/cloud/aws/cache/cache.go b/pkg/cloud/aws/cache/cache.go index ce3f9776f02a..9e7acff3a4b6 100644 --- a/pkg/cloud/aws/cache/cache.go +++ b/pkg/cloud/aws/cache/cache.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/aquasecurity/defsec/pkg/state" + "github.com/aquasecurity/trivy/pkg/state" ) type Cache struct { diff --git a/pkg/cloud/aws/commands/run_test.go b/pkg/cloud/aws/commands/run_test.go index 3d9d01f17292..e0345a57549c 100644 --- a/pkg/cloud/aws/commands/run_test.go +++ b/pkg/cloud/aws/commands/run_test.go @@ -12,10 +12,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - defsecTypes "github.com/aquasecurity/defsec/pkg/types" dbTypes "github.com/aquasecurity/trivy-db/pkg/types" "github.com/aquasecurity/trivy/pkg/compliance/spec" "github.com/aquasecurity/trivy/pkg/flag" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" ) const expectedS3ScanResult = `{ diff --git a/pkg/cloud/aws/scanner/scanner.go b/pkg/cloud/aws/scanner/scanner.go index 9c0514691c7c..a27a1f20524f 100644 --- a/pkg/cloud/aws/scanner/scanner.go +++ b/pkg/cloud/aws/scanner/scanner.go @@ -7,16 +7,16 @@ import ( "golang.org/x/xerrors" - "github.com/aquasecurity/defsec/pkg/framework" - "github.com/aquasecurity/defsec/pkg/scan" - "github.com/aquasecurity/defsec/pkg/scanners/options" - "github.com/aquasecurity/defsec/pkg/state" aws "github.com/aquasecurity/trivy-aws/pkg/scanner" "github.com/aquasecurity/trivy/pkg/cloud/aws/cache" "github.com/aquasecurity/trivy/pkg/commands/operation" "github.com/aquasecurity/trivy/pkg/flag" + "github.com/aquasecurity/trivy/pkg/framework" "github.com/aquasecurity/trivy/pkg/log" "github.com/aquasecurity/trivy/pkg/misconf" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/state" ) type AWSScanner struct { diff --git a/pkg/cloud/report/convert.go b/pkg/cloud/report/convert.go index 
d1e41bcb2fe9..261f2a3fb86c 100644 --- a/pkg/cloud/report/convert.go +++ b/pkg/cloud/report/convert.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/arn" - "github.com/aquasecurity/defsec/pkg/scan" ftypes "github.com/aquasecurity/trivy/pkg/fanal/types" + "github.com/aquasecurity/trivy/pkg/scan" "github.com/aquasecurity/trivy/pkg/types" ) diff --git a/pkg/cloud/report/convert_test.go b/pkg/cloud/report/convert_test.go index 6b2025209394..d237036c2f6a 100644 --- a/pkg/cloud/report/convert_test.go +++ b/pkg/cloud/report/convert_test.go @@ -7,10 +7,10 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/stretchr/testify/assert" - "github.com/aquasecurity/defsec/pkg/scan" - defsecTypes "github.com/aquasecurity/defsec/pkg/types" fanaltypes "github.com/aquasecurity/trivy/pkg/fanal/types" + "github.com/aquasecurity/trivy/pkg/scan" "github.com/aquasecurity/trivy/pkg/types" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" ) func Test_ResultConversion(t *testing.T) { diff --git a/pkg/cloud/report/report.go b/pkg/cloud/report/report.go index c60e00b45360..bc07f50f1a93 100644 --- a/pkg/cloud/report/report.go +++ b/pkg/cloud/report/report.go @@ -9,7 +9,6 @@ import ( "golang.org/x/xerrors" - "github.com/aquasecurity/defsec/pkg/scan" "github.com/aquasecurity/tml" "github.com/aquasecurity/trivy/pkg/clock" cr "github.com/aquasecurity/trivy/pkg/compliance/report" @@ -17,6 +16,7 @@ import ( "github.com/aquasecurity/trivy/pkg/flag" pkgReport "github.com/aquasecurity/trivy/pkg/report" "github.com/aquasecurity/trivy/pkg/result" + "github.com/aquasecurity/trivy/pkg/scan" "github.com/aquasecurity/trivy/pkg/types" ) diff --git a/pkg/cloud/report/service_test.go b/pkg/cloud/report/service_test.go index 55dd6cf5f77d..63e93627e78a 100644 --- a/pkg/cloud/report/service_test.go +++ b/pkg/cloud/report/service_test.go @@ -11,10 +11,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/aquasecurity/defsec/pkg/scan" - defsecTypes "github.com/aquasecurity/defsec/pkg/types" "github.com/aquasecurity/trivy-db/pkg/types" "github.com/aquasecurity/trivy/pkg/flag" + "github.com/aquasecurity/trivy/pkg/scan" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" ) func Test_ServiceReport(t *testing.T) { diff --git a/pkg/compliance/report/report.go b/pkg/compliance/report/report.go index 61a4973b2b0a..31dc15797fff 100644 --- a/pkg/compliance/report/report.go +++ b/pkg/compliance/report/report.go @@ -6,10 +6,10 @@ import ( "golang.org/x/xerrors" - defsecTypes "github.com/aquasecurity/defsec/pkg/types" dbTypes "github.com/aquasecurity/trivy-db/pkg/types" "github.com/aquasecurity/trivy/pkg/compliance/spec" "github.com/aquasecurity/trivy/pkg/types" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" ) const ( diff --git a/pkg/compliance/report/report_test.go b/pkg/compliance/report/report_test.go index b6a1dcc6bcc5..9f6610dfb3d3 100644 --- a/pkg/compliance/report/report_test.go +++ b/pkg/compliance/report/report_test.go @@ -6,13 +6,13 @@ import ( "github.com/stretchr/testify/assert" - defsecTypes "github.com/aquasecurity/defsec/pkg/types" dbTypes "github.com/aquasecurity/trivy-db/pkg/types" "github.com/aquasecurity/trivy-db/pkg/vulnsrc/vulnerability" 
"github.com/aquasecurity/trivy/pkg/compliance/report" "github.com/aquasecurity/trivy/pkg/compliance/spec" ftypes "github.com/aquasecurity/trivy/pkg/fanal/types" "github.com/aquasecurity/trivy/pkg/types" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" ) func TestBuildComplianceReport(t *testing.T) { diff --git a/pkg/compliance/spec/compliance.go b/pkg/compliance/spec/compliance.go index 73b7dfe635c1..1ac57b454764 100644 --- a/pkg/compliance/spec/compliance.go +++ b/pkg/compliance/spec/compliance.go @@ -9,9 +9,9 @@ import ( "golang.org/x/xerrors" "gopkg.in/yaml.v3" - defsecTypes "github.com/aquasecurity/defsec/pkg/types" sp "github.com/aquasecurity/trivy-policies/pkg/spec" "github.com/aquasecurity/trivy/pkg/types" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" ) type Severity string diff --git a/pkg/compliance/spec/compliance_test.go b/pkg/compliance/spec/compliance_test.go index f34722525e1f..bb893dcd431c 100644 --- a/pkg/compliance/spec/compliance_test.go +++ b/pkg/compliance/spec/compliance_test.go @@ -6,9 +6,9 @@ import ( "github.com/stretchr/testify/assert" - defsecTypes "github.com/aquasecurity/defsec/pkg/types" "github.com/aquasecurity/trivy/pkg/compliance/spec" "github.com/aquasecurity/trivy/pkg/types" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" ) func TestComplianceSpec_Scanners(t *testing.T) { diff --git a/pkg/debug/cgo_disabled.go b/pkg/debug/cgo_disabled.go new file mode 100644 index 000000000000..e994a4dc79fa --- /dev/null +++ b/pkg/debug/cgo_disabled.go @@ -0,0 +1,5 @@ +//go:build !cgo + +package debug + +const cgoEnabled = false diff --git a/pkg/debug/cgo_enabled.go b/pkg/debug/cgo_enabled.go new file mode 100644 index 000000000000..afa840a615b4 --- /dev/null +++ b/pkg/debug/cgo_enabled.go @@ -0,0 +1,5 @@ +//go:build cgo + +package debug + +const cgoEnabled = true diff --git a/pkg/debug/debug.go b/pkg/debug/debug.go new file mode 100644 index 000000000000..bd96be1656b5 --- /dev/null +++ b/pkg/debug/debug.go @@ -0,0 +1,91 @@ +package debug + +import ( + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +const timeFormat = "04:05.000000000" + +type Logger struct { + writer io.Writer + prefix string +} + +func New(w io.Writer, parts ...string) Logger { + return Logger{ + writer: w, + prefix: strings.Join(parts, "."), + } +} + +func (l *Logger) Extend(parts ...string) Logger { + return Logger{ + writer: l.writer, + prefix: strings.Join(append([]string{l.prefix}, parts...), "."), + } +} + +func (l *Logger) Log(format string, args ...interface{}) { + if l.writer == nil { + return + } + message := fmt.Sprintf(format, args...) 
+ line := fmt.Sprintf("%s %-32s %s\n", time.Now().Format(timeFormat), l.prefix, message) + _, _ = l.writer.Write([]byte(line)) +} + +func LogSystemInfo(w io.Writer, appVersion string) { + if w == nil { + return + } + sys := New(w, "system", "info") + var appName string + if path, err := os.Executable(); err != nil { + if len(os.Args) > 0 { + appName = os.Args[0] + } + } else { + appName = filepath.Base(path) + } + + wd, _ := os.Getwd() + hostname, _ := os.Hostname() + + var inDocker bool + if _, err := os.Stat("/.dockerenv"); err == nil || !os.IsNotExist(err) { + inDocker = true + } + + var kernelInfo string + if data, err := os.ReadFile("/proc/version"); err == nil { + kernelInfo = strings.TrimSpace(string(data)) + } + + sys.Log("APP %s", appName) + sys.Log("VERSION %s", appVersion) + sys.Log("OS %s", runtime.GOOS) + sys.Log("ARCH %s", runtime.GOARCH) + sys.Log("KERNEL %s", kernelInfo) + sys.Log("TERM %s", os.Getenv("TERM")) + sys.Log("SHELL %s", os.Getenv("SHELL")) + sys.Log("GOVERSION %s", runtime.Version()) + sys.Log("GOROOT %s", runtime.GOROOT()) + sys.Log("CGO %t", cgoEnabled) + sys.Log("CPUCOUNT %d", runtime.NumCPU()) + sys.Log("MAXPROCS %d", runtime.GOMAXPROCS(0)) + sys.Log("WORKDIR %s", wd) + sys.Log("UID %d", os.Getuid()) + sys.Log("EUID %d", os.Geteuid()) + sys.Log("DOCKER %t", inDocker) + sys.Log("CI %t", os.Getenv("CI") != "") + sys.Log("HOSTNAME %s", hostname) + sys.Log("TEMP %s", os.TempDir()) + sys.Log("PATHSEP %c", filepath.Separator) + sys.Log("CMD %s", strings.Join(os.Args, " ")) +} diff --git a/pkg/detection/detect.go b/pkg/detection/detect.go new file mode 100644 index 000000000000..5eaa431240df --- /dev/null +++ b/pkg/detection/detect.go @@ -0,0 +1,296 @@ +package detection + +import ( + "bytes" + "encoding/json" + "io" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" + + "github.com/aquasecurity/trivy/pkg/scanners/azure/arm/parser/armjson" + "github.com/aquasecurity/trivy/pkg/types" +) + +type FileType string + +const ( + FileTypeCloudFormation FileType = "cloudformation" + FileTypeTerraform FileType = "terraform" + FileTypeTerraformPlan FileType = "terraformplan" + FileTypeDockerfile FileType = "dockerfile" + FileTypeKubernetes FileType = "kubernetes" + FileTypeRbac FileType = "rbac" + FileTypeYAML FileType = "yaml" + FileTypeTOML FileType = "toml" + FileTypeJSON FileType = "json" + FileTypeHelm FileType = "helm" + FileTypeAzureARM FileType = "azure-arm" +) + +var matchers = map[FileType]func(name string, r io.ReadSeeker) bool{} + +// nolint +func init() { + + matchers[FileTypeJSON] = func(name string, r io.ReadSeeker) bool { + ext := filepath.Ext(filepath.Base(name)) + if !strings.EqualFold(ext, ".json") { + return false + } + if resetReader(r) == nil { + return true + } + + var content interface{} + return json.NewDecoder(r).Decode(&content) == nil + } + + matchers[FileTypeYAML] = func(name string, r io.ReadSeeker) bool { + ext := filepath.Ext(filepath.Base(name)) + if !strings.EqualFold(ext, ".yaml") && !strings.EqualFold(ext, ".yml") { + return false + } + if resetReader(r) == nil { + return true + } + + var content interface{} + return yaml.NewDecoder(r).Decode(&content) == nil + } + + matchers[FileTypeHelm] = func(name string, r io.ReadSeeker) bool { + if IsHelmChartArchive(name, r) { + return true + } + + return strings.HasSuffix(name, "hart.yaml") + } + + matchers[FileTypeTOML] = func(name string, r io.ReadSeeker) bool { + ext := filepath.Ext(filepath.Base(name)) + return strings.EqualFold(ext, ".toml") + } + + matchers[FileTypeTerraform] 
= func(name string, _ io.ReadSeeker) bool { + return IsTerraformFile(name) + } + + matchers[FileTypeTerraformPlan] = func(name string, r io.ReadSeeker) bool { + if IsType(name, r, FileTypeJSON) { + if resetReader(r) == nil { + return false + } + + contents := make(map[string]interface{}) + err := json.NewDecoder(r).Decode(&contents) + if err == nil { + if _, ok := contents["terraform_version"]; ok { + _, stillOk := contents["format_version"] + return stillOk + } + } + } + return false + } + + matchers[FileTypeCloudFormation] = func(name string, r io.ReadSeeker) bool { + sniff := struct { + Resources map[string]map[string]interface{} `json:"Resources" yaml:"Resources"` + }{} + + switch { + case IsType(name, r, FileTypeYAML): + if resetReader(r) == nil { + return false + } + if err := yaml.NewDecoder(r).Decode(&sniff); err != nil { + return false + } + case IsType(name, r, FileTypeJSON): + if resetReader(r) == nil { + return false + } + if err := json.NewDecoder(r).Decode(&sniff); err != nil { + return false + } + default: + return false + } + + return sniff.Resources != nil + } + + matchers[FileTypeAzureARM] = func(name string, r io.ReadSeeker) bool { + + if resetReader(r) == nil { + return false + } + + sniff := struct { + ContentType string `json:"contentType"` + Parameters map[string]interface{} `json:"parameters"` + Resources []interface{} `json:"resources"` + }{} + metadata := types.NewUnmanagedMisconfigMetadata() + if err := armjson.UnmarshalFromReader(r, &sniff, &metadata); err != nil { + return false + } + + return (sniff.Parameters != nil && len(sniff.Parameters) > 0) || + (sniff.Resources != nil && len(sniff.Resources) > 0) + } + + matchers[FileTypeDockerfile] = func(name string, _ io.ReadSeeker) bool { + requiredFiles := []string{"Dockerfile", "Containerfile"} + for _, requiredFile := range requiredFiles { + base := filepath.Base(name) + ext := filepath.Ext(base) + if strings.TrimSuffix(base, ext) == requiredFile { + return true + } + if strings.EqualFold(ext, "."+requiredFile) { + return true + } + } + return false + } + + matchers[FileTypeHelm] = func(name string, r io.ReadSeeker) bool { + helmFiles := []string{"Chart.yaml", ".helmignore", "values.schema.json", "NOTES.txt"} + for _, expected := range helmFiles { + if strings.HasSuffix(name, expected) { + return true + } + } + helmFileExtensions := []string{".yaml", ".tpl"} + ext := filepath.Ext(filepath.Base(name)) + for _, expected := range helmFileExtensions { + if strings.EqualFold(ext, expected) { + return true + } + } + return IsHelmChartArchive(name, r) + } + + matchers[FileTypeKubernetes] = func(name string, r io.ReadSeeker) bool { + + if !IsType(name, r, FileTypeYAML) && !IsType(name, r, FileTypeJSON) { + return false + } + if resetReader(r) == nil { + return false + } + + expectedProperties := []string{"apiVersion", "kind", "metadata"} + + if IsType(name, r, FileTypeJSON) { + if resetReader(r) == nil { + return false + } + + var result map[string]interface{} + if err := json.NewDecoder(r).Decode(&result); err != nil { + return false + } + + for _, expected := range expectedProperties { + if _, ok := result[expected]; !ok { + return false + } + } + return true + } + + // at this point, we need to inspect bytes + var buf bytes.Buffer + if _, err := io.Copy(&buf, r); err != nil { + return false + } + data := buf.Bytes() + + marker := "\n---\n" + altMarker := "\r\n---\r\n" + if bytes.Contains(data, []byte(altMarker)) { + marker = altMarker + } + + for _, partial := range strings.Split(string(data), marker) { + var result 
map[string]interface{} + if err := yaml.Unmarshal([]byte(partial), &result); err != nil { + continue + } + match := true + for _, expected := range expectedProperties { + if _, ok := result[expected]; !ok { + match = false + break + } + } + if match { + return true + } + } + + return false + } +} + +func IsTerraformFile(path string) bool { + for _, ext := range []string{".tf", ".tf.json", ".tfvars"} { + if strings.HasSuffix(path, ext) { + return true + } + } + + return false +} + +func IsType(name string, r io.ReadSeeker, t FileType) bool { + r = ensureSeeker(r) + f, ok := matchers[t] + if !ok { + return false + } + return f(name, r) +} + +func GetTypes(name string, r io.ReadSeeker) []FileType { + var matched []FileType + r = ensureSeeker(r) + for check, f := range matchers { + if f(name, r) { + matched = append(matched, check) + } + resetReader(r) + } + return matched +} + +func ensureSeeker(r io.Reader) io.ReadSeeker { + if r == nil { + return nil + } + if seeker, ok := r.(io.ReadSeeker); ok { + return seeker + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, r); err == nil { + return bytes.NewReader(buf.Bytes()) + } + + return nil +} + +func resetReader(r io.Reader) io.ReadSeeker { + if r == nil { + return nil + } + if seeker, ok := r.(io.ReadSeeker); ok { + _, _ = seeker.Seek(0, 0) + return seeker + } + return ensureSeeker(r) +} diff --git a/pkg/detection/detect_test.go b/pkg/detection/detect_test.go new file mode 100644 index 000000000000..d1700a16af8e --- /dev/null +++ b/pkg/detection/detect_test.go @@ -0,0 +1,410 @@ +package detection + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Detection(t *testing.T) { + tests := []struct { + name string + path string + r io.ReadSeeker + expected []FileType + }{ + { + name: "text file, no reader", + path: "something.txt", + expected: nil, + }, + { + name: "text file, with reader", + path: "something.txt", + r: strings.NewReader("some file content"), + expected: nil, + }, + { + name: "terraform, no reader", + path: "main.tf", + expected: []FileType{ + FileTypeTerraform, + }, + }, + { + name: "terraform, with reader", + path: "main.tf", + r: strings.NewReader("some file content"), + expected: []FileType{ + FileTypeTerraform, + }, + }, + { + name: "terraform json, no reader", + path: "main.tf.json", + expected: []FileType{ + FileTypeTerraform, + FileTypeJSON, + }, + }, + { + name: "terraform json, with reader", + path: "main.tf.json", + r: strings.NewReader(` +{ + "variable": { + "example": { + "default": "hello" + } + } +} +`), + expected: []FileType{ + FileTypeTerraform, + FileTypeJSON, + }, + }, + { + name: "terraform vars, no reader", + path: "main.tfvars", + expected: []FileType{ + FileTypeTerraform, + }, + }, + { + name: "terraform vars, with reader", + path: "main.tfvars", + r: strings.NewReader("some_var = \"some value\""), + expected: []FileType{ + FileTypeTerraform, + }, + }, + { + name: "cloudformation, no reader", + path: "main.yaml", + expected: []FileType{ + FileTypeYAML, + FileTypeHelm, + }, + }, + { + name: "terraform plan, with reader", + path: "plan.json", + r: strings.NewReader(`{ + "format_version": "0.2", + "terraform_version": "1.0.3", + "variables": { + "bucket_name": { + "value": "tfsec-plan-testing" + } + }, + "planned_values": {}, + "resource_changes": [], + "prior_state": {}, + "configuration": {} + }`), + expected: []FileType{ + FileTypeTerraformPlan, + FileTypeJSON, + }, + }, + { + name: "cloudformation, with reader", + path: 
"main.yaml", + r: strings.NewReader(`--- +AWSTemplateFormatVersion: 2010-09-09 + +Description: CodePipeline for continuous integration and continuous deployment + +Parameters: + RepositoryName: + Type: String + Description: Name of the CodeCommit repository + BuildDockerImage: + Type: String + Default: aws/codebuild/ubuntu-base:14.04 + Description: Docker image to use for the build phase + DeployDockerImage: + Type: String + Default: aws/codebuild/ubuntu-base:14.04 + Description: Docker image to use for the deployment phase + +Resources: + PipelineS3Bucket: + Type: AWS::S3::Bucket +`), + expected: []FileType{ + FileTypeCloudFormation, + FileTypeYAML, + FileTypeHelm, + }, + }, + { + name: "JSON with Resources, not cloudformation", + path: "whatever.json", + r: strings.NewReader(`{ + "Resources": ["something"] +}`), + expected: []FileType{ + FileTypeJSON, + }, + }, + { + name: "Dockerfile, no reader", + path: "Dockerfile", + r: nil, + expected: []FileType{ + FileTypeDockerfile, + }, + }, + { + name: "Containerfile, no reader", + path: "Containerfile", + r: nil, + expected: []FileType{ + FileTypeDockerfile, + }, + }, + { + name: "Dockerfile, reader", + path: "Dockerfile", + r: strings.NewReader("FROM ubuntu\n"), + expected: []FileType{ + FileTypeDockerfile, + }, + }, + { + name: "Dockerfile extension", + path: "lol.Dockerfile", + r: nil, + expected: []FileType{ + FileTypeDockerfile, + }, + }, + { + name: "kubernetes, no reader", + path: "k8s.yml", + r: nil, + expected: []FileType{ + FileTypeYAML, + }, + }, + { + name: "kubernetes, reader", + path: "k8s.yml", + r: strings.NewReader(`apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80`), + expected: []FileType{ + FileTypeKubernetes, + FileTypeYAML, + }, + }, + { + name: "kubernetes, reader, JSON", + path: "k8s.json", + r: strings.NewReader(`{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deployment", + "labels": { + "app": "nginx" + } + }, + "spec": { + "replicas": 3, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "name": "nginx", + "image": "nginx:1.14.2", + "ports": [ + { + "containerPort": 80 + } + ] + } + ] + } + } + } +}`), + expected: []FileType{ + FileTypeKubernetes, + FileTypeJSON, + }, + }, + { + name: "YAML, no reader", + path: "file.yaml", + r: nil, + expected: []FileType{ + FileTypeYAML, + FileTypeHelm, + }, + }, + { + name: "YML, no reader", + path: "file.yml", + r: nil, + expected: []FileType{ + FileTypeYAML, + }, + }, + { + name: "YML uppercase", + path: "file.YML", + r: nil, + expected: []FileType{ + FileTypeYAML, + }, + }, + { + name: "TOML, no reader", + path: "file.toml", + r: nil, + expected: []FileType{ + FileTypeTOML, + }, + }, + { + name: "JSON, no reader", + path: "file.json", + r: nil, + expected: []FileType{ + FileTypeJSON, + }, + }, + { + name: "kubernetes, configmap", + path: "k8s.yml", + r: strings.NewReader(`apiVersion: v1 +kind: ConfigMap +metadata: + name: test + namespace: default +data: + AWS_ACCESS_KEY_ID: "XXX" + AWS_SECRET_ACCESS_KEY: "XXX"`), + expected: []FileType{ + FileTypeKubernetes, + FileTypeYAML, + }, + }, + { + name: "kubernetes, clusterRole", + path: "k8s.yml", + r: strings.NewReader(`apiVersion: 
rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+  name: view
+rules:
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  - ingresses/status
+  - networkpolicies
+  verbs:
+  - get
+  - list
+  - watch`),
+			expected: []FileType{
+				FileTypeKubernetes,
+				FileTypeYAML,
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			t.Run("GetTypes", func(t *testing.T) {
+				actualDetections := GetTypes(test.path, test.r)
+				assert.Equal(t, len(test.expected), len(actualDetections))
+				for _, expected := range test.expected {
+					resetReader(test.r)
+					var found bool
+					for _, actual := range actualDetections {
+						if actual == expected {
+							found = true
+							break
+						}
+					}
+					assert.True(t, found, "%s should be detected", expected)
+				}
+			})
+			for _, expected := range test.expected {
+				resetReader(test.r)
+				t.Run(fmt.Sprintf("IsType_%s", expected), func(t *testing.T) {
+					assert.True(t, IsType(test.path, test.r, expected))
+				})
+			}
+			t.Run("IsType_invalid", func(t *testing.T) {
+				resetReader(test.r)
+				assert.False(t, IsType(test.path, test.r, "invalid"))
+			})
+		})
+	}
+}
+
+func BenchmarkIsType_SmallFile(b *testing.B) {
+	data, err := os.ReadFile(fmt.Sprintf("./testdata/%s", "small.file"))
+	assert.Nil(b, err)
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_ = IsType(fmt.Sprintf("./testdata/%s", "small.file"), bytes.NewReader(data), FileTypeAzureARM)
+	}
+}
+
+func BenchmarkIsType_BigFile(b *testing.B) {
+	data, err := os.ReadFile(fmt.Sprintf("./testdata/%s", "big.file"))
+	assert.Nil(b, err)
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_ = IsType(fmt.Sprintf("./testdata/%s", "big.file"), bytes.NewReader(data), FileTypeAzureARM)
+	}
+}
diff --git a/pkg/detection/peek.go b/pkg/detection/peek.go
new file mode 100644
index 000000000000..0e76115d9bd8
--- /dev/null
+++ b/pkg/detection/peek.go
@@ -0,0 +1,53 @@
+package detection
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"errors"
+	"io"
+	"strings"
+)
+
+func IsHelmChartArchive(path string, file io.Reader) bool {
+
+	if !IsArchive(path) {
+		return false
+	}
+
+	var err error
+	var fr = file
+
+	if IsZip(path) {
+		if fr, err = gzip.NewReader(file); err != nil {
+			return false
+		}
+	}
+	tr := tar.NewReader(fr)
+
+	if tr == nil {
+		return false
+	}
+
+	for {
+		header, err := tr.Next()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return false
+		}
+
+		if header.Typeflag == tar.TypeReg && strings.HasSuffix(header.Name, "Chart.yaml") {
+			return true
+		}
+	}
+	return false
+}
+
+func IsArchive(path string) bool {
+	return strings.HasSuffix(path, ".tar") || IsZip(path)
+}
+
+func IsZip(path string) bool {
+	return strings.HasSuffix(path, ".tgz") || strings.HasSuffix(path, ".tar.gz")
+}
diff --git a/pkg/detection/testdata/big.file b/pkg/detection/testdata/big.file
new file mode 100644
index 000000000000..e7f3c2d40ecc
Binary files /dev/null and b/pkg/detection/testdata/big.file differ
diff --git a/pkg/detection/testdata/small.file b/pkg/detection/testdata/small.file
new file mode 100644
index 000000000000..d8ae428a4800
--- /dev/null
+++ b/pkg/detection/testdata/small.file
@@ -0,0 +1,3 @@
+{
+    "content": "foo bar baz"
+} 
\ No newline at end of file
diff --git a/pkg/fanal/secret/builtin-rules.go b/pkg/fanal/secret/builtin-rules.go
index 47eaa716f34d..b61ff863da6b 100644
--- 
a/pkg/fanal/secret/builtin-rules.go
+++ b/pkg/fanal/secret/builtin-rules.go
@@ -5,8 +5,8 @@ import (
 	"github.com/samber/lo"
 
-	defsecRules "github.com/aquasecurity/defsec/pkg/rules"
 	"github.com/aquasecurity/trivy/pkg/fanal/types"
+	defsecRules "github.com/aquasecurity/trivy/pkg/trules"
 )
 
 var (
diff --git a/pkg/fanal/secret/scanner.go b/pkg/fanal/secret/scanner.go
index c773b9707ae3..e32291fea659 100644
--- a/pkg/fanal/secret/scanner.go
+++ b/pkg/fanal/secret/scanner.go
@@ -25,18 +25,18 @@ type Scanner struct {
 }
 
 type Config struct {
 	// Enable only specified built-in rules. If only one ID is specified, all other rules are disabled.
 	// All the built-in rules are enabled if this field is not specified. It doesn't affect custom rules.
 	EnableBuiltinRuleIDs []string `yaml:"enable-builtin-rules"`
 
 	// Disable rules. It is applied to enabled IDs.
 	DisableRuleIDs []string `yaml:"disable-rules"`
 
 	// Disable allow rules.
 	DisableAllowRuleIDs []string `yaml:"disable-allow-rules"`
 
 	CustomRules      []Rule     `yaml:"rules"`
 	CustomAllowRules AllowRules `yaml:"allow-rules"`
 	ExcludeBlock     ExcludeBlock `yaml:"exclude-block"`
 }
@@ -88,7 +88,7 @@ type Rule struct {
 	Regex           *Regexp    `yaml:"regex"`
 	Keywords        []string   `yaml:"keywords"`
 	Path            *Regexp    `yaml:"path"`
 	AllowRules      AllowRules `yaml:"allow-rules"`
 	ExcludeBlock    ExcludeBlock `yaml:"exclude-block"`
 	SecretGroupName string     `yaml:"secret-group-name"`
 }
@@ -265,14 +265,14 @@ func (b *Blocks) find() {
 }
 
 func ParseConfig(configPath string) (*Config, error) {
 	// If no config is passed, use built-in rules and allow rules.
 	if configPath == "" {
 		return nil, nil
 	}
 
 	f, err := os.Open(configPath)
 	if errors.Is(err, os.ErrNotExist) {
 		// If the specified file doesn't exist, it just uses built-in rules and allow rules.
 		log.Logger.Debugf("No secret config detected: %s", configPath)
 		return nil, nil
 	} else if err != nil {
@@ -291,7 +291,7 @@ func ParseConfig(configPath string) (*Config, error) {
 }
 
 func NewScanner(config *Config) Scanner {
 	// Use the default rules
 	if config == nil {
 		return Scanner{Global: &Global{
 			Rules:      builtinRules,
@@ -301,21 +301,21 @@ func NewScanner(config *Config) Scanner {
 	enabledRules := builtinRules
 	if len(config.EnableBuiltinRuleIDs) != 0 {
 		// Enable only specified built-in rules
 		enabledRules = lo.Filter(builtinRules, func(v Rule, _ int) bool {
 			return slices.Contains(config.EnableBuiltinRuleIDs, v.ID)
 		})
 	}
 
 	// Custom rules are enabled regardless of "enable-builtin-rules".
 	enabledRules = append(enabledRules, config.CustomRules...)
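 	// Note: custom rules join the set after the enable filter above, so
 	// EnableBuiltinRuleIDs can never hide them; only DisableRuleIDs below can.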
 	// Disable specified rules
 	rules := lo.Filter(enabledRules, func(v Rule, _ int) bool {
 		return !slices.Contains(config.DisableRuleIDs, v.ID)
 	})
 
 	// Disable specified allow rules
 	allowRules := append(builtinAllowRules, config.CustomAllowRules...)
 	allowRules = lo.Filter(allowRules, func(v AllowRule, _ int) bool {
 		return !slices.Contains(config.DisableAllowRuleIDs, v.ID)
diff --git a/pkg/flag/report_flags_test.go b/pkg/flag/report_flags_test.go
index 4207aa2747d1..ead8f87422ec 100644
--- a/pkg/flag/report_flags_test.go
+++ b/pkg/flag/report_flags_test.go
@@ -8,12 +8,12 @@ import (
 	"go.uber.org/zap"
 	"go.uber.org/zap/zaptest/observer"
 
-	defsecTypes "github.com/aquasecurity/defsec/pkg/types"
 	dbTypes "github.com/aquasecurity/trivy-db/pkg/types"
 	"github.com/aquasecurity/trivy/pkg/compliance/spec"
 	"github.com/aquasecurity/trivy/pkg/flag"
 	"github.com/aquasecurity/trivy/pkg/log"
 	"github.com/aquasecurity/trivy/pkg/types"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
 )
 
 func TestReportFlagGroup_ToOptions(t *testing.T) {
diff --git a/pkg/framework/frameworks.go b/pkg/framework/frameworks.go
new file mode 100644
index 000000000000..82f43947d568
--- /dev/null
+++ b/pkg/framework/frameworks.go
@@ -0,0 +1,11 @@
+package framework
+
+type Framework string
+
+const (
+	Default      Framework = "default"
+	Experimental Framework = "experimental"
+	CIS_AWS_1_2  Framework = "cis-aws-1.2"
+	CIS_AWS_1_4  Framework = "cis-aws-1.4"
+	ALL          Framework = "all"
+)
diff --git a/pkg/misconf/scanner.go b/pkg/misconf/scanner.go
index bcab56dafadd..8561351163c6 100644
--- a/pkg/misconf/scanner.go
+++ b/pkg/misconf/scanner.go
@@ -14,8 +14,7 @@ import (
 	"github.com/samber/lo"
 	"golang.org/x/xerrors"
 
-	"github.com/aquasecurity/defsec/pkg/scan"
-	"github.com/aquasecurity/defsec/pkg/scanners/options"
+	"github.com/aquasecurity/trivy/pkg/detection"
 	"github.com/aquasecurity/trivy/pkg/fanal/types"
@@ -29,6 +30,17 @@ import (
 	"github.com/aquasecurity/trivy/pkg/log"
 	"github.com/aquasecurity/trivy/pkg/mapfs"
+	"github.com/aquasecurity/trivy/pkg/scan"
+	"github.com/aquasecurity/trivy/pkg/scanners"
+	"github.com/aquasecurity/trivy/pkg/scanners/azure/arm"
+	cfscanner "github.com/aquasecurity/trivy/pkg/scanners/cloudformation"
+	cfparser "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser"
+	dfscanner "github.com/aquasecurity/trivy/pkg/scanners/dockerfile"
+	"github.com/aquasecurity/trivy/pkg/scanners/helm"
+	k8sscanner "github.com/aquasecurity/trivy/pkg/scanners/kubernetes"
+	"github.com/aquasecurity/trivy/pkg/scanners/options"
+	tfscanner "github.com/aquasecurity/trivy/pkg/scanners/terraform"
+	tfpscanner "github.com/aquasecurity/trivy/pkg/scanners/terraformplan"
 
 	_ "embed"
 )
diff --git a/pkg/providers/aws/accessanalyzer/aa.go b/pkg/providers/aws/accessanalyzer/aa.go
new file mode 100644
index 000000000000..851cdf128e4e
--- /dev/null
+++ b/pkg/providers/aws/accessanalyzer/aa.go
@@ -0,0 +1,19 @@
+package accessanalyzer
+
+import
"github.com/aquasecurity/trivy/pkg/types" + +type AccessAnalyzer struct { + Analyzers []Analyzer +} + +type Analyzer struct { + Metadata types.MisconfigMetadata + ARN types.StringValue + Name types.StringValue + Active types.BoolValue + Findings []Findings +} + +type Findings struct { + Metadata types.MisconfigMetadata +} diff --git a/pkg/providers/aws/apigateway/ag.go b/pkg/providers/aws/apigateway/ag.go new file mode 100644 index 000000000000..806c2eba8f79 --- /dev/null +++ b/pkg/providers/aws/apigateway/ag.go @@ -0,0 +1,11 @@ +package apigateway + +import ( + v1 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v1" + v2 "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway/v2" +) + +type APIGateway struct { + V1 v1.APIGateway + V2 v2.APIGateway +} diff --git a/pkg/providers/aws/apigateway/v1/apigateway.go b/pkg/providers/aws/apigateway/v1/apigateway.go new file mode 100755 index 000000000000..9e038672b9d3 --- /dev/null +++ b/pkg/providers/aws/apigateway/v1/apigateway.go @@ -0,0 +1,62 @@ +package v1 + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type APIGateway struct { + APIs []API + DomainNames []DomainName +} + +type API struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Stages []Stage + Resources []Resource +} + +type Stage struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + AccessLogging AccessLogging + XRayTracingEnabled defsecTypes.BoolValue + RESTMethodSettings []RESTMethodSettings +} + +type Resource struct { + Metadata defsecTypes.MisconfigMetadata + Methods []Method +} + +type AccessLogging struct { + Metadata defsecTypes.MisconfigMetadata + CloudwatchLogGroupARN defsecTypes.StringValue +} + +type RESTMethodSettings struct { + Metadata defsecTypes.MisconfigMetadata + Method defsecTypes.StringValue + CacheDataEncrypted defsecTypes.BoolValue + CacheEnabled defsecTypes.BoolValue +} + +const ( + AuthorizationNone = "NONE" + AuthorizationCustom = "CUSTOM" + AuthorizationIAM = "AWS_IAM" + AuthorizationCognitoUserPools = "COGNITO_USER_POOLS" +) + +type Method struct { + Metadata defsecTypes.MisconfigMetadata + HTTPMethod defsecTypes.StringValue + AuthorizationType defsecTypes.StringValue + APIKeyRequired defsecTypes.BoolValue +} + +type DomainName struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + SecurityPolicy defsecTypes.StringValue +} diff --git a/pkg/providers/aws/apigateway/v2/apigateway.go b/pkg/providers/aws/apigateway/v2/apigateway.go new file mode 100755 index 000000000000..d9747841fbcf --- /dev/null +++ b/pkg/providers/aws/apigateway/v2/apigateway.go @@ -0,0 +1,41 @@ +package v2 + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type APIGateway struct { + APIs []API + DomainNames []DomainName +} + +const ( + ProtocolTypeUnknown string = "" + ProtocolTypeREST string = "REST" + ProtocolTypeHTTP string = "HTTP" + ProtocolTypeWebsocket string = "WEBSOCKET" +) + +type API struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + ProtocolType defsecTypes.StringValue + Stages []Stage +} + +type Stage struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + AccessLogging AccessLogging +} + +type AccessLogging struct { + Metadata defsecTypes.MisconfigMetadata + CloudwatchLogGroupARN defsecTypes.StringValue +} + +type DomainName struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + SecurityPolicy 
defsecTypes.StringValue +} diff --git a/pkg/providers/aws/athena/athena.go b/pkg/providers/aws/athena/athena.go new file mode 100755 index 000000000000..0705c80662d8 --- /dev/null +++ b/pkg/providers/aws/athena/athena.go @@ -0,0 +1,35 @@ +package athena + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Athena struct { + Databases []Database + Workgroups []Workgroup +} + +type Database struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Encryption EncryptionConfiguration +} + +type Workgroup struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Encryption EncryptionConfiguration + EnforceConfiguration defsecTypes.BoolValue +} + +const ( + EncryptionTypeNone = "" + EncryptionTypeSSES3 = "SSE_S3" + EncryptionTypeSSEKMS = "SSE_KMS" + EncryptionTypeCSEKMS = "CSE_KMS" +) + +type EncryptionConfiguration struct { + Metadata defsecTypes.MisconfigMetadata + Type defsecTypes.StringValue +} diff --git a/pkg/providers/aws/aws.go b/pkg/providers/aws/aws.go new file mode 100755 index 000000000000..518d49ec67e0 --- /dev/null +++ b/pkg/providers/aws/aws.go @@ -0,0 +1,80 @@ +package aws + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/accessanalyzer" + "github.com/aquasecurity/trivy/pkg/providers/aws/apigateway" + "github.com/aquasecurity/trivy/pkg/providers/aws/athena" + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudfront" + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudtrail" + "github.com/aquasecurity/trivy/pkg/providers/aws/cloudwatch" + "github.com/aquasecurity/trivy/pkg/providers/aws/codebuild" + "github.com/aquasecurity/trivy/pkg/providers/aws/config" + "github.com/aquasecurity/trivy/pkg/providers/aws/documentdb" + "github.com/aquasecurity/trivy/pkg/providers/aws/dynamodb" + "github.com/aquasecurity/trivy/pkg/providers/aws/ec2" + "github.com/aquasecurity/trivy/pkg/providers/aws/ecr" + "github.com/aquasecurity/trivy/pkg/providers/aws/ecs" + "github.com/aquasecurity/trivy/pkg/providers/aws/efs" + "github.com/aquasecurity/trivy/pkg/providers/aws/eks" + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticache" + "github.com/aquasecurity/trivy/pkg/providers/aws/elasticsearch" + "github.com/aquasecurity/trivy/pkg/providers/aws/elb" + "github.com/aquasecurity/trivy/pkg/providers/aws/emr" + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + "github.com/aquasecurity/trivy/pkg/providers/aws/kinesis" + "github.com/aquasecurity/trivy/pkg/providers/aws/kms" + "github.com/aquasecurity/trivy/pkg/providers/aws/lambda" + "github.com/aquasecurity/trivy/pkg/providers/aws/mq" + "github.com/aquasecurity/trivy/pkg/providers/aws/msk" + "github.com/aquasecurity/trivy/pkg/providers/aws/neptune" + "github.com/aquasecurity/trivy/pkg/providers/aws/rds" + "github.com/aquasecurity/trivy/pkg/providers/aws/redshift" + "github.com/aquasecurity/trivy/pkg/providers/aws/s3" + "github.com/aquasecurity/trivy/pkg/providers/aws/sam" + "github.com/aquasecurity/trivy/pkg/providers/aws/sns" + "github.com/aquasecurity/trivy/pkg/providers/aws/sqs" + "github.com/aquasecurity/trivy/pkg/providers/aws/ssm" + "github.com/aquasecurity/trivy/pkg/providers/aws/workspaces" +) + +type AWS struct { + Meta Meta + AccessAnalyzer 
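Note: the provider structs above never hold bare Go values; every field is a defsecTypes wrapper pairing the value with its source metadata (file, line, explicit vs. defaulted). A minimal sketch of how those wrappers behave, assuming the NewTestMetadata helper survives the rename to MisconfigMetadata (it is how defsec tests mint metadata):

    package main

    import (
    	"fmt"

    	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
    )

    func main() {
    	meta := defsecTypes.NewTestMetadata() // assumed test helper, as in defsec

    	name := defsecTypes.String("primary", meta)
    	enforce := defsecTypes.Bool(true, meta)

    	fmt.Println(name.Value())            // "primary"
    	fmt.Println(name.EqualTo("primary")) // true
    	fmt.Println(enforce.IsTrue())        // true
    }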
diff --git a/pkg/providers/aws/aws.go b/pkg/providers/aws/aws.go
new file mode 100755
index 000000000000..518d49ec67e0
--- /dev/null
+++ b/pkg/providers/aws/aws.go
@@ -0,0 +1,80 @@
+package aws
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/accessanalyzer"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/apigateway"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/athena"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/cloudfront"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/cloudtrail"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/cloudwatch"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/codebuild"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/config"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/documentdb"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/dynamodb"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/ec2"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/ecr"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/ecs"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/efs"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/eks"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/elasticache"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/elasticsearch"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/elb"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/emr"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/iam"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/kinesis"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/kms"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/lambda"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/mq"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/msk"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/neptune"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/rds"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/redshift"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/s3"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/sam"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/sns"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/sqs"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/ssm"
+	"github.com/aquasecurity/trivy/pkg/providers/aws/workspaces"
+)
+
+type AWS struct {
+	Meta           Meta
+	AccessAnalyzer accessanalyzer.AccessAnalyzer
+	APIGateway     apigateway.APIGateway
+	Athena         athena.Athena
+	Cloudfront     cloudfront.Cloudfront
+	CloudTrail     cloudtrail.CloudTrail
+	CloudWatch     cloudwatch.CloudWatch
+	CodeBuild      codebuild.CodeBuild
+	Config         config.Config
+	DocumentDB     documentdb.DocumentDB
+	DynamoDB       dynamodb.DynamoDB
+	EC2            ec2.EC2
+	ECR            ecr.ECR
+	ECS            ecs.ECS
+	EFS            efs.EFS
+	EKS            eks.EKS
+	ElastiCache    elasticache.ElastiCache
+	Elasticsearch  elasticsearch.Elasticsearch
+	ELB            elb.ELB
+	EMR            emr.EMR
+	IAM            iam.IAM
+	Kinesis        kinesis.Kinesis
+	KMS            kms.KMS
+	Lambda         lambda.Lambda
+	MQ             mq.MQ
+	MSK            msk.MSK
+	Neptune        neptune.Neptune
+	RDS            rds.RDS
+	Redshift       redshift.Redshift
+	SAM            sam.SAM
+	S3             s3.S3
+	SNS            sns.SNS
+	SQS            sqs.SQS
+	SSM            ssm.SSM
+	WorkSpaces     workspaces.WorkSpaces
+}
+
+type Meta struct {
+	TFProviders []TerraformProvider
+}
diff --git a/pkg/providers/aws/cloudfront/cloudfront.go b/pkg/providers/aws/cloudfront/cloudfront.go
new file mode 100755
index 000000000000..0799f7bd4bff
--- /dev/null
+++ b/pkg/providers/aws/cloudfront/cloudfront.go
@@ -0,0 +1,45 @@
+package cloudfront
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type Cloudfront struct {
+	Distributions []Distribution
+}
+
+type Distribution struct {
+	Metadata               defsecTypes.MisconfigMetadata
+	WAFID                  defsecTypes.StringValue
+	Logging                Logging
+	DefaultCacheBehaviour  CacheBehaviour
+	OrdererCacheBehaviours []CacheBehaviour
+	ViewerCertificate      ViewerCertificate
+}
+
+type Logging struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Bucket   defsecTypes.StringValue
+}
+
+type CacheBehaviour struct {
+	Metadata             defsecTypes.MisconfigMetadata
+	ViewerProtocolPolicy defsecTypes.StringValue
+}
+
+const (
+	ViewerPolicyProtocolAllowAll        = "allow-all"
+	ViewerPolicyProtocolHTTPSOnly       = "https-only"
+	ViewerPolicyProtocolRedirectToHTTPS = "redirect-to-https"
+)
+
+const (
+	ProtocolVersionTLS1_2 = "TLSv1.2_2021"
+)
+
+type ViewerCertificate struct {
+	Metadata                     defsecTypes.MisconfigMetadata
+	CloudfrontDefaultCertificate defsecTypes.BoolValue
+	SSLSupportMethod             defsecTypes.StringValue
+	MinimumProtocolVersion       defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/cloudtrail/cloudtrail.go b/pkg/providers/aws/cloudtrail/cloudtrail.go
new file mode 100755
index 000000000000..6b9bb7d0a049
--- /dev/null
+++ b/pkg/providers/aws/cloudtrail/cloudtrail.go
@@ -0,0 +1,42 @@
+package cloudtrail
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type CloudTrail struct {
+	Trails []Trail
+}
+
+func (c CloudTrail) MultiRegionTrails() (multiRegionTrails []Trail) {
+	for _, trail := range c.Trails {
+		if trail.IsMultiRegion.IsTrue() {
+			multiRegionTrails = append(multiRegionTrails, trail)
+		}
+	}
+	return multiRegionTrails
+}
+
+type Trail struct {
+	Metadata                  defsecTypes.MisconfigMetadata
+	Name                      defsecTypes.StringValue
+	EnableLogFileValidation   defsecTypes.BoolValue
+	IsMultiRegion             defsecTypes.BoolValue
+	KMSKeyID                  defsecTypes.StringValue
+	CloudWatchLogsLogGroupArn defsecTypes.StringValue
+	IsLogging                 defsecTypes.BoolValue
+	BucketName                defsecTypes.StringValue
+	EventSelectors            []EventSelector
+}
+
+type EventSelector struct {
+	Metadata      defsecTypes.MisconfigMetadata
+	DataResources []DataResource
+	ReadWriteType defsecTypes.StringValue // ReadOnly, WriteOnly, All. Default value is All for TF.
+}
+
+type DataResource struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Type     defsecTypes.StringValue   // You can specify only the following values: "AWS::S3::Object", "AWS::Lambda::Function" and "AWS::DynamoDB::Table".
+	Values   []defsecTypes.StringValue // List of ARNs/partial ARNs - e.g. arn:aws:s3:::<bucket name>/ for all objects in a bucket, arn:aws:s3:::<bucket name>/key for specific objects
+}
diff --git a/pkg/providers/aws/cloudwatch/cloudwatch.go b/pkg/providers/aws/cloudwatch/cloudwatch.go
new file mode 100755
index 000000000000..704acbc4d749
--- /dev/null
+++ b/pkg/providers/aws/cloudwatch/cloudwatch.go
@@ -0,0 +1,63 @@
+package cloudwatch
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type CloudWatch struct {
+	LogGroups []LogGroup
+	Alarms    []Alarm
+}
+
+func (w CloudWatch) GetLogGroupByArn(arn string) (logGroup *LogGroup) {
+	for _, logGroup := range w.LogGroups {
+		if logGroup.Arn.EqualTo(arn) {
+			return &logGroup
+		}
+	}
+	return nil
+}
+
+func (w CloudWatch) GetAlarmByMetricName(metricName string) (alarm *Alarm) {
+	for _, alarm := range w.Alarms {
+		if alarm.MetricName.EqualTo(metricName) {
+			return &alarm
+		}
+	}
+	return nil
+}
+
+type Alarm struct {
+	Metadata   defsecTypes.MisconfigMetadata
+	AlarmName  defsecTypes.StringValue
+	MetricName defsecTypes.StringValue
+	Dimensions []AlarmDimension
+	Metrics    []MetricDataQuery
+}
+
+type AlarmDimension struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Name     defsecTypes.StringValue
+	Value    defsecTypes.StringValue
+}
+
+type MetricFilter struct {
+	Metadata      defsecTypes.MisconfigMetadata
+	FilterName    defsecTypes.StringValue
+	FilterPattern defsecTypes.StringValue
+}
+
+type MetricDataQuery struct {
+	Metadata   defsecTypes.MisconfigMetadata
+	Expression defsecTypes.StringValue
+	ID         defsecTypes.StringValue
+}
+
+type LogGroup struct {
+	Metadata        defsecTypes.MisconfigMetadata
+	Arn             defsecTypes.StringValue
+	Name            defsecTypes.StringValue
+	KMSKeyID        defsecTypes.StringValue
+	RetentionInDays defsecTypes.IntValue
+	MetricFilters   []MetricFilter
+}
diff --git a/pkg/providers/aws/codebuild/codebuild.go b/pkg/providers/aws/codebuild/codebuild.go
new file mode 100755
index 000000000000..9f89a5eaee51
--- /dev/null
+++ b/pkg/providers/aws/codebuild/codebuild.go
@@ -0,0 +1,20 @@
+package codebuild
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type CodeBuild struct {
+	Projects []Project
+}
+
+type Project struct {
+	Metadata                  defsecTypes.MisconfigMetadata
+	ArtifactSettings          ArtifactSettings
+	SecondaryArtifactSettings []ArtifactSettings
+}
+
+type ArtifactSettings struct {
+	Metadata          defsecTypes.MisconfigMetadata
+	EncryptionEnabled defsecTypes.BoolValue
+}
diff --git a/pkg/providers/aws/config/config.go b/pkg/providers/aws/config/config.go
new file mode 100755
index 000000000000..30f7c7cb2b8e
--- /dev/null
+++ b/pkg/providers/aws/config/config.go
@@ -0,0 +1,14 @@
+package config
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type Config struct {
+	ConfigurationAggregrator ConfigurationAggregrator
+}
+
+type ConfigurationAggregrator struct {
+	Metadata         defsecTypes.MisconfigMetadata
+	SourceAllRegions defsecTypes.BoolValue
+}
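Note: these structs are consumed by checks rather than constructed by hand, and helpers such as CloudTrail.MultiRegionTrails above pre-filter the adapted state. A sketch of a rule-style consumer, assuming BoolValue keeps defsec's IsFalse helper alongside IsTrue:

    package main

    import (
    	"fmt"

    	"github.com/aquasecurity/trivy/pkg/providers/aws/cloudtrail"
    )

    // reportSilentTrails mirrors how a rule walks the adapted state:
    // filter with the helper, then inspect the wrapped values.
    func reportSilentTrails(ct cloudtrail.CloudTrail) {
    	for _, trail := range ct.MultiRegionTrails() {
    		if trail.IsLogging.IsFalse() { // BoolValue helper, as in defsec
    			fmt.Printf("trail %s is multi-region but not logging\n", trail.Name.Value())
    		}
    	}
    }

    func main() {
    	reportSilentTrails(cloudtrail.CloudTrail{}) // no trails: prints nothing
    }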
diff --git a/pkg/providers/aws/documentdb/documentdb.go b/pkg/providers/aws/documentdb/documentdb.go
new file mode 100755
index 000000000000..82ed378a23ce
--- /dev/null
+++ b/pkg/providers/aws/documentdb/documentdb.go
@@ -0,0 +1,29 @@
+package documentdb
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type DocumentDB struct {
+	Clusters []Cluster
+}
+
+const (
+	LogExportAudit    = "audit"
+	LogExportProfiler = "profiler"
+)
+
+type Cluster struct {
+	Metadata              defsecTypes.MisconfigMetadata
+	Identifier            defsecTypes.StringValue
+	EnabledLogExports     []defsecTypes.StringValue
+	BackupRetentionPeriod defsecTypes.IntValue
+	Instances             []Instance
+	StorageEncrypted      defsecTypes.BoolValue
+	KMSKeyID              defsecTypes.StringValue
+}
+
+type Instance struct {
+	Metadata defsecTypes.MisconfigMetadata
+	KMSKeyID defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/dynamodb/dynamodb.go b/pkg/providers/aws/dynamodb/dynamodb.go
new file mode 100755
index 000000000000..2c742f2c3b49
--- /dev/null
+++ b/pkg/providers/aws/dynamodb/dynamodb.go
@@ -0,0 +1,30 @@
+package dynamodb
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type DynamoDB struct {
+	DAXClusters []DAXCluster
+	Tables      []Table
+}
+
+type DAXCluster struct {
+	Metadata             defsecTypes.MisconfigMetadata
+	ServerSideEncryption ServerSideEncryption
+	PointInTimeRecovery  defsecTypes.BoolValue
+}
+
+type Table struct {
+	Metadata             defsecTypes.MisconfigMetadata
+	ServerSideEncryption ServerSideEncryption
+	PointInTimeRecovery  defsecTypes.BoolValue
+}
+
+type ServerSideEncryption struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Enabled  defsecTypes.BoolValue
+	KMSKeyID defsecTypes.StringValue
+}
+
+const DefaultKMSKeyID = "alias/aws/dynamodb"
diff --git a/pkg/providers/aws/ec2/ec2.go b/pkg/providers/aws/ec2/ec2.go
new file mode 100755
index 000000000000..726e312f65aa
--- /dev/null
+++ b/pkg/providers/aws/ec2/ec2.go
@@ -0,0 +1,12 @@
+package ec2
+
+type EC2 struct {
+	Instances            []Instance
+	LaunchConfigurations []LaunchConfiguration
+	LaunchTemplates      []LaunchTemplate
+	VPCs                 []VPC
+	SecurityGroups       []SecurityGroup
+	NetworkACLs          []NetworkACL
+	Subnets              []Subnet
+	Volumes              []Volume
+}
diff --git a/pkg/providers/aws/ec2/instance.go b/pkg/providers/aws/ec2/instance.go
new file mode 100755
index 000000000000..c732ded3f27d
--- /dev/null
+++ b/pkg/providers/aws/ec2/instance.go
@@ -0,0 +1,54 @@
+package ec2
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+
+	"github.com/owenrumney/squealer/pkg/squealer"
+)
+
+type Instance struct {
+	Metadata        defsecTypes.MisconfigMetadata
+	MetadataOptions MetadataOptions
+	UserData        defsecTypes.StringValue
+	SecurityGroups  []SecurityGroup
+	RootBlockDevice *BlockDevice
+	EBSBlockDevices []*BlockDevice
+}
+
+type BlockDevice struct {
+	Metadata  defsecTypes.MisconfigMetadata
+	Encrypted defsecTypes.BoolValue
+}
+
+type MetadataOptions struct {
+	Metadata     defsecTypes.MisconfigMetadata
+	HttpTokens   defsecTypes.StringValue
+	HttpEndpoint defsecTypes.StringValue
+}
+
+func NewInstance(metadata defsecTypes.MisconfigMetadata) *Instance {
+	return &Instance{
+		Metadata: metadata,
+		MetadataOptions: MetadataOptions{
+			Metadata:     metadata,
+			HttpTokens:   defsecTypes.StringDefault("optional", metadata),
+			HttpEndpoint: defsecTypes.StringDefault("enabled", metadata),
+		},
+		UserData:        defsecTypes.StringDefault("", metadata),
+		SecurityGroups:  []SecurityGroup{},
+		RootBlockDevice: nil,
+		EBSBlockDevices: nil,
+	}
+}
+
+func (i *Instance) RequiresIMDSToken() bool {
+	return i.MetadataOptions.HttpTokens.EqualTo("required")
+}
+
+func (i *Instance) HasHTTPEndpointDisabled() bool {
+	return i.MetadataOptions.HttpEndpoint.EqualTo("disabled")
+}
+
+func (i *Instance) HasSensitiveInformationInUserData() bool {
+	scanner := squealer.NewStringScanner()
+	return scanner.Scan(i.UserData.Value()).TransgressionFound
+}
diff --git a/pkg/providers/aws/ec2/launch.go b/pkg/providers/aws/ec2/launch.go
new file mode 100644
index 000000000000..fb8a6f1e5fd2
--- /dev/null
+++ b/pkg/providers/aws/ec2/launch.go
@@ -0,0 +1,29 @@
+package ec2
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type LaunchConfiguration struct {
+	Metadata          defsecTypes.MisconfigMetadata
+	Name              defsecTypes.StringValue
+	AssociatePublicIP defsecTypes.BoolValue
+	RootBlockDevice   *BlockDevice
+	EBSBlockDevices   []*BlockDevice
+	MetadataOptions   MetadataOptions
+	UserData          defsecTypes.StringValue
+}
+
+type LaunchTemplate struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Name     defsecTypes.StringValue
+	Instance
+}
+
+func (i *LaunchConfiguration) RequiresIMDSToken() bool {
+	return i.MetadataOptions.HttpTokens.EqualTo("required")
+}
+
+func (i *LaunchConfiguration) HasHTTPEndpointDisabled() bool {
+	return i.MetadataOptions.HttpEndpoint.EqualTo("disabled")
+}
diff --git a/pkg/providers/aws/ec2/subnet.go b/pkg/providers/aws/ec2/subnet.go
new file mode 100644
index 000000000000..b2d3ea741b2c
--- /dev/null
+++ b/pkg/providers/aws/ec2/subnet.go
@@ -0,0 +1,10 @@
+package ec2
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type Subnet struct {
+	Metadata            defsecTypes.MisconfigMetadata
+	MapPublicIpOnLaunch defsecTypes.BoolValue
+}
diff --git a/pkg/providers/aws/ec2/volume.go b/pkg/providers/aws/ec2/volume.go
new file mode 100644
index 000000000000..c9b5454c3b8b
--- /dev/null
+++ b/pkg/providers/aws/ec2/volume.go
@@ -0,0 +1,16 @@
+package ec2
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type Volume struct {
+	Metadata   defsecTypes.MisconfigMetadata
+	Encryption Encryption
+}
+
+type Encryption struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Enabled  defsecTypes.BoolValue
+	KMSKeyID defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/ec2/vpc.go b/pkg/providers/aws/ec2/vpc.go
new file mode 100644
index 000000000000..7c859c36e53f
--- /dev/null
+++ b/pkg/providers/aws/ec2/vpc.go
@@ -0,0 +1,52 @@
+package ec2
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type NetworkACL struct {
+	Metadata      defsecTypes.MisconfigMetadata
+	Rules         []NetworkACLRule
+	IsDefaultRule defsecTypes.BoolValue
+}
+
+type SecurityGroup struct {
+	Metadata     defsecTypes.MisconfigMetadata
+	IsDefault    defsecTypes.BoolValue
+	Description  defsecTypes.StringValue
+	IngressRules []SecurityGroupRule
+	EgressRules  []SecurityGroupRule
+	VPCID        defsecTypes.StringValue
+}
+
+type SecurityGroupRule struct {
+	Metadata    defsecTypes.MisconfigMetadata
+	Description defsecTypes.StringValue
+	CIDRs       []defsecTypes.StringValue
+}
+
+type VPC struct {
+	Metadata        defsecTypes.MisconfigMetadata
+	ID              defsecTypes.StringValue
+	IsDefault       defsecTypes.BoolValue
+	SecurityGroups  []SecurityGroup
+	FlowLogsEnabled defsecTypes.BoolValue
+}
+
+const (
+	TypeIngress = "ingress"
+	TypeEgress  = "egress"
+)
+
+const (
+	ActionAllow = "allow"
+	ActionDeny  = "deny"
+)
+
+type NetworkACLRule struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Type     defsecTypes.StringValue
+	Action   defsecTypes.StringValue
+	Protocol defsecTypes.StringValue
+	CIDRs    []defsecTypes.StringValue
+}
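Note: NewInstance above seeds IMDS defaults ("optional" tokens, "enabled" endpoint) and an empty user data string, and HasSensitiveInformationInUserData delegates to squealer. A usage sketch, again assuming the NewTestMetadata helper carries over from defsec:

    package main

    import (
    	"fmt"

    	"github.com/aquasecurity/trivy/pkg/providers/aws/ec2"
    	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
    )

    func main() {
    	meta := defsecTypes.NewTestMetadata() // assumed test helper, as in defsec

    	// Defaults to IMDSv1 semantics: tokens "optional", endpoint "enabled".
    	inst := ec2.NewInstance(meta)
    	fmt.Println(inst.RequiresIMDSToken()) // false until HttpTokens == "required"

    	// Squealer scans user data for credential-shaped strings.
    	inst.UserData = defsecTypes.String("export AWS_SECRET_ACCESS_KEY=...", meta)
    	fmt.Println(inst.HasSensitiveInformationInUserData()) // true if squealer flags the value
    }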
diff --git a/pkg/providers/aws/ecr/ecr.go b/pkg/providers/aws/ecr/ecr.go
new file mode 100755
index 000000000000..3c91b507b12a
--- /dev/null
+++ b/pkg/providers/aws/ecr/ecr.go
@@ -0,0 +1,34 @@
+package ecr
+
+import (
+	"github.com/aquasecurity/trivy/pkg/providers/aws/iam"
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type ECR struct {
+	Repositories []Repository
+}
+
+type Repository struct {
+	Metadata           defsecTypes.MisconfigMetadata
+	ImageScanning      ImageScanning
+	ImageTagsImmutable defsecTypes.BoolValue
+	Policies           []iam.Policy
+	Encryption         Encryption
+}
+
+type ImageScanning struct {
+	Metadata   defsecTypes.MisconfigMetadata
+	ScanOnPush defsecTypes.BoolValue
+}
+
+const (
+	EncryptionTypeKMS    = "KMS"
+	EncryptionTypeAES256 = "AES256"
+)
+
+type Encryption struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Type     defsecTypes.StringValue
+	KMSKeyID defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/ecs/ecs.go b/pkg/providers/aws/ecs/ecs.go
new file mode 100755
index 000000000000..6f6873e6bd34
--- /dev/null
+++ b/pkg/providers/aws/ecs/ecs.go
@@ -0,0 +1,119 @@
+package ecs
+
+import (
+	"encoding/json"
+
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type ECS struct {
+	Clusters        []Cluster
+	TaskDefinitions []TaskDefinition
+}
+
+type Cluster struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Settings ClusterSettings
+}
+
+type ClusterSettings struct {
+	Metadata                 defsecTypes.MisconfigMetadata
+	ContainerInsightsEnabled defsecTypes.BoolValue
+}
+
+type TaskDefinition struct {
+	Metadata             defsecTypes.MisconfigMetadata
+	Volumes              []Volume
+	ContainerDefinitions []ContainerDefinition
+}
+
+func CreateDefinitionsFromString(metadata defsecTypes.MisconfigMetadata, str string) ([]ContainerDefinition, error) {
+	var containerDefinitionsJSON []containerDefinitionJSON
+	if err := json.Unmarshal([]byte(str), &containerDefinitionsJSON); err != nil {
+		return nil, err
+	}
+	var definitions []ContainerDefinition
+	for _, j := range containerDefinitionsJSON {
+		definitions = append(definitions, j.convert(metadata))
+	}
+	return definitions, nil
+}
+
+// see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html
+type containerDefinitionJSON struct {
+	Name         string            `json:"name"`
+	Image        string            `json:"image"`
+	CPU          int               `json:"cpu"`
+	Memory       int               `json:"memory"`
+	Essential    bool              `json:"essential"`
+	PortMappings []portMappingJSON `json:"portMappings"`
+	EnvVars      []envVarJSON      `json:"environment"`
+	Privileged   bool              `json:"privileged"`
+}
+
+type envVarJSON struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+type portMappingJSON struct {
+	ContainerPort int `json:"containerPort"`
+	HostPort      int `json:"hostPort"`
+}
+
+func (j containerDefinitionJSON) convert(metadata defsecTypes.MisconfigMetadata) ContainerDefinition {
+	var mappings []PortMapping
+	for _, jMapping := range j.PortMappings {
+		mappings = append(mappings, PortMapping{
+			ContainerPort: defsecTypes.Int(jMapping.ContainerPort, metadata),
+			HostPort:      defsecTypes.Int(jMapping.HostPort, metadata),
+		})
+	}
+	var envVars []EnvVar
+	for _, env := range j.EnvVars {
+		envVars = append(envVars, EnvVar(env))
+	}
+	return ContainerDefinition{
+		Metadata:     metadata,
+		Name:         defsecTypes.String(j.Name, metadata),
+		Image:        defsecTypes.String(j.Image, metadata),
+		CPU:          defsecTypes.Int(j.CPU, metadata),
+		Memory:       defsecTypes.Int(j.Memory, metadata),
+		Essential:    defsecTypes.Bool(j.Essential, metadata),
+		PortMappings: mappings,
+		Environment:  envVars,
+		Privileged:   defsecTypes.Bool(j.Privileged, metadata),
+	}
+}
+
+type ContainerDefinition struct {
+	Metadata     defsecTypes.MisconfigMetadata
+	Name         defsecTypes.StringValue
+	Image        defsecTypes.StringValue
+	CPU          defsecTypes.IntValue
+	Memory       defsecTypes.IntValue
+	Essential    defsecTypes.BoolValue
+	PortMappings []PortMapping
+	Environment  []EnvVar
+	Privileged   defsecTypes.BoolValue
+}
+
+type EnvVar struct {
+	Name  string
+	Value string
+}
+
+type PortMapping struct {
+	ContainerPort defsecTypes.IntValue
+	HostPort      defsecTypes.IntValue
+}
+
+type Volume struct {
+	Metadata               defsecTypes.MisconfigMetadata
+	EFSVolumeConfiguration EFSVolumeConfiguration
+}
+
+type EFSVolumeConfiguration struct {
+	Metadata                 defsecTypes.MisconfigMetadata
+	TransitEncryptionEnabled defsecTypes.BoolValue
+}
diff --git a/pkg/providers/aws/efs/efs.go b/pkg/providers/aws/efs/efs.go
new file mode 100755
index 000000000000..c547d7db8b97
--- /dev/null
+++ b/pkg/providers/aws/efs/efs.go
@@ -0,0 +1,14 @@
+package efs
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type EFS struct {
+	FileSystems []FileSystem
+}
+
+type FileSystem struct {
+	Metadata  defsecTypes.MisconfigMetadata
+	Encrypted defsecTypes.BoolValue
+}
diff --git a/pkg/providers/aws/eks/eks.go b/pkg/providers/aws/eks/eks.go
new file mode 100755
index 000000000000..f27994a27706
--- /dev/null
+++ b/pkg/providers/aws/eks/eks.go
@@ -0,0 +1,32 @@
+package eks
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type EKS struct {
+	Clusters []Cluster
+}
+
+type Cluster struct {
+	Metadata            defsecTypes.MisconfigMetadata
+	Logging             Logging
+	Encryption          Encryption
+	PublicAccessEnabled defsecTypes.BoolValue
+	PublicAccessCIDRs   []defsecTypes.StringValue
+}
+
+type Logging struct {
+	Metadata          defsecTypes.MisconfigMetadata
+	API               defsecTypes.BoolValue
+	Audit             defsecTypes.BoolValue
+	Authenticator     defsecTypes.BoolValue
+	ControllerManager defsecTypes.BoolValue
+	Scheduler         defsecTypes.BoolValue
+}
+
+type Encryption struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Secrets  defsecTypes.BoolValue
+	KMSKeyID defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/elasticache/elasticache.go b/pkg/providers/aws/elasticache/elasticache.go
new file mode 100755
index 000000000000..396fcfb92f00
--- /dev/null
+++ b/pkg/providers/aws/elasticache/elasticache.go
@@ -0,0 +1,29 @@
+package elasticache
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type ElastiCache struct {
+	Clusters          []Cluster
+	ReplicationGroups []ReplicationGroup
+	SecurityGroups    []SecurityGroup
+}
+
+type Cluster struct {
+	Metadata               defsecTypes.MisconfigMetadata
+	Engine                 defsecTypes.StringValue
+	NodeType               defsecTypes.StringValue
+	SnapshotRetentionLimit defsecTypes.IntValue // days
+}
+
+type ReplicationGroup struct {
+	Metadata                 defsecTypes.MisconfigMetadata
+	TransitEncryptionEnabled defsecTypes.BoolValue
+	AtRestEncryptionEnabled  defsecTypes.BoolValue
+}
+
+type SecurityGroup struct {
+	Metadata    defsecTypes.MisconfigMetadata
+	Description defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/elasticsearch/elasticsearch.go b/pkg/providers/aws/elasticsearch/elasticsearch.go
new file mode 100755
index 000000000000..2d28354cf77e
--- /dev/null
+++ b/pkg/providers/aws/elasticsearch/elasticsearch.go
@@ -0,0 +1,53 @@
+package elasticsearch
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type Elasticsearch struct {
+	Domains []Domain
+}
+
+type Domain struct {
+	Metadata               defsecTypes.MisconfigMetadata
+	DomainName             defsecTypes.StringValue
+	AccessPolicies         defsecTypes.StringValue
+	DedicatedMasterEnabled defsecTypes.BoolValue
+	VpcId                  defsecTypes.StringValue
+	LogPublishing          LogPublishing
+	TransitEncryption      TransitEncryption
+	AtRestEncryption       AtRestEncryption
+	ServiceSoftwareOptions ServiceSoftwareOptions
+	Endpoint               Endpoint
+}
+
+type ServiceSoftwareOptions struct {
+	Metadata        defsecTypes.MisconfigMetadata
+	CurrentVersion  defsecTypes.StringValue
+	NewVersion      defsecTypes.StringValue
+	UpdateAvailable defsecTypes.BoolValue
+	UpdateStatus    defsecTypes.StringValue
+}
+
+type Endpoint struct {
+	Metadata     defsecTypes.MisconfigMetadata
+	EnforceHTTPS defsecTypes.BoolValue
+	TLSPolicy    defsecTypes.StringValue
+}
+
+type LogPublishing struct {
+	Metadata              defsecTypes.MisconfigMetadata
+	AuditEnabled          defsecTypes.BoolValue
+	CloudWatchLogGroupArn defsecTypes.StringValue
+}
+
+type TransitEncryption struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Enabled  defsecTypes.BoolValue
+}
+
+type AtRestEncryption struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Enabled  defsecTypes.BoolValue
+	KmsKeyId defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/elb/elb.go b/pkg/providers/aws/elb/elb.go
new file mode 100755
index 000000000000..fd423334744b
--- /dev/null
+++ b/pkg/providers/aws/elb/elb.go
@@ -0,0 +1,36 @@
+package elb
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type ELB struct {
+	LoadBalancers []LoadBalancer
+}
+
+const (
+	TypeApplication = "application"
+	TypeGateway     = "gateway"
+	TypeNetwork     = "network"
+	TypeClassic     = "classic"
+)
+
+type LoadBalancer struct {
+	Metadata                defsecTypes.MisconfigMetadata
+	Type                    defsecTypes.StringValue
+	DropInvalidHeaderFields defsecTypes.BoolValue
+	Internal                defsecTypes.BoolValue
+	Listeners               []Listener
+}
+
+type Listener struct {
+	Metadata       defsecTypes.MisconfigMetadata
+	Protocol       defsecTypes.StringValue
+	TLSPolicy      defsecTypes.StringValue
+	DefaultActions []Action
+}
+
+type Action struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Type     defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/emr/emr.go b/pkg/providers/aws/emr/emr.go
new file mode 100644
index 000000000000..b93457df2f1f
--- /dev/null
+++ b/pkg/providers/aws/emr/emr.go
@@ -0,0 +1,28 @@
+package emr
+
+import (
+	defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type EMR struct {
+	Clusters              []Cluster
+	SecurityConfiguration []SecurityConfiguration
+}
+
+type Cluster struct {
+	Metadata defsecTypes.MisconfigMetadata
+	Settings ClusterSettings
+}
+
+type ClusterSettings struct {
+	Metadata     defsecTypes.MisconfigMetadata
+	Name         defsecTypes.StringValue
+	ReleaseLabel defsecTypes.StringValue
+	ServiceRole  defsecTypes.StringValue
+}
+
+type SecurityConfiguration struct {
+	Metadata      defsecTypes.MisconfigMetadata
+	Name          defsecTypes.StringValue
+	Configuration defsecTypes.StringValue
+}
diff --git a/pkg/providers/aws/iam/actions.go b/pkg/providers/aws/iam/actions.go
new file mode 100644
index 000000000000..564b4a38f917
--- /dev/null
+++ b/pkg/providers/aws/iam/actions.go
@@ -0,0 +1,5135 @@
+// Code generated by cmd/allowed_actions DO NOT EDIT.
+ +package iam + +var allowedActionsForResourceWildcardsMap = map[string]struct{}{ + "a2c:GetContainerizationJobDetails": {}, + "a2c:GetDeploymentJobDetails": {}, + "a2c:StartContainerizationJob": {}, + "a2c:StartDeploymentJob": {}, + "a4b:ApproveSkill": {}, + "a4b:AssociateSkillWithUsers": {}, + "a4b:CompleteRegistration": {}, + "a4b:CreateAddressBook": {}, + "a4b:CreateBusinessReportSchedule": {}, + "a4b:CreateConferenceProvider": {}, + "a4b:CreateContact": {}, + "a4b:CreateGatewayGroup": {}, + "a4b:CreateNetworkProfile": {}, + "a4b:CreateProfile": {}, + "a4b:CreateSkillGroup": {}, + "a4b:GetConferencePreference": {}, + "a4b:GetInvitationConfiguration": {}, + "a4b:ListBusinessReportSchedules": {}, + "a4b:ListConferenceProviders": {}, + "a4b:ListGatewayGroups": {}, + "a4b:ListSkills": {}, + "a4b:ListSkillsStoreCategories": {}, + "a4b:ListSkillsStoreSkillsByCategory": {}, + "a4b:PutConferencePreference": {}, + "a4b:PutDeviceSetupEvents": {}, + "a4b:PutInvitationConfiguration": {}, + "a4b:RegisterAVSDevice": {}, + "a4b:RegisterDevice": {}, + "a4b:RejectSkill": {}, + "a4b:ResolveRoom": {}, + "a4b:SearchAddressBooks": {}, + "a4b:SearchContacts": {}, + "a4b:SearchDevices": {}, + "a4b:SearchNetworkProfiles": {}, + "a4b:SearchProfiles": {}, + "a4b:SearchRooms": {}, + "a4b:SearchSkillGroups": {}, + "a4b:SearchUsers": {}, + "a4b:SendAnnouncement": {}, + "a4b:StartDeviceSync": {}, + "access-analyzer:CancelPolicyGeneration": {}, + "access-analyzer:CheckAccessNotGranted": {}, + "access-analyzer:CheckNoNewAccess": {}, + "access-analyzer:GetGeneratedPolicy": {}, + "access-analyzer:ListAnalyzers": {}, + "access-analyzer:ListPolicyGenerations": {}, + "access-analyzer:StartPolicyGeneration": {}, + "access-analyzer:ValidatePolicy": {}, + "acm-pca:CreateCertificateAuthority": {}, + "acm-pca:ListCertificateAuthorities": {}, + "acm:GetAccountConfiguration": {}, + "acm:ListCertificates": {}, + "acm:PutAccountConfiguration": {}, + "acm:RequestCertificate": {}, + "activate:CreateForm": {}, + "activate:GetAccountContact": {}, + "activate:GetContentInfo": {}, + "activate:GetCosts": {}, + "activate:GetCredits": {}, + "activate:GetMemberInfo": {}, + "activate:GetProgram": {}, + "activate:PutMemberInfo": {}, + "airflow:ListEnvironments": {}, + "amplify:ListApps": {}, + "amplifybackend:ListS3Buckets": {}, + "amplifyuibuilder:CreateComponent": {}, + "amplifyuibuilder:CreateForm": {}, + "amplifyuibuilder:CreateTheme": {}, + "amplifyuibuilder:ExchangeCodeForToken": {}, + "amplifyuibuilder:ExportComponents": {}, + "amplifyuibuilder:ExportForms": {}, + "amplifyuibuilder:ExportThemes": {}, + "amplifyuibuilder:GetMetadata": {}, + "amplifyuibuilder:ListCodegenJobs": {}, + "amplifyuibuilder:ListComponents": {}, + "amplifyuibuilder:ListForms": {}, + "amplifyuibuilder:ListThemes": {}, + "amplifyuibuilder:PutMetadataFlag": {}, + "amplifyuibuilder:RefreshToken": {}, + "amplifyuibuilder:ResetMetadataFlag": {}, + "amplifyuibuilder:StartCodegenJob": {}, + "aoss:BatchGetCollection": {}, + "aoss:BatchGetEffectiveLifecyclePolicy": {}, + "aoss:BatchGetLifecyclePolicy": {}, + "aoss:BatchGetVpcEndpoint": {}, + "aoss:CreateAccessPolicy": {}, + "aoss:CreateCollection": {}, + "aoss:CreateLifecyclePolicy": {}, + "aoss:CreateSecurityConfig": {}, + "aoss:CreateSecurityPolicy": {}, + "aoss:CreateVpcEndpoint": {}, + "aoss:DeleteAccessPolicy": {}, + "aoss:DeleteLifecyclePolicy": {}, + "aoss:DeleteSecurityConfig": {}, + "aoss:DeleteSecurityPolicy": {}, + "aoss:DeleteVpcEndpoint": {}, + "aoss:GetAccessPolicy": {}, + "aoss:GetAccountSettings": {}, + 
"aoss:GetPoliciesStats": {}, + "aoss:GetSecurityConfig": {}, + "aoss:GetSecurityPolicy": {}, + "aoss:ListAccessPolicies": {}, + "aoss:ListCollections": {}, + "aoss:ListLifecyclePolicies": {}, + "aoss:ListSecurityConfigs": {}, + "aoss:ListSecurityPolicies": {}, + "aoss:ListTagsForResource": {}, + "aoss:ListVpcEndpoints": {}, + "aoss:TagResource": {}, + "aoss:UntagResource": {}, + "aoss:UpdateAccessPolicy": {}, + "aoss:UpdateAccountSettings": {}, + "aoss:UpdateLifecyclePolicy": {}, + "aoss:UpdateSecurityConfig": {}, + "aoss:UpdateSecurityPolicy": {}, + "aoss:UpdateVpcEndpoint": {}, + "app-integrations:ListApplications": {}, + "app-integrations:ListDataIntegrationAssociations": {}, + "app-integrations:ListDataIntegrations": {}, + "app-integrations:ListEventIntegrationAssociations": {}, + "app-integrations:ListEventIntegrations": {}, + "appconfig:CreateApplication": {}, + "appconfig:CreateDeploymentStrategy": {}, + "appconfig:CreateExtension": {}, + "appconfig:CreateExtensionAssociation": {}, + "appconfig:ListApplications": {}, + "appconfig:ListDeploymentStrategies": {}, + "appconfig:ListExtensionAssociations": {}, + "appconfig:ListExtensions": {}, + "appfabric:ListAppBundles": {}, + "appflow:CreateConnectorProfile": {}, + "appflow:CreateFlow": {}, + "appflow:DescribeConnectorProfiles": {}, + "appflow:DescribeConnectors": {}, + "appflow:DescribeFlows": {}, + "appflow:RegisterConnector": {}, + "application-autoscaling:DescribeScalableTargets": {}, + "application-autoscaling:DescribeScalingActivities": {}, + "application-autoscaling:DescribeScalingPolicies": {}, + "application-autoscaling:DescribeScheduledActions": {}, + "application-cost-profiler:DeleteReportDefinition": {}, + "application-cost-profiler:GetReportDefinition": {}, + "application-cost-profiler:ImportApplicationUsage": {}, + "application-cost-profiler:ListReportDefinitions": {}, + "application-cost-profiler:PutReportDefinition": {}, + "application-cost-profiler:UpdateReportDefinition": {}, + "application-transformation:GetContainerization": {}, + "application-transformation:GetDeployment": {}, + "application-transformation:GetGroupingAssessment": {}, + "application-transformation:GetPortingCompatibilityAssessment": {}, + "application-transformation:GetPortingRecommendationAssessment": {}, + "application-transformation:GetRuntimeAssessment": {}, + "application-transformation:PutLogData": {}, + "application-transformation:PutMetricData": {}, + "application-transformation:StartContainerization": {}, + "application-transformation:StartDeployment": {}, + "application-transformation:StartGroupingAssessment": {}, + "application-transformation:StartPortingCompatibilityAssessment": {}, + "application-transformation:StartPortingRecommendationAssessment": {}, + "application-transformation:StartRuntimeAssessment": {}, + "applicationinsights:AddWorkload": {}, + "applicationinsights:CreateApplication": {}, + "applicationinsights:CreateComponent": {}, + "applicationinsights:CreateLogPattern": {}, + "applicationinsights:DeleteApplication": {}, + "applicationinsights:DeleteComponent": {}, + "applicationinsights:DeleteLogPattern": {}, + "applicationinsights:DescribeApplication": {}, + "applicationinsights:DescribeComponent": {}, + "applicationinsights:DescribeComponentConfiguration": {}, + "applicationinsights:DescribeComponentConfigurationRecommendation": {}, + "applicationinsights:DescribeLogPattern": {}, + "applicationinsights:DescribeObservation": {}, + "applicationinsights:DescribeProblem": {}, + 
"applicationinsights:DescribeProblemObservations": {}, + "applicationinsights:DescribeWorkload": {}, + "applicationinsights:Link": {}, + "applicationinsights:ListApplications": {}, + "applicationinsights:ListComponents": {}, + "applicationinsights:ListConfigurationHistory": {}, + "applicationinsights:ListLogPatternSets": {}, + "applicationinsights:ListLogPatterns": {}, + "applicationinsights:ListProblems": {}, + "applicationinsights:ListTagsForResource": {}, + "applicationinsights:ListWorkloads": {}, + "applicationinsights:RemoveWorkload": {}, + "applicationinsights:TagResource": {}, + "applicationinsights:UntagResource": {}, + "applicationinsights:UpdateApplication": {}, + "applicationinsights:UpdateComponent": {}, + "applicationinsights:UpdateComponentConfiguration": {}, + "applicationinsights:UpdateLogPattern": {}, + "applicationinsights:UpdateProblem": {}, + "applicationinsights:UpdateWorkload": {}, + "appmesh-preview:ListMeshes": {}, + "appmesh:ListMeshes": {}, + "apprunner:ListAutoScalingConfigurations": {}, + "apprunner:ListConnections": {}, + "apprunner:ListObservabilityConfigurations": {}, + "apprunner:ListServices": {}, + "apprunner:ListVpcConnectors": {}, + "apprunner:ListVpcIngressConnections": {}, + "appstream:CreateAppBlock": {}, + "appstream:CreateDirectoryConfig": {}, + "appstream:CreateUsageReportSubscription": {}, + "appstream:CreateUser": {}, + "appstream:DeleteDirectoryConfig": {}, + "appstream:DeleteUsageReportSubscription": {}, + "appstream:DeleteUser": {}, + "appstream:DescribeDirectoryConfigs": {}, + "appstream:DescribeUsageReportSubscriptions": {}, + "appstream:DescribeUsers": {}, + "appstream:DisableUser": {}, + "appstream:EnableUser": {}, + "appstream:ExpireSession": {}, + "appstream:ListTagsForResource": {}, + "appstream:UpdateDirectoryConfig": {}, + "appsync:CreateApiCache": {}, + "appsync:CreateApiKey": {}, + "appsync:CreateDataSource": {}, + "appsync:CreateDomainName": {}, + "appsync:CreateFunction": {}, + "appsync:CreateGraphqlApi": {}, + "appsync:CreateResolver": {}, + "appsync:CreateType": {}, + "appsync:DeleteApiCache": {}, + "appsync:DeleteApiKey": {}, + "appsync:DeleteDataSource": {}, + "appsync:DeleteFunction": {}, + "appsync:DeleteResolver": {}, + "appsync:DeleteResourcePolicy": {}, + "appsync:DeleteType": {}, + "appsync:EvaluateCode": {}, + "appsync:EvaluateMappingTemplate": {}, + "appsync:FlushApiCache": {}, + "appsync:GetApiCache": {}, + "appsync:GetDataSource": {}, + "appsync:GetDataSourceIntrospection": {}, + "appsync:GetFunction": {}, + "appsync:GetIntrospectionSchema": {}, + "appsync:GetResolver": {}, + "appsync:GetResourcePolicy": {}, + "appsync:GetSchemaCreationStatus": {}, + "appsync:GetType": {}, + "appsync:ListApiKeys": {}, + "appsync:ListDataSources": {}, + "appsync:ListDomainNames": {}, + "appsync:ListFunctions": {}, + "appsync:ListGraphqlApis": {}, + "appsync:ListResolvers": {}, + "appsync:ListResolversByFunction": {}, + "appsync:ListSourceApiAssociations": {}, + "appsync:ListTypes": {}, + "appsync:ListTypesByAssociation": {}, + "appsync:PutResourcePolicy": {}, + "appsync:SetWebACL": {}, + "appsync:StartDataSourceIntrospection": {}, + "appsync:StartSchemaCreation": {}, + "appsync:UpdateApiCache": {}, + "appsync:UpdateApiKey": {}, + "appsync:UpdateDataSource": {}, + "appsync:UpdateFunction": {}, + "appsync:UpdateResolver": {}, + "appsync:UpdateType": {}, + "aps:CreateWorkspace": {}, + "aps:GetDefaultScraperConfiguration": {}, + "aps:ListScrapers": {}, + "aps:ListWorkspaces": {}, + "arc-zonal-shift:ListAutoshifts": {}, + 
"arc-zonal-shift:ListManagedResources": {}, + "arc-zonal-shift:ListZonalShifts": {}, + "arsenal:RegisterOnPremisesAgent": {}, + "artifact:GetAccountSettings": {}, + "artifact:ListReports": {}, + "artifact:PutAccountSettings": {}, + "athena:GetCatalogs": {}, + "athena:GetExecutionEngine": {}, + "athena:GetExecutionEngines": {}, + "athena:GetNamespace": {}, + "athena:GetNamespaces": {}, + "athena:GetQueryExecutions": {}, + "athena:GetTable": {}, + "athena:GetTables": {}, + "athena:ListApplicationDPUSizes": {}, + "athena:ListCapacityReservations": {}, + "athena:ListDataCatalogs": {}, + "athena:ListEngineVersions": {}, + "athena:ListExecutors": {}, + "athena:ListWorkGroups": {}, + "athena:RunQuery": {}, + "auditmanager:CreateAssessment": {}, + "auditmanager:CreateAssessmentFramework": {}, + "auditmanager:CreateControl": {}, + "auditmanager:DeleteAssessmentFrameworkShare": {}, + "auditmanager:DeregisterAccount": {}, + "auditmanager:DeregisterOrganizationAdminAccount": {}, + "auditmanager:GetAccountStatus": {}, + "auditmanager:GetDelegations": {}, + "auditmanager:GetEvidenceFileUploadUrl": {}, + "auditmanager:GetInsights": {}, + "auditmanager:GetInsightsByAssessment": {}, + "auditmanager:GetOrganizationAdminAccount": {}, + "auditmanager:GetServicesInScope": {}, + "auditmanager:GetSettings": {}, + "auditmanager:ListAssessmentControlInsightsByControlDomain": {}, + "auditmanager:ListAssessmentFrameworkShareRequests": {}, + "auditmanager:ListAssessmentFrameworks": {}, + "auditmanager:ListAssessmentReports": {}, + "auditmanager:ListAssessments": {}, + "auditmanager:ListControlDomainInsights": {}, + "auditmanager:ListControlDomainInsightsByAssessment": {}, + "auditmanager:ListControlInsightsByControlDomain": {}, + "auditmanager:ListControls": {}, + "auditmanager:ListKeywordsForDataSource": {}, + "auditmanager:ListNotifications": {}, + "auditmanager:RegisterAccount": {}, + "auditmanager:RegisterOrganizationAdminAccount": {}, + "auditmanager:UpdateAssessmentFrameworkShare": {}, + "auditmanager:UpdateSettings": {}, + "auditmanager:ValidateAssessmentReportIntegrity": {}, + "autoscaling-plans:CreateScalingPlan": {}, + "autoscaling-plans:DeleteScalingPlan": {}, + "autoscaling-plans:DescribeScalingPlanResources": {}, + "autoscaling-plans:DescribeScalingPlans": {}, + "autoscaling-plans:GetScalingPlanResourceForecastData": {}, + "autoscaling-plans:UpdateScalingPlan": {}, + "autoscaling:DescribeAccountLimits": {}, + "autoscaling:DescribeAdjustmentTypes": {}, + "autoscaling:DescribeAutoScalingGroups": {}, + "autoscaling:DescribeAutoScalingInstances": {}, + "autoscaling:DescribeAutoScalingNotificationTypes": {}, + "autoscaling:DescribeInstanceRefreshes": {}, + "autoscaling:DescribeLaunchConfigurations": {}, + "autoscaling:DescribeLifecycleHookTypes": {}, + "autoscaling:DescribeLifecycleHooks": {}, + "autoscaling:DescribeLoadBalancerTargetGroups": {}, + "autoscaling:DescribeLoadBalancers": {}, + "autoscaling:DescribeMetricCollectionTypes": {}, + "autoscaling:DescribeNotificationConfigurations": {}, + "autoscaling:DescribePolicies": {}, + "autoscaling:DescribeScalingActivities": {}, + "autoscaling:DescribeScalingProcessTypes": {}, + "autoscaling:DescribeScheduledActions": {}, + "autoscaling:DescribeTags": {}, + "autoscaling:DescribeTerminationPolicyTypes": {}, + "autoscaling:DescribeTrafficSources": {}, + "autoscaling:DescribeWarmPool": {}, + "autoscaling:GetPredictiveScalingForecast": {}, + "aws-marketplace-management:GetAdditionalSellerNotificationRecipients": {}, + 
"aws-marketplace-management:GetBankAccountVerificationDetails": {}, + "aws-marketplace-management:GetSecondaryUserVerificationDetails": {}, + "aws-marketplace-management:GetSellerVerificationDetails": {}, + "aws-marketplace-management:PutAdditionalSellerNotificationRecipients": {}, + "aws-marketplace-management:PutBankAccountVerificationDetails": {}, + "aws-marketplace-management:PutSecondaryUserVerificationDetails": {}, + "aws-marketplace-management:PutSellerVerificationDetails": {}, + "aws-marketplace-management:uploadFiles": {}, + "aws-marketplace-management:viewMarketing": {}, + "aws-marketplace-management:viewReports": {}, + "aws-marketplace-management:viewSettings": {}, + "aws-marketplace-management:viewSupport": {}, + "aws-marketplace:AcceptAgreementApprovalRequest": {}, + "aws-marketplace:AcceptAgreementRequest": {}, + "aws-marketplace:AssociateProductsWithPrivateMarketplace": {}, + "aws-marketplace:BatchMeterUsage": {}, + "aws-marketplace:CancelAgreement": {}, + "aws-marketplace:CancelAgreementRequest": {}, + "aws-marketplace:CompleteTask": {}, + "aws-marketplace:CreateAgreementRequest": {}, + "aws-marketplace:CreatePrivateMarketplaceRequests": {}, + "aws-marketplace:DescribeAgreement": {}, + "aws-marketplace:DescribeBuilds": {}, + "aws-marketplace:DescribePrivateMarketplaceRequests": {}, + "aws-marketplace:DescribeProcurementSystemConfiguration": {}, + "aws-marketplace:DescribeTask": {}, + "aws-marketplace:DisassociateProductsFromPrivateMarketplace": {}, + "aws-marketplace:GetAgreementApprovalRequest": {}, + "aws-marketplace:GetAgreementRequest": {}, + "aws-marketplace:GetAgreementTerms": {}, + "aws-marketplace:ListAgreementApprovalRequests": {}, + "aws-marketplace:ListAgreementRequests": {}, + "aws-marketplace:ListBuilds": {}, + "aws-marketplace:ListChangeSets": {}, + "aws-marketplace:ListEntities": {}, + "aws-marketplace:ListEntitlementDetails": {}, + "aws-marketplace:ListPrivateListings": {}, + "aws-marketplace:ListPrivateMarketplaceRequests": {}, + "aws-marketplace:ListTasks": {}, + "aws-marketplace:MeterUsage": {}, + "aws-marketplace:PutProcurementSystemConfiguration": {}, + "aws-marketplace:RegisterUsage": {}, + "aws-marketplace:RejectAgreementApprovalRequest": {}, + "aws-marketplace:ResolveCustomer": {}, + "aws-marketplace:SearchAgreements": {}, + "aws-marketplace:StartBuild": {}, + "aws-marketplace:Subscribe": {}, + "aws-marketplace:Unsubscribe": {}, + "aws-marketplace:UpdateAgreementApprovalRequest": {}, + "aws-marketplace:UpdateTask": {}, + "aws-marketplace:ViewSubscriptions": {}, + "aws-portal:GetConsoleActionSetEnforced": {}, + "aws-portal:ModifyAccount": {}, + "aws-portal:ModifyBilling": {}, + "aws-portal:ModifyPaymentMethods": {}, + "aws-portal:UpdateConsoleActionSetEnforced": {}, + "aws-portal:ViewAccount": {}, + "aws-portal:ViewBilling": {}, + "aws-portal:ViewPaymentMethods": {}, + "aws-portal:ViewUsage": {}, + "awsconnector:GetConnectorHealth": {}, + "awsconnector:RegisterConnector": {}, + "awsconnector:ValidateConnectorId": {}, + "b2bi:CreateProfile": {}, + "b2bi:CreateTransformer": {}, + "b2bi:ListCapabilities": {}, + "b2bi:ListPartnerships": {}, + "b2bi:ListProfiles": {}, + "b2bi:ListTransformers": {}, + "backup-gateway:CreateGateway": {}, + "backup-gateway:ImportHypervisorConfiguration": {}, + "backup-gateway:ListGateways": {}, + "backup-gateway:ListHypervisors": {}, + "backup-gateway:ListVirtualMachines": {}, + "backup-storage:CommitBackupJob": {}, + "backup-storage:DeleteObjects": {}, + "backup-storage:DescribeBackupJob": {}, + 
"backup-storage:GetBaseBackup": {}, + "backup-storage:GetChunk": {}, + "backup-storage:GetIncrementalBaseBackup": {}, + "backup-storage:GetObjectMetadata": {}, + "backup-storage:ListChunks": {}, + "backup-storage:ListObjects": {}, + "backup-storage:MountCapsule": {}, + "backup-storage:NotifyObjectComplete": {}, + "backup-storage:PutChunk": {}, + "backup-storage:PutObject": {}, + "backup-storage:StartObject": {}, + "backup-storage:UpdateObjectComplete": {}, + "backup:DescribeBackupJob": {}, + "backup:DescribeCopyJob": {}, + "backup:DescribeGlobalSettings": {}, + "backup:DescribeProtectedResource": {}, + "backup:DescribeRegionSettings": {}, + "backup:DescribeReportJob": {}, + "backup:DescribeRestoreJob": {}, + "backup:ExportBackupPlanTemplate": {}, + "backup:GetBackupPlanFromJSON": {}, + "backup:GetBackupPlanFromTemplate": {}, + "backup:GetRestoreJobMetadata": {}, + "backup:GetRestoreTestingInferredMetadata": {}, + "backup:GetSupportedResourceTypes": {}, + "backup:ListBackupJobSummaries": {}, + "backup:ListBackupJobs": {}, + "backup:ListBackupPlanTemplates": {}, + "backup:ListBackupPlans": {}, + "backup:ListBackupVaults": {}, + "backup:ListCopyJobSummaries": {}, + "backup:ListCopyJobs": {}, + "backup:ListFrameworks": {}, + "backup:ListLegalHolds": {}, + "backup:ListProtectedResources": {}, + "backup:ListRecoveryPointsByResource": {}, + "backup:ListReportJobs": {}, + "backup:ListReportPlans": {}, + "backup:ListRestoreJobSummaries": {}, + "backup:ListRestoreJobs": {}, + "backup:ListRestoreJobsByProtectedResource": {}, + "backup:ListRestoreTestingPlans": {}, + "backup:PutRestoreValidationResult": {}, + "backup:StopBackupJob": {}, + "backup:UpdateGlobalSettings": {}, + "backup:UpdateRegionSettings": {}, + "batch:DescribeComputeEnvironments": {}, + "batch:DescribeJobDefinitions": {}, + "batch:DescribeJobQueues": {}, + "batch:DescribeJobs": {}, + "batch:DescribeSchedulingPolicies": {}, + "batch:ListJobs": {}, + "batch:ListSchedulingPolicies": {}, + "bcm-data-exports:ListExports": {}, + "bcm-data-exports:ListTables": {}, + "bedrock:AssociateThirdPartyKnowledgeBase": {}, + "bedrock:CreateAgent": {}, + "bedrock:CreateFoundationModelAgreement": {}, + "bedrock:CreateGuardrail": {}, + "bedrock:CreateKnowledgeBase": {}, + "bedrock:DeleteFoundationModelAgreement": {}, + "bedrock:DeleteModelInvocationLoggingConfiguration": {}, + "bedrock:GetFoundationModelAvailability": {}, + "bedrock:GetModelInvocationLoggingConfiguration": {}, + "bedrock:GetUseCaseForModelAccess": {}, + "bedrock:ListAgents": {}, + "bedrock:ListCustomModels": {}, + "bedrock:ListFoundationModelAgreementOffers": {}, + "bedrock:ListFoundationModels": {}, + "bedrock:ListKnowledgeBases": {}, + "bedrock:ListModelCustomizationJobs": {}, + "bedrock:ListModelEvaluationJobs": {}, + "bedrock:ListModelInvocationJobs": {}, + "bedrock:ListProvisionedModelThroughputs": {}, + "bedrock:PutFoundationModelEntitlement": {}, + "bedrock:PutModelInvocationLoggingConfiguration": {}, + "bedrock:PutUseCaseForModelAccess": {}, + "bedrock:RetrieveAndGenerate": {}, + "billing:GetBillingData": {}, + "billing:GetBillingDetails": {}, + "billing:GetBillingNotifications": {}, + "billing:GetBillingPreferences": {}, + "billing:GetContractInformation": {}, + "billing:GetCredits": {}, + "billing:GetIAMAccessPreference": {}, + "billing:GetSellerOfRecord": {}, + "billing:ListBillingViews": {}, + "billing:PutContractInformation": {}, + "billing:RedeemCredits": {}, + "billing:UpdateBillingPreferences": {}, + "billing:UpdateIAMAccessPreference": {}, + 
"billingconductor:CreatePricingRule": {}, + "billingconductor:ListAccountAssociations": {}, + "billingconductor:ListBillingGroupCostReports": {}, + "billingconductor:ListBillingGroups": {}, + "billingconductor:ListCustomLineItems": {}, + "billingconductor:ListPricingPlans": {}, + "billingconductor:ListPricingRules": {}, + "braket:AcceptUserAgreement": {}, + "braket:AccessBraketFeature": {}, + "braket:CreateJob": {}, + "braket:CreateQuantumTask": {}, + "braket:GetDevice": {}, + "braket:GetServiceLinkedRoleStatus": {}, + "braket:GetUserAgreementStatus": {}, + "braket:SearchDevices": {}, + "braket:SearchJobs": {}, + "braket:SearchQuantumTasks": {}, + "budgets:DescribeBudgetActionsForAccount": {}, + "bugbust:CreateEvent": {}, + "bugbust:ListEvents": {}, + "cases:CreateDomain": {}, + "cases:ListDomains": {}, + "cases:ListTagsForResource": {}, + "ce:CreateAnomalyMonitor": {}, + "ce:CreateAnomalySubscription": {}, + "ce:CreateCostCategoryDefinition": {}, + "ce:CreateNotificationSubscription": {}, + "ce:CreateReport": {}, + "ce:DeleteNotificationSubscription": {}, + "ce:DeleteReport": {}, + "ce:DescribeNotificationSubscription": {}, + "ce:DescribeReport": {}, + "ce:GetApproximateUsageRecords": {}, + "ce:GetConsoleActionSetEnforced": {}, + "ce:GetCostAndUsage": {}, + "ce:GetCostAndUsageWithResources": {}, + "ce:GetCostCategories": {}, + "ce:GetCostForecast": {}, + "ce:GetDimensionValues": {}, + "ce:GetPreferences": {}, + "ce:GetReservationCoverage": {}, + "ce:GetReservationPurchaseRecommendation": {}, + "ce:GetReservationUtilization": {}, + "ce:GetRightsizingRecommendation": {}, + "ce:GetSavingsPlanPurchaseRecommendationDetails": {}, + "ce:GetSavingsPlansCoverage": {}, + "ce:GetSavingsPlansPurchaseRecommendation": {}, + "ce:GetSavingsPlansUtilization": {}, + "ce:GetSavingsPlansUtilizationDetails": {}, + "ce:GetTags": {}, + "ce:GetUsageForecast": {}, + "ce:ListCostAllocationTags": {}, + "ce:ListCostCategoryDefinitions": {}, + "ce:ListSavingsPlansPurchaseRecommendationGeneration": {}, + "ce:ProvideAnomalyFeedback": {}, + "ce:StartSavingsPlansPurchaseRecommendationGeneration": {}, + "ce:UpdateConsoleActionSetEnforced": {}, + "ce:UpdateCostAllocationTagsStatus": {}, + "ce:UpdateNotificationSubscription": {}, + "ce:UpdatePreferences": {}, + "ce:UpdateReport": {}, + "chatbot:CreateChimeWebhookConfiguration": {}, + "chatbot:CreateMicrosoftTeamsChannelConfiguration": {}, + "chatbot:CreateSlackChannelConfiguration": {}, + "chatbot:DeleteMicrosoftTeamsChannelConfiguration": {}, + "chatbot:DeleteMicrosoftTeamsConfiguredTeam": {}, + "chatbot:DeleteMicrosoftTeamsUserIdentity": {}, + "chatbot:DeleteSlackUserIdentity": {}, + "chatbot:DeleteSlackWorkspaceAuthorization": {}, + "chatbot:DescribeChimeWebhookConfigurations": {}, + "chatbot:DescribeSlackChannelConfigurations": {}, + "chatbot:DescribeSlackChannels": {}, + "chatbot:DescribeSlackUserIdentities": {}, + "chatbot:DescribeSlackWorkspaces": {}, + "chatbot:GetAccountPreferences": {}, + "chatbot:GetMicrosoftTeamsChannelConfiguration": {}, + "chatbot:GetMicrosoftTeamsOauthParameters": {}, + "chatbot:GetSlackOauthParameters": {}, + "chatbot:ListMicrosoftTeamsChannelConfigurations": {}, + "chatbot:ListMicrosoftTeamsConfiguredTeams": {}, + "chatbot:ListMicrosoftTeamsUserIdentities": {}, + "chatbot:RedeemMicrosoftTeamsOauthCode": {}, + "chatbot:RedeemSlackOauthCode": {}, + "chatbot:UpdateAccountPreferences": {}, + "chatbot:UpdateMicrosoftTeamsChannelConfiguration": {}, + "chime:AcceptDelegate": {}, + "chime:ActivateUsers": {}, + "chime:AddDomain": {}, + 
"chime:AddOrUpdateGroups": {}, + "chime:AssociatePhoneNumberWithUser": {}, + "chime:AssociatePhoneNumbersWithVoiceConnectorGroup": {}, + "chime:AssociateSigninDelegateGroupsWithAccount": {}, + "chime:AuthorizeDirectory": {}, + "chime:BatchCreateRoomMembership": {}, + "chime:BatchDeletePhoneNumber": {}, + "chime:BatchSuspendUser": {}, + "chime:BatchUnsuspendUser": {}, + "chime:BatchUpdatePhoneNumber": {}, + "chime:BatchUpdateUser": {}, + "chime:ConnectDirectory": {}, + "chime:CreateAccount": {}, + "chime:CreateApiKey": {}, + "chime:CreateAppInstance": {}, + "chime:CreateAppInstanceBot": {}, + "chime:CreateAppInstanceUser": {}, + "chime:CreateBot": {}, + "chime:CreateCDRBucket": {}, + "chime:CreateMediaCapturePipeline": {}, + "chime:CreateMediaConcatenationPipeline": {}, + "chime:CreateMediaInsightsPipelineConfiguration": {}, + "chime:CreateMediaLiveConnectorPipeline": {}, + "chime:CreateMediaPipelineKinesisVideoStreamPool": {}, + "chime:CreateMeeting": {}, + "chime:CreateMeetingWithAttendees": {}, + "chime:CreatePhoneNumberOrder": {}, + "chime:CreateRoom": {}, + "chime:CreateRoomMembership": {}, + "chime:CreateSipMediaApplication": {}, + "chime:CreateUser": {}, + "chime:CreateVoiceConnector": {}, + "chime:CreateVoiceProfile": {}, + "chime:CreateVoiceProfileDomain": {}, + "chime:DeleteAccount": {}, + "chime:DeleteAccountOpenIdConfig": {}, + "chime:DeleteApiKey": {}, + "chime:DeleteCDRBucket": {}, + "chime:DeleteDelegate": {}, + "chime:DeleteDomain": {}, + "chime:DeleteEventsConfiguration": {}, + "chime:DeleteGroups": {}, + "chime:DeletePhoneNumber": {}, + "chime:DeleteRoom": {}, + "chime:DeleteRoomMembership": {}, + "chime:DeleteSipRule": {}, + "chime:DeleteVoiceConnectorGroup": {}, + "chime:DisassociatePhoneNumberFromUser": {}, + "chime:DisassociatePhoneNumbersFromVoiceConnectorGroup": {}, + "chime:DisassociateSigninDelegateGroupsFromAccount": {}, + "chime:DisconnectDirectory": {}, + "chime:GetAccount": {}, + "chime:GetAccountResource": {}, + "chime:GetAccountSettings": {}, + "chime:GetAccountWithOpenIdConfig": {}, + "chime:GetBot": {}, + "chime:GetCDRBucket": {}, + "chime:GetDomain": {}, + "chime:GetEventsConfiguration": {}, + "chime:GetGlobalSettings": {}, + "chime:GetMeetingDetail": {}, + "chime:GetMessagingSessionEndpoint": {}, + "chime:GetPhoneNumber": {}, + "chime:GetPhoneNumberOrder": {}, + "chime:GetPhoneNumberSettings": {}, + "chime:GetRetentionSettings": {}, + "chime:GetRoom": {}, + "chime:GetSipRule": {}, + "chime:GetTelephonyLimits": {}, + "chime:GetUser": {}, + "chime:GetUserActivityReportData": {}, + "chime:GetUserByEmail": {}, + "chime:GetUserSettings": {}, + "chime:GetVoiceConnectorGroup": {}, + "chime:InviteDelegate": {}, + "chime:InviteUsers": {}, + "chime:InviteUsersFromProvider": {}, + "chime:ListAccountUsageReportData": {}, + "chime:ListAccounts": {}, + "chime:ListApiKeys": {}, + "chime:ListAvailableVoiceConnectorRegions": {}, + "chime:ListBots": {}, + "chime:ListCDRBucket": {}, + "chime:ListCallingRegions": {}, + "chime:ListDelegates": {}, + "chime:ListDirectories": {}, + "chime:ListDomains": {}, + "chime:ListGroups": {}, + "chime:ListMediaCapturePipelines": {}, + "chime:ListMediaInsightsPipelineConfigurations": {}, + "chime:ListMediaPipelineKinesisVideoStreamPools": {}, + "chime:ListMediaPipelines": {}, + "chime:ListMeetingEvents": {}, + "chime:ListMeetings": {}, + "chime:ListMeetingsReportData": {}, + "chime:ListPhoneNumberOrders": {}, + "chime:ListPhoneNumbers": {}, + "chime:ListRoomMemberships": {}, + "chime:ListRooms": {}, + "chime:ListSipMediaApplications": {}, + 
"chime:ListSupportedPhoneNumberCountries": {}, + "chime:ListUsers": {}, + "chime:ListVoiceConnectorGroups": {}, + "chime:ListVoiceConnectors": {}, + "chime:ListVoiceProfileDomains": {}, + "chime:LogoutUser": {}, + "chime:PutEventsConfiguration": {}, + "chime:PutRetentionSettings": {}, + "chime:RedactConversationMessage": {}, + "chime:RedactRoomMessage": {}, + "chime:RegenerateSecurityToken": {}, + "chime:RenameAccount": {}, + "chime:RenewDelegate": {}, + "chime:ResetAccountResource": {}, + "chime:ResetPersonalPIN": {}, + "chime:RestorePhoneNumber": {}, + "chime:RetrieveDataExports": {}, + "chime:SearchAvailablePhoneNumbers": {}, + "chime:StartDataExport": {}, + "chime:StartMeetingTranscription": {}, + "chime:StopMeetingTranscription": {}, + "chime:SubmitSupportRequest": {}, + "chime:SuspendUsers": {}, + "chime:UnauthorizeDirectory": {}, + "chime:UpdateAccount": {}, + "chime:UpdateAccountOpenIdConfig": {}, + "chime:UpdateAccountResource": {}, + "chime:UpdateAccountSettings": {}, + "chime:UpdateBot": {}, + "chime:UpdateCDRSettings": {}, + "chime:UpdateGlobalSettings": {}, + "chime:UpdatePhoneNumber": {}, + "chime:UpdatePhoneNumberSettings": {}, + "chime:UpdateRoom": {}, + "chime:UpdateRoomMembership": {}, + "chime:UpdateSupportedLicenses": {}, + "chime:UpdateUser": {}, + "chime:UpdateUserLicenses": {}, + "chime:UpdateUserSettings": {}, + "chime:ValidateAccountResource": {}, + "chime:ValidateE911Address": {}, + "cleanrooms-ml:CreateTrainingDataset": {}, + "cleanrooms-ml:ListAudienceModels": {}, + "cleanrooms-ml:ListConfiguredAudienceModels": {}, + "cleanrooms-ml:ListTrainingDatasets": {}, + "cleanrooms:ListCollaborations": {}, + "cleanrooms:ListConfiguredTables": {}, + "cleanrooms:ListMemberships": {}, + "cloud9:CreateEnvironmentEC2": {}, + "cloud9:CreateEnvironmentSSH": {}, + "cloud9:GetMigrationExperiences": {}, + "cloud9:GetUserPublicKey": {}, + "cloud9:GetUserSettings": {}, + "cloud9:ListEnvironments": {}, + "cloud9:UpdateUserSettings": {}, + "cloud9:ValidateEnvironmentName": {}, + "clouddirectory:CreateSchema": {}, + "clouddirectory:ListDevelopmentSchemaArns": {}, + "clouddirectory:ListDirectories": {}, + "clouddirectory:ListManagedSchemaArns": {}, + "clouddirectory:ListPublishedSchemaArns": {}, + "clouddirectory:PutSchemaFromJson": {}, + "cloudformation:ActivateOrganizationsAccess": {}, + "cloudformation:ActivateType": {}, + "cloudformation:BatchDescribeTypeConfigurations": {}, + "cloudformation:CancelResourceRequest": {}, + "cloudformation:CreateResource": {}, + "cloudformation:CreateStackSet": {}, + "cloudformation:CreateUploadBucket": {}, + "cloudformation:DeactivateOrganizationsAccess": {}, + "cloudformation:DeactivateType": {}, + "cloudformation:DeleteResource": {}, + "cloudformation:DeregisterType": {}, + "cloudformation:DescribeAccountLimits": {}, + "cloudformation:DescribeOrganizationsAccess": {}, + "cloudformation:DescribePublisher": {}, + "cloudformation:DescribeStackDriftDetectionStatus": {}, + "cloudformation:DescribeType": {}, + "cloudformation:DescribeTypeRegistration": {}, + "cloudformation:EstimateTemplateCost": {}, + "cloudformation:GetResource": {}, + "cloudformation:GetResourceRequestStatus": {}, + "cloudformation:ListExports": {}, + "cloudformation:ListImports": {}, + "cloudformation:ListResourceRequests": {}, + "cloudformation:ListResources": {}, + "cloudformation:ListStackSets": {}, + "cloudformation:ListStacks": {}, + "cloudformation:ListTypeRegistrations": {}, + "cloudformation:ListTypeVersions": {}, + "cloudformation:ListTypes": {}, + 
"cloudformation:PublishType": {}, + "cloudformation:RegisterPublisher": {}, + "cloudformation:RegisterType": {}, + "cloudformation:SetTypeConfiguration": {}, + "cloudformation:SetTypeDefaultVersion": {}, + "cloudformation:TestType": {}, + "cloudformation:UpdateResource": {}, + "cloudformation:ValidateTemplate": {}, + "cloudfront:CreateFieldLevelEncryptionConfig": {}, + "cloudfront:CreateFieldLevelEncryptionProfile": {}, + "cloudfront:CreateKeyGroup": {}, + "cloudfront:CreateMonitoringSubscription": {}, + "cloudfront:CreateOriginAccessControl": {}, + "cloudfront:CreatePublicKey": {}, + "cloudfront:CreateSavingsPlan": {}, + "cloudfront:DeleteKeyGroup": {}, + "cloudfront:DeleteMonitoringSubscription": {}, + "cloudfront:DeletePublicKey": {}, + "cloudfront:GetKeyGroup": {}, + "cloudfront:GetKeyGroupConfig": {}, + "cloudfront:GetMonitoringSubscription": {}, + "cloudfront:GetPublicKey": {}, + "cloudfront:GetPublicKeyConfig": {}, + "cloudfront:GetSavingsPlan": {}, + "cloudfront:ListCachePolicies": {}, + "cloudfront:ListCloudFrontOriginAccessIdentities": {}, + "cloudfront:ListContinuousDeploymentPolicies": {}, + "cloudfront:ListDistributions": {}, + "cloudfront:ListDistributionsByCachePolicyId": {}, + "cloudfront:ListDistributionsByKeyGroup": {}, + "cloudfront:ListDistributionsByLambdaFunction": {}, + "cloudfront:ListDistributionsByOriginRequestPolicyId": {}, + "cloudfront:ListDistributionsByRealtimeLogConfig": {}, + "cloudfront:ListDistributionsByResponseHeadersPolicyId": {}, + "cloudfront:ListDistributionsByWebACLId": {}, + "cloudfront:ListFieldLevelEncryptionConfigs": {}, + "cloudfront:ListFieldLevelEncryptionProfiles": {}, + "cloudfront:ListFunctions": {}, + "cloudfront:ListKeyGroups": {}, + "cloudfront:ListKeyValueStores": {}, + "cloudfront:ListOriginAccessControls": {}, + "cloudfront:ListOriginRequestPolicies": {}, + "cloudfront:ListPublicKeys": {}, + "cloudfront:ListRateCards": {}, + "cloudfront:ListRealtimeLogConfigs": {}, + "cloudfront:ListResponseHeadersPolicies": {}, + "cloudfront:ListSavingsPlans": {}, + "cloudfront:ListStreamingDistributions": {}, + "cloudfront:ListUsages": {}, + "cloudfront:UpdateFieldLevelEncryptionConfig": {}, + "cloudfront:UpdateKeyGroup": {}, + "cloudfront:UpdatePublicKey": {}, + "cloudfront:UpdateSavingsPlan": {}, + "cloudhsm:AddTagsToResource": {}, + "cloudhsm:CreateHapg": {}, + "cloudhsm:CreateLunaClient": {}, + "cloudhsm:DeleteHapg": {}, + "cloudhsm:DeleteHsm": {}, + "cloudhsm:DeleteLunaClient": {}, + "cloudhsm:DescribeBackups": {}, + "cloudhsm:DescribeClusters": {}, + "cloudhsm:DescribeHapg": {}, + "cloudhsm:DescribeHsm": {}, + "cloudhsm:DescribeLunaClient": {}, + "cloudhsm:GetConfig": {}, + "cloudhsm:ListAvailableZones": {}, + "cloudhsm:ListHapgs": {}, + "cloudhsm:ListHsms": {}, + "cloudhsm:ListLunaClients": {}, + "cloudhsm:ListTagsForResource": {}, + "cloudhsm:ModifyHapg": {}, + "cloudhsm:ModifyHsm": {}, + "cloudhsm:ModifyLunaClient": {}, + "cloudhsm:RemoveTagsFromResource": {}, + "cloudshell:CreateEnvironment": {}, + "cloudtrail:DeregisterOrganizationDelegatedAdmin": {}, + "cloudtrail:DescribeTrails": {}, + "cloudtrail:GetImport": {}, + "cloudtrail:ListChannels": {}, + "cloudtrail:ListEventDataStores": {}, + "cloudtrail:ListImportFailures": {}, + "cloudtrail:ListImports": {}, + "cloudtrail:ListPublicKeys": {}, + "cloudtrail:ListServiceLinkedChannels": {}, + "cloudtrail:ListTrails": {}, + "cloudtrail:LookupEvents": {}, + "cloudtrail:RegisterOrganizationDelegatedAdmin": {}, + "cloudtrail:StartImport": {}, + "cloudtrail:StopImport": {}, + 
"cloudwatch:BatchGetServiceLevelIndicatorReport": {}, + "cloudwatch:CreateServiceLevelObjective": {}, + "cloudwatch:DeleteAnomalyDetector": {}, + "cloudwatch:DescribeAlarmsForMetric": {}, + "cloudwatch:DescribeAnomalyDetectors": {}, + "cloudwatch:DescribeInsightRules": {}, + "cloudwatch:EnableTopologyDiscovery": {}, + "cloudwatch:GenerateQuery": {}, + "cloudwatch:GetMetricData": {}, + "cloudwatch:GetMetricStatistics": {}, + "cloudwatch:GetMetricWidgetImage": {}, + "cloudwatch:GetTopologyDiscoveryStatus": {}, + "cloudwatch:GetTopologyMap": {}, + "cloudwatch:Link": {}, + "cloudwatch:ListDashboards": {}, + "cloudwatch:ListManagedInsightRules": {}, + "cloudwatch:ListMetricStreams": {}, + "cloudwatch:ListMetrics": {}, + "cloudwatch:ListServiceLevelObjectives": {}, + "cloudwatch:ListServices": {}, + "cloudwatch:PutAnomalyDetector": {}, + "cloudwatch:PutManagedInsightRules": {}, + "cloudwatch:PutMetricData": {}, + "codeartifact:CreateDomain": {}, + "codeartifact:CreateRepository": {}, + "codeartifact:ListDomains": {}, + "codeartifact:ListRepositories": {}, + "codebuild:DeleteOAuthToken": {}, + "codebuild:DeleteSourceCredentials": {}, + "codebuild:ImportSourceCredentials": {}, + "codebuild:ListBuildBatches": {}, + "codebuild:ListBuilds": {}, + "codebuild:ListConnectedOAuthAccounts": {}, + "codebuild:ListCuratedEnvironmentImages": {}, + "codebuild:ListProjects": {}, + "codebuild:ListReportGroups": {}, + "codebuild:ListReports": {}, + "codebuild:ListRepositories": {}, + "codebuild:ListSharedProjects": {}, + "codebuild:ListSharedReportGroups": {}, + "codebuild:ListSourceCredentials": {}, + "codebuild:PersistOAuthToken": {}, + "codecatalyst:AcceptConnection": {}, + "codecatalyst:CreateIdentityCenterApplication": {}, + "codecatalyst:CreateSpace": {}, + "codecatalyst:GetPendingConnection": {}, + "codecatalyst:ListConnections": {}, + "codecatalyst:ListIdentityCenterApplications": {}, + "codecatalyst:ListIdentityCenterApplicationsForSpace": {}, + "codecatalyst:RejectConnection": {}, + "codecommit:CreateApprovalRuleTemplate": {}, + "codecommit:DeleteApprovalRuleTemplate": {}, + "codecommit:GetApprovalRuleTemplate": {}, + "codecommit:ListApprovalRuleTemplates": {}, + "codecommit:ListRepositories": {}, + "codecommit:ListRepositoriesForApprovalRuleTemplate": {}, + "codecommit:UpdateApprovalRuleTemplateContent": {}, + "codecommit:UpdateApprovalRuleTemplateDescription": {}, + "codecommit:UpdateApprovalRuleTemplateName": {}, + "codedeploy-commands-secure:GetDeploymentSpecification": {}, + "codedeploy-commands-secure:PollHostCommand": {}, + "codedeploy-commands-secure:PutHostCommandAcknowledgement": {}, + "codedeploy-commands-secure:PutHostCommandComplete": {}, + "codedeploy:BatchGetDeploymentTargets": {}, + "codedeploy:ContinueDeployment": {}, + "codedeploy:DeleteGitHubAccountToken": {}, + "codedeploy:DeleteResourcesByExternalId": {}, + "codedeploy:GetDeploymentTarget": {}, + "codedeploy:ListApplications": {}, + "codedeploy:ListDeploymentConfigs": {}, + "codedeploy:ListDeploymentTargets": {}, + "codedeploy:ListGitHubAccountTokenNames": {}, + "codedeploy:ListOnPremisesInstances": {}, + "codedeploy:PutLifecycleEventHookExecutionStatus": {}, + "codedeploy:SkipWaitTimeForInstanceTermination": {}, + "codedeploy:StopDeployment": {}, + "codeguru-profiler:CreateProfilingGroup": {}, + "codeguru-profiler:GetFindingsReportAccountSummary": {}, + "codeguru-profiler:ListProfilingGroups": {}, + "codeguru-reviewer:AssociateRepository": {}, + "codeguru-reviewer:CreateConnectionToken": {}, + "codeguru-reviewer:GetMetricsData": 
{}, + "codeguru-reviewer:ListCodeReviews": {}, + "codeguru-reviewer:ListRepositoryAssociations": {}, + "codeguru-reviewer:ListThirdPartyRepositories": {}, + "codeguru-security:DeleteScansByCategory": {}, + "codeguru-security:GetAccountConfiguration": {}, + "codeguru-security:GetMetricsSummary": {}, + "codeguru-security:ListFindings": {}, + "codeguru-security:ListFindingsMetrics": {}, + "codeguru-security:ListScans": {}, + "codeguru-security:UpdateAccountConfiguration": {}, + "codeguru:GetCodeGuruFreeTrialSummary": {}, + "codepipeline:AcknowledgeJob": {}, + "codepipeline:AcknowledgeThirdPartyJob": {}, + "codepipeline:GetActionType": {}, + "codepipeline:GetJobDetails": {}, + "codepipeline:GetThirdPartyJobDetails": {}, + "codepipeline:ListActionTypes": {}, + "codepipeline:ListPipelines": {}, + "codepipeline:PollForThirdPartyJobs": {}, + "codepipeline:PutJobFailureResult": {}, + "codepipeline:PutJobSuccessResult": {}, + "codepipeline:PutThirdPartyJobFailureResult": {}, + "codepipeline:PutThirdPartyJobSuccessResult": {}, + "codestar-connections:CreateConnection": {}, + "codestar-connections:CreateHost": {}, + "codestar-connections:DeleteSyncConfiguration": {}, + "codestar-connections:GetIndividualAccessToken": {}, + "codestar-connections:GetInstallationUrl": {}, + "codestar-connections:GetResourceSyncStatus": {}, + "codestar-connections:GetSyncBlockerSummary": {}, + "codestar-connections:GetSyncConfiguration": {}, + "codestar-connections:ListHosts": {}, + "codestar-connections:ListInstallationTargets": {}, + "codestar-connections:ListRepositoryLinks": {}, + "codestar-connections:ListRepositorySyncDefinitions": {}, + "codestar-connections:ListSyncConfigurations": {}, + "codestar-connections:RegisterAppCode": {}, + "codestar-connections:StartAppRegistrationHandshake": {}, + "codestar-connections:StartOAuthHandshake": {}, + "codestar-connections:UpdateSyncBlocker": {}, + "codestar-connections:UpdateSyncConfiguration": {}, + "codestar-notifications:DeleteTarget": {}, + "codestar-notifications:ListEventTypes": {}, + "codestar-notifications:ListNotificationRules": {}, + "codestar-notifications:ListTargets": {}, + "codestar:CreateProject": {}, + "codestar:DescribeUserProfile": {}, + "codestar:ListProjects": {}, + "codestar:ListUserProfiles": {}, + "codewhisperer:GenerateRecommendations": {}, + "codewhisperer:ListProfiles": {}, + "cognito-identity:CreateIdentityPool": {}, + "cognito-identity:DeleteIdentities": {}, + "cognito-identity:DescribeIdentity": {}, + "cognito-identity:GetCredentialsForIdentity": {}, + "cognito-identity:GetId": {}, + "cognito-identity:GetOpenIdToken": {}, + "cognito-identity:ListIdentityPools": {}, + "cognito-identity:SetIdentityPoolRoles": {}, + "cognito-identity:SetPrincipalTagAttributeMap": {}, + "cognito-identity:UnlinkIdentity": {}, + "cognito-idp:AssociateSoftwareToken": {}, + "cognito-idp:ChangePassword": {}, + "cognito-idp:ConfirmDevice": {}, + "cognito-idp:ConfirmForgotPassword": {}, + "cognito-idp:ConfirmSignUp": {}, + "cognito-idp:CreateUserPool": {}, + "cognito-idp:DeleteUser": {}, + "cognito-idp:DeleteUserAttributes": {}, + "cognito-idp:DescribeUserPoolDomain": {}, + "cognito-idp:ForgetDevice": {}, + "cognito-idp:ForgotPassword": {}, + "cognito-idp:GetDevice": {}, + "cognito-idp:GetUser": {}, + "cognito-idp:GetUserAttributeVerificationCode": {}, + "cognito-idp:GlobalSignOut": {}, + "cognito-idp:InitiateAuth": {}, + "cognito-idp:ListDevices": {}, + "cognito-idp:ListUserPools": {}, + "cognito-idp:ResendConfirmationCode": {}, + "cognito-idp:RespondToAuthChallenge": {}, 
+ "cognito-idp:RevokeToken": {}, + "cognito-idp:SetUserMFAPreference": {}, + "cognito-idp:SetUserSettings": {}, + "cognito-idp:SignUp": {}, + "cognito-idp:UpdateDeviceStatus": {}, + "cognito-idp:UpdateUserAttributes": {}, + "cognito-idp:VerifySoftwareToken": {}, + "cognito-idp:VerifyUserAttribute": {}, + "comprehend:BatchDetectDominantLanguage": {}, + "comprehend:BatchDetectEntities": {}, + "comprehend:BatchDetectKeyPhrases": {}, + "comprehend:BatchDetectSentiment": {}, + "comprehend:BatchDetectSyntax": {}, + "comprehend:BatchDetectTargetedSentiment": {}, + "comprehend:ContainsPiiEntities": {}, + "comprehend:DetectDominantLanguage": {}, + "comprehend:DetectKeyPhrases": {}, + "comprehend:DetectPiiEntities": {}, + "comprehend:DetectSentiment": {}, + "comprehend:DetectSyntax": {}, + "comprehend:DetectTargetedSentiment": {}, + "comprehend:DetectToxicContent": {}, + "comprehend:ListDocumentClassificationJobs": {}, + "comprehend:ListDocumentClassifierSummaries": {}, + "comprehend:ListDocumentClassifiers": {}, + "comprehend:ListDominantLanguageDetectionJobs": {}, + "comprehend:ListEndpoints": {}, + "comprehend:ListEntitiesDetectionJobs": {}, + "comprehend:ListEntityRecognizerSummaries": {}, + "comprehend:ListEntityRecognizers": {}, + "comprehend:ListEventsDetectionJobs": {}, + "comprehend:ListFlywheels": {}, + "comprehend:ListKeyPhrasesDetectionJobs": {}, + "comprehend:ListPiiEntitiesDetectionJobs": {}, + "comprehend:ListSentimentDetectionJobs": {}, + "comprehend:ListTargetedSentimentDetectionJobs": {}, + "comprehend:ListTopicsDetectionJobs": {}, + "comprehendmedical:DescribeEntitiesDetectionV2Job": {}, + "comprehendmedical:DescribeICD10CMInferenceJob": {}, + "comprehendmedical:DescribePHIDetectionJob": {}, + "comprehendmedical:DescribeRxNormInferenceJob": {}, + "comprehendmedical:DescribeSNOMEDCTInferenceJob": {}, + "comprehendmedical:DetectEntitiesV2": {}, + "comprehendmedical:DetectPHI": {}, + "comprehendmedical:InferICD10CM": {}, + "comprehendmedical:InferRxNorm": {}, + "comprehendmedical:InferSNOMEDCT": {}, + "comprehendmedical:ListEntitiesDetectionV2Jobs": {}, + "comprehendmedical:ListICD10CMInferenceJobs": {}, + "comprehendmedical:ListPHIDetectionJobs": {}, + "comprehendmedical:ListRxNormInferenceJobs": {}, + "comprehendmedical:ListSNOMEDCTInferenceJobs": {}, + "comprehendmedical:StartEntitiesDetectionV2Job": {}, + "comprehendmedical:StartICD10CMInferenceJob": {}, + "comprehendmedical:StartPHIDetectionJob": {}, + "comprehendmedical:StartRxNormInferenceJob": {}, + "comprehendmedical:StartSNOMEDCTInferenceJob": {}, + "comprehendmedical:StopEntitiesDetectionV2Job": {}, + "comprehendmedical:StopICD10CMInferenceJob": {}, + "comprehendmedical:StopPHIDetectionJob": {}, + "comprehendmedical:StopRxNormInferenceJob": {}, + "comprehendmedical:StopSNOMEDCTInferenceJob": {}, + "compute-optimizer:DeleteRecommendationPreferences": {}, + "compute-optimizer:DescribeRecommendationExportJobs": {}, + "compute-optimizer:ExportAutoScalingGroupRecommendations": {}, + "compute-optimizer:ExportEBSVolumeRecommendations": {}, + "compute-optimizer:ExportEC2InstanceRecommendations": {}, + "compute-optimizer:ExportECSServiceRecommendations": {}, + "compute-optimizer:ExportLambdaFunctionRecommendations": {}, + "compute-optimizer:ExportLicenseRecommendations": {}, + "compute-optimizer:GetAutoScalingGroupRecommendations": {}, + "compute-optimizer:GetEBSVolumeRecommendations": {}, + "compute-optimizer:GetEC2InstanceRecommendations": {}, + "compute-optimizer:GetEC2RecommendationProjectedMetrics": {}, + 
"compute-optimizer:GetECSServiceRecommendationProjectedMetrics": {}, + "compute-optimizer:GetECSServiceRecommendations": {}, + "compute-optimizer:GetEffectiveRecommendationPreferences": {}, + "compute-optimizer:GetEnrollmentStatus": {}, + "compute-optimizer:GetEnrollmentStatusesForOrganization": {}, + "compute-optimizer:GetLambdaFunctionRecommendations": {}, + "compute-optimizer:GetLicenseRecommendations": {}, + "compute-optimizer:GetRecommendationPreferences": {}, + "compute-optimizer:GetRecommendationSummaries": {}, + "compute-optimizer:PutRecommendationPreferences": {}, + "compute-optimizer:UpdateEnrollmentStatus": {}, + "config:BatchGetResourceConfig": {}, + "config:DeleteConfigurationRecorder": {}, + "config:DeleteDeliveryChannel": {}, + "config:DeletePendingAggregationRequest": {}, + "config:DeleteRemediationExceptions": {}, + "config:DeleteResourceConfig": {}, + "config:DeleteRetentionConfiguration": {}, + "config:DeliverConfigSnapshot": {}, + "config:DescribeAggregationAuthorizations": {}, + "config:DescribeComplianceByConfigRule": {}, + "config:DescribeComplianceByResource": {}, + "config:DescribeConfigRuleEvaluationStatus": {}, + "config:DescribeConfigRules": {}, + "config:DescribeConfigurationAggregators": {}, + "config:DescribeConfigurationRecorderStatus": {}, + "config:DescribeConfigurationRecorders": {}, + "config:DescribeConformancePackStatus": {}, + "config:DescribeConformancePacks": {}, + "config:DescribeDeliveryChannelStatus": {}, + "config:DescribeDeliveryChannels": {}, + "config:DescribeOrganizationConfigRuleStatuses": {}, + "config:DescribeOrganizationConfigRules": {}, + "config:DescribeOrganizationConformancePackStatuses": {}, + "config:DescribeOrganizationConformancePacks": {}, + "config:DescribePendingAggregationRequests": {}, + "config:DescribeRemediationExceptions": {}, + "config:DescribeRetentionConfigurations": {}, + "config:GetComplianceDetailsByResource": {}, + "config:GetComplianceSummaryByConfigRule": {}, + "config:GetComplianceSummaryByResourceType": {}, + "config:GetDiscoveredResourceCounts": {}, + "config:GetResourceConfigHistory": {}, + "config:GetResourceEvaluationSummary": {}, + "config:ListConformancePackComplianceScores": {}, + "config:ListDiscoveredResources": {}, + "config:ListResourceEvaluations": {}, + "config:ListStoredQueries": {}, + "config:PutConfigurationRecorder": {}, + "config:PutDeliveryChannel": {}, + "config:PutEvaluations": {}, + "config:PutRemediationExceptions": {}, + "config:PutResourceConfig": {}, + "config:PutRetentionConfiguration": {}, + "config:SelectResourceConfig": {}, + "config:StartConfigurationRecorder": {}, + "config:StartRemediationExecution": {}, + "config:StartResourceEvaluation": {}, + "config:StopConfigurationRecorder": {}, + "connect-campaigns:DeleteConnectInstanceConfig": {}, + "connect-campaigns:DeleteInstanceOnboardingJob": {}, + "connect-campaigns:GetConnectInstanceConfig": {}, + "connect-campaigns:GetInstanceOnboardingJobStatus": {}, + "connect-campaigns:ListCampaigns": {}, + "connect-campaigns:StartInstanceOnboardingJob": {}, + "connect:CreateInstance": {}, + "connect:ListInstances": {}, + "connect:SendChatIntegrationEvent": {}, + "consoleapp:ListDeviceIdentities": {}, + "consolidatedbilling:GetAccountBillingRole": {}, + "consolidatedbilling:ListLinkedAccounts": {}, + "controltower:CreateLandingZone": {}, + "controltower:CreateManagedAccount": {}, + "controltower:DeregisterManagedAccount": {}, + "controltower:DeregisterOrganizationalUnit": {}, + "controltower:DescribeAccountFactoryConfig": {}, + 
"controltower:DescribeCoreService": {}, + "controltower:DescribeGuardrail": {}, + "controltower:DescribeGuardrailForTarget": {}, + "controltower:DescribeLandingZoneConfiguration": {}, + "controltower:DescribeManagedAccount": {}, + "controltower:DescribeManagedOrganizationalUnit": {}, + "controltower:DescribeRegisterOrganizationalUnitOperation": {}, + "controltower:DescribeSingleSignOn": {}, + "controltower:DisableGuardrail": {}, + "controltower:EnableGuardrail": {}, + "controltower:GetAccountInfo": {}, + "controltower:GetAvailableUpdates": {}, + "controltower:GetControlOperation": {}, + "controltower:GetGuardrailComplianceStatus": {}, + "controltower:GetHomeRegion": {}, + "controltower:GetLandingZoneDriftStatus": {}, + "controltower:GetLandingZoneOperation": {}, + "controltower:GetLandingZoneStatus": {}, + "controltower:ListDirectoryGroups": {}, + "controltower:ListDriftDetails": {}, + "controltower:ListEnabledControls": {}, + "controltower:ListEnabledGuardrails": {}, + "controltower:ListExtendGovernancePrecheckDetails": {}, + "controltower:ListExternalConfigRuleCompliance": {}, + "controltower:ListGuardrailViolations": {}, + "controltower:ListGuardrails": {}, + "controltower:ListGuardrailsForTarget": {}, + "controltower:ListLandingZones": {}, + "controltower:ListManagedAccounts": {}, + "controltower:ListManagedAccountsForGuardrail": {}, + "controltower:ListManagedAccountsForParent": {}, + "controltower:ListManagedOrganizationalUnits": {}, + "controltower:ListManagedOrganizationalUnitsForGuardrail": {}, + "controltower:ManageOrganizationalUnit": {}, + "controltower:PerformPreLaunchChecks": {}, + "controltower:SetupLandingZone": {}, + "controltower:UpdateAccountFactoryConfig": {}, + "cost-optimization-hub:GetPreferences": {}, + "cost-optimization-hub:GetRecommendation": {}, + "cost-optimization-hub:ListEnrollmentStatuses": {}, + "cost-optimization-hub:ListRecommendationSummaries": {}, + "cost-optimization-hub:ListRecommendations": {}, + "cost-optimization-hub:UpdateEnrollmentStatus": {}, + "cost-optimization-hub:UpdatePreferences": {}, + "cur:DescribeReportDefinitions": {}, + "cur:GetClassicReport": {}, + "cur:GetClassicReportPreferences": {}, + "cur:GetUsageReport": {}, + "cur:PutClassicReportPreferences": {}, + "cur:ValidateReportDestination": {}, + "customer-verification:CreateCustomerVerificationDetails": {}, + "customer-verification:GetCustomerVerificationDetails": {}, + "customer-verification:GetCustomerVerificationEligibility": {}, + "customer-verification:UpdateCustomerVerificationDetails": {}, + "databrew:CreateDataset": {}, + "databrew:CreateProfileJob": {}, + "databrew:CreateProject": {}, + "databrew:CreateRecipe": {}, + "databrew:CreateRecipeJob": {}, + "databrew:CreateRuleset": {}, + "databrew:CreateSchedule": {}, + "databrew:ListDatasets": {}, + "databrew:ListJobs": {}, + "databrew:ListProjects": {}, + "databrew:ListRecipes": {}, + "databrew:ListRulesets": {}, + "databrew:ListSchedules": {}, + "dataexchange:CreateDataSet": {}, + "dataexchange:CreateEventAction": {}, + "dataexchange:CreateJob": {}, + "dataexchange:ListDataSets": {}, + "dataexchange:ListEventActions": {}, + "dataexchange:ListJobs": {}, + "datapipeline:CreatePipeline": {}, + "datapipeline:GetAccountLimits": {}, + "datapipeline:ListPipelines": {}, + "datapipeline:PollForTask": {}, + "datapipeline:PutAccountLimits": {}, + "datapipeline:ReportTaskRunnerHeartbeat": {}, + "datasync:CreateAgent": {}, + "datasync:CreateLocationAzureBlob": {}, + "datasync:CreateLocationEfs": {}, + "datasync:CreateLocationFsxLustre": {}, 
+ "datasync:CreateLocationFsxOntap": {}, + "datasync:CreateLocationFsxOpenZfs": {}, + "datasync:CreateLocationFsxWindows": {}, + "datasync:CreateLocationHdfs": {}, + "datasync:CreateLocationNfs": {}, + "datasync:CreateLocationObjectStorage": {}, + "datasync:CreateLocationS3": {}, + "datasync:CreateLocationSmb": {}, + "datasync:ListAgents": {}, + "datasync:ListDiscoveryJobs": {}, + "datasync:ListLocations": {}, + "datasync:ListStorageSystems": {}, + "datasync:ListTaskExecutions": {}, + "datasync:ListTasks": {}, + "datazone:AcceptPredictions": {}, + "datazone:AcceptSubscriptionRequest": {}, + "datazone:CancelSubscription": {}, + "datazone:CreateAsset": {}, + "datazone:CreateAssetRevision": {}, + "datazone:CreateAssetType": {}, + "datazone:CreateDataSource": {}, + "datazone:CreateDomain": {}, + "datazone:CreateEnvironment": {}, + "datazone:CreateEnvironmentBlueprint": {}, + "datazone:CreateEnvironmentProfile": {}, + "datazone:CreateFormType": {}, + "datazone:CreateGlossary": {}, + "datazone:CreateGlossaryTerm": {}, + "datazone:CreateGroupProfile": {}, + "datazone:CreateListingChangeSet": {}, + "datazone:CreateProject": {}, + "datazone:CreateProjectMembership": {}, + "datazone:CreateSubscriptionGrant": {}, + "datazone:CreateSubscriptionRequest": {}, + "datazone:CreateSubscriptionTarget": {}, + "datazone:CreateUserProfile": {}, + "datazone:DeleteAsset": {}, + "datazone:DeleteAssetType": {}, + "datazone:DeleteDataSource": {}, + "datazone:DeleteDomainSharingPolicy": {}, + "datazone:DeleteEnvironment": {}, + "datazone:DeleteEnvironmentBlueprint": {}, + "datazone:DeleteEnvironmentBlueprintConfiguration": {}, + "datazone:DeleteEnvironmentProfile": {}, + "datazone:DeleteFormType": {}, + "datazone:DeleteGlossary": {}, + "datazone:DeleteGlossaryTerm": {}, + "datazone:DeleteListing": {}, + "datazone:DeleteProject": {}, + "datazone:DeleteProjectMembership": {}, + "datazone:DeleteSubscriptionGrant": {}, + "datazone:DeleteSubscriptionRequest": {}, + "datazone:DeleteSubscriptionTarget": {}, + "datazone:GetAsset": {}, + "datazone:GetAssetType": {}, + "datazone:GetDataSource": {}, + "datazone:GetDataSourceRun": {}, + "datazone:GetDomainSharingPolicy": {}, + "datazone:GetEnvironment": {}, + "datazone:GetEnvironmentActionLink": {}, + "datazone:GetEnvironmentBlueprint": {}, + "datazone:GetEnvironmentBlueprintConfiguration": {}, + "datazone:GetEnvironmentCredentials": {}, + "datazone:GetEnvironmentProfile": {}, + "datazone:GetFormType": {}, + "datazone:GetGlossary": {}, + "datazone:GetGlossaryTerm": {}, + "datazone:GetGroupProfile": {}, + "datazone:GetIamPortalLoginUrl": {}, + "datazone:GetListing": {}, + "datazone:GetMetadataGenerationRun": {}, + "datazone:GetProject": {}, + "datazone:GetSubscription": {}, + "datazone:GetSubscriptionEligibility": {}, + "datazone:GetSubscriptionGrant": {}, + "datazone:GetSubscriptionRequestDetails": {}, + "datazone:GetSubscriptionTarget": {}, + "datazone:GetUserProfile": {}, + "datazone:ListAccountEnvironments": {}, + "datazone:ListAssetRevisions": {}, + "datazone:ListDataSourceRunActivities": {}, + "datazone:ListDataSourceRuns": {}, + "datazone:ListDataSources": {}, + "datazone:ListDomains": {}, + "datazone:ListEnvironmentBlueprintConfigurations": {}, + "datazone:ListEnvironmentBlueprints": {}, + "datazone:ListEnvironmentProfiles": {}, + "datazone:ListEnvironments": {}, + "datazone:ListGroupsForUser": {}, + "datazone:ListMetadataGenerationRuns": {}, + "datazone:ListNotifications": {}, + "datazone:ListProjectMemberships": {}, + "datazone:ListProjects": {}, + 
"datazone:ListSubscriptionGrants": {}, + "datazone:ListSubscriptionRequests": {}, + "datazone:ListSubscriptionTargets": {}, + "datazone:ListSubscriptions": {}, + "datazone:ListWarehouseMetadata": {}, + "datazone:ProvisionDomain": {}, + "datazone:PutDomainSharingPolicy": {}, + "datazone:PutEnvironmentBlueprintConfiguration": {}, + "datazone:RefreshToken": {}, + "datazone:RejectPredictions": {}, + "datazone:RejectSubscriptionRequest": {}, + "datazone:RevokeSubscription": {}, + "datazone:Search": {}, + "datazone:SearchGroupProfiles": {}, + "datazone:SearchListings": {}, + "datazone:SearchTypes": {}, + "datazone:SearchUserProfiles": {}, + "datazone:SsoLogin": {}, + "datazone:SsoLogout": {}, + "datazone:StartDataSourceRun": {}, + "datazone:StartMetadataGenerationRun": {}, + "datazone:StopMetadataGenerationRun": {}, + "datazone:UpdateDataSource": {}, + "datazone:UpdateEnvironment": {}, + "datazone:UpdateEnvironmentBlueprint": {}, + "datazone:UpdateEnvironmentConfiguration": {}, + "datazone:UpdateEnvironmentDeploymentStatus": {}, + "datazone:UpdateEnvironmentProfile": {}, + "datazone:UpdateGlossary": {}, + "datazone:UpdateGlossaryTerm": {}, + "datazone:UpdateGroupProfile": {}, + "datazone:UpdateProject": {}, + "datazone:UpdateSubscriptionGrantStatus": {}, + "datazone:UpdateSubscriptionRequest": {}, + "datazone:UpdateSubscriptionTarget": {}, + "datazone:UpdateUserProfile": {}, + "datazone:ValidatePassRole": {}, + "dax:CreateParameterGroup": {}, + "dax:CreateSubnetGroup": {}, + "dax:DeleteParameterGroup": {}, + "dax:DeleteSubnetGroup": {}, + "dax:DescribeDefaultParameters": {}, + "dax:DescribeEvents": {}, + "dax:DescribeParameterGroups": {}, + "dax:DescribeParameters": {}, + "dax:DescribeSubnetGroups": {}, + "dax:UpdateParameterGroup": {}, + "dax:UpdateSubnetGroup": {}, + "dbqms:CreateFavoriteQuery": {}, + "dbqms:CreateTab": {}, + "dbqms:DeleteFavoriteQueries": {}, + "dbqms:DeleteQueryHistory": {}, + "dbqms:DeleteTab": {}, + "dbqms:DescribeFavoriteQueries": {}, + "dbqms:DescribeQueryHistory": {}, + "dbqms:DescribeTabs": {}, + "dbqms:GetQueryString": {}, + "dbqms:UpdateFavoriteQuery": {}, + "dbqms:UpdateQueryHistory": {}, + "dbqms:UpdateTab": {}, + "deepcomposer:AssociateCoupon": {}, + "deepracer:AdminGetAccountConfig": {}, + "deepracer:AdminListAssociatedResources": {}, + "deepracer:AdminListAssociatedUsers": {}, + "deepracer:AdminManageUser": {}, + "deepracer:AdminSetAccountConfig": {}, + "deepracer:CreateCar": {}, + "deepracer:CreateLeaderboard": {}, + "deepracer:GetAccountConfig": {}, + "deepracer:GetAlias": {}, + "deepracer:GetCars": {}, + "deepracer:ImportModel": {}, + "deepracer:ListLeaderboards": {}, + "deepracer:ListModels": {}, + "deepracer:ListPrivateLeaderboards": {}, + "deepracer:ListSubscribedPrivateLeaderboards": {}, + "deepracer:ListTracks": {}, + "deepracer:MigrateModels": {}, + "deepracer:SetAlias": {}, + "deepracer:TestRewardFunction": {}, + "detective:AcceptInvitation": {}, + "detective:BatchGetMembershipDatasources": {}, + "detective:CreateGraph": {}, + "detective:DisableOrganizationAdminAccount": {}, + "detective:DisassociateMembership": {}, + "detective:EnableOrganizationAdminAccount": {}, + "detective:GetPricingInformation": {}, + "detective:ListGraphs": {}, + "detective:ListInvitations": {}, + "detective:ListOrganizationAdminAccount": {}, + "detective:RejectInvitation": {}, + "devicefarm:CreateInstanceProfile": {}, + "devicefarm:CreateProject": {}, + "devicefarm:CreateTestGridProject": {}, + "devicefarm:CreateVPCEConfiguration": {}, + "devicefarm:GetAccountSettings": {}, + 
"devicefarm:GetOfferingStatus": {}, + "devicefarm:ListDeviceInstances": {}, + "devicefarm:ListDevices": {}, + "devicefarm:ListInstanceProfiles": {}, + "devicefarm:ListOfferingPromotions": {}, + "devicefarm:ListOfferingTransactions": {}, + "devicefarm:ListOfferings": {}, + "devicefarm:ListProjects": {}, + "devicefarm:ListTestGridProjects": {}, + "devicefarm:ListVPCEConfigurations": {}, + "devicefarm:PurchaseOffering": {}, + "devicefarm:RenewOffering": {}, + "devops-guru:DeleteInsight": {}, + "devops-guru:DescribeAccountHealth": {}, + "devops-guru:DescribeAccountOverview": {}, + "devops-guru:DescribeAnomaly": {}, + "devops-guru:DescribeEventSourcesConfig": {}, + "devops-guru:DescribeFeedback": {}, + "devops-guru:DescribeInsight": {}, + "devops-guru:DescribeOrganizationHealth": {}, + "devops-guru:DescribeOrganizationOverview": {}, + "devops-guru:DescribeOrganizationResourceCollectionHealth": {}, + "devops-guru:DescribeResourceCollectionHealth": {}, + "devops-guru:DescribeServiceIntegration": {}, + "devops-guru:GetCostEstimation": {}, + "devops-guru:GetResourceCollection": {}, + "devops-guru:ListAnomaliesForInsight": {}, + "devops-guru:ListAnomalousLogGroups": {}, + "devops-guru:ListEvents": {}, + "devops-guru:ListInsights": {}, + "devops-guru:ListMonitoredResources": {}, + "devops-guru:ListNotificationChannels": {}, + "devops-guru:ListOrganizationInsights": {}, + "devops-guru:ListRecommendations": {}, + "devops-guru:PutFeedback": {}, + "devops-guru:SearchInsights": {}, + "devops-guru:SearchOrganizationInsights": {}, + "devops-guru:StartCostEstimation": {}, + "devops-guru:UpdateEventSourcesConfig": {}, + "devops-guru:UpdateResourceCollection": {}, + "devops-guru:UpdateServiceIntegration": {}, + "directconnect:ConfirmCustomerAgreement": {}, + "directconnect:CreateDirectConnectGateway": {}, + "directconnect:DeleteDirectConnectGatewayAssociationProposal": {}, + "directconnect:DescribeCustomerMetadata": {}, + "directconnect:DescribeLocations": {}, + "directconnect:DescribeVirtualGateways": {}, + "directconnect:UpdateDirectConnectGatewayAssociation": {}, + "discovery:AssociateConfigurationItemsToApplication": {}, + "discovery:BatchDeleteAgents": {}, + "discovery:BatchDeleteImportData": {}, + "discovery:CreateApplication": {}, + "discovery:CreateTags": {}, + "discovery:DeleteApplications": {}, + "discovery:DeleteTags": {}, + "discovery:DescribeAgents": {}, + "discovery:DescribeBatchDeleteConfigurationTask": {}, + "discovery:DescribeConfigurations": {}, + "discovery:DescribeContinuousExports": {}, + "discovery:DescribeExportConfigurations": {}, + "discovery:DescribeExportTasks": {}, + "discovery:DescribeImportTasks": {}, + "discovery:DescribeTags": {}, + "discovery:DisassociateConfigurationItemsFromApplication": {}, + "discovery:ExportConfigurations": {}, + "discovery:GetDiscoverySummary": {}, + "discovery:GetNetworkConnectionGraph": {}, + "discovery:ListConfigurations": {}, + "discovery:ListServerNeighbors": {}, + "discovery:StartBatchDeleteConfigurationTask": {}, + "discovery:StartContinuousExport": {}, + "discovery:StartDataCollectionByAgentIds": {}, + "discovery:StartExportTask": {}, + "discovery:StartImportTask": {}, + "discovery:StopContinuousExport": {}, + "discovery:StopDataCollectionByAgentIds": {}, + "discovery:UpdateApplication": {}, + "dlm:CreateLifecyclePolicy": {}, + "dlm:GetLifecyclePolicies": {}, + "dms:BatchStartRecommendations": {}, + "dms:CreateDataProvider": {}, + "dms:CreateEndpoint": {}, + "dms:CreateEventSubscription": {}, + "dms:CreateFleetAdvisorCollector": {}, + 
"dms:CreateInstanceProfile": {}, + "dms:CreateReplicationInstance": {}, + "dms:CreateReplicationSubnetGroup": {}, + "dms:DeleteFleetAdvisorCollector": {}, + "dms:DeleteFleetAdvisorDatabases": {}, + "dms:DescribeAccountAttributes": {}, + "dms:DescribeCertificates": {}, + "dms:DescribeConnections": {}, + "dms:DescribeDataMigrations": {}, + "dms:DescribeEndpointSettings": {}, + "dms:DescribeEndpointTypes": {}, + "dms:DescribeEndpoints": {}, + "dms:DescribeEngineVersions": {}, + "dms:DescribeEventCategories": {}, + "dms:DescribeEventSubscriptions": {}, + "dms:DescribeEvents": {}, + "dms:DescribeFleetAdvisorCollectors": {}, + "dms:DescribeFleetAdvisorDatabases": {}, + "dms:DescribeFleetAdvisorLsaAnalysis": {}, + "dms:DescribeFleetAdvisorSchemaObjectSummary": {}, + "dms:DescribeFleetAdvisorSchemas": {}, + "dms:DescribeOrderableReplicationInstances": {}, + "dms:DescribePendingMaintenanceActions": {}, + "dms:DescribeRecommendationLimitations": {}, + "dms:DescribeRecommendations": {}, + "dms:DescribeReplicationConfigs": {}, + "dms:DescribeReplicationInstances": {}, + "dms:DescribeReplicationSubnetGroups": {}, + "dms:DescribeReplicationTasks": {}, + "dms:DescribeReplications": {}, + "dms:ImportCertificate": {}, + "dms:ModifyEventSubscription": {}, + "dms:ModifyFleetAdvisorCollector": {}, + "dms:ModifyFleetAdvisorCollectorStatuses": {}, + "dms:ModifyReplicationSubnetGroup": {}, + "dms:RunFleetAdvisorLsaAnalysis": {}, + "dms:StartRecommendations": {}, + "dms:UpdateSubscriptionsToEventBridge": {}, + "dms:UploadFileMetadataList": {}, + "docdb-elastic:CreateCluster": {}, + "docdb-elastic:ListClusterSnapshots": {}, + "docdb-elastic:ListClusters": {}, + "drs:BatchDeleteSnapshotRequestForDrs": {}, + "drs:CreateExtendedSourceServer": {}, + "drs:CreateLaunchConfigurationTemplate": {}, + "drs:CreateReplicationConfigurationTemplate": {}, + "drs:CreateSourceNetwork": {}, + "drs:CreateSourceServerForDrs": {}, + "drs:DescribeJobs": {}, + "drs:DescribeLaunchConfigurationTemplates": {}, + "drs:DescribeRecoveryInstances": {}, + "drs:DescribeReplicationConfigurationTemplates": {}, + "drs:DescribeReplicationServerAssociationsForDrs": {}, + "drs:DescribeSnapshotRequestsForDrs": {}, + "drs:DescribeSourceNetworks": {}, + "drs:DescribeSourceServers": {}, + "drs:GetAgentInstallationAssetsForDrs": {}, + "drs:GetChannelCommandsForDrs": {}, + "drs:InitializeService": {}, + "drs:ListExtensibleSourceServers": {}, + "drs:ListStagingAccounts": {}, + "drs:ListTagsForResource": {}, + "drs:SendChannelCommandResultForDrs": {}, + "drs:SendClientLogsForDrs": {}, + "drs:SendClientMetricsForDrs": {}, + "ds:CheckAlias": {}, + "ds:ConnectDirectory": {}, + "ds:CreateDirectory": {}, + "ds:CreateIdentityPoolDirectory": {}, + "ds:CreateMicrosoftAD": {}, + "ds:DescribeDirectories": {}, + "ds:DescribeSnapshots": {}, + "ds:DescribeTrusts": {}, + "ds:GetDirectoryLimits": {}, + "ds:ListLogSubscriptions": {}, + "dynamodb:DescribeEndpoints": {}, + "dynamodb:DescribeLimits": {}, + "dynamodb:DescribeReservedCapacity": {}, + "dynamodb:DescribeReservedCapacityOfferings": {}, + "dynamodb:ListBackups": {}, + "dynamodb:ListContributorInsights": {}, + "dynamodb:ListExports": {}, + "dynamodb:ListGlobalTables": {}, + "dynamodb:ListImports": {}, + "dynamodb:ListStreams": {}, + "dynamodb:ListTables": {}, + "dynamodb:PurchaseReservedCapacityOfferings": {}, + "ec2:AcceptReservedInstancesExchangeQuote": {}, + "ec2:AdvertiseByoipCidr": {}, + "ec2:AssociateIpamByoasn": {}, + "ec2:AssociateTrunkInterface": {}, + "ec2:BundleInstance": {}, + "ec2:CancelBundleTask": {}, + 
"ec2:CancelConversionTask": {}, + "ec2:CancelReservedInstancesListing": {}, + "ec2:ConfirmProductInstance": {}, + "ec2:CreateDefaultSubnet": {}, + "ec2:CreateDefaultVpc": {}, + "ec2:CreateReservedInstancesListing": {}, + "ec2:CreateSpotDatafeedSubscription": {}, + "ec2:CreateSubnetCidrReservation": {}, + "ec2:DeleteQueuedReservedInstances": {}, + "ec2:DeleteSpotDatafeedSubscription": {}, + "ec2:DeleteSubnetCidrReservation": {}, + "ec2:DeprovisionByoipCidr": {}, + "ec2:DeregisterInstanceEventNotificationAttributes": {}, + "ec2:DescribeAccountAttributes": {}, + "ec2:DescribeAddressTransfers": {}, + "ec2:DescribeAddresses": {}, + "ec2:DescribeAggregateIdFormat": {}, + "ec2:DescribeAvailabilityZones": {}, + "ec2:DescribeAwsNetworkPerformanceMetricSubscriptions": {}, + "ec2:DescribeBundleTasks": {}, + "ec2:DescribeByoipCidrs": {}, + "ec2:DescribeCapacityBlockOfferings": {}, + "ec2:DescribeCapacityReservationFleets": {}, + "ec2:DescribeCapacityReservations": {}, + "ec2:DescribeCarrierGateways": {}, + "ec2:DescribeClassicLinkInstances": {}, + "ec2:DescribeCoipPools": {}, + "ec2:DescribeConversionTasks": {}, + "ec2:DescribeCustomerGateways": {}, + "ec2:DescribeDhcpOptions": {}, + "ec2:DescribeEgressOnlyInternetGateways": {}, + "ec2:DescribeElasticGpus": {}, + "ec2:DescribeExportImageTasks": {}, + "ec2:DescribeExportTasks": {}, + "ec2:DescribeFastLaunchImages": {}, + "ec2:DescribeFastSnapshotRestores": {}, + "ec2:DescribeFleets": {}, + "ec2:DescribeFlowLogs": {}, + "ec2:DescribeFpgaImages": {}, + "ec2:DescribeHostReservationOfferings": {}, + "ec2:DescribeHostReservations": {}, + "ec2:DescribeHosts": {}, + "ec2:DescribeIamInstanceProfileAssociations": {}, + "ec2:DescribeIdFormat": {}, + "ec2:DescribeIdentityIdFormat": {}, + "ec2:DescribeImages": {}, + "ec2:DescribeImportImageTasks": {}, + "ec2:DescribeImportSnapshotTasks": {}, + "ec2:DescribeInstanceConnectEndpoints": {}, + "ec2:DescribeInstanceCreditSpecifications": {}, + "ec2:DescribeInstanceEventNotificationAttributes": {}, + "ec2:DescribeInstanceEventWindows": {}, + "ec2:DescribeInstanceStatus": {}, + "ec2:DescribeInstanceTopology": {}, + "ec2:DescribeInstanceTypeOfferings": {}, + "ec2:DescribeInstanceTypes": {}, + "ec2:DescribeInstances": {}, + "ec2:DescribeInternetGateways": {}, + "ec2:DescribeIpamByoasn": {}, + "ec2:DescribeIpamPools": {}, + "ec2:DescribeIpamResourceDiscoveries": {}, + "ec2:DescribeIpamResourceDiscoveryAssociations": {}, + "ec2:DescribeIpamScopes": {}, + "ec2:DescribeIpams": {}, + "ec2:DescribeIpv6Pools": {}, + "ec2:DescribeKeyPairs": {}, + "ec2:DescribeLaunchTemplateVersions": {}, + "ec2:DescribeLaunchTemplates": {}, + "ec2:DescribeLocalGatewayRouteTablePermissions": {}, + "ec2:DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations": {}, + "ec2:DescribeLocalGatewayRouteTableVpcAssociations": {}, + "ec2:DescribeLocalGatewayRouteTables": {}, + "ec2:DescribeLocalGatewayVirtualInterfaceGroups": {}, + "ec2:DescribeLocalGatewayVirtualInterfaces": {}, + "ec2:DescribeLocalGateways": {}, + "ec2:DescribeLockedSnapshots": {}, + "ec2:DescribeManagedPrefixLists": {}, + "ec2:DescribeMovingAddresses": {}, + "ec2:DescribeNatGateways": {}, + "ec2:DescribeNetworkAcls": {}, + "ec2:DescribeNetworkInsightsAccessScopeAnalyses": {}, + "ec2:DescribeNetworkInsightsAccessScopes": {}, + "ec2:DescribeNetworkInsightsAnalyses": {}, + "ec2:DescribeNetworkInsightsPaths": {}, + "ec2:DescribeNetworkInterfaceAttribute": {}, + "ec2:DescribeNetworkInterfacePermissions": {}, + "ec2:DescribeNetworkInterfaces": {}, + "ec2:DescribePlacementGroups": {}, + 
"ec2:DescribePrefixLists": {}, + "ec2:DescribePrincipalIdFormat": {}, + "ec2:DescribePublicIpv4Pools": {}, + "ec2:DescribeRegions": {}, + "ec2:DescribeReplaceRootVolumeTasks": {}, + "ec2:DescribeReservedInstances": {}, + "ec2:DescribeReservedInstancesListings": {}, + "ec2:DescribeReservedInstancesModifications": {}, + "ec2:DescribeReservedInstancesOfferings": {}, + "ec2:DescribeRouteTables": {}, + "ec2:DescribeScheduledInstanceAvailability": {}, + "ec2:DescribeScheduledInstances": {}, + "ec2:DescribeSecurityGroupReferences": {}, + "ec2:DescribeSecurityGroupRules": {}, + "ec2:DescribeSecurityGroups": {}, + "ec2:DescribeSnapshotTierStatus": {}, + "ec2:DescribeSnapshots": {}, + "ec2:DescribeSpotDatafeedSubscription": {}, + "ec2:DescribeSpotFleetRequests": {}, + "ec2:DescribeSpotInstanceRequests": {}, + "ec2:DescribeSpotPriceHistory": {}, + "ec2:DescribeStaleSecurityGroups": {}, + "ec2:DescribeStoreImageTasks": {}, + "ec2:DescribeSubnets": {}, + "ec2:DescribeTags": {}, + "ec2:DescribeTrafficMirrorFilters": {}, + "ec2:DescribeTrafficMirrorSessions": {}, + "ec2:DescribeTrafficMirrorTargets": {}, + "ec2:DescribeTransitGatewayAttachments": {}, + "ec2:DescribeTransitGatewayConnectPeers": {}, + "ec2:DescribeTransitGatewayConnects": {}, + "ec2:DescribeTransitGatewayMulticastDomains": {}, + "ec2:DescribeTransitGatewayPeeringAttachments": {}, + "ec2:DescribeTransitGatewayPolicyTables": {}, + "ec2:DescribeTransitGatewayRouteTableAnnouncements": {}, + "ec2:DescribeTransitGatewayRouteTables": {}, + "ec2:DescribeTransitGatewayVpcAttachments": {}, + "ec2:DescribeTransitGateways": {}, + "ec2:DescribeTrunkInterfaceAssociations": {}, + "ec2:DescribeVerifiedAccessEndpoints": {}, + "ec2:DescribeVerifiedAccessGroups": {}, + "ec2:DescribeVerifiedAccessInstanceLoggingConfigurations": {}, + "ec2:DescribeVerifiedAccessInstanceWebAclAssociations": {}, + "ec2:DescribeVerifiedAccessInstances": {}, + "ec2:DescribeVerifiedAccessTrustProviders": {}, + "ec2:DescribeVolumeStatus": {}, + "ec2:DescribeVolumes": {}, + "ec2:DescribeVolumesModifications": {}, + "ec2:DescribeVpcClassicLink": {}, + "ec2:DescribeVpcClassicLinkDnsSupport": {}, + "ec2:DescribeVpcEndpointConnectionNotifications": {}, + "ec2:DescribeVpcEndpointConnections": {}, + "ec2:DescribeVpcEndpointServiceConfigurations": {}, + "ec2:DescribeVpcEndpointServices": {}, + "ec2:DescribeVpcEndpoints": {}, + "ec2:DescribeVpcPeeringConnections": {}, + "ec2:DescribeVpcs": {}, + "ec2:DescribeVpnConnections": {}, + "ec2:DescribeVpnGateways": {}, + "ec2:DisableAwsNetworkPerformanceMetricSubscription": {}, + "ec2:DisableEbsEncryptionByDefault": {}, + "ec2:DisableImageBlockPublicAccess": {}, + "ec2:DisableIpamOrganizationAdminAccount": {}, + "ec2:DisableSerialConsoleAccess": {}, + "ec2:DisableSnapshotBlockPublicAccess": {}, + "ec2:DisassociateIpamByoasn": {}, + "ec2:DisassociateTrunkInterface": {}, + "ec2:EnableAwsNetworkPerformanceMetricSubscription": {}, + "ec2:EnableEbsEncryptionByDefault": {}, + "ec2:EnableImageBlockPublicAccess": {}, + "ec2:EnableIpamOrganizationAdminAccount": {}, + "ec2:EnableReachabilityAnalyzerOrganizationSharing": {}, + "ec2:EnableSerialConsoleAccess": {}, + "ec2:EnableSnapshotBlockPublicAccess": {}, + "ec2:ExportTransitGatewayRoutes": {}, + "ec2:GetAssociatedIpv6PoolCidrs": {}, + "ec2:GetAwsNetworkPerformanceData": {}, + "ec2:GetDefaultCreditSpecification": {}, + "ec2:GetEbsDefaultKmsKeyId": {}, + "ec2:GetEbsEncryptionByDefault": {}, + "ec2:GetHostReservationPurchasePreview": {}, + "ec2:GetImageBlockPublicAccessState": {}, + 
"ec2:GetInstanceTypesFromInstanceRequirements": {}, + "ec2:GetReservedInstancesExchangeQuote": {}, + "ec2:GetSerialConsoleAccessStatus": {}, + "ec2:GetSnapshotBlockPublicAccessState": {}, + "ec2:GetSpotPlacementScores": {}, + "ec2:GetSubnetCidrReservations": {}, + "ec2:GetTransitGatewayAttachmentPropagations": {}, + "ec2:GetTransitGatewayPrefixListReferences": {}, + "ec2:GetTransitGatewayRouteTableAssociations": {}, + "ec2:GetTransitGatewayRouteTablePropagations": {}, + "ec2:GetVpnConnectionDeviceTypes": {}, + "ec2:InjectApiError": {}, + "ec2:ListImagesInRecycleBin": {}, + "ec2:ListSnapshotsInRecycleBin": {}, + "ec2:ModifyAvailabilityZoneGroup": {}, + "ec2:ModifyDefaultCreditSpecification": {}, + "ec2:ModifyEbsDefaultKmsKeyId": {}, + "ec2:ModifyIdFormat": {}, + "ec2:ModifyIdentityIdFormat": {}, + "ec2:MoveAddressToVpc": {}, + "ec2:ProvisionByoipCidr": {}, + "ec2:PurchaseReservedInstancesOffering": {}, + "ec2:PurchaseScheduledInstances": {}, + "ec2:RegisterInstanceEventNotificationAttributes": {}, + "ec2:ReportInstanceStatus": {}, + "ec2:ResetEbsDefaultKmsKeyId": {}, + "ec2:RestoreAddressToClassic": {}, + "ec2:RunScheduledInstances": {}, + "ec2:WithdrawByoipCidr": {}, + "ec2messages:AcknowledgeMessage": {}, + "ec2messages:DeleteMessage": {}, + "ec2messages:FailMessage": {}, + "ec2messages:GetEndpoint": {}, + "ec2messages:GetMessages": {}, + "ec2messages:SendReply": {}, + "ecr-public:GetAuthorizationToken": {}, + "ecr:BatchImportUpstreamImage": {}, + "ecr:CreatePullThroughCacheRule": {}, + "ecr:CreateRepository": {}, + "ecr:CreateRepositoryCreationTemplate": {}, + "ecr:DeletePullThroughCacheRule": {}, + "ecr:DeleteRegistryPolicy": {}, + "ecr:DeleteRepositoryCreationTemplate": {}, + "ecr:DescribePullThroughCacheRules": {}, + "ecr:DescribeRegistry": {}, + "ecr:DescribeRepositoryCreationTemplate": {}, + "ecr:GetAuthorizationToken": {}, + "ecr:GetRegistryPolicy": {}, + "ecr:GetRegistryScanningConfiguration": {}, + "ecr:PutRegistryPolicy": {}, + "ecr:PutRegistryScanningConfiguration": {}, + "ecr:PutReplicationConfiguration": {}, + "ecr:UpdatePullThroughCacheRule": {}, + "ecr:ValidatePullThroughCacheRule": {}, + "ecs:CreateCapacityProvider": {}, + "ecs:CreateCluster": {}, + "ecs:CreateTaskSet": {}, + "ecs:DeleteAccountSetting": {}, + "ecs:DeregisterTaskDefinition": {}, + "ecs:DescribeTaskDefinition": {}, + "ecs:DiscoverPollEndpoint": {}, + "ecs:ListAccountSettings": {}, + "ecs:ListClusters": {}, + "ecs:ListServices": {}, + "ecs:ListServicesByNamespace": {}, + "ecs:ListTaskDefinitionFamilies": {}, + "ecs:ListTaskDefinitions": {}, + "ecs:PutAccountSetting": {}, + "ecs:PutAccountSettingDefault": {}, + "ecs:RegisterTaskDefinition": {}, + "eks:CreateCluster": {}, + "eks:CreateEksAnywhereSubscription": {}, + "eks:DescribeAddonConfiguration": {}, + "eks:DescribeAddonVersions": {}, + "eks:ListClusters": {}, + "eks:ListEksAnywhereSubscriptions": {}, + "eks:RegisterCluster": {}, + "elasticache:DescribeCacheEngineVersions": {}, + "elasticache:DescribeEngineDefaultParameters": {}, + "elasticache:DescribeEvents": {}, + "elasticache:DescribeReservedCacheNodesOfferings": {}, + "elasticache:DescribeServiceUpdates": {}, + "elasticbeanstalk:CheckDNSAvailability": {}, + "elasticbeanstalk:CreateStorageLocation": {}, + "elasticbeanstalk:DescribeAccountAttributes": {}, + "elasticbeanstalk:ListPlatformBranches": {}, + "elasticfilesystem:CreateFileSystem": {}, + "elasticfilesystem:DescribeAccountPreferences": {}, + "elasticfilesystem:PutAccountPreferences": {}, + "elasticloadbalancing:DescribeAccountLimits": {}, + 
"elasticloadbalancing:DescribeInstanceHealth": {}, + "elasticloadbalancing:DescribeListenerCertificates": {}, + "elasticloadbalancing:DescribeListeners": {}, + "elasticloadbalancing:DescribeLoadBalancerAttributes": {}, + "elasticloadbalancing:DescribeLoadBalancerPolicies": {}, + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes": {}, + "elasticloadbalancing:DescribeLoadBalancers": {}, + "elasticloadbalancing:DescribeRules": {}, + "elasticloadbalancing:DescribeSSLPolicies": {}, + "elasticloadbalancing:DescribeTags": {}, + "elasticloadbalancing:DescribeTargetGroupAttributes": {}, + "elasticloadbalancing:DescribeTargetGroups": {}, + "elasticloadbalancing:DescribeTargetHealth": {}, + "elasticloadbalancing:DescribeTrustStoreAssociations": {}, + "elasticloadbalancing:DescribeTrustStoreRevocations": {}, + "elasticloadbalancing:DescribeTrustStores": {}, + "elasticloadbalancing:SetWebAcl": {}, + "elasticmapreduce:CreateRepository": {}, + "elasticmapreduce:CreateSecurityConfiguration": {}, + "elasticmapreduce:CreateStudio": {}, + "elasticmapreduce:DeleteRepository": {}, + "elasticmapreduce:DeleteSecurityConfiguration": {}, + "elasticmapreduce:DescribeReleaseLabel": {}, + "elasticmapreduce:DescribeRepository": {}, + "elasticmapreduce:DescribeSecurityConfiguration": {}, + "elasticmapreduce:GetBlockPublicAccessConfiguration": {}, + "elasticmapreduce:LinkRepository": {}, + "elasticmapreduce:ListClusters": {}, + "elasticmapreduce:ListEditors": {}, + "elasticmapreduce:ListNotebookExecutions": {}, + "elasticmapreduce:ListReleaseLabels": {}, + "elasticmapreduce:ListRepositories": {}, + "elasticmapreduce:ListSecurityConfigurations": {}, + "elasticmapreduce:ListStudioSessionMappings": {}, + "elasticmapreduce:ListStudios": {}, + "elasticmapreduce:ListSupportedInstanceTypes": {}, + "elasticmapreduce:PutBlockPublicAccessConfiguration": {}, + "elasticmapreduce:RunJobFlow": {}, + "elasticmapreduce:UnlinkRepository": {}, + "elasticmapreduce:UpdateRepository": {}, + "elasticmapreduce:ViewEventsFromAllClustersInConsole": {}, + "elastictranscoder:CreatePipeline": {}, + "elastictranscoder:CreatePreset": {}, + "elastictranscoder:ListJobsByStatus": {}, + "elastictranscoder:ListPipelines": {}, + "elastictranscoder:ListPresets": {}, + "elastictranscoder:TestRole": {}, + "elemental-activations:CompleteAccountRegistration": {}, + "elemental-activations:CompleteFileUpload": {}, + "elemental-activations:DownloadSoftware": {}, + "elemental-activations:GenerateLicenses": {}, + "elemental-activations:StartAccountRegistration": {}, + "elemental-activations:StartFileUpload": {}, + "elemental-appliances-software:CompleteUpload": {}, + "elemental-appliances-software:CreateOrderV1": {}, + "elemental-appliances-software:GetAvsCorrectAddress": {}, + "elemental-appliances-software:GetBillingAddresses": {}, + "elemental-appliances-software:GetDeliveryAddressesV2": {}, + "elemental-appliances-software:GetOrder": {}, + "elemental-appliances-software:GetOrdersV2": {}, + "elemental-appliances-software:GetTaxes": {}, + "elemental-appliances-software:ListQuotes": {}, + "elemental-appliances-software:StartUpload": {}, + "elemental-appliances-software:SubmitOrderV1": {}, + "elemental-support-cases:CheckCasePermission": {}, + "elemental-support-cases:CreateCase": {}, + "elemental-support-cases:GetCase": {}, + "elemental-support-cases:GetCases": {}, + "elemental-support-cases:UpdateCase": {}, + "elemental-support-content:Query": {}, + "emr-containers:CreateJobTemplate": {}, + "emr-containers:CreateVirtualCluster": {}, + 
"emr-containers:ListJobTemplates": {}, + "emr-containers:ListVirtualClusters": {}, + "emr-serverless:CreateApplication": {}, + "emr-serverless:ListApplications": {}, + "entityresolution:CreateIdMappingWorkflow": {}, + "entityresolution:CreateMatchingWorkflow": {}, + "entityresolution:CreateSchemaMapping": {}, + "entityresolution:ListIdMappingWorkflows": {}, + "entityresolution:ListMatchingWorkflows": {}, + "entityresolution:ListSchemaMappings": {}, + "entityresolution:ListTagsForResource": {}, + "entityresolution:TagResource": {}, + "entityresolution:UntagResource": {}, + "es:AcceptInboundConnection": {}, + "es:AcceptInboundCrossClusterSearchConnection": {}, + "es:AuthorizeVpcEndpointAccess": {}, + "es:CreateElasticsearchServiceRole": {}, + "es:CreatePackage": {}, + "es:CreateServiceRole": {}, + "es:CreateVpcEndpoint": {}, + "es:DeleteElasticsearchServiceRole": {}, + "es:DeleteInboundConnection": {}, + "es:DeleteInboundCrossClusterSearchConnection": {}, + "es:DeleteOutboundConnection": {}, + "es:DeleteOutboundCrossClusterSearchConnection": {}, + "es:DeletePackage": {}, + "es:DeleteVpcEndpoint": {}, + "es:DescribeElasticsearchInstanceTypeLimits": {}, + "es:DescribeInboundConnections": {}, + "es:DescribeInboundCrossClusterSearchConnections": {}, + "es:DescribeInstanceTypeLimits": {}, + "es:DescribeOutboundConnections": {}, + "es:DescribeOutboundCrossClusterSearchConnections": {}, + "es:DescribePackages": {}, + "es:DescribeReservedElasticsearchInstanceOfferings": {}, + "es:DescribeReservedElasticsearchInstances": {}, + "es:DescribeReservedInstanceOfferings": {}, + "es:DescribeReservedInstances": {}, + "es:DescribeVpcEndpoints": {}, + "es:GetPackageVersionHistory": {}, + "es:ListDomainNames": {}, + "es:ListDomainsForPackage": {}, + "es:ListElasticsearchInstanceTypeDetails": {}, + "es:ListElasticsearchInstanceTypes": {}, + "es:ListElasticsearchVersions": {}, + "es:ListInstanceTypeDetails": {}, + "es:ListVersions": {}, + "es:ListVpcEndpointAccess": {}, + "es:ListVpcEndpoints": {}, + "es:ListVpcEndpointsForDomain": {}, + "es:PurchaseReservedElasticsearchInstanceOffering": {}, + "es:PurchaseReservedInstanceOffering": {}, + "es:RejectInboundConnection": {}, + "es:RejectInboundCrossClusterSearchConnection": {}, + "es:RevokeVpcEndpointAccess": {}, + "es:UpdatePackage": {}, + "es:UpdateVpcEndpoint": {}, + "events:ListApiDestinations": {}, + "events:ListArchives": {}, + "events:ListConnections": {}, + "events:ListEndpoints": {}, + "events:ListEventBuses": {}, + "events:ListEventSources": {}, + "events:ListPartnerEventSources": {}, + "events:ListReplays": {}, + "events:ListRuleNamesByTarget": {}, + "events:ListRules": {}, + "events:PutPartnerEvents": {}, + "events:PutPermission": {}, + "events:RemovePermission": {}, + "events:TestEventPattern": {}, + "evidently:CreateExperiment": {}, + "evidently:CreateFeature": {}, + "evidently:CreateLaunch": {}, + "evidently:CreateProject": {}, + "evidently:CreateSegment": {}, + "evidently:ListExperiments": {}, + "evidently:ListFeatures": {}, + "evidently:ListLaunches": {}, + "evidently:ListProjects": {}, + "evidently:ListSegmentReferences": {}, + "evidently:ListSegments": {}, + "evidently:ListTagsForResource": {}, + "evidently:TestSegmentPattern": {}, + "finspace:CreateKxEnvironment": {}, + "finspace:ListKxEnvironments": {}, + "firehose:ListDeliveryStreams": {}, + "fis:GetTargetResourceType": {}, + "fis:ListActions": {}, + "fis:ListExperimentTemplates": {}, + "fis:ListExperiments": {}, + "fis:ListTargetResourceTypes": {}, + "fms:AssociateAdminAccount": {}, + 
"fms:AssociateThirdPartyFirewall": {}, + "fms:DeleteNotificationChannel": {}, + "fms:DisassociateAdminAccount": {}, + "fms:DisassociateThirdPartyFirewall": {}, + "fms:GetAdminAccount": {}, + "fms:GetAdminScope": {}, + "fms:GetNotificationChannel": {}, + "fms:GetThirdPartyFirewallAssociationStatus": {}, + "fms:ListAdminAccountsForOrganization": {}, + "fms:ListAdminsManagingAccount": {}, + "fms:ListAppsLists": {}, + "fms:ListDiscoveredResources": {}, + "fms:ListMemberAccounts": {}, + "fms:ListPolicies": {}, + "fms:ListProtocolsLists": {}, + "fms:ListResourceSets": {}, + "fms:ListThirdPartyFirewallFirewallPolicies": {}, + "fms:PutAdminAccount": {}, + "fms:PutNotificationChannel": {}, + "forecast:CreateAutoPredictor": {}, + "forecast:ListDatasetGroups": {}, + "forecast:ListDatasetImportJobs": {}, + "forecast:ListDatasets": {}, + "forecast:ListExplainabilities": {}, + "forecast:ListExplainabilityExports": {}, + "forecast:ListForecastExportJobs": {}, + "forecast:ListForecasts": {}, + "forecast:ListMonitors": {}, + "forecast:ListPredictorBacktestExportJobs": {}, + "forecast:ListPredictors": {}, + "forecast:ListWhatIfAnalyses": {}, + "forecast:ListWhatIfForecastExports": {}, + "forecast:ListWhatIfForecasts": {}, + "frauddetector:BatchCreateVariable": {}, + "frauddetector:CreateList": {}, + "frauddetector:CreateVariable": {}, + "frauddetector:GetKMSEncryptionKey": {}, + "frauddetector:PutKMSEncryptionKey": {}, + "freertos:CreateSubscription": {}, + "freertos:DescribeHardwarePlatform": {}, + "freertos:GetEmpPatchUrl": {}, + "freertos:GetSoftwareURL": {}, + "freertos:GetSoftwareURLForConfiguration": {}, + "freertos:GetSubscriptionBillingAmount": {}, + "freertos:ListFreeRTOSVersions": {}, + "freertos:ListHardwarePlatforms": {}, + "freertos:ListHardwareVendors": {}, + "freertos:ListSoftwareConfigurations": {}, + "freertos:ListSoftwarePatches": {}, + "freertos:ListSubscriptionEmails": {}, + "freertos:ListSubscriptions": {}, + "freertos:UpdateEmailRecipients": {}, + "freertos:VerifyEmail": {}, + "freetier:GetFreeTierAlertPreference": {}, + "freetier:GetFreeTierUsage": {}, + "freetier:PutFreeTierAlertPreference": {}, + "fsx:DescribeBackups": {}, + "fsx:DescribeDataRepositoryAssociations": {}, + "fsx:DescribeDataRepositoryTasks": {}, + "fsx:DescribeFileCaches": {}, + "fsx:DescribeFileSystems": {}, + "fsx:DescribeSharedVpcConfiguration": {}, + "fsx:DescribeSnapshots": {}, + "fsx:DescribeStorageVirtualMachines": {}, + "fsx:DescribeVolumes": {}, + "fsx:UpdateSharedVpcConfiguration": {}, + "gamelift:AcceptMatch": {}, + "gamelift:CreateAlias": {}, + "gamelift:CreateBuild": {}, + "gamelift:CreateFleet": {}, + "gamelift:CreateGameServerGroup": {}, + "gamelift:CreateGameSession": {}, + "gamelift:CreateGameSessionQueue": {}, + "gamelift:CreateLocation": {}, + "gamelift:CreateMatchmakingConfiguration": {}, + "gamelift:CreateMatchmakingRuleSet": {}, + "gamelift:CreatePlayerSession": {}, + "gamelift:CreatePlayerSessions": {}, + "gamelift:CreateScript": {}, + "gamelift:CreateVpcPeeringAuthorization": {}, + "gamelift:CreateVpcPeeringConnection": {}, + "gamelift:DeleteVpcPeeringAuthorization": {}, + "gamelift:DeleteVpcPeeringConnection": {}, + "gamelift:DescribeEC2InstanceLimits": {}, + "gamelift:DescribeFleetAttributes": {}, + "gamelift:DescribeFleetCapacity": {}, + "gamelift:DescribeFleetUtilization": {}, + "gamelift:DescribeGameSessionDetails": {}, + "gamelift:DescribeGameSessionPlacement": {}, + "gamelift:DescribeGameSessionQueues": {}, + "gamelift:DescribeGameSessions": {}, + "gamelift:DescribeMatchmaking": {}, + 
"gamelift:DescribeMatchmakingConfigurations": {}, + "gamelift:DescribeMatchmakingRuleSets": {}, + "gamelift:DescribePlayerSessions": {}, + "gamelift:DescribeVpcPeeringAuthorizations": {}, + "gamelift:DescribeVpcPeeringConnections": {}, + "gamelift:GetGameSessionLogUrl": {}, + "gamelift:ListAliases": {}, + "gamelift:ListBuilds": {}, + "gamelift:ListFleets": {}, + "gamelift:ListGameServerGroups": {}, + "gamelift:ListLocations": {}, + "gamelift:ListScripts": {}, + "gamelift:SearchGameSessions": {}, + "gamelift:StartMatchBackfill": {}, + "gamelift:StartMatchmaking": {}, + "gamelift:StopGameSessionPlacement": {}, + "gamelift:StopMatchmaking": {}, + "gamelift:UpdateGameSession": {}, + "gamelift:ValidateMatchmakingRuleSet": {}, + "glacier:GetDataRetrievalPolicy": {}, + "glacier:ListProvisionedCapacity": {}, + "glacier:ListVaults": {}, + "glacier:PurchaseProvisionedCapacity": {}, + "glacier:SetDataRetrievalPolicy": {}, + "globalaccelerator:AdvertiseByoipCidr": {}, + "globalaccelerator:CreateAccelerator": {}, + "globalaccelerator:CreateCrossAccountAttachment": {}, + "globalaccelerator:CreateCustomRoutingAccelerator": {}, + "globalaccelerator:DeprovisionByoipCidr": {}, + "globalaccelerator:ListAccelerators": {}, + "globalaccelerator:ListByoipCidrs": {}, + "globalaccelerator:ListCrossAccountAttachments": {}, + "globalaccelerator:ListCrossAccountResourceAccounts": {}, + "globalaccelerator:ListCrossAccountResources": {}, + "globalaccelerator:ListCustomRoutingAccelerators": {}, + "globalaccelerator:ListCustomRoutingPortMappingsByDestination": {}, + "globalaccelerator:ProvisionByoipCidr": {}, + "globalaccelerator:WithdrawByoipCidr": {}, + "glue:CheckSchemaVersionValidity": {}, + "glue:CreateClassifier": {}, + "glue:CreateCrawler": {}, + "glue:CreateCustomEntityType": {}, + "glue:CreateDataQualityRuleset": {}, + "glue:CreateDevEndpoint": {}, + "glue:CreateMLTransform": {}, + "glue:CreateScript": {}, + "glue:CreateSecurityConfiguration": {}, + "glue:CreateSession": {}, + "glue:DeleteClassifier": {}, + "glue:DeleteSecurityConfiguration": {}, + "glue:DeregisterDataPreview": {}, + "glue:GetClassifier": {}, + "glue:GetClassifiers": {}, + "glue:GetColumnStatisticsTaskRun": {}, + "glue:GetColumnStatisticsTaskRuns": {}, + "glue:GetCrawlerMetrics": {}, + "glue:GetCrawlers": {}, + "glue:GetDataPreviewStatement": {}, + "glue:GetDataflowGraph": {}, + "glue:GetDevEndpoints": {}, + "glue:GetJobBookmark": {}, + "glue:GetJobs": {}, + "glue:GetMapping": {}, + "glue:GetNotebookInstanceStatus": {}, + "glue:GetPlan": {}, + "glue:GetSecurityConfiguration": {}, + "glue:GetSecurityConfigurations": {}, + "glue:GetTriggers": {}, + "glue:GlueNotebookAuthorize": {}, + "glue:GlueNotebookRefreshCredentials": {}, + "glue:ListBlueprints": {}, + "glue:ListColumnStatisticsTaskRuns": {}, + "glue:ListCrawlers": {}, + "glue:ListCrawls": {}, + "glue:ListCustomEntityTypes": {}, + "glue:ListDevEndpoints": {}, + "glue:ListJobs": {}, + "glue:ListRegistries": {}, + "glue:ListSessions": {}, + "glue:ListTriggers": {}, + "glue:ListWorkflows": {}, + "glue:ResetJobBookmark": {}, + "glue:RunDataPreviewStatement": {}, + "glue:SendFeedback": {}, + "glue:StartCompletion": {}, + "glue:StartCrawlerSchedule": {}, + "glue:StartNotebook": {}, + "glue:StopCrawlerSchedule": {}, + "glue:TerminateNotebook": {}, + "glue:TestConnection": {}, + "glue:UpdateClassifier": {}, + "glue:UpdateCrawlerSchedule": {}, + "glue:UseGlueStudio": {}, + "grafana:CreateWorkspace": {}, + "grafana:ListWorkspaces": {}, + "greengrass:AssociateServiceRoleToAccount": {}, + 
"greengrass:CreateConnectorDefinition": {}, + "greengrass:CreateCoreDefinition": {}, + "greengrass:CreateDeployment": {}, + "greengrass:CreateDeviceDefinition": {}, + "greengrass:CreateFunctionDefinition": {}, + "greengrass:CreateGroup": {}, + "greengrass:CreateLoggerDefinition": {}, + "greengrass:CreateResourceDefinition": {}, + "greengrass:CreateSoftwareUpdateJob": {}, + "greengrass:CreateSubscriptionDefinition": {}, + "greengrass:DisassociateServiceRoleFromAccount": {}, + "greengrass:GetServiceRoleForAccount": {}, + "greengrass:ListBulkDeployments": {}, + "greengrass:ListComponents": {}, + "greengrass:ListConnectorDefinitions": {}, + "greengrass:ListCoreDefinitions": {}, + "greengrass:ListCoreDevices": {}, + "greengrass:ListDeployments": {}, + "greengrass:ListDeviceDefinitions": {}, + "greengrass:ListFunctionDefinitions": {}, + "greengrass:ListGroups": {}, + "greengrass:ListLoggerDefinitions": {}, + "greengrass:ListResourceDefinitions": {}, + "greengrass:ListSubscriptionDefinitions": {}, + "greengrass:StartBulkDeployment": {}, + "groundstation:CreateConfig": {}, + "groundstation:CreateDataflowEndpointGroup": {}, + "groundstation:CreateEphemeris": {}, + "groundstation:CreateMissionProfile": {}, + "groundstation:GetMinuteUsage": {}, + "groundstation:ListConfigs": {}, + "groundstation:ListContacts": {}, + "groundstation:ListDataflowEndpointGroups": {}, + "groundstation:ListEphemerides": {}, + "groundstation:ListGroundStations": {}, + "groundstation:ListMissionProfiles": {}, + "groundstation:ListSatellites": {}, + "groundstation:RegisterAgent": {}, + "groundstation:ReserveContact": {}, + "groundtruthlabeling:AssociatePatchToManifestJob": {}, + "groundtruthlabeling:DescribeConsoleJob": {}, + "groundtruthlabeling:ListDatasetObjects": {}, + "groundtruthlabeling:RunFilterOrSampleDatasetJob": {}, + "groundtruthlabeling:RunGenerateManifestByCrawlingJob": {}, + "guardduty:AcceptAdministratorInvitation": {}, + "guardduty:AcceptInvitation": {}, + "guardduty:ArchiveFindings": {}, + "guardduty:CreateDetector": {}, + "guardduty:CreateIPSet": {}, + "guardduty:CreateMembers": {}, + "guardduty:CreatePublishingDestination": {}, + "guardduty:CreateSampleFindings": {}, + "guardduty:CreateThreatIntelSet": {}, + "guardduty:DeclineInvitations": {}, + "guardduty:DeleteInvitations": {}, + "guardduty:DeleteMembers": {}, + "guardduty:DescribeMalwareScans": {}, + "guardduty:DescribeOrganizationConfiguration": {}, + "guardduty:DisableOrganizationAdminAccount": {}, + "guardduty:DisassociateFromAdministratorAccount": {}, + "guardduty:DisassociateFromMasterAccount": {}, + "guardduty:DisassociateMembers": {}, + "guardduty:EnableOrganizationAdminAccount": {}, + "guardduty:GetAdministratorAccount": {}, + "guardduty:GetFindings": {}, + "guardduty:GetFindingsStatistics": {}, + "guardduty:GetInvitationsCount": {}, + "guardduty:GetMalwareScanSettings": {}, + "guardduty:GetMasterAccount": {}, + "guardduty:GetMemberDetectors": {}, + "guardduty:GetMembers": {}, + "guardduty:GetRemainingFreeTrialDays": {}, + "guardduty:GetUsageStatistics": {}, + "guardduty:InviteMembers": {}, + "guardduty:ListDetectors": {}, + "guardduty:ListFilters": {}, + "guardduty:ListFindings": {}, + "guardduty:ListIPSets": {}, + "guardduty:ListInvitations": {}, + "guardduty:ListMembers": {}, + "guardduty:ListOrganizationAdminAccounts": {}, + "guardduty:ListPublishingDestinations": {}, + "guardduty:ListThreatIntelSets": {}, + "guardduty:SendSecurityTelemetry": {}, + "guardduty:StartMalwareScan": {}, + "guardduty:StartMonitoringMembers": {}, + 
"guardduty:StopMonitoringMembers": {}, + "guardduty:UnarchiveFindings": {}, + "guardduty:UpdateFindingsFeedback": {}, + "guardduty:UpdateMalwareScanSettings": {}, + "guardduty:UpdateMemberDetectors": {}, + "guardduty:UpdateOrganizationConfiguration": {}, + "health:DescribeAffectedAccountsForOrganization": {}, + "health:DescribeAffectedEntitiesForOrganization": {}, + "health:DescribeEntityAggregates": {}, + "health:DescribeEntityAggregatesForOrganization": {}, + "health:DescribeEventAggregates": {}, + "health:DescribeEventDetailsForOrganization": {}, + "health:DescribeEventTypes": {}, + "health:DescribeEvents": {}, + "health:DescribeEventsForOrganization": {}, + "health:DescribeHealthServiceStatusForOrganization": {}, + "health:DisableHealthServiceAccessForOrganization": {}, + "health:EnableHealthServiceAccessForOrganization": {}, + "healthlake:CreateFHIRDatastore": {}, + "healthlake:ListFHIRDatastores": {}, + "honeycode:ApproveTeamAssociation": {}, + "honeycode:CreateTeam": {}, + "honeycode:CreateTenant": {}, + "honeycode:DeleteDomains": {}, + "honeycode:DeregisterGroups": {}, + "honeycode:DescribeTeam": {}, + "honeycode:ListDomains": {}, + "honeycode:ListGroups": {}, + "honeycode:ListTagsForResource": {}, + "honeycode:ListTeamAssociations": {}, + "honeycode:ListTenants": {}, + "honeycode:RegisterDomainForVerification": {}, + "honeycode:RegisterGroups": {}, + "honeycode:RejectTeamAssociation": {}, + "honeycode:RestartDomainVerification": {}, + "honeycode:TagResource": {}, + "honeycode:UntagResource": {}, + "honeycode:UpdateTeam": {}, + "iam:CreateAccountAlias": {}, + "iam:DeleteAccountAlias": {}, + "iam:DeleteAccountPasswordPolicy": {}, + "iam:DeleteCloudFrontPublicKey": {}, + "iam:GenerateCredentialReport": {}, + "iam:GetAccountAuthorizationDetails": {}, + "iam:GetAccountEmailAddress": {}, + "iam:GetAccountName": {}, + "iam:GetAccountPasswordPolicy": {}, + "iam:GetAccountSummary": {}, + "iam:GetCloudFrontPublicKey": {}, + "iam:GetContextKeysForCustomPolicy": {}, + "iam:GetCredentialReport": {}, + "iam:GetOrganizationsAccessReport": {}, + "iam:GetServiceLastAccessedDetails": {}, + "iam:GetServiceLastAccessedDetailsWithEntities": {}, + "iam:ListAccountAliases": {}, + "iam:ListCloudFrontPublicKeys": {}, + "iam:ListGroups": {}, + "iam:ListInstanceProfiles": {}, + "iam:ListOpenIDConnectProviders": {}, + "iam:ListPolicies": {}, + "iam:ListRoles": {}, + "iam:ListSAMLProviders": {}, + "iam:ListSTSRegionalEndpointsStatus": {}, + "iam:ListServerCertificates": {}, + "iam:ListUsers": {}, + "iam:ListVirtualMFADevices": {}, + "iam:SetSTSRegionalEndpointStatus": {}, + "iam:SetSecurityTokenServicePreferences": {}, + "iam:SimulateCustomPolicy": {}, + "iam:UpdateAccountEmailAddress": {}, + "iam:UpdateAccountName": {}, + "iam:UpdateAccountPasswordPolicy": {}, + "iam:UpdateCloudFrontPublicKey": {}, + "iam:UploadCloudFrontPublicKey": {}, + "identity-sync:CreateSyncProfile": {}, + "identitystore-auth:BatchDeleteSession": {}, + "identitystore-auth:BatchGetSession": {}, + "identitystore-auth:ListSessions": {}, + "imagebuilder:ListComponents": {}, + "imagebuilder:ListContainerRecipes": {}, + "imagebuilder:ListDistributionConfigurations": {}, + "imagebuilder:ListImagePipelines": {}, + "imagebuilder:ListImageRecipes": {}, + "imagebuilder:ListImages": {}, + "imagebuilder:ListInfrastructureConfigurations": {}, + "imagebuilder:ListLifecyclePolicies": {}, + "importexport:CancelJob": {}, + "importexport:CreateJob": {}, + "importexport:GetShippingLabel": {}, + "importexport:GetStatus": {}, + "importexport:ListJobs": {}, 
+ "importexport:UpdateJob": {}, + "inspector-scan:ScanSbom": {}, + "inspector2:AssociateMember": {}, + "inspector2:BatchGetAccountStatus": {}, + "inspector2:BatchGetCodeSnippet": {}, + "inspector2:BatchGetFindingDetails": {}, + "inspector2:BatchGetFreeTrialInfo": {}, + "inspector2:BatchGetMemberEc2DeepInspectionStatus": {}, + "inspector2:BatchUpdateMemberEc2DeepInspectionStatus": {}, + "inspector2:CancelFindingsReport": {}, + "inspector2:CancelSbomExport": {}, + "inspector2:CreateFindingsReport": {}, + "inspector2:CreateSbomExport": {}, + "inspector2:DescribeOrganizationConfiguration": {}, + "inspector2:Disable": {}, + "inspector2:DisableDelegatedAdminAccount": {}, + "inspector2:DisassociateMember": {}, + "inspector2:Enable": {}, + "inspector2:EnableDelegatedAdminAccount": {}, + "inspector2:GetConfiguration": {}, + "inspector2:GetDelegatedAdminAccount": {}, + "inspector2:GetEc2DeepInspectionConfiguration": {}, + "inspector2:GetEncryptionKey": {}, + "inspector2:GetFindingsReportStatus": {}, + "inspector2:GetMember": {}, + "inspector2:GetSbomExport": {}, + "inspector2:ListAccountPermissions": {}, + "inspector2:ListCoverage": {}, + "inspector2:ListCoverageStatistics": {}, + "inspector2:ListDelegatedAdminAccounts": {}, + "inspector2:ListFilters": {}, + "inspector2:ListFindingAggregations": {}, + "inspector2:ListFindings": {}, + "inspector2:ListMembers": {}, + "inspector2:ListTagsForResource": {}, + "inspector2:ListUsageTotals": {}, + "inspector2:ResetEncryptionKey": {}, + "inspector2:SearchVulnerabilities": {}, + "inspector2:TagResource": {}, + "inspector2:UntagResource": {}, + "inspector2:UpdateConfiguration": {}, + "inspector2:UpdateEc2DeepInspectionConfiguration": {}, + "inspector2:UpdateEncryptionKey": {}, + "inspector2:UpdateOrgEc2DeepInspectionConfiguration": {}, + "inspector2:UpdateOrganizationConfiguration": {}, + "inspector:AddAttributesToFindings": {}, + "inspector:CreateAssessmentTarget": {}, + "inspector:CreateAssessmentTemplate": {}, + "inspector:CreateExclusionsPreview": {}, + "inspector:CreateResourceGroup": {}, + "inspector:DeleteAssessmentRun": {}, + "inspector:DeleteAssessmentTarget": {}, + "inspector:DeleteAssessmentTemplate": {}, + "inspector:DescribeAssessmentRuns": {}, + "inspector:DescribeAssessmentTargets": {}, + "inspector:DescribeAssessmentTemplates": {}, + "inspector:DescribeCrossAccountAccessRole": {}, + "inspector:DescribeExclusions": {}, + "inspector:DescribeFindings": {}, + "inspector:DescribeResourceGroups": {}, + "inspector:DescribeRulesPackages": {}, + "inspector:GetAssessmentReport": {}, + "inspector:GetExclusionsPreview": {}, + "inspector:GetTelemetryMetadata": {}, + "inspector:ListAssessmentRunAgents": {}, + "inspector:ListAssessmentRuns": {}, + "inspector:ListAssessmentTargets": {}, + "inspector:ListAssessmentTemplates": {}, + "inspector:ListEventSubscriptions": {}, + "inspector:ListExclusions": {}, + "inspector:ListFindings": {}, + "inspector:ListRulesPackages": {}, + "inspector:ListTagsForResource": {}, + "inspector:PreviewAgents": {}, + "inspector:RegisterCrossAccountAccessRole": {}, + "inspector:RemoveAttributesFromFindings": {}, + "inspector:SetTagsForResource": {}, + "inspector:StartAssessmentRun": {}, + "inspector:StopAssessmentRun": {}, + "inspector:SubscribeToEvent": {}, + "inspector:UnsubscribeFromEvent": {}, + "inspector:UpdateAssessmentTarget": {}, + "internetmonitor:ListMonitors": {}, + "invoicing:GetInvoiceEmailDeliveryPreferences": {}, + "invoicing:GetInvoicePDF": {}, + "invoicing:ListInvoiceSummaries": {}, + 
"invoicing:PutInvoiceEmailDeliveryPreferences": {}, + "iot-device-tester:CheckVersion": {}, + "iot-device-tester:DownloadTestSuite": {}, + "iot-device-tester:LatestIdt": {}, + "iot-device-tester:SendMetrics": {}, + "iot-device-tester:SupportedVersion": {}, + "iot1click:ClaimDevicesByClaimCode": {}, + "iot1click:ListDevices": {}, + "iot1click:ListProjects": {}, + "iot:AttachThingPrincipal": {}, + "iot:CancelAuditMitigationActionsTask": {}, + "iot:CancelAuditTask": {}, + "iot:CancelDetectMitigationActionsTask": {}, + "iot:ClearDefaultAuthorizer": {}, + "iot:CreateAuditSuppression": {}, + "iot:CreateCertificateFromCsr": {}, + "iot:CreateKeysAndCertificate": {}, + "iot:DeleteAccountAuditConfiguration": {}, + "iot:DeleteAuditSuppression": {}, + "iot:DeleteRegistrationCode": {}, + "iot:DeleteV2LoggingLevel": {}, + "iot:DescribeAccountAuditConfiguration": {}, + "iot:DescribeAuditFinding": {}, + "iot:DescribeAuditMitigationActionsTask": {}, + "iot:DescribeAuditSuppression": {}, + "iot:DescribeAuditTask": {}, + "iot:DescribeDefaultAuthorizer": {}, + "iot:DescribeDetectMitigationActionsTask": {}, + "iot:DescribeEndpoint": {}, + "iot:DescribeEventConfigurations": {}, + "iot:DescribeThingRegistrationTask": {}, + "iot:DetachThingPrincipal": {}, + "iot:GetIndexingConfiguration": {}, + "iot:GetLoggingOptions": {}, + "iot:GetPackageConfiguration": {}, + "iot:GetRegistrationCode": {}, + "iot:GetV2LoggingOptions": {}, + "iot:ListAttachedPolicies": {}, + "iot:ListAuditFindings": {}, + "iot:ListAuditMitigationActionsExecutions": {}, + "iot:ListAuditMitigationActionsTasks": {}, + "iot:ListAuditSuppressions": {}, + "iot:ListAuditTasks": {}, + "iot:ListAuthorizers": {}, + "iot:ListBillingGroups": {}, + "iot:ListCACertificates": {}, + "iot:ListCertificates": {}, + "iot:ListCertificatesByCA": {}, + "iot:ListCustomMetrics": {}, + "iot:ListDetectMitigationActionsTasks": {}, + "iot:ListDimensions": {}, + "iot:ListDomainConfigurations": {}, + "iot:ListFleetMetrics": {}, + "iot:ListIndices": {}, + "iot:ListJobTemplates": {}, + "iot:ListJobs": {}, + "iot:ListManagedJobTemplates": {}, + "iot:ListMitigationActions": {}, + "iot:ListOTAUpdates": {}, + "iot:ListOutgoingCertificates": {}, + "iot:ListPackageVersions": {}, + "iot:ListPackages": {}, + "iot:ListPolicies": {}, + "iot:ListPolicyPrincipals": {}, + "iot:ListPrincipalPolicies": {}, + "iot:ListPrincipalThings": {}, + "iot:ListProvisioningTemplates": {}, + "iot:ListRelatedResourcesForAuditFinding": {}, + "iot:ListRetainedMessages": {}, + "iot:ListRoleAliases": {}, + "iot:ListScheduledAudits": {}, + "iot:ListStreams": {}, + "iot:ListThingGroups": {}, + "iot:ListThingPrincipals": {}, + "iot:ListThingRegistrationTaskReports": {}, + "iot:ListThingRegistrationTasks": {}, + "iot:ListThingTypes": {}, + "iot:ListThings": {}, + "iot:ListTopicRuleDestinations": {}, + "iot:ListTopicRules": {}, + "iot:ListTunnels": {}, + "iot:ListV2LoggingLevels": {}, + "iot:OpenTunnel": {}, + "iot:PutVerificationStateOnViolation": {}, + "iot:RegisterCACertificate": {}, + "iot:RegisterCertificate": {}, + "iot:RegisterCertificateWithoutCA": {}, + "iot:RegisterThing": {}, + "iot:SetLoggingOptions": {}, + "iot:SetV2LoggingLevel": {}, + "iot:SetV2LoggingOptions": {}, + "iot:StartAuditMitigationActionsTask": {}, + "iot:StartOnDemandAuditTask": {}, + "iot:StartThingRegistrationTask": {}, + "iot:StopThingRegistrationTask": {}, + "iot:UpdateAccountAuditConfiguration": {}, + "iot:UpdateAuditSuppression": {}, + "iot:UpdateEventConfigurations": {}, + "iot:UpdateIndexingConfiguration": {}, + 
"iot:UpdatePackageConfiguration": {}, + "iot:ValidateSecurityProfileBehaviors": {}, + "iotanalytics:DescribeLoggingOptions": {}, + "iotanalytics:ListChannels": {}, + "iotanalytics:ListDatasets": {}, + "iotanalytics:ListDatastores": {}, + "iotanalytics:ListPipelines": {}, + "iotanalytics:PutLoggingOptions": {}, + "iotanalytics:RunPipelineActivity": {}, + "iotdeviceadvisor:CreateSuiteDefinition": {}, + "iotdeviceadvisor:GetEndpoint": {}, + "iotdeviceadvisor:ListSuiteDefinitions": {}, + "iotdeviceadvisor:StartSuiteRun": {}, + "iotevents:DescribeDetectorModelAnalysis": {}, + "iotevents:DescribeLoggingOptions": {}, + "iotevents:GetDetectorModelAnalysisResults": {}, + "iotevents:ListAlarmModels": {}, + "iotevents:ListDetectorModels": {}, + "iotevents:ListInputRoutings": {}, + "iotevents:ListInputs": {}, + "iotevents:PutLoggingOptions": {}, + "iotevents:StartDetectorModelAnalysis": {}, + "iotfleethub:CreateApplication": {}, + "iotfleethub:ListApplications": {}, + "iotfleetwise:GetEncryptionConfiguration": {}, + "iotfleetwise:GetLoggingOptions": {}, + "iotfleetwise:GetRegisterAccountStatus": {}, + "iotfleetwise:ListCampaigns": {}, + "iotfleetwise:ListDecoderManifests": {}, + "iotfleetwise:ListFleets": {}, + "iotfleetwise:ListModelManifests": {}, + "iotfleetwise:ListSignalCatalogs": {}, + "iotfleetwise:ListVehicles": {}, + "iotfleetwise:PutEncryptionConfiguration": {}, + "iotfleetwise:PutLoggingOptions": {}, + "iotfleetwise:RegisterAccount": {}, + "iotroborunner:CreateSite": {}, + "iotroborunner:ListSites": {}, + "iotsitewise:CreateAssetModel": {}, + "iotsitewise:CreateBulkImportJob": {}, + "iotsitewise:CreateGateway": {}, + "iotsitewise:CreatePortal": {}, + "iotsitewise:DescribeBulkImportJob": {}, + "iotsitewise:DescribeDefaultEncryptionConfiguration": {}, + "iotsitewise:DescribeLoggingOptions": {}, + "iotsitewise:DescribeStorageConfiguration": {}, + "iotsitewise:EnableSiteWiseIntegration": {}, + "iotsitewise:ExecuteQuery": {}, + "iotsitewise:ListAssetModels": {}, + "iotsitewise:ListBulkImportJobs": {}, + "iotsitewise:ListGateways": {}, + "iotsitewise:ListPortals": {}, + "iotsitewise:PutDefaultEncryptionConfiguration": {}, + "iotsitewise:PutLoggingOptions": {}, + "iotsitewise:PutStorageConfiguration": {}, + "iottwinmaker:CreateMetadataTransferJob": {}, + "iottwinmaker:CreateWorkspace": {}, + "iottwinmaker:GetPricingPlan": {}, + "iottwinmaker:ListMetadataTransferJobs": {}, + "iottwinmaker:ListWorkspaces": {}, + "iottwinmaker:UpdatePricingPlan": {}, + "iotwireless:AssociateAwsAccountWithPartnerAccount": {}, + "iotwireless:CreateDestination": {}, + "iotwireless:CreateDeviceProfile": {}, + "iotwireless:CreateFuotaTask": {}, + "iotwireless:CreateMulticastGroup": {}, + "iotwireless:CreateServiceProfile": {}, + "iotwireless:CreateWirelessDevice": {}, + "iotwireless:CreateWirelessGateway": {}, + "iotwireless:CreateWirelessGatewayTaskDefinition": {}, + "iotwireless:DeleteQueuedMessages": {}, + "iotwireless:GetEventConfigurationByResourceTypes": {}, + "iotwireless:GetLogLevelsByResourceTypes": {}, + "iotwireless:GetPositionEstimate": {}, + "iotwireless:GetServiceEndpoint": {}, + "iotwireless:ListDestinations": {}, + "iotwireless:ListDeviceProfiles": {}, + "iotwireless:ListEventConfigurations": {}, + "iotwireless:ListFuotaTasks": {}, + "iotwireless:ListMulticastGroups": {}, + "iotwireless:ListNetworkAnalyzerConfigurations": {}, + "iotwireless:ListPartnerAccounts": {}, + "iotwireless:ListPositionConfigurations": {}, + "iotwireless:ListQueuedMessages": {}, + "iotwireless:ListServiceProfiles": {}, + 
"iotwireless:ListWirelessDeviceImportTasks": {}, + "iotwireless:ListWirelessDevices": {}, + "iotwireless:ListWirelessGatewayTaskDefinitions": {}, + "iotwireless:ListWirelessGateways": {}, + "iotwireless:ResetAllResourceLogLevels": {}, + "iotwireless:StartSingleWirelessDeviceImportTask": {}, + "iotwireless:UpdateEventConfigurationByResourceTypes": {}, + "iotwireless:UpdateLogLevelsByResourceTypes": {}, + "iq:span": {}, + "ivs:ListEncoderConfigurations": {}, + "ivs:ListStorageConfigurations": {}, + "kafka:DescribeClusterOperation": {}, + "kafka:DescribeClusterOperationV2": {}, + "kafka:GetBootstrapBrokers": {}, + "kafka:GetCompatibleKafkaVersions": {}, + "kafka:ListClusters": {}, + "kafka:ListClustersV2": {}, + "kafka:ListConfigurations": {}, + "kafka:ListKafkaVersions": {}, + "kafka:ListReplicators": {}, + "kafka:ListVpcConnections": {}, + "kafkaconnect:CreateConnector": {}, + "kafkaconnect:CreateCustomPlugin": {}, + "kafkaconnect:CreateWorkerConfiguration": {}, + "kafkaconnect:DeleteConnector": {}, + "kafkaconnect:DeleteCustomPlugin": {}, + "kafkaconnect:ListConnectors": {}, + "kafkaconnect:ListCustomPlugins": {}, + "kafkaconnect:ListWorkerConfigurations": {}, + "kafkaconnect:UpdateConnector": {}, + "kendra-ranking:CreateRescoreExecutionPlan": {}, + "kendra-ranking:ListRescoreExecutionPlans": {}, + "kendra:CreateIndex": {}, + "kendra:ListIndices": {}, + "kinesis:DescribeLimits": {}, + "kinesis:DisableEnhancedMonitoring": {}, + "kinesis:EnableEnhancedMonitoring": {}, + "kinesis:ListStreams": {}, + "kinesis:UpdateShardCount": {}, + "kinesis:UpdateStreamMode": {}, + "kinesisanalytics:CreateApplication": {}, + "kinesisanalytics:DiscoverInputSchema": {}, + "kinesisanalytics:ListApplications": {}, + "kinesisvideo:ListEdgeAgentConfigurations": {}, + "kinesisvideo:ListSignalingChannels": {}, + "kinesisvideo:ListStreams": {}, + "kms:ConnectCustomKeyStore": {}, + "kms:CreateCustomKeyStore": {}, + "kms:CreateKey": {}, + "kms:DeleteCustomKeyStore": {}, + "kms:DescribeCustomKeyStores": {}, + "kms:DisconnectCustomKeyStore": {}, + "kms:GenerateRandom": {}, + "kms:ListAliases": {}, + "kms:ListKeys": {}, + "kms:ListRetirableGrants": {}, + "kms:UpdateCustomKeyStore": {}, + "lakeformation:AddLFTagsToResource": {}, + "lakeformation:BatchGrantPermissions": {}, + "lakeformation:BatchRevokePermissions": {}, + "lakeformation:CancelTransaction": {}, + "lakeformation:CommitTransaction": {}, + "lakeformation:CreateDataCellsFilter": {}, + "lakeformation:CreateLFTag": {}, + "lakeformation:CreateLakeFormationOptIn": {}, + "lakeformation:DeleteDataCellsFilter": {}, + "lakeformation:DeleteLFTag": {}, + "lakeformation:DeleteLakeFormationOptIn": {}, + "lakeformation:DeleteObjectsOnCancel": {}, + "lakeformation:DeregisterResource": {}, + "lakeformation:DescribeResource": {}, + "lakeformation:DescribeTransaction": {}, + "lakeformation:ExtendTransaction": {}, + "lakeformation:GetDataAccess": {}, + "lakeformation:GetDataCellsFilter": {}, + "lakeformation:GetDataLakeSettings": {}, + "lakeformation:GetEffectivePermissionsForPath": {}, + "lakeformation:GetLFTag": {}, + "lakeformation:GetQueryState": {}, + "lakeformation:GetQueryStatistics": {}, + "lakeformation:GetResourceLFTags": {}, + "lakeformation:GetTableObjects": {}, + "lakeformation:GetWorkUnitResults": {}, + "lakeformation:GetWorkUnits": {}, + "lakeformation:GrantPermissions": {}, + "lakeformation:ListDataCellsFilter": {}, + "lakeformation:ListLFTags": {}, + "lakeformation:ListLakeFormationOptIns": {}, + "lakeformation:ListPermissions": {}, + 
"lakeformation:ListResources": {}, + "lakeformation:ListTableStorageOptimizers": {}, + "lakeformation:ListTransactions": {}, + "lakeformation:PutDataLakeSettings": {}, + "lakeformation:RegisterResource": {}, + "lakeformation:RemoveLFTagsFromResource": {}, + "lakeformation:RevokePermissions": {}, + "lakeformation:SearchDatabasesByLFTags": {}, + "lakeformation:SearchTablesByLFTags": {}, + "lakeformation:StartQueryPlanning": {}, + "lakeformation:StartTransaction": {}, + "lakeformation:UpdateDataCellsFilter": {}, + "lakeformation:UpdateLFTag": {}, + "lakeformation:UpdateResource": {}, + "lakeformation:UpdateTableObjects": {}, + "lakeformation:UpdateTableStorageOptimizer": {}, + "lambda:CreateCodeSigningConfig": {}, + "lambda:CreateEventSourceMapping": {}, + "lambda:GetAccountSettings": {}, + "lambda:ListCodeSigningConfigs": {}, + "lambda:ListEventSourceMappings": {}, + "lambda:ListFunctions": {}, + "lambda:ListLayerVersions": {}, + "lambda:ListLayers": {}, + "launchwizard:CreateAdditionalNode": {}, + "launchwizard:CreateDeployment": {}, + "launchwizard:CreateSettingsSet": {}, + "launchwizard:DeleteAdditionalNode": {}, + "launchwizard:DeleteApp": {}, + "launchwizard:DeleteDeployment": {}, + "launchwizard:DeleteSettingsSet": {}, + "launchwizard:DescribeAdditionalNode": {}, + "launchwizard:DescribeProvisionedApp": {}, + "launchwizard:DescribeProvisioningEvents": {}, + "launchwizard:DescribeSettingsSet": {}, + "launchwizard:GetDeployment": {}, + "launchwizard:GetInfrastructureSuggestion": {}, + "launchwizard:GetIpAddress": {}, + "launchwizard:GetResourceCostEstimate": {}, + "launchwizard:GetResourceRecommendation": {}, + "launchwizard:GetSettingsSet": {}, + "launchwizard:GetWorkload": {}, + "launchwizard:GetWorkloadAsset": {}, + "launchwizard:GetWorkloadAssets": {}, + "launchwizard:ListAdditionalNodes": {}, + "launchwizard:ListAllowedResources": {}, + "launchwizard:ListDeploymentEvents": {}, + "launchwizard:ListDeployments": {}, + "launchwizard:ListProvisionedApps": {}, + "launchwizard:ListResourceCostEstimates": {}, + "launchwizard:ListSettingsSets": {}, + "launchwizard:ListWorkloadDeploymentOptions": {}, + "launchwizard:ListWorkloadDeploymentPatterns": {}, + "launchwizard:ListWorkloads": {}, + "launchwizard:PutSettingsSet": {}, + "launchwizard:StartProvisioning": {}, + "launchwizard:UpdateSettingsSet": {}, + "lex:CreateTestSet": {}, + "lex:CreateUploadUrl": {}, + "lex:GetBotAliases": {}, + "lex:GetBots": {}, + "lex:GetBuiltinIntent": {}, + "lex:GetBuiltinIntents": {}, + "lex:GetBuiltinSlotTypes": {}, + "lex:GetImport": {}, + "lex:GetIntents": {}, + "lex:GetMigration": {}, + "lex:GetMigrations": {}, + "lex:GetSlotTypes": {}, + "lex:ListBots": {}, + "lex:ListBuiltInIntents": {}, + "lex:ListBuiltInSlotTypes": {}, + "lex:ListExports": {}, + "lex:ListImports": {}, + "lex:ListTestExecutions": {}, + "lex:ListTestSets": {}, + "lex:StartImport": {}, + "license-manager-linux-subscriptions:GetServiceSettings": {}, + "license-manager-linux-subscriptions:ListLinuxSubscriptionInstances": {}, + "license-manager-linux-subscriptions:ListLinuxSubscriptions": {}, + "license-manager-linux-subscriptions:UpdateServiceSettings": {}, + "license-manager-user-subscriptions:AssociateUser": {}, + "license-manager-user-subscriptions:DeregisterIdentityProvider": {}, + "license-manager-user-subscriptions:DisassociateUser": {}, + "license-manager-user-subscriptions:ListIdentityProviders": {}, + "license-manager-user-subscriptions:ListInstances": {}, + "license-manager-user-subscriptions:ListProductSubscriptions": {}, + 
"license-manager-user-subscriptions:ListUserAssociations": {}, + "license-manager-user-subscriptions:RegisterIdentityProvider": {}, + "license-manager-user-subscriptions:StartProductSubscription": {}, + "license-manager-user-subscriptions:StopProductSubscription": {}, + "license-manager-user-subscriptions:UpdateIdentityProviderSettings": {}, + "license-manager:CheckInLicense": {}, + "license-manager:CheckoutLicense": {}, + "license-manager:CreateLicense": {}, + "license-manager:CreateLicenseConfiguration": {}, + "license-manager:CreateLicenseConversionTaskForResource": {}, + "license-manager:CreateLicenseManagerReportGenerator": {}, + "license-manager:DeleteToken": {}, + "license-manager:ExtendLicenseConsumption": {}, + "license-manager:GetAccessToken": {}, + "license-manager:GetLicenseConversionTask": {}, + "license-manager:GetServiceSettings": {}, + "license-manager:ListDistributedGrants": {}, + "license-manager:ListLicenseConfigurations": {}, + "license-manager:ListLicenseConversionTasks": {}, + "license-manager:ListLicenseSpecificationsForResource": {}, + "license-manager:ListLicenses": {}, + "license-manager:ListReceivedGrants": {}, + "license-manager:ListReceivedGrantsForOrganization": {}, + "license-manager:ListReceivedLicenses": {}, + "license-manager:ListReceivedLicensesForOrganization": {}, + "license-manager:ListResourceInventory": {}, + "license-manager:ListTokens": {}, + "license-manager:UpdateServiceSettings": {}, + "lightsail:AllocateStaticIp": {}, + "lightsail:CopySnapshot": {}, + "lightsail:CreateBucket": {}, + "lightsail:CreateCertificate": {}, + "lightsail:CreateCloudFormationStack": {}, + "lightsail:CreateContactMethod": {}, + "lightsail:CreateContainerService": {}, + "lightsail:CreateContainerServiceRegistryLogin": {}, + "lightsail:CreateDisk": {}, + "lightsail:CreateDistribution": {}, + "lightsail:CreateDomain": {}, + "lightsail:CreateInstances": {}, + "lightsail:CreateKeyPair": {}, + "lightsail:CreateLoadBalancer": {}, + "lightsail:CreateRelationalDatabase": {}, + "lightsail:CreateRelationalDatabaseSnapshot": {}, + "lightsail:DeleteAutoSnapshot": {}, + "lightsail:DeleteContactMethod": {}, + "lightsail:DisableAddOn": {}, + "lightsail:DownloadDefaultKeyPair": {}, + "lightsail:EnableAddOn": {}, + "lightsail:GetActiveNames": {}, + "lightsail:GetAlarms": {}, + "lightsail:GetAutoSnapshots": {}, + "lightsail:GetBlueprints": {}, + "lightsail:GetBucketAccessKeys": {}, + "lightsail:GetBucketBundles": {}, + "lightsail:GetBucketMetricData": {}, + "lightsail:GetBuckets": {}, + "lightsail:GetBundles": {}, + "lightsail:GetCertificates": {}, + "lightsail:GetCloudFormationStackRecords": {}, + "lightsail:GetContactMethods": {}, + "lightsail:GetContainerAPIMetadata": {}, + "lightsail:GetContainerImages": {}, + "lightsail:GetContainerLog": {}, + "lightsail:GetContainerServiceDeployments": {}, + "lightsail:GetContainerServiceMetricData": {}, + "lightsail:GetContainerServicePowers": {}, + "lightsail:GetContainerServices": {}, + "lightsail:GetDisk": {}, + "lightsail:GetDiskSnapshot": {}, + "lightsail:GetDiskSnapshots": {}, + "lightsail:GetDisks": {}, + "lightsail:GetDistributionBundles": {}, + "lightsail:GetDistributionLatestCacheReset": {}, + "lightsail:GetDistributionMetricData": {}, + "lightsail:GetDistributions": {}, + "lightsail:GetDomain": {}, + "lightsail:GetDomains": {}, + "lightsail:GetExportSnapshotRecords": {}, + "lightsail:GetInstance": {}, + "lightsail:GetInstanceMetricData": {}, + "lightsail:GetInstancePortStates": {}, + "lightsail:GetInstanceSnapshot": {}, + 
"lightsail:GetInstanceSnapshots": {}, + "lightsail:GetInstanceState": {}, + "lightsail:GetInstances": {}, + "lightsail:GetKeyPair": {}, + "lightsail:GetKeyPairs": {}, + "lightsail:GetLoadBalancer": {}, + "lightsail:GetLoadBalancerMetricData": {}, + "lightsail:GetLoadBalancerTlsCertificates": {}, + "lightsail:GetLoadBalancerTlsPolicies": {}, + "lightsail:GetLoadBalancers": {}, + "lightsail:GetOperation": {}, + "lightsail:GetOperations": {}, + "lightsail:GetOperationsForResource": {}, + "lightsail:GetRegions": {}, + "lightsail:GetRelationalDatabase": {}, + "lightsail:GetRelationalDatabaseBlueprints": {}, + "lightsail:GetRelationalDatabaseBundles": {}, + "lightsail:GetRelationalDatabaseEvents": {}, + "lightsail:GetRelationalDatabaseLogEvents": {}, + "lightsail:GetRelationalDatabaseLogStreams": {}, + "lightsail:GetRelationalDatabaseMetricData": {}, + "lightsail:GetRelationalDatabaseParameters": {}, + "lightsail:GetRelationalDatabaseSnapshot": {}, + "lightsail:GetRelationalDatabaseSnapshots": {}, + "lightsail:GetRelationalDatabases": {}, + "lightsail:GetStaticIp": {}, + "lightsail:GetStaticIps": {}, + "lightsail:ImportKeyPair": {}, + "lightsail:IsVpcPeered": {}, + "lightsail:PeerVpc": {}, + "lightsail:SendContactMethodVerification": {}, + "lightsail:UnpeerVpc": {}, + "logs:CancelExportTask": {}, + "logs:CreateLogDelivery": {}, + "logs:DeleteAccountPolicy": {}, + "logs:DeleteLogDelivery": {}, + "logs:DeleteQueryDefinition": {}, + "logs:DeleteResourcePolicy": {}, + "logs:DescribeAccountPolicies": {}, + "logs:DescribeDeliveries": {}, + "logs:DescribeDeliveryDestinations": {}, + "logs:DescribeDeliverySources": {}, + "logs:DescribeDestinations": {}, + "logs:DescribeExportTasks": {}, + "logs:DescribeLogGroups": {}, + "logs:DescribeQueries": {}, + "logs:DescribeQueryDefinitions": {}, + "logs:DescribeResourcePolicies": {}, + "logs:GetLogDelivery": {}, + "logs:Link": {}, + "logs:ListLogDeliveries": {}, + "logs:PutAccountPolicy": {}, + "logs:PutQueryDefinition": {}, + "logs:PutResourcePolicy": {}, + "logs:StartLiveTail": {}, + "logs:StopLiveTail": {}, + "logs:StopQuery": {}, + "logs:TestMetricFilter": {}, + "logs:UpdateLogDelivery": {}, + "lookoutequipment:DescribeDataIngestionJob": {}, + "lookoutequipment:ListDatasets": {}, + "lookoutequipment:ListInferenceSchedulers": {}, + "lookoutequipment:ListModels": {}, + "lookoutequipment:ListRetrainingSchedulers": {}, + "lookoutmetrics:GetSampleData": {}, + "lookoutmetrics:ListAnomalyDetectors": {}, + "lookoutvision:CreateDataset": {}, + "lookoutvision:DeleteDataset": {}, + "lookoutvision:DescribeDataset": {}, + "lookoutvision:DescribeModelPackagingJob": {}, + "lookoutvision:DescribeTrialDetection": {}, + "lookoutvision:ListDatasetEntries": {}, + "lookoutvision:ListModelPackagingJobs": {}, + "lookoutvision:ListModels": {}, + "lookoutvision:ListProjects": {}, + "lookoutvision:ListTrialDetections": {}, + "lookoutvision:StartTrialDetection": {}, + "lookoutvision:UpdateDatasetEntries": {}, + "m2:CreateApplication": {}, + "m2:CreateEnvironment": {}, + "m2:GetSignedBluinsightsUrl": {}, + "m2:ListApplications": {}, + "m2:ListEngineVersions": {}, + "m2:ListEnvironments": {}, + "m2:ListTagsForResource": {}, + "machinelearning:DescribeBatchPredictions": {}, + "machinelearning:DescribeDataSources": {}, + "machinelearning:DescribeEvaluations": {}, + "machinelearning:DescribeMLModels": {}, + "macie2:AcceptInvitation": {}, + "macie2:CreateAllowList": {}, + "macie2:CreateInvitations": {}, + "macie2:CreateSampleFindings": {}, + "macie2:DeclineInvitations": {}, + 
"macie2:DeleteInvitations": {}, + "macie2:DescribeBuckets": {}, + "macie2:DescribeOrganizationConfiguration": {}, + "macie2:DisableMacie": {}, + "macie2:DisableOrganizationAdminAccount": {}, + "macie2:DisassociateFromAdministratorAccount": {}, + "macie2:DisassociateFromMasterAccount": {}, + "macie2:EnableMacie": {}, + "macie2:EnableOrganizationAdminAccount": {}, + "macie2:GetAdministratorAccount": {}, + "macie2:GetAutomatedDiscoveryConfiguration": {}, + "macie2:GetBucketStatistics": {}, + "macie2:GetClassificationExportConfiguration": {}, + "macie2:GetClassificationScope": {}, + "macie2:GetFindingStatistics": {}, + "macie2:GetFindings": {}, + "macie2:GetFindingsPublicationConfiguration": {}, + "macie2:GetInvitationsCount": {}, + "macie2:GetMacieSession": {}, + "macie2:GetMasterAccount": {}, + "macie2:GetResourceProfile": {}, + "macie2:GetRevealConfiguration": {}, + "macie2:GetSensitiveDataOccurrences": {}, + "macie2:GetSensitiveDataOccurrencesAvailability": {}, + "macie2:GetSensitivityInspectionTemplate": {}, + "macie2:GetUsageStatistics": {}, + "macie2:GetUsageTotals": {}, + "macie2:ListAllowLists": {}, + "macie2:ListClassificationJobs": {}, + "macie2:ListClassificationScopes": {}, + "macie2:ListCustomDataIdentifiers": {}, + "macie2:ListFindings": {}, + "macie2:ListFindingsFilters": {}, + "macie2:ListInvitations": {}, + "macie2:ListManagedDataIdentifiers": {}, + "macie2:ListMembers": {}, + "macie2:ListOrganizationAdminAccounts": {}, + "macie2:ListResourceProfileArtifacts": {}, + "macie2:ListResourceProfileDetections": {}, + "macie2:ListSensitivityInspectionTemplates": {}, + "macie2:PutClassificationExportConfiguration": {}, + "macie2:PutFindingsPublicationConfiguration": {}, + "macie2:SearchResources": {}, + "macie2:TestCustomDataIdentifier": {}, + "macie2:UpdateAutomatedDiscoveryConfiguration": {}, + "macie2:UpdateClassificationScope": {}, + "macie2:UpdateMacieSession": {}, + "macie2:UpdateMemberSession": {}, + "macie2:UpdateOrganizationConfiguration": {}, + "macie2:UpdateResourceProfile": {}, + "macie2:UpdateResourceProfileDetections": {}, + "macie2:UpdateRevealConfiguration": {}, + "macie2:UpdateSensitivityInspectionTemplate": {}, + "managedblockchain-query:BatchGetTokenBalance": {}, + "managedblockchain-query:GetAssetContract": {}, + "managedblockchain-query:GetTokenBalance": {}, + "managedblockchain-query:GetTransaction": {}, + "managedblockchain-query:ListAssetContracts": {}, + "managedblockchain-query:ListTokenBalances": {}, + "managedblockchain-query:ListTransactionEvents": {}, + "managedblockchain-query:ListTransactions": {}, + "managedblockchain:CreateAccessor": {}, + "managedblockchain:CreateNetwork": {}, + "managedblockchain:GET": {}, + "managedblockchain:Invoke": {}, + "managedblockchain:InvokeRpcBitcoinMainnet": {}, + "managedblockchain:InvokeRpcBitcoinTestnet": {}, + "managedblockchain:InvokeRpcPolygonMainnet": {}, + "managedblockchain:InvokeRpcPolygonMumbaiTestnet": {}, + "managedblockchain:ListAccessors": {}, + "managedblockchain:ListInvitations": {}, + "managedblockchain:ListNetworks": {}, + "managedblockchain:POST": {}, + "mechanicalturk:AcceptQualificationRequest": {}, + "mechanicalturk:ApproveAssignment": {}, + "mechanicalturk:AssociateQualificationWithWorker": {}, + "mechanicalturk:CreateAdditionalAssignmentsForHIT": {}, + "mechanicalturk:CreateHIT": {}, + "mechanicalturk:CreateHITType": {}, + "mechanicalturk:CreateHITWithHITType": {}, + "mechanicalturk:CreateQualificationType": {}, + "mechanicalturk:CreateWorkerBlock": {}, + "mechanicalturk:DeleteHIT": {}, + 
"mechanicalturk:DeleteQualificationType": {}, + "mechanicalturk:DeleteWorkerBlock": {}, + "mechanicalturk:DisassociateQualificationFromWorker": {}, + "mechanicalturk:GetAccountBalance": {}, + "mechanicalturk:GetAssignment": {}, + "mechanicalturk:GetFileUploadURL": {}, + "mechanicalturk:GetHIT": {}, + "mechanicalturk:GetQualificationScore": {}, + "mechanicalturk:GetQualificationType": {}, + "mechanicalturk:ListAssignmentsForHIT": {}, + "mechanicalturk:ListBonusPayments": {}, + "mechanicalturk:ListHITs": {}, + "mechanicalturk:ListHITsForQualificationType": {}, + "mechanicalturk:ListQualificationRequests": {}, + "mechanicalturk:ListQualificationTypes": {}, + "mechanicalturk:ListReviewPolicyResultsForHIT": {}, + "mechanicalturk:ListReviewableHITs": {}, + "mechanicalturk:ListWorkerBlocks": {}, + "mechanicalturk:ListWorkersWithQualificationType": {}, + "mechanicalturk:NotifyWorkers": {}, + "mechanicalturk:RejectAssignment": {}, + "mechanicalturk:RejectQualificationRequest": {}, + "mechanicalturk:SendBonus": {}, + "mechanicalturk:SendTestEventNotification": {}, + "mechanicalturk:UpdateExpirationForHIT": {}, + "mechanicalturk:UpdateHITReviewStatus": {}, + "mechanicalturk:UpdateHITTypeOfHIT": {}, + "mechanicalturk:UpdateNotificationSettings": {}, + "mechanicalturk:UpdateQualificationType": {}, + "mediaconnect:AddFlowMediaStreams": {}, + "mediaconnect:AddFlowOutputs": {}, + "mediaconnect:AddFlowSources": {}, + "mediaconnect:AddFlowVpcInterfaces": {}, + "mediaconnect:CreateFlow": {}, + "mediaconnect:DeleteFlow": {}, + "mediaconnect:DescribeFlow": {}, + "mediaconnect:DescribeOffering": {}, + "mediaconnect:DescribeReservation": {}, + "mediaconnect:DiscoverGatewayPollEndpoint": {}, + "mediaconnect:GrantFlowEntitlements": {}, + "mediaconnect:ListEntitlements": {}, + "mediaconnect:ListFlows": {}, + "mediaconnect:ListGateways": {}, + "mediaconnect:ListOfferings": {}, + "mediaconnect:ListReservations": {}, + "mediaconnect:ListTagsForResource": {}, + "mediaconnect:PollGateway": {}, + "mediaconnect:PurchaseOffering": {}, + "mediaconnect:RemoveFlowMediaStream": {}, + "mediaconnect:RemoveFlowOutput": {}, + "mediaconnect:RemoveFlowSource": {}, + "mediaconnect:RemoveFlowVpcInterface": {}, + "mediaconnect:RevokeFlowEntitlement": {}, + "mediaconnect:StartFlow": {}, + "mediaconnect:StopFlow": {}, + "mediaconnect:SubmitGatewayStateChange": {}, + "mediaconnect:TagResource": {}, + "mediaconnect:UntagResource": {}, + "mediaconnect:UpdateFlow": {}, + "mediaconnect:UpdateFlowEntitlement": {}, + "mediaconnect:UpdateFlowMediaStream": {}, + "mediaconnect:UpdateFlowOutput": {}, + "mediaconnect:UpdateFlowSource": {}, + "mediaconvert:AssociateCertificate": {}, + "mediaconvert:CreatePreset": {}, + "mediaconvert:CreateQueue": {}, + "mediaconvert:DeletePolicy": {}, + "mediaconvert:DescribeEndpoints": {}, + "mediaconvert:DisassociateCertificate": {}, + "mediaconvert:GetPolicy": {}, + "mediaconvert:ListJobTemplates": {}, + "mediaconvert:ListPresets": {}, + "mediaconvert:ListQueues": {}, + "mediaconvert:PutPolicy": {}, + "mediaimport:CreateDatabaseBinarySnapshot": {}, + "medialive:BatchDelete": {}, + "medialive:BatchStart": {}, + "medialive:BatchStop": {}, + "medialive:DescribeAccountConfiguration": {}, + "medialive:ListChannels": {}, + "medialive:ListInputDeviceTransfers": {}, + "medialive:ListInputDevices": {}, + "medialive:ListInputSecurityGroups": {}, + "medialive:ListInputs": {}, + "medialive:ListMultiplexPrograms": {}, + "medialive:ListMultiplexes": {}, + "medialive:ListOfferings": {}, + "medialive:ListReservations": {}, + 
"medialive:UpdateAccountConfiguration": {}, + "mediapackage-vod:CreateAsset": {}, + "mediapackage-vod:CreatePackagingConfiguration": {}, + "mediapackage-vod:CreatePackagingGroup": {}, + "mediapackage-vod:ListAssets": {}, + "mediapackage-vod:ListPackagingConfigurations": {}, + "mediapackage-vod:ListPackagingGroups": {}, + "mediapackage:CreateChannel": {}, + "mediapackage:CreateHarvestJob": {}, + "mediapackage:CreateOriginEndpoint": {}, + "mediapackage:ListChannels": {}, + "mediapackage:ListHarvestJobs": {}, + "mediapackage:ListOriginEndpoints": {}, + "mediapackagev2:ListChannelGroups": {}, + "mediastore:CreateContainer": {}, + "mediastore:ListContainers": {}, + "mediatailor:CreateChannel": {}, + "mediatailor:CreateLiveSource": {}, + "mediatailor:CreateProgram": {}, + "mediatailor:CreateSourceLocation": {}, + "mediatailor:CreateVodSource": {}, + "mediatailor:ListAlerts": {}, + "mediatailor:ListChannels": {}, + "mediatailor:ListLiveSources": {}, + "mediatailor:ListPlaybackConfigurations": {}, + "mediatailor:ListSourceLocations": {}, + "mediatailor:ListVodSources": {}, + "mediatailor:PutPlaybackConfiguration": {}, + "medical-imaging:CreateDatastore": {}, + "medical-imaging:ListDatastores": {}, + "memorydb:CreateParameterGroup": {}, + "memorydb:CreateSubnetGroup": {}, + "memorydb:CreateUser": {}, + "memorydb:DescribeEngineVersions": {}, + "memorydb:DescribeEvents": {}, + "memorydb:DescribeReservedNodesOfferings": {}, + "memorydb:DescribeServiceUpdates": {}, + "mgh:CreateHomeRegionControl": {}, + "mgh:DeleteHomeRegionControl": {}, + "mgh:DescribeApplicationState": {}, + "mgh:DescribeHomeRegionControls": {}, + "mgh:GetHomeRegion": {}, + "mgh:ListApplicationStates": {}, + "mgh:ListMigrationTasks": {}, + "mgh:ListProgressUpdateStreams": {}, + "mgh:NotifyApplicationState": {}, + "mgn:BatchDeleteSnapshotRequestForMgn": {}, + "mgn:CreateApplication": {}, + "mgn:CreateConnector": {}, + "mgn:CreateLaunchConfigurationTemplate": {}, + "mgn:CreateReplicationConfigurationTemplate": {}, + "mgn:CreateVcenterClientForMgn": {}, + "mgn:CreateWave": {}, + "mgn:DescribeJobs": {}, + "mgn:DescribeLaunchConfigurationTemplates": {}, + "mgn:DescribeReplicationConfigurationTemplates": {}, + "mgn:DescribeReplicationServerAssociationsForMgn": {}, + "mgn:DescribeSnapshotRequestsForMgn": {}, + "mgn:DescribeSourceServers": {}, + "mgn:DescribeVcenterClients": {}, + "mgn:GetAgentInstallationAssetsForMgn": {}, + "mgn:GetChannelCommandsForMgn": {}, + "mgn:InitializeService": {}, + "mgn:ListApplications": {}, + "mgn:ListConnectors": {}, + "mgn:ListExports": {}, + "mgn:ListImports": {}, + "mgn:ListManagedAccounts": {}, + "mgn:ListTagsForResource": {}, + "mgn:ListWaves": {}, + "mgn:RegisterAgentForMgn": {}, + "mgn:SendChannelCommandResultForMgn": {}, + "mgn:SendClientLogsForMgn": {}, + "mgn:SendClientMetricsForMgn": {}, + "mgn:StartExport": {}, + "mgn:StartImport": {}, + "mgn:VerifyClientRoleForMgn": {}, + "migrationhub-orchestrator:CreateWorkflow": {}, + "migrationhub-orchestrator:GetMessage": {}, + "migrationhub-orchestrator:GetTemplate": {}, + "migrationhub-orchestrator:GetTemplateStep": {}, + "migrationhub-orchestrator:GetTemplateStepGroup": {}, + "migrationhub-orchestrator:ListPlugins": {}, + "migrationhub-orchestrator:ListTemplateStepGroups": {}, + "migrationhub-orchestrator:ListTemplateSteps": {}, + "migrationhub-orchestrator:ListTemplates": {}, + "migrationhub-orchestrator:ListWorkflows": {}, + "migrationhub-orchestrator:RegisterPlugin": {}, + "migrationhub-orchestrator:SendMessage": {}, + 
"migrationhub-strategy:GetAntiPattern": {}, + "migrationhub-strategy:GetApplicationComponentDetails": {}, + "migrationhub-strategy:GetApplicationComponentStrategies": {}, + "migrationhub-strategy:GetAssessment": {}, + "migrationhub-strategy:GetImportFileTask": {}, + "migrationhub-strategy:GetLatestAssessmentId": {}, + "migrationhub-strategy:GetMessage": {}, + "migrationhub-strategy:GetPortfolioPreferences": {}, + "migrationhub-strategy:GetPortfolioSummary": {}, + "migrationhub-strategy:GetRecommendationReportDetails": {}, + "migrationhub-strategy:GetServerDetails": {}, + "migrationhub-strategy:GetServerStrategies": {}, + "migrationhub-strategy:ListAnalyzableServers": {}, + "migrationhub-strategy:ListAntiPatterns": {}, + "migrationhub-strategy:ListApplicationComponents": {}, + "migrationhub-strategy:ListCollectors": {}, + "migrationhub-strategy:ListImportFileTask": {}, + "migrationhub-strategy:ListJarArtifacts": {}, + "migrationhub-strategy:ListServers": {}, + "migrationhub-strategy:PutPortfolioPreferences": {}, + "migrationhub-strategy:RegisterCollector": {}, + "migrationhub-strategy:SendMessage": {}, + "migrationhub-strategy:StartAssessment": {}, + "migrationhub-strategy:StartImportFileTask": {}, + "migrationhub-strategy:StartRecommendationReportGeneration": {}, + "migrationhub-strategy:StopAssessment": {}, + "migrationhub-strategy:UpdateApplicationComponentConfig": {}, + "migrationhub-strategy:UpdateCollectorConfiguration": {}, + "migrationhub-strategy:UpdateServerConfig": {}, + "mobileanalytics:PutEvents": {}, + "monitron:CreateProject": {}, + "monitron:ListProjects": {}, + "mq:CreateBroker": {}, + "mq:CreateConfiguration": {}, + "mq:DescribeBrokerEngineTypes": {}, + "mq:DescribeBrokerInstanceOptions": {}, + "mq:ListBrokers": {}, + "mq:ListConfigurations": {}, + "network-firewall:ListRuleGroups": {}, + "networkmanager-chat:CancelMessageResponse": {}, + "networkmanager-chat:CreateConversation": {}, + "networkmanager-chat:DeleteConversation": {}, + "networkmanager-chat:ListConversationMessages": {}, + "networkmanager-chat:ListConversations": {}, + "networkmanager-chat:NotifyConversationIsActive": {}, + "networkmanager-chat:SendConversationMessage": {}, + "networkmanager:CreateGlobalNetwork": {}, + "networkmanager:ListCoreNetworks": {}, + "networkmanager:ListOrganizationServiceAccessStatus": {}, + "networkmanager:ListPeerings": {}, + "networkmanager:StartOrganizationServiceAccessUpdate": {}, + "nimble:GetFeatureMap": {}, + "nimble:ListStudios": {}, + "notifications-contacts:CreateEmailContact": {}, + "notifications-contacts:ListEmailContacts": {}, + "notifications-contacts:ListTagsForResource": {}, + "notifications:CreateEventRule": {}, + "notifications:CreateNotificationConfiguration": {}, + "notifications:DeregisterNotificationHub": {}, + "notifications:ListChannels": {}, + "notifications:ListEventRules": {}, + "notifications:ListNotificationConfigurations": {}, + "notifications:ListNotificationEvents": {}, + "notifications:ListNotificationHubs": {}, + "notifications:ListTagsForResource": {}, + "notifications:RegisterNotificationHub": {}, + "oam:CreateSink": {}, + "oam:ListLinks": {}, + "oam:ListSinks": {}, + "omics:AcceptShare": {}, + "omics:CreateAnnotationStore": {}, + "omics:CreateReferenceStore": {}, + "omics:CreateRunGroup": {}, + "omics:CreateSequenceStore": {}, + "omics:CreateShare": {}, + "omics:CreateVariantStore": {}, + "omics:CreateWorkflow": {}, + "omics:DeleteShare": {}, + "omics:GetShare": {}, + "omics:ListAnnotationImportJobs": {}, + "omics:ListAnnotationStores": {}, + 
"omics:ListReferenceStores": {}, + "omics:ListRunGroups": {}, + "omics:ListRuns": {}, + "omics:ListSequenceStores": {}, + "omics:ListShares": {}, + "omics:ListTagsForResource": {}, + "omics:ListVariantImportJobs": {}, + "omics:ListVariantStores": {}, + "omics:ListWorkflows": {}, + "omics:StartAnnotationImportJob": {}, + "omics:StartRun": {}, + "omics:StartVariantImportJob": {}, + "one:CreateDeviceConfigurationTemplate": {}, + "one:CreateDeviceInstance": {}, + "one:CreateSite": {}, + "one:ListDeviceConfigurationTemplates": {}, + "one:ListDeviceInstances": {}, + "one:ListSites": {}, + "one:ListUsers": {}, + "opsworks-cm:AssociateNode": {}, + "opsworks-cm:CreateBackup": {}, + "opsworks-cm:CreateServer": {}, + "opsworks-cm:DeleteBackup": {}, + "opsworks-cm:DeleteServer": {}, + "opsworks-cm:DescribeAccountAttributes": {}, + "opsworks-cm:DescribeBackups": {}, + "opsworks-cm:DescribeEvents": {}, + "opsworks-cm:DescribeNodeAssociationStatus": {}, + "opsworks-cm:DescribeServers": {}, + "opsworks-cm:DisassociateNode": {}, + "opsworks-cm:ExportServerEngineAttribute": {}, + "opsworks-cm:ListTagsForResource": {}, + "opsworks-cm:RestoreServer": {}, + "opsworks-cm:StartMaintenance": {}, + "opsworks-cm:TagResource": {}, + "opsworks-cm:UntagResource": {}, + "opsworks-cm:UpdateServer": {}, + "opsworks-cm:UpdateServerEngineAttributes": {}, + "opsworks:CreateStack": {}, + "opsworks:CreateUserProfile": {}, + "opsworks:DeleteUserProfile": {}, + "opsworks:DescribeMyUserProfile": {}, + "opsworks:DescribeOperatingSystems": {}, + "opsworks:DescribeUserProfiles": {}, + "opsworks:UpdateMyUserProfile": {}, + "opsworks:UpdateUserProfile": {}, + "organizations:CreateAccount": {}, + "organizations:CreateGovCloudAccount": {}, + "organizations:CreateOrganization": {}, + "organizations:CreatePolicy": {}, + "organizations:DeleteOrganization": {}, + "organizations:DeleteResourcePolicy": {}, + "organizations:DescribeCreateAccountStatus": {}, + "organizations:DescribeOrganization": {}, + "organizations:DescribeResourcePolicy": {}, + "organizations:DisableAWSServiceAccess": {}, + "organizations:EnableAWSServiceAccess": {}, + "organizations:EnableAllFeatures": {}, + "organizations:LeaveOrganization": {}, + "organizations:ListAWSServiceAccessForOrganization": {}, + "organizations:ListAccounts": {}, + "organizations:ListCreateAccountStatus": {}, + "organizations:ListDelegatedAdministrators": {}, + "organizations:ListHandshakesForAccount": {}, + "organizations:ListHandshakesForOrganization": {}, + "organizations:ListPolicies": {}, + "organizations:ListRoots": {}, + "osis:CreatePipeline": {}, + "osis:ListPipelineBlueprints": {}, + "osis:ListPipelines": {}, + "osis:ValidatePipeline": {}, + "outposts:CancelOrder": {}, + "outposts:CreatePrivateConnectivityConfig": {}, + "outposts:CreateSite": {}, + "outposts:GetCatalogItem": {}, + "outposts:GetConnection": {}, + "outposts:GetOrder": {}, + "outposts:GetPrivateConnectivityConfig": {}, + "outposts:ListAssets": {}, + "outposts:ListCatalogItems": {}, + "outposts:ListOrders": {}, + "outposts:ListOutposts": {}, + "outposts:ListSites": {}, + "outposts:ListTagsForResource": {}, + "outposts:StartConnection": {}, + "panorama:CreateApplicationInstance": {}, + "panorama:CreateJobForDevices": {}, + "panorama:CreateNodeFromTemplateJob": {}, + "panorama:CreatePackage": {}, + "panorama:CreatePackageImportJob": {}, + "panorama:DescribeDeviceJob": {}, + "panorama:DescribeNode": {}, + "panorama:DescribeNodeFromTemplateJob": {}, + "panorama:DescribePackageImportJob": {}, + "panorama:DescribeSoftware": {}, + 
"panorama:GetWebSocketURL": {}, + "panorama:ListDevices": {}, + "panorama:ListNodeFromTemplateJobs": {}, + "panorama:ListNodes": {}, + "panorama:ListPackageImportJobs": {}, + "panorama:ListPackages": {}, + "panorama:ProvisionDevice": {}, + "partnercentral-account-management:AssociatePartnerAccount": {}, + "partnercentral-account-management:AssociatePartnerUser": {}, + "partnercentral-account-management:DisassociatePartnerUser": {}, + "payment-cryptography:CreateKey": {}, + "payment-cryptography:DecryptData": {}, + "payment-cryptography:EncryptData": {}, + "payment-cryptography:GenerateCardValidationData": {}, + "payment-cryptography:GenerateMac": {}, + "payment-cryptography:GeneratePinData": {}, + "payment-cryptography:GetParametersForExport": {}, + "payment-cryptography:GetParametersForImport": {}, + "payment-cryptography:ImportKey": {}, + "payment-cryptography:ReEncryptData": {}, + "payment-cryptography:TranslatePinData": {}, + "payment-cryptography:VerifyAuthRequestCryptogram": {}, + "payment-cryptography:VerifyCardValidationData": {}, + "payment-cryptography:VerifyMac": {}, + "payment-cryptography:VerifyPinData": {}, + "payments:CreatePaymentInstrument": {}, + "payments:DeletePaymentInstrument": {}, + "payments:GetPaymentInstrument": {}, + "payments:GetPaymentStatus": {}, + "payments:ListPaymentPreferences": {}, + "payments:MakePayment": {}, + "payments:UpdatePaymentPreferences": {}, + "pca-connector-ad:CreateConnector": {}, + "pca-connector-ad:CreateDirectoryRegistration": {}, + "pca-connector-ad:ListConnectors": {}, + "pca-connector-ad:ListDirectoryRegistrations": {}, + "pca-connector-ad:ListTagsForResource": {}, + "personalize:ListBatchInferenceJobs": {}, + "personalize:ListBatchSegmentJobs": {}, + "personalize:ListCampaigns": {}, + "personalize:ListDataInsightsJobs": {}, + "personalize:ListDatasetExportJobs": {}, + "personalize:ListDatasetGroups": {}, + "personalize:ListDatasetImportJobs": {}, + "personalize:ListDatasets": {}, + "personalize:ListEventTrackers": {}, + "personalize:ListFilters": {}, + "personalize:ListMetricAttributionMetrics": {}, + "personalize:ListMetricAttributions": {}, + "personalize:ListRecipes": {}, + "personalize:ListRecommenders": {}, + "personalize:ListSchemas": {}, + "personalize:ListSolutionVersions": {}, + "personalize:ListSolutions": {}, + "personalize:ListTagsForResource": {}, + "personalize:PutActionInteractions": {}, + "personalize:PutEvents": {}, + "personalize:TagResource": {}, + "personalize:UntagResource": {}, + "pipes:ListPipes": {}, + "polly:DescribeVoices": {}, + "polly:GetSpeechSynthesisTask": {}, + "polly:ListLexicons": {}, + "polly:ListSpeechSynthesisTasks": {}, + "pricing:DescribeServices": {}, + "pricing:GetAttributeValues": {}, + "pricing:GetPriceListFileUrl": {}, + "pricing:GetProducts": {}, + "pricing:ListPriceLists": {}, + "private-networks:ListNetworks": {}, + "private-networks:ListTagsForResource": {}, + "private-networks:Ping": {}, + "profile:GetProfileObjectTypeTemplate": {}, + "profile:ListAccountIntegrations": {}, + "profile:ListDomains": {}, + "profile:ListProfileObjectTypeTemplates": {}, + "proton:CreateEnvironmentAccountConnection": {}, + "proton:CreateServiceSyncConfig": {}, + "proton:CreateTemplateSyncConfig": {}, + "proton:DeleteAccountRoles": {}, + "proton:DeleteServiceSyncConfig": {}, + "proton:DeleteTemplateSyncConfig": {}, + "proton:GetAccountRoles": {}, + "proton:GetAccountSettings": {}, + "proton:GetRepositorySyncStatus": {}, + "proton:GetResourceTemplateVersionStatusCounts": {}, + "proton:GetResourcesSummary": {}, 
+ "proton:GetServiceInstanceSyncStatus": {}, + "proton:GetServiceSyncBlockerSummary": {}, + "proton:GetServiceSyncConfig": {}, + "proton:GetTemplateSyncConfig": {}, + "proton:GetTemplateSyncStatus": {}, + "proton:ListDeployments": {}, + "proton:ListEnvironmentAccountConnections": {}, + "proton:ListEnvironmentTemplates": {}, + "proton:ListEnvironments": {}, + "proton:ListRepositories": {}, + "proton:ListRepositorySyncDefinitions": {}, + "proton:ListServiceInstances": {}, + "proton:ListServiceTemplates": {}, + "proton:ListServices": {}, + "proton:UpdateAccountRoles": {}, + "proton:UpdateAccountSettings": {}, + "proton:UpdateServiceSyncBlocker": {}, + "proton:UpdateServiceSyncConfig": {}, + "proton:UpdateTemplateSyncConfig": {}, + "purchase-orders:GetConsoleActionSetEnforced": {}, + "purchase-orders:ListPurchaseOrders": {}, + "purchase-orders:UpdateConsoleActionSetEnforced": {}, + "q:GetConversation": {}, + "q:GetTroubleshootingResults": {}, + "q:SendMessage": {}, + "q:StartConversation": {}, + "q:StartTroubleshootingAnalysis": {}, + "q:StartTroubleshootingResolutionExplanation": {}, + "qbusiness:AddUserLicenses": {}, + "qbusiness:CreateApplication": {}, + "qbusiness:CreateLicense": {}, + "qbusiness:ListApplications": {}, + "qbusiness:ListUserLicenses": {}, + "qbusiness:RemoveUserLicenses": {}, + "qldb:ListJournalS3Exports": {}, + "qldb:ListLedgers": {}, + "quicksight:AccountConfigurations": {}, + "quicksight:CreateAccountCustomization": {}, + "quicksight:CreateAccountSubscription": {}, + "quicksight:CreateCustomPermissions": {}, + "quicksight:CreateDataSource": {}, + "quicksight:CreateRoleMembership": {}, + "quicksight:CreateVPCConnection": {}, + "quicksight:DeleteCustomPermissions": {}, + "quicksight:DeleteIdentityPropagationConfig": {}, + "quicksight:DeleteRoleCustomPermission": {}, + "quicksight:DeleteRoleMembership": {}, + "quicksight:DescribeAccountSettings": {}, + "quicksight:DescribeCustomPermissions": {}, + "quicksight:DescribeIpRestriction": {}, + "quicksight:DescribeRoleCustomPermission": {}, + "quicksight:GetAnonymousUserEmbedUrl": {}, + "quicksight:GetGroupMapping": {}, + "quicksight:GetSessionEmbedUrl": {}, + "quicksight:ListCustomPermissions": {}, + "quicksight:ListCustomerManagedKeys": {}, + "quicksight:ListDataSets": {}, + "quicksight:ListDataSources": {}, + "quicksight:ListIdentityPropagationConfigs": {}, + "quicksight:ListIngestions": {}, + "quicksight:ListKMSKeysForUser": {}, + "quicksight:ListNamespaces": {}, + "quicksight:ListRefreshSchedules": {}, + "quicksight:ListRoleMemberships": {}, + "quicksight:ListTopicRefreshSchedules": {}, + "quicksight:ListTopics": {}, + "quicksight:ListVPCConnections": {}, + "quicksight:RegisterCustomerManagedKey": {}, + "quicksight:RemoveCustomerManagedKey": {}, + "quicksight:ScopeDownPolicy": {}, + "quicksight:SearchDirectoryGroups": {}, + "quicksight:SetGroupMapping": {}, + "quicksight:Subscribe": {}, + "quicksight:Unsubscribe": {}, + "quicksight:UpdateAccountSettings": {}, + "quicksight:UpdateCustomPermissions": {}, + "quicksight:UpdateIdentityPropagationConfig": {}, + "quicksight:UpdateIpRestriction": {}, + "quicksight:UpdatePublicSharingSettings": {}, + "quicksight:UpdateResourcePermissions": {}, + "quicksight:UpdateRoleCustomPermission": {}, + "ram:CreatePermission": {}, + "ram:CreateResourceShare": {}, + "ram:EnableSharingWithAwsOrganization": {}, + "ram:GetResourcePolicies": {}, + "ram:GetResourceShareAssociations": {}, + "ram:GetResourceShareInvitations": {}, + "ram:GetResourceShares": {}, + "ram:ListPermissionVersions": {}, + 
"ram:ListPermissions": {}, + "ram:ListPrincipals": {}, + "ram:ListReplacePermissionAssociationsWork": {}, + "ram:ListResourceTypes": {}, + "ram:ListResources": {}, + "rbin:ListRules": {}, + "rds:CancelExportTask": {}, + "rds:CreateDBProxy": {}, + "rds:CrossRegionCommunication": {}, + "rds:DescribeAccountAttributes": {}, + "rds:DescribeCertificates": {}, + "rds:DescribeDBEngineVersions": {}, + "rds:DescribeEngineDefaultClusterParameters": {}, + "rds:DescribeEngineDefaultParameters": {}, + "rds:DescribeEventCategories": {}, + "rds:DescribeEvents": {}, + "rds:DescribeExportTasks": {}, + "rds:DescribeOrderableDBInstanceOptions": {}, + "rds:DescribeRecommendationGroups": {}, + "rds:DescribeRecommendations": {}, + "rds:DescribeReservedDBInstancesOfferings": {}, + "rds:DescribeSourceRegions": {}, + "rds:ModifyCertificates": {}, + "rds:ModifyRecommendation": {}, + "rds:StartExportTask": {}, + "redshift-data:CancelStatement": {}, + "redshift-data:DescribeStatement": {}, + "redshift-data:GetStatementResult": {}, + "redshift-data:ListStatements": {}, + "redshift-serverless:CreateUsageLimit": {}, + "redshift-serverless:DeleteResourcePolicy": {}, + "redshift-serverless:DeleteScheduledAction": {}, + "redshift-serverless:DeleteSnapshotCopyConfiguration": {}, + "redshift-serverless:DeleteUsageLimit": {}, + "redshift-serverless:GetResourcePolicy": {}, + "redshift-serverless:GetScheduledAction": {}, + "redshift-serverless:GetTableRestoreStatus": {}, + "redshift-serverless:GetUsageLimit": {}, + "redshift-serverless:ListCustomDomainAssociations": {}, + "redshift-serverless:ListNamespaces": {}, + "redshift-serverless:ListScheduledActions": {}, + "redshift-serverless:ListTableRestoreStatus": {}, + "redshift-serverless:ListUsageLimits": {}, + "redshift-serverless:ListWorkgroups": {}, + "redshift-serverless:PutResourcePolicy": {}, + "redshift-serverless:UpdateScheduledAction": {}, + "redshift-serverless:UpdateSnapshotCopyConfiguration": {}, + "redshift-serverless:UpdateUsageLimit": {}, + "redshift-serverless:span": {}, + "redshift:AcceptReservedNodeExchange": {}, + "redshift:AddPartner": {}, + "redshift:AuthorizeEndpointAccess": {}, + "redshift:CancelQuery": {}, + "redshift:CancelQuerySession": {}, + "redshift:CreateAuthenticationProfile": {}, + "redshift:CreateEndpointAccess": {}, + "redshift:CreateRedshiftIdcApplication": {}, + "redshift:CreateSavedQuery": {}, + "redshift:CreateScheduledAction": {}, + "redshift:DeleteAuthenticationProfile": {}, + "redshift:DeleteEndpointAccess": {}, + "redshift:DeletePartner": {}, + "redshift:DeleteSavedQueries": {}, + "redshift:DeleteScheduledAction": {}, + "redshift:DescribeAccountAttributes": {}, + "redshift:DescribeAuthenticationProfiles": {}, + "redshift:DescribeClusterDbRevisions": {}, + "redshift:DescribeClusterParameterGroups": {}, + "redshift:DescribeClusterSecurityGroups": {}, + "redshift:DescribeClusterSnapshots": {}, + "redshift:DescribeClusterSubnetGroups": {}, + "redshift:DescribeClusterTracks": {}, + "redshift:DescribeClusterVersions": {}, + "redshift:DescribeClusters": {}, + "redshift:DescribeCustomDomainAssociations": {}, + "redshift:DescribeDataShares": {}, + "redshift:DescribeDataSharesForConsumer": {}, + "redshift:DescribeDataSharesForProducer": {}, + "redshift:DescribeDefaultClusterParameters": {}, + "redshift:DescribeEndpointAccess": {}, + "redshift:DescribeEndpointAuthorization": {}, + "redshift:DescribeEventCategories": {}, + "redshift:DescribeEventSubscriptions": {}, + "redshift:DescribeEvents": {}, + "redshift:DescribeHsmClientCertificates": {}, + 
"redshift:DescribeHsmConfigurations": {}, + "redshift:DescribeInboundIntegrations": {}, + "redshift:DescribeNodeConfigurationOptions": {}, + "redshift:DescribeOrderableClusterOptions": {}, + "redshift:DescribePartners": {}, + "redshift:DescribeQuery": {}, + "redshift:DescribeReservedNodeExchangeStatus": {}, + "redshift:DescribeReservedNodeOfferings": {}, + "redshift:DescribeReservedNodes": {}, + "redshift:DescribeSavedQueries": {}, + "redshift:DescribeScheduledActions": {}, + "redshift:DescribeSnapshotCopyGrants": {}, + "redshift:DescribeStorage": {}, + "redshift:DescribeTable": {}, + "redshift:DescribeTableRestoreStatus": {}, + "redshift:ExecuteQuery": {}, + "redshift:FetchResults": {}, + "redshift:GetReservedNodeExchangeConfigurationOptions": {}, + "redshift:GetReservedNodeExchangeOfferings": {}, + "redshift:ListDatabases": {}, + "redshift:ListSavedQueries": {}, + "redshift:ListSchemas": {}, + "redshift:ListTables": {}, + "redshift:ModifyAuthenticationProfile": {}, + "redshift:ModifyClusterMaintenance": {}, + "redshift:ModifyEndpointAccess": {}, + "redshift:ModifySavedQuery": {}, + "redshift:ModifyScheduledAction": {}, + "redshift:PurchaseReservedNodeOffering": {}, + "redshift:RevokeEndpointAccess": {}, + "redshift:UpdatePartnerStatus": {}, + "redshift:ViewQueriesFromConsole": {}, + "redshift:ViewQueriesInConsole": {}, + "refactor-spaces:CreateApplication": {}, + "refactor-spaces:CreateEnvironment": {}, + "refactor-spaces:CreateRoute": {}, + "refactor-spaces:CreateService": {}, + "refactor-spaces:DeleteResourcePolicy": {}, + "refactor-spaces:GetResourcePolicy": {}, + "refactor-spaces:ListEnvironments": {}, + "refactor-spaces:ListTagsForResource": {}, + "refactor-spaces:PutResourcePolicy": {}, + "rekognition:CompareFaces": {}, + "rekognition:CreateFaceLivenessSession": {}, + "rekognition:DescribeProjects": {}, + "rekognition:DetectFaces": {}, + "rekognition:DetectLabels": {}, + "rekognition:DetectProtectiveEquipment": {}, + "rekognition:DetectText": {}, + "rekognition:GetCelebrityInfo": {}, + "rekognition:GetCelebrityRecognition": {}, + "rekognition:GetContentModeration": {}, + "rekognition:GetFaceDetection": {}, + "rekognition:GetFaceLivenessSessionResults": {}, + "rekognition:GetFaceSearch": {}, + "rekognition:GetLabelDetection": {}, + "rekognition:GetMediaAnalysisJob": {}, + "rekognition:GetPersonTracking": {}, + "rekognition:GetSegmentDetection": {}, + "rekognition:GetTextDetection": {}, + "rekognition:ListCollections": {}, + "rekognition:ListMediaAnalysisJobs": {}, + "rekognition:RecognizeCelebrities": {}, + "rekognition:StartCelebrityRecognition": {}, + "rekognition:StartContentModeration": {}, + "rekognition:StartFaceDetection": {}, + "rekognition:StartFaceLivenessSession": {}, + "rekognition:StartLabelDetection": {}, + "rekognition:StartPersonTracking": {}, + "rekognition:StartSegmentDetection": {}, + "rekognition:StartTextDetection": {}, + "repostspace:CreateSpace": {}, + "repostspace:ListSpaces": {}, + "resiliencehub:CreateApp": {}, + "resiliencehub:CreateResiliencyPolicy": {}, + "resiliencehub:ListAppAssessments": {}, + "resiliencehub:ListApps": {}, + "resiliencehub:ListResiliencyPolicies": {}, + "resiliencehub:ListSuggestedResiliencyPolicies": {}, + "resiliencehub:ListTagsForResource": {}, + "resource-explorer-2:BatchGetView": {}, + "resource-explorer-2:CreateIndex": {}, + "resource-explorer-2:CreateView": {}, + "resource-explorer-2:DisassociateDefaultView": {}, + "resource-explorer-2:GetAccountLevelServiceConfiguration": {}, + "resource-explorer-2:GetDefaultView": {}, + 
"resource-explorer-2:GetIndex": {}, + "resource-explorer-2:ListIndexes": {}, + "resource-explorer-2:ListIndexesForMembers": {}, + "resource-explorer-2:ListSupportedResourceTypes": {}, + "resource-explorer-2:ListViews": {}, + "resource-groups:CreateGroup": {}, + "resource-groups:GetAccountSettings": {}, + "resource-groups:ListGroups": {}, + "resource-groups:SearchResources": {}, + "resource-groups:UpdateAccountSettings": {}, + "rhelkb:GetRhelURL": {}, + "robomaker:BatchDeleteWorlds": {}, + "robomaker:BatchDescribeSimulationJob": {}, + "robomaker:CreateDeploymentJob": {}, + "robomaker:CreateFleet": {}, + "robomaker:CreateRobot": {}, + "robomaker:CreateRobotApplication": {}, + "robomaker:CreateSimulationApplication": {}, + "robomaker:CreateSimulationJob": {}, + "robomaker:CreateWorldTemplate": {}, + "robomaker:ListDeploymentJobs": {}, + "robomaker:ListFleets": {}, + "robomaker:ListRobotApplications": {}, + "robomaker:ListRobots": {}, + "robomaker:ListSimulationApplications": {}, + "robomaker:ListSimulationJobBatches": {}, + "robomaker:ListSimulationJobs": {}, + "robomaker:ListWorldExportJobs": {}, + "robomaker:ListWorldGenerationJobs": {}, + "robomaker:ListWorldTemplates": {}, + "robomaker:ListWorlds": {}, + "robomaker:StartSimulationJobBatch": {}, + "rolesanywhere:CreateProfile": {}, + "rolesanywhere:CreateTrustAnchor": {}, + "rolesanywhere:ImportCrl": {}, + "rolesanywhere:ListCrls": {}, + "rolesanywhere:ListProfiles": {}, + "rolesanywhere:ListSubjects": {}, + "rolesanywhere:ListTagsForResource": {}, + "rolesanywhere:ListTrustAnchors": {}, + "route53-recovery-cluster:ListRoutingControls": {}, + "route53-recovery-control-config:ListAssociatedRoute53HealthChecks": {}, + "route53-recovery-control-config:ListClusters": {}, + "route53-recovery-control-config:ListControlPanels": {}, + "route53-recovery-control-config:ListRoutingControls": {}, + "route53-recovery-control-config:ListTagsForResource": {}, + "route53-recovery-readiness:CreateCrossAccountAuthorization": {}, + "route53-recovery-readiness:DeleteCrossAccountAuthorization": {}, + "route53-recovery-readiness:ListCells": {}, + "route53-recovery-readiness:ListCrossAccountAuthorizations": {}, + "route53-recovery-readiness:ListReadinessChecks": {}, + "route53-recovery-readiness:ListRecoveryGroups": {}, + "route53-recovery-readiness:ListResourceSets": {}, + "route53-recovery-readiness:ListRules": {}, + "route53-recovery-readiness:ListTagsForResources": {}, + "route53:CreateCidrCollection": {}, + "route53:CreateHealthCheck": {}, + "route53:CreateHostedZone": {}, + "route53:CreateReusableDelegationSet": {}, + "route53:CreateTrafficPolicy": {}, + "route53:GetAccountLimit": {}, + "route53:GetCheckerIpRanges": {}, + "route53:GetGeoLocation": {}, + "route53:GetHealthCheckCount": {}, + "route53:GetHostedZoneCount": {}, + "route53:GetTrafficPolicyInstanceCount": {}, + "route53:ListCidrCollections": {}, + "route53:ListGeoLocations": {}, + "route53:ListHealthChecks": {}, + "route53:ListHostedZones": {}, + "route53:ListHostedZonesByName": {}, + "route53:ListHostedZonesByVPC": {}, + "route53:ListReusableDelegationSets": {}, + "route53:ListTrafficPolicies": {}, + "route53:ListTrafficPolicyInstances": {}, + "route53:TestDNSAnswer": {}, + "route53domains:AcceptDomainTransferFromAnotherAwsAccount": {}, + "route53domains:AssociateDelegationSignerToDomain": {}, + "route53domains:CancelDomainTransferToAnotherAwsAccount": {}, + "route53domains:CheckDomainAvailability": {}, + "route53domains:CheckDomainTransferability": {}, + "route53domains:DeleteDomain": {}, + 
"route53domains:DeleteTagsForDomain": {}, + "route53domains:DisableDomainAutoRenew": {}, + "route53domains:DisableDomainTransferLock": {}, + "route53domains:DisassociateDelegationSignerFromDomain": {}, + "route53domains:EnableDomainAutoRenew": {}, + "route53domains:EnableDomainTransferLock": {}, + "route53domains:GetContactReachabilityStatus": {}, + "route53domains:GetDomainDetail": {}, + "route53domains:GetDomainSuggestions": {}, + "route53domains:GetOperationDetail": {}, + "route53domains:ListDomains": {}, + "route53domains:ListOperations": {}, + "route53domains:ListPrices": {}, + "route53domains:ListTagsForDomain": {}, + "route53domains:PushDomain": {}, + "route53domains:RegisterDomain": {}, + "route53domains:RejectDomainTransferFromAnotherAwsAccount": {}, + "route53domains:RenewDomain": {}, + "route53domains:ResendContactReachabilityEmail": {}, + "route53domains:ResendOperationAuthorization": {}, + "route53domains:RetrieveDomainAuthCode": {}, + "route53domains:TransferDomain": {}, + "route53domains:TransferDomainToAnotherAwsAccount": {}, + "route53domains:UpdateDomainContact": {}, + "route53domains:UpdateDomainContactPrivacy": {}, + "route53domains:UpdateDomainNameservers": {}, + "route53domains:UpdateTagsForDomain": {}, + "route53domains:ViewBilling": {}, + "route53resolver:CreateResolverQueryLogConfig": {}, + "route53resolver:GetResolverQueryLogConfigAssociation": {}, + "route53resolver:ListFirewallConfigs": {}, + "route53resolver:ListFirewallDomainLists": {}, + "route53resolver:ListFirewallRuleGroupAssociations": {}, + "route53resolver:ListFirewallRuleGroups": {}, + "route53resolver:ListOutpostResolvers": {}, + "route53resolver:ListResolverEndpoints": {}, + "route53resolver:ListResolverQueryLogConfigAssociations": {}, + "route53resolver:ListResolverQueryLogConfigs": {}, + "route53resolver:ListResolverRuleAssociations": {}, + "route53resolver:ListResolverRules": {}, + "rum:ListAppMonitors": {}, + "rum:ListTagsForResource": {}, + "s3-outposts:GetAccessPoint": {}, + "s3-outposts:ListAccessPoints": {}, + "s3-outposts:ListEndpoints": {}, + "s3-outposts:ListOutpostsWithS3": {}, + "s3-outposts:ListRegionalBuckets": {}, + "s3-outposts:ListSharedEndpoints": {}, + "s3:CreateJob": {}, + "s3:CreateStorageLensGroup": {}, + "s3:GetAccessPoint": {}, + "s3:GetAccountPublicAccessBlock": {}, + "s3:ListAccessGrantsInstances": {}, + "s3:ListAccessPoints": {}, + "s3:ListAccessPointsForObjectLambda": {}, + "s3:ListAllMyBuckets": {}, + "s3:ListJobs": {}, + "s3:ListMultiRegionAccessPoints": {}, + "s3:ListStorageLensConfigurations": {}, + "s3:ListStorageLensGroups": {}, + "s3:PutAccessPointPublicAccessBlock": {}, + "s3:PutAccountPublicAccessBlock": {}, + "s3:PutStorageLensConfiguration": {}, + "s3express:ListAllMyDirectoryBuckets": {}, + "sagemaker-geospatial:ListEarthObservationJobs": {}, + "sagemaker-geospatial:ListRasterDataCollections": {}, + "sagemaker-geospatial:ListVectorEnrichmentJobs": {}, + "sagemaker-geospatial:SearchRasterDataCollection": {}, + "sagemaker-groundtruth-synthetic:CreateProject": {}, + "sagemaker-groundtruth-synthetic:DeleteProject": {}, + "sagemaker-groundtruth-synthetic:GetAccountDetails": {}, + "sagemaker-groundtruth-synthetic:GetBatch": {}, + "sagemaker-groundtruth-synthetic:GetProject": {}, + "sagemaker-groundtruth-synthetic:ListBatchDataTransfers": {}, + "sagemaker-groundtruth-synthetic:ListBatchSummaries": {}, + "sagemaker-groundtruth-synthetic:ListProjectDataTransfers": {}, + "sagemaker-groundtruth-synthetic:ListProjectSummaries": {}, + 
"sagemaker-groundtruth-synthetic:StartBatchDataTransfer": {}, + "sagemaker-groundtruth-synthetic:StartProjectDataTransfer": {}, + "sagemaker-groundtruth-synthetic:UpdateBatch": {}, + "sagemaker:CreateLineageGroupPolicy": {}, + "sagemaker:DeleteLineageGroupPolicy": {}, + "sagemaker:DescribeLineageGroup": {}, + "sagemaker:DisableSagemakerServicecatalogPortfolio": {}, + "sagemaker:EnableSagemakerServicecatalogPortfolio": {}, + "sagemaker:GetLineageGroupPolicy": {}, + "sagemaker:GetSagemakerServicecatalogPortfolioStatus": {}, + "sagemaker:GetSearchSuggestions": {}, + "sagemaker:ListActions": {}, + "sagemaker:ListAlgorithms": {}, + "sagemaker:ListAppImageConfigs": {}, + "sagemaker:ListApps": {}, + "sagemaker:ListArtifacts": {}, + "sagemaker:ListAssociations": {}, + "sagemaker:ListAutoMLJobs": {}, + "sagemaker:ListCandidatesForAutoMLJob": {}, + "sagemaker:ListClusters": {}, + "sagemaker:ListCodeRepositories": {}, + "sagemaker:ListCompilationJobs": {}, + "sagemaker:ListContexts": {}, + "sagemaker:ListDataQualityJobDefinitions": {}, + "sagemaker:ListDeviceFleets": {}, + "sagemaker:ListDevices": {}, + "sagemaker:ListDomains": {}, + "sagemaker:ListEdgeDeploymentPlans": {}, + "sagemaker:ListEdgePackagingJobs": {}, + "sagemaker:ListEndpointConfigs": {}, + "sagemaker:ListEndpoints": {}, + "sagemaker:ListExperiments": {}, + "sagemaker:ListFeatureGroups": {}, + "sagemaker:ListFlowDefinitions": {}, + "sagemaker:ListHubs": {}, + "sagemaker:ListHumanLoops": {}, + "sagemaker:ListHumanTaskUis": {}, + "sagemaker:ListHyperParameterTuningJobs": {}, + "sagemaker:ListImages": {}, + "sagemaker:ListInferenceComponents": {}, + "sagemaker:ListInferenceExperiments": {}, + "sagemaker:ListInferenceRecommendationsJobSteps": {}, + "sagemaker:ListInferenceRecommendationsJobs": {}, + "sagemaker:ListLabelingJobs": {}, + "sagemaker:ListLineageGroups": {}, + "sagemaker:ListModelBiasJobDefinitions": {}, + "sagemaker:ListModelCards": {}, + "sagemaker:ListModelExplainabilityJobDefinitions": {}, + "sagemaker:ListModelMetadata": {}, + "sagemaker:ListModelPackageGroups": {}, + "sagemaker:ListModelQualityJobDefinitions": {}, + "sagemaker:ListModels": {}, + "sagemaker:ListMonitoringAlertHistory": {}, + "sagemaker:ListMonitoringAlerts": {}, + "sagemaker:ListMonitoringExecutions": {}, + "sagemaker:ListMonitoringSchedules": {}, + "sagemaker:ListNotebookInstanceLifecycleConfigs": {}, + "sagemaker:ListNotebookInstances": {}, + "sagemaker:ListPipelines": {}, + "sagemaker:ListProcessingJobs": {}, + "sagemaker:ListProjects": {}, + "sagemaker:ListResourceCatalogs": {}, + "sagemaker:ListSharedModelEvents": {}, + "sagemaker:ListSharedModels": {}, + "sagemaker:ListSpaces": {}, + "sagemaker:ListStageDevices": {}, + "sagemaker:ListStudioLifecycleConfigs": {}, + "sagemaker:ListSubscribedWorkteams": {}, + "sagemaker:ListTrainingJobs": {}, + "sagemaker:ListTransformJobs": {}, + "sagemaker:ListTrialComponents": {}, + "sagemaker:ListTrials": {}, + "sagemaker:ListUserProfiles": {}, + "sagemaker:ListWorkforces": {}, + "sagemaker:ListWorkteams": {}, + "sagemaker:PutLineageGroupPolicy": {}, + "sagemaker:QueryLineage": {}, + "sagemaker:RenderUiTemplate": {}, + "sagemaker:Search": {}, + "savingsplans:CreateSavingsPlan": {}, + "savingsplans:DescribeSavingsPlansOfferingRates": {}, + "savingsplans:DescribeSavingsPlansOfferings": {}, + "scheduler:ListScheduleGroups": {}, + "scheduler:ListSchedules": {}, + "schemas:CreateDiscoverer": {}, + "schemas:GetDiscoveredSchema": {}, + "sdb:ListDomains": {}, + "secretsmanager:BatchGetSecretValue": {}, + 
"secretsmanager:GetRandomPassword": {}, + "secretsmanager:ListSecrets": {}, + "securityhub:BatchGetConfigurationPolicyAssociations": {}, + "securityhub:BatchGetSecurityControls": {}, + "securityhub:BatchGetStandardsControlAssociations": {}, + "securityhub:BatchUpdateStandardsControlAssociations": {}, + "securityhub:CreateAutomationRule": {}, + "securityhub:CreateConfigurationPolicy": {}, + "securityhub:CreateFindingAggregator": {}, + "securityhub:GetConfigurationPolicyAssociation": {}, + "securityhub:GetSecurityControlDefinition": {}, + "securityhub:ListAutomationRules": {}, + "securityhub:ListConfigurationPolicies": {}, + "securityhub:ListConfigurationPolicyAssociations": {}, + "securityhub:ListFindingAggregators": {}, + "securityhub:ListSecurityControlDefinitions": {}, + "securityhub:ListStandardsControlAssociations": {}, + "securityhub:UpdateSecurityControl": {}, + "securitylake:CreateDataLakeExceptionSubscription": {}, + "securitylake:CreateSubscriber": {}, + "securitylake:DeleteDataLakeExceptionSubscription": {}, + "securitylake:DeregisterDataLakeDelegatedAdministrator": {}, + "securitylake:GetDataLakeExceptionSubscription": {}, + "securitylake:ListDataLakeExceptions": {}, + "securitylake:ListDataLakes": {}, + "securitylake:ListLogSources": {}, + "securitylake:ListSubscribers": {}, + "securitylake:RegisterDataLakeDelegatedAdministrator": {}, + "securitylake:UpdateDataLakeExceptionSubscription": {}, + "serverlessrepo:CreateApplication": {}, + "serverlessrepo:ListApplications": {}, + "serverlessrepo:SearchApplications": {}, + "servicecatalog:AssociateBudgetWithResource": {}, + "servicecatalog:AssociateProductWithPortfolio": {}, + "servicecatalog:BatchAssociateServiceActionWithProvisioningArtifact": {}, + "servicecatalog:BatchDisassociateServiceActionFromProvisioningArtifact": {}, + "servicecatalog:CopyProduct": {}, + "servicecatalog:CreateProvisionedProductPlan": {}, + "servicecatalog:CreateServiceAction": {}, + "servicecatalog:CreateTagOption": {}, + "servicecatalog:DeleteConstraint": {}, + "servicecatalog:DeleteProvisionedProductPlan": {}, + "servicecatalog:DeleteServiceAction": {}, + "servicecatalog:DeleteTagOption": {}, + "servicecatalog:DescribeConstraint": {}, + "servicecatalog:DescribeCopyProductStatus": {}, + "servicecatalog:DescribePortfolioShareStatus": {}, + "servicecatalog:DescribeProductView": {}, + "servicecatalog:DescribeProvisionedProduct": {}, + "servicecatalog:DescribeProvisionedProductPlan": {}, + "servicecatalog:DescribeRecord": {}, + "servicecatalog:DescribeServiceAction": {}, + "servicecatalog:DescribeServiceActionExecutionParameters": {}, + "servicecatalog:DescribeTagOption": {}, + "servicecatalog:DisableAWSOrganizationsAccess": {}, + "servicecatalog:DisassociateBudgetFromResource": {}, + "servicecatalog:DisassociateProductFromPortfolio": {}, + "servicecatalog:EnableAWSOrganizationsAccess": {}, + "servicecatalog:ExecuteProvisionedProductPlan": {}, + "servicecatalog:ExecuteProvisionedProductServiceAction": {}, + "servicecatalog:GetAWSOrganizationsAccessStatus": {}, + "servicecatalog:GetConfiguration": {}, + "servicecatalog:GetProvisionedProductOutputs": {}, + "servicecatalog:ListAcceptedPortfolioShares": {}, + "servicecatalog:ListApplications": {}, + "servicecatalog:ListAttributeGroups": {}, + "servicecatalog:ListBudgetsForResource": {}, + "servicecatalog:ListConstraintsForPortfolio": {}, + "servicecatalog:ListOrganizationPortfolioAccess": {}, + "servicecatalog:ListPortfolios": {}, + "servicecatalog:ListProvisionedProductPlans": {}, + 
"servicecatalog:ListProvisioningArtifactsForServiceAction": {}, + "servicecatalog:ListRecordHistory": {}, + "servicecatalog:ListResourcesForTagOption": {}, + "servicecatalog:ListServiceActions": {}, + "servicecatalog:ListStackInstancesForProvisionedProduct": {}, + "servicecatalog:ListTagOptions": {}, + "servicecatalog:NotifyProvisionProductEngineWorkflowResult": {}, + "servicecatalog:NotifyTerminateProvisionedProductEngineWorkflowResult": {}, + "servicecatalog:NotifyUpdateProvisionedProductEngineWorkflowResult": {}, + "servicecatalog:PutConfiguration": {}, + "servicecatalog:ScanProvisionedProducts": {}, + "servicecatalog:SearchProducts": {}, + "servicecatalog:SearchProductsAsAdmin": {}, + "servicecatalog:SearchProvisionedProducts": {}, + "servicecatalog:SyncResource": {}, + "servicecatalog:TerminateProvisionedProduct": {}, + "servicecatalog:UpdateConstraint": {}, + "servicecatalog:UpdateProvisionedProduct": {}, + "servicecatalog:UpdateProvisionedProductProperties": {}, + "servicecatalog:UpdateServiceAction": {}, + "servicecatalog:UpdateTagOption": {}, + "servicediscovery:CreateHttpNamespace": {}, + "servicediscovery:CreatePrivateDnsNamespace": {}, + "servicediscovery:CreatePublicDnsNamespace": {}, + "servicediscovery:DiscoverInstances": {}, + "servicediscovery:DiscoverInstancesRevision": {}, + "servicediscovery:GetInstance": {}, + "servicediscovery:GetInstancesHealthStatus": {}, + "servicediscovery:GetOperation": {}, + "servicediscovery:ListInstances": {}, + "servicediscovery:ListNamespaces": {}, + "servicediscovery:ListOperations": {}, + "servicediscovery:ListServices": {}, + "servicediscovery:ListTagsForResource": {}, + "servicediscovery:TagResource": {}, + "servicediscovery:UntagResource": {}, + "servicediscovery:UpdateInstanceCustomHealthStatus": {}, + "serviceextract:GetConfig": {}, + "servicequotas:AssociateServiceQuotaTemplate": {}, + "servicequotas:DeleteServiceQuotaIncreaseRequestFromTemplate": {}, + "servicequotas:DisassociateServiceQuotaTemplate": {}, + "servicequotas:GetAWSDefaultServiceQuota": {}, + "servicequotas:GetAssociationForServiceQuotaTemplate": {}, + "servicequotas:GetRequestedServiceQuotaChange": {}, + "servicequotas:GetServiceQuota": {}, + "servicequotas:GetServiceQuotaIncreaseRequestFromTemplate": {}, + "servicequotas:ListAWSDefaultServiceQuotas": {}, + "servicequotas:ListRequestedServiceQuotaChangeHistory": {}, + "servicequotas:ListRequestedServiceQuotaChangeHistoryByQuota": {}, + "servicequotas:ListServiceQuotaIncreaseRequestsInTemplate": {}, + "servicequotas:ListServiceQuotas": {}, + "servicequotas:ListServices": {}, + "servicequotas:ListTagsForResource": {}, + "servicequotas:TagResource": {}, + "servicequotas:UntagResource": {}, + "ses:CloneReceiptRuleSet": {}, + "ses:CreateConfigurationSet": {}, + "ses:CreateConfigurationSetEventDestination": {}, + "ses:CreateConfigurationSetTrackingOptions": {}, + "ses:CreateCustomVerificationEmailTemplate": {}, + "ses:CreateDedicatedIpPool": {}, + "ses:CreateEmailIdentity": {}, + "ses:CreateExportJob": {}, + "ses:CreateImportJob": {}, + "ses:CreateReceiptFilter": {}, + "ses:CreateReceiptRule": {}, + "ses:CreateReceiptRuleSet": {}, + "ses:CreateTemplate": {}, + "ses:DeleteConfigurationSet": {}, + "ses:DeleteConfigurationSetEventDestination": {}, + "ses:DeleteConfigurationSetTrackingOptions": {}, + "ses:DeleteCustomVerificationEmailTemplate": {}, + "ses:DeleteIdentity": {}, + "ses:DeleteIdentityPolicy": {}, + "ses:DeleteReceiptFilter": {}, + "ses:DeleteReceiptRule": {}, + "ses:DeleteReceiptRuleSet": {}, + 
"ses:DeleteSuppressedDestination": {}, + "ses:DeleteTemplate": {}, + "ses:DeleteVerifiedEmailAddress": {}, + "ses:DescribeActiveReceiptRuleSet": {}, + "ses:DescribeConfigurationSet": {}, + "ses:DescribeReceiptRule": {}, + "ses:DescribeReceiptRuleSet": {}, + "ses:GetAccount": {}, + "ses:GetAccountSendingEnabled": {}, + "ses:GetBlacklistReports": {}, + "ses:GetCustomVerificationEmailTemplate": {}, + "ses:GetDedicatedIp": {}, + "ses:GetDeliverabilityDashboardOptions": {}, + "ses:GetDomainDeliverabilityCampaign": {}, + "ses:GetIdentityDkimAttributes": {}, + "ses:GetIdentityMailFromDomainAttributes": {}, + "ses:GetIdentityNotificationAttributes": {}, + "ses:GetIdentityPolicies": {}, + "ses:GetIdentityVerificationAttributes": {}, + "ses:GetMessageInsights": {}, + "ses:GetSendQuota": {}, + "ses:GetSendStatistics": {}, + "ses:GetSuppressedDestination": {}, + "ses:GetTemplate": {}, + "ses:ListConfigurationSets": {}, + "ses:ListContactLists": {}, + "ses:ListCustomVerificationEmailTemplates": {}, + "ses:ListDedicatedIpPools": {}, + "ses:ListDeliverabilityTestReports": {}, + "ses:ListDomainDeliverabilityCampaigns": {}, + "ses:ListEmailIdentities": {}, + "ses:ListEmailTemplates": {}, + "ses:ListExportJobs": {}, + "ses:ListIdentities": {}, + "ses:ListIdentityPolicies": {}, + "ses:ListImportJobs": {}, + "ses:ListReceiptFilters": {}, + "ses:ListReceiptRuleSets": {}, + "ses:ListSuppressedDestinations": {}, + "ses:ListTemplates": {}, + "ses:ListVerifiedEmailAddresses": {}, + "ses:PutAccountDedicatedIpWarmupAttributes": {}, + "ses:PutAccountDetails": {}, + "ses:PutAccountSendingAttributes": {}, + "ses:PutAccountSuppressionAttributes": {}, + "ses:PutAccountVdmAttributes": {}, + "ses:PutConfigurationSetDeliveryOptions": {}, + "ses:PutDedicatedIpWarmupAttributes": {}, + "ses:PutDeliverabilityDashboardOption": {}, + "ses:PutIdentityPolicy": {}, + "ses:PutSuppressedDestination": {}, + "ses:ReorderReceiptRuleSet": {}, + "ses:SetActiveReceiptRuleSet": {}, + "ses:SetIdentityDkimEnabled": {}, + "ses:SetIdentityFeedbackForwardingEnabled": {}, + "ses:SetIdentityHeadersInNotificationsEnabled": {}, + "ses:SetIdentityMailFromDomain": {}, + "ses:SetIdentityNotificationTopic": {}, + "ses:SetReceiptRulePosition": {}, + "ses:TestRenderTemplate": {}, + "ses:UpdateAccountSendingEnabled": {}, + "ses:UpdateConfigurationSetEventDestination": {}, + "ses:UpdateConfigurationSetReputationMetricsEnabled": {}, + "ses:UpdateConfigurationSetSendingEnabled": {}, + "ses:UpdateConfigurationSetTrackingOptions": {}, + "ses:UpdateCustomVerificationEmailTemplate": {}, + "ses:UpdateReceiptRule": {}, + "ses:UpdateTemplate": {}, + "ses:VerifyDomainDkim": {}, + "ses:VerifyDomainIdentity": {}, + "ses:VerifyEmailAddress": {}, + "ses:VerifyEmailIdentity": {}, + "shield:AssociateDRTLogBucket": {}, + "shield:AssociateDRTRole": {}, + "shield:AssociateProactiveEngagementDetails": {}, + "shield:CreateProtection": {}, + "shield:CreateProtectionGroup": {}, + "shield:CreateSubscription": {}, + "shield:DeleteSubscription": {}, + "shield:DescribeAttackStatistics": {}, + "shield:DescribeDRTAccess": {}, + "shield:DescribeEmergencyContactSettings": {}, + "shield:DescribeSubscription": {}, + "shield:DisableApplicationLayerAutomaticResponse": {}, + "shield:DisableProactiveEngagement": {}, + "shield:DisassociateDRTLogBucket": {}, + "shield:DisassociateDRTRole": {}, + "shield:EnableApplicationLayerAutomaticResponse": {}, + "shield:EnableProactiveEngagement": {}, + "shield:GetSubscriptionState": {}, + "shield:ListAttacks": {}, + "shield:ListProtectionGroups": {}, + 
"shield:ListProtections": {}, + "shield:UpdateApplicationLayerAutomaticResponse": {}, + "shield:UpdateEmergencyContactSettings": {}, + "shield:UpdateSubscription": {}, + "signer:GetSigningPlatform": {}, + "signer:ListSigningJobs": {}, + "signer:ListSigningPlatforms": {}, + "signer:ListSigningProfiles": {}, + "signer:PutSigningProfile": {}, + "simspaceweaver:ListSimulations": {}, + "simspaceweaver:ListTagsForResource": {}, + "simspaceweaver:StartSimulation": {}, + "sms-voice:CreateConfigurationSet": {}, + "sms-voice:CreateConfigurationSetEventDestination": {}, + "sms-voice:CreateOptOutList": {}, + "sms-voice:CreateRegistration": {}, + "sms-voice:CreateRegistrationAttachment": {}, + "sms-voice:CreateVerifiedDestinationNumber": {}, + "sms-voice:DeleteConfigurationSet": {}, + "sms-voice:DeleteConfigurationSetEventDestination": {}, + "sms-voice:DeleteTextMessageSpendLimitOverride": {}, + "sms-voice:DeleteVoiceMessageSpendLimitOverride": {}, + "sms-voice:DescribeAccountAttributes": {}, + "sms-voice:DescribeAccountLimits": {}, + "sms-voice:DescribeRegistrationFieldDefinitions": {}, + "sms-voice:DescribeRegistrationSectionDefinitions": {}, + "sms-voice:DescribeRegistrationTypeDefinitions": {}, + "sms-voice:DescribeSpendLimits": {}, + "sms-voice:GetConfigurationSetEventDestinations": {}, + "sms-voice:ListConfigurationSets": {}, + "sms-voice:RequestSenderId": {}, + "sms-voice:SendVoiceMessage": {}, + "sms-voice:SetTextMessageSpendLimitOverride": {}, + "sms-voice:SetVoiceMessageSpendLimitOverride": {}, + "sms-voice:UpdateConfigurationSetEventDestination": {}, + "sms:CreateApp": {}, + "sms:CreateReplicationJob": {}, + "sms:DeleteApp": {}, + "sms:DeleteAppLaunchConfiguration": {}, + "sms:DeleteAppReplicationConfiguration": {}, + "sms:DeleteAppValidationConfiguration": {}, + "sms:DeleteReplicationJob": {}, + "sms:DeleteServerCatalog": {}, + "sms:DisassociateConnector": {}, + "sms:GenerateChangeSet": {}, + "sms:GenerateTemplate": {}, + "sms:GetApp": {}, + "sms:GetAppLaunchConfiguration": {}, + "sms:GetAppReplicationConfiguration": {}, + "sms:GetAppValidationConfiguration": {}, + "sms:GetAppValidationOutput": {}, + "sms:GetConnectors": {}, + "sms:GetReplicationJobs": {}, + "sms:GetReplicationRuns": {}, + "sms:GetServers": {}, + "sms:ImportAppCatalog": {}, + "sms:ImportServerCatalog": {}, + "sms:LaunchApp": {}, + "sms:ListApps": {}, + "sms:NotifyAppValidationOutput": {}, + "sms:PutAppLaunchConfiguration": {}, + "sms:PutAppReplicationConfiguration": {}, + "sms:PutAppValidationConfiguration": {}, + "sms:StartAppReplication": {}, + "sms:StartOnDemandAppReplication": {}, + "sms:StartOnDemandReplicationRun": {}, + "sms:StopAppReplication": {}, + "sms:TerminateApp": {}, + "sms:UpdateApp": {}, + "sms:UpdateReplicationJob": {}, + "snow-device-management:CreateTask": {}, + "snow-device-management:DescribeExecution": {}, + "snow-device-management:ListDevices": {}, + "snow-device-management:ListExecutions": {}, + "snow-device-management:ListTagsForResource": {}, + "snow-device-management:ListTasks": {}, + "snowball:CancelCluster": {}, + "snowball:CancelJob": {}, + "snowball:CreateAddress": {}, + "snowball:CreateCluster": {}, + "snowball:CreateJob": {}, + "snowball:CreateLongTermPricing": {}, + "snowball:CreateReturnShippingLabel": {}, + "snowball:DescribeAddress": {}, + "snowball:DescribeAddresses": {}, + "snowball:DescribeCluster": {}, + "snowball:DescribeJob": {}, + "snowball:DescribeReturnShippingLabel": {}, + "snowball:GetJobManifest": {}, + "snowball:GetJobUnlockCode": {}, + "snowball:GetSnowballUsage": {}, + 
"snowball:GetSoftwareUpdates": {}, + "snowball:ListClusterJobs": {}, + "snowball:ListClusters": {}, + "snowball:ListCompatibleImages": {}, + "snowball:ListJobs": {}, + "snowball:ListLongTermPricing": {}, + "snowball:ListPickupLocations": {}, + "snowball:ListServiceVersions": {}, + "snowball:UpdateCluster": {}, + "snowball:UpdateJob": {}, + "snowball:UpdateJobShipmentState": {}, + "snowball:UpdateLongTermPricing": {}, + "sns:CheckIfPhoneNumberIsOptedOut": {}, + "sns:CreatePlatformApplication": {}, + "sns:CreatePlatformEndpoint": {}, + "sns:CreateSMSSandboxPhoneNumber": {}, + "sns:DeleteEndpoint": {}, + "sns:DeletePlatformApplication": {}, + "sns:DeleteSMSSandboxPhoneNumber": {}, + "sns:GetEndpointAttributes": {}, + "sns:GetPlatformApplicationAttributes": {}, + "sns:GetSMSAttributes": {}, + "sns:GetSMSSandboxAccountStatus": {}, + "sns:GetSubscriptionAttributes": {}, + "sns:ListEndpointsByPlatformApplication": {}, + "sns:ListOriginationNumbers": {}, + "sns:ListPhoneNumbersOptedOut": {}, + "sns:ListPlatformApplications": {}, + "sns:ListSMSSandboxPhoneNumbers": {}, + "sns:ListSubscriptions": {}, + "sns:ListTopics": {}, + "sns:OptInPhoneNumber": {}, + "sns:SetEndpointAttributes": {}, + "sns:SetPlatformApplicationAttributes": {}, + "sns:SetSMSAttributes": {}, + "sns:SetSubscriptionAttributes": {}, + "sns:Unsubscribe": {}, + "sns:VerifySMSSandboxPhoneNumber": {}, + "sqlworkbench:BatchDeleteFolder": {}, + "sqlworkbench:CreateAccount": {}, + "sqlworkbench:CreateFolder": {}, + "sqlworkbench:DeleteTab": {}, + "sqlworkbench:GenerateSession": {}, + "sqlworkbench:GetAccountInfo": {}, + "sqlworkbench:GetAccountSettings": {}, + "sqlworkbench:GetAutocompletionMetadata": {}, + "sqlworkbench:GetAutocompletionResource": {}, + "sqlworkbench:GetQSqlRecommendations": {}, + "sqlworkbench:GetQueryExecutionHistory": {}, + "sqlworkbench:GetSchemaInference": {}, + "sqlworkbench:GetUserInfo": {}, + "sqlworkbench:GetUserWorkspaceSettings": {}, + "sqlworkbench:ListConnections": {}, + "sqlworkbench:ListDatabases": {}, + "sqlworkbench:ListFiles": {}, + "sqlworkbench:ListNotebooks": {}, + "sqlworkbench:ListQueryExecutionHistory": {}, + "sqlworkbench:ListRedshiftClusters": {}, + "sqlworkbench:ListSampleDatabases": {}, + "sqlworkbench:ListTabs": {}, + "sqlworkbench:ListTaggedResources": {}, + "sqlworkbench:PutTab": {}, + "sqlworkbench:PutUserWorkspaceSettings": {}, + "sqlworkbench:UpdateAccountConnectionSettings": {}, + "sqlworkbench:UpdateAccountExportSettings": {}, + "sqlworkbench:UpdateAccountGeneralSettings": {}, + "sqlworkbench:UpdateAccountQSqlSettings": {}, + "sqlworkbench:UpdateFolder": {}, + "sqs:ListQueues": {}, + "ssm-contacts:ListContacts": {}, + "ssm-contacts:ListEngagements": {}, + "ssm-contacts:ListRotations": {}, + "ssm-guiconnect:CancelConnection": {}, + "ssm-guiconnect:GetConnection": {}, + "ssm-guiconnect:StartConnection": {}, + "ssm-incidents:CreateReplicationSet": {}, + "ssm-incidents:CreateResponsePlan": {}, + "ssm-incidents:ListIncidentRecords": {}, + "ssm-incidents:ListReplicationSets": {}, + "ssm-incidents:ListResponsePlans": {}, + "ssm-sap:BackupDatabase": {}, + "ssm-sap:DeleteResourcePermission": {}, + "ssm-sap:GetApplication": {}, + "ssm-sap:GetDatabase": {}, + "ssm-sap:GetOperation": {}, + "ssm-sap:GetResourcePermission": {}, + "ssm-sap:ListApplications": {}, + "ssm-sap:ListDatabases": {}, + "ssm-sap:ListOperations": {}, + "ssm-sap:ListTagsForResource": {}, + "ssm-sap:PutResourcePermission": {}, + "ssm-sap:RegisterApplication": {}, + "ssm-sap:RestoreDatabase": {}, + 
"ssm-sap:UpdateHANABackupSettings": {}, + "ssm:CancelCommand": {}, + "ssm:CreateActivation": {}, + "ssm:CreateMaintenanceWindow": {}, + "ssm:CreateOpsItem": {}, + "ssm:CreateOpsMetadata": {}, + "ssm:CreatePatchBaseline": {}, + "ssm:DeleteActivation": {}, + "ssm:DeleteInventory": {}, + "ssm:DescribeActivations": {}, + "ssm:DescribeAutomationExecutions": {}, + "ssm:DescribeAvailablePatches": {}, + "ssm:DescribeInstanceInformation": {}, + "ssm:DescribeInstancePatchStates": {}, + "ssm:DescribeInstancePatchStatesForPatchGroup": {}, + "ssm:DescribeInstancePatches": {}, + "ssm:DescribeInstanceProperties": {}, + "ssm:DescribeInventoryDeletions": {}, + "ssm:DescribeMaintenanceWindowExecutionTaskInvocations": {}, + "ssm:DescribeMaintenanceWindowSchedule": {}, + "ssm:DescribeMaintenanceWindows": {}, + "ssm:DescribeMaintenanceWindowsForTarget": {}, + "ssm:DescribeOpsItems": {}, + "ssm:DescribeParameters": {}, + "ssm:DescribePatchBaselines": {}, + "ssm:DescribePatchGroupState": {}, + "ssm:DescribePatchGroups": {}, + "ssm:DescribePatchProperties": {}, + "ssm:DescribeSessions": {}, + "ssm:GetCommandInvocation": {}, + "ssm:GetDeployablePatchSnapshotForInstance": {}, + "ssm:GetInventory": {}, + "ssm:GetInventorySchema": {}, + "ssm:GetMaintenanceWindowExecution": {}, + "ssm:GetMaintenanceWindowExecutionTask": {}, + "ssm:GetMaintenanceWindowExecutionTaskInvocation": {}, + "ssm:GetManifest": {}, + "ssm:ListAssociations": {}, + "ssm:ListCommandInvocations": {}, + "ssm:ListCommands": {}, + "ssm:ListComplianceItems": {}, + "ssm:ListComplianceSummaries": {}, + "ssm:ListDocuments": {}, + "ssm:ListInventoryEntries": {}, + "ssm:ListOpsItemEvents": {}, + "ssm:ListOpsItemRelatedItems": {}, + "ssm:ListOpsMetadata": {}, + "ssm:ListResourceComplianceSummaries": {}, + "ssm:ListResourceDataSync": {}, + "ssm:PutConfigurePackageResult": {}, + "ssm:PutInventory": {}, + "ssm:RegisterManagedInstance": {}, + "ssmmessages:CreateControlChannel": {}, + "ssmmessages:CreateDataChannel": {}, + "ssmmessages:OpenControlChannel": {}, + "ssmmessages:OpenDataChannel": {}, + "sso-directory:AddMemberToGroup": {}, + "sso-directory:CompleteVirtualMfaDeviceRegistration": {}, + "sso-directory:CompleteWebAuthnDeviceRegistration": {}, + "sso-directory:CreateAlias": {}, + "sso-directory:CreateBearerToken": {}, + "sso-directory:CreateExternalIdPConfigurationForDirectory": {}, + "sso-directory:CreateGroup": {}, + "sso-directory:CreateProvisioningTenant": {}, + "sso-directory:CreateUser": {}, + "sso-directory:DeleteBearerToken": {}, + "sso-directory:DeleteExternalIdPCertificate": {}, + "sso-directory:DeleteExternalIdPConfigurationForDirectory": {}, + "sso-directory:DeleteGroup": {}, + "sso-directory:DeleteMfaDeviceForUser": {}, + "sso-directory:DeleteProvisioningTenant": {}, + "sso-directory:DeleteUser": {}, + "sso-directory:DescribeDirectory": {}, + "sso-directory:DescribeGroup": {}, + "sso-directory:DescribeGroups": {}, + "sso-directory:DescribeProvisioningTenant": {}, + "sso-directory:DescribeUser": {}, + "sso-directory:DescribeUserByUniqueAttribute": {}, + "sso-directory:DescribeUsers": {}, + "sso-directory:DisableExternalIdPConfigurationForDirectory": {}, + "sso-directory:DisableUser": {}, + "sso-directory:EnableExternalIdPConfigurationForDirectory": {}, + "sso-directory:EnableUser": {}, + "sso-directory:GetAWSSPConfigurationForDirectory": {}, + "sso-directory:GetUserPoolInfo": {}, + "sso-directory:ImportExternalIdPCertificate": {}, + "sso-directory:IsMemberInGroup": {}, + "sso-directory:ListBearerTokens": {}, + 
"sso-directory:ListExternalIdPCertificates": {}, + "sso-directory:ListExternalIdPConfigurationsForDirectory": {}, + "sso-directory:ListGroupsForMember": {}, + "sso-directory:ListGroupsForUser": {}, + "sso-directory:ListMembersInGroup": {}, + "sso-directory:ListMfaDevicesForUser": {}, + "sso-directory:ListProvisioningTenants": {}, + "sso-directory:RemoveMemberFromGroup": {}, + "sso-directory:SearchGroups": {}, + "sso-directory:SearchUsers": {}, + "sso-directory:StartVirtualMfaDeviceRegistration": {}, + "sso-directory:StartWebAuthnDeviceRegistration": {}, + "sso-directory:UpdateExternalIdPConfigurationForDirectory": {}, + "sso-directory:UpdateGroup": {}, + "sso-directory:UpdateGroupDisplayName": {}, + "sso-directory:UpdateMfaDeviceForUser": {}, + "sso-directory:UpdatePassword": {}, + "sso-directory:UpdateUser": {}, + "sso-directory:UpdateUserName": {}, + "sso-directory:VerifyEmail": {}, + "sso:AssociateDirectory": {}, + "sso:AssociateProfile": {}, + "sso:CreateApplicationInstance": {}, + "sso:CreateApplicationInstanceCertificate": {}, + "sso:CreateManagedApplicationInstance": {}, + "sso:CreateProfile": {}, + "sso:CreateTrust": {}, + "sso:DeleteApplicationInstance": {}, + "sso:DeleteApplicationInstanceCertificate": {}, + "sso:DeleteManagedApplicationInstance": {}, + "sso:DeletePermissionsPolicy": {}, + "sso:DeleteProfile": {}, + "sso:DescribeDirectories": {}, + "sso:DescribePermissionsPolicies": {}, + "sso:DescribeRegisteredRegions": {}, + "sso:DescribeTrusts": {}, + "sso:DisassociateDirectory": {}, + "sso:DisassociateProfile": {}, + "sso:GetApplicationInstance": {}, + "sso:GetApplicationTemplate": {}, + "sso:GetManagedApplicationInstance": {}, + "sso:GetMfaDeviceManagementForDirectory": {}, + "sso:GetPermissionSet": {}, + "sso:GetPermissionsPolicy": {}, + "sso:GetProfile": {}, + "sso:GetSSOStatus": {}, + "sso:GetSharedSsoConfiguration": {}, + "sso:GetSsoConfiguration": {}, + "sso:GetTrust": {}, + "sso:ImportApplicationInstanceServiceProviderMetadata": {}, + "sso:ListApplicationInstanceCertificates": {}, + "sso:ListApplicationInstances": {}, + "sso:ListApplicationTemplates": {}, + "sso:ListApplications": {}, + "sso:ListDirectoryAssociations": {}, + "sso:ListInstances": {}, + "sso:ListProfileAssociations": {}, + "sso:ListProfiles": {}, + "sso:PutMfaDeviceManagementForDirectory": {}, + "sso:PutPermissionsPolicy": {}, + "sso:SearchGroups": {}, + "sso:SearchUsers": {}, + "sso:StartSSO": {}, + "sso:UpdateApplicationInstanceActiveCertificate": {}, + "sso:UpdateApplicationInstanceDisplayData": {}, + "sso:UpdateApplicationInstanceResponseConfiguration": {}, + "sso:UpdateApplicationInstanceResponseSchemaConfiguration": {}, + "sso:UpdateApplicationInstanceSecurityConfiguration": {}, + "sso:UpdateApplicationInstanceServiceProviderConfiguration": {}, + "sso:UpdateApplicationInstanceStatus": {}, + "sso:UpdateDirectoryAssociation": {}, + "sso:UpdateManagedApplicationInstanceStatus": {}, + "sso:UpdateProfile": {}, + "sso:UpdateSSOConfiguration": {}, + "sso:UpdateTrust": {}, + "states:InvokeHTTPEndpoint": {}, + "states:ListActivities": {}, + "states:ListStateMachines": {}, + "states:RevealSecrets": {}, + "states:SendTaskFailure": {}, + "states:SendTaskHeartbeat": {}, + "states:SendTaskSuccess": {}, + "states:TestState": {}, + "storagegateway:ActivateGateway": {}, + "storagegateway:CreateTapePool": {}, + "storagegateway:DeleteTapeArchive": {}, + "storagegateway:DescribeTapeArchives": {}, + "storagegateway:ListAutomaticTapeCreationPolicies": {}, + "storagegateway:ListFileShares": {}, + 
"storagegateway:ListFileSystemAssociations": {}, + "storagegateway:ListGateways": {}, + "storagegateway:ListTapePools": {}, + "storagegateway:ListTapes": {}, + "storagegateway:ListVolumes": {}, + "sts:DecodeAuthorizationMessage": {}, + "sts:GetAccessKeyInfo": {}, + "sts:GetCallerIdentity": {}, + "sts:GetServiceBearerToken": {}, + "sts:GetSessionToken": {}, + "support:AddAttachmentsToSet": {}, + "support:AddCommunicationToCase": {}, + "support:CreateCase": {}, + "support:DescribeAttachment": {}, + "support:DescribeCaseAttributes": {}, + "support:DescribeCases": {}, + "support:DescribeCommunication": {}, + "support:DescribeCommunications": {}, + "support:DescribeCreateCaseOptions": {}, + "support:DescribeIssueTypes": {}, + "support:DescribeServices": {}, + "support:DescribeSeverityLevels": {}, + "support:DescribeSupportLevel": {}, + "support:DescribeSupportedLanguages": {}, + "support:DescribeTrustedAdvisorCheckRefreshStatuses": {}, + "support:DescribeTrustedAdvisorCheckResult": {}, + "support:DescribeTrustedAdvisorCheckSummaries": {}, + "support:DescribeTrustedAdvisorChecks": {}, + "support:InitiateCallForCase": {}, + "support:InitiateChatForCase": {}, + "support:PutCaseAttributes": {}, + "support:RateCaseCommunication": {}, + "support:RefreshTrustedAdvisorCheck": {}, + "support:ResolveCase": {}, + "support:SearchForCases": {}, + "supportapp:CreateSlackChannelConfiguration": {}, + "supportapp:DeleteAccountAlias": {}, + "supportapp:DeleteSlackChannelConfiguration": {}, + "supportapp:DeleteSlackWorkspaceConfiguration": {}, + "supportapp:DescribeSlackChannels": {}, + "supportapp:GetAccountAlias": {}, + "supportapp:GetSlackOauthParameters": {}, + "supportapp:ListSlackChannelConfigurations": {}, + "supportapp:ListSlackWorkspaceConfigurations": {}, + "supportapp:PutAccountAlias": {}, + "supportapp:RedeemSlackOauthCode": {}, + "supportapp:RegisterSlackWorkspaceForOrganization": {}, + "supportapp:UpdateSlackChannelConfiguration": {}, + "supportplans:CreateSupportPlanSchedule": {}, + "supportplans:GetSupportPlan": {}, + "supportplans:GetSupportPlanUpdateStatus": {}, + "supportplans:StartSupportPlanUpdate": {}, + "sustainability:GetCarbonFootprintSummary": {}, + "swf:ListDomains": {}, + "swf:RegisterDomain": {}, + "synthetics:CreateCanary": {}, + "synthetics:CreateGroup": {}, + "synthetics:DescribeCanaries": {}, + "synthetics:DescribeCanariesLastRun": {}, + "synthetics:DescribeRuntimeVersions": {}, + "synthetics:ListGroups": {}, + "tag:DescribeReportCreation": {}, + "tag:GetComplianceSummary": {}, + "tag:GetResources": {}, + "tag:GetTagKeys": {}, + "tag:GetTagValues": {}, + "tag:StartReportCreation": {}, + "tag:TagResources": {}, + "tag:UntagResources": {}, + "tax:BatchPutTaxRegistration": {}, + "tax:DeleteTaxRegistration": {}, + "tax:GetExemptions": {}, + "tax:GetTaxInheritance": {}, + "tax:GetTaxInterview": {}, + "tax:GetTaxRegistration": {}, + "tax:GetTaxRegistrationDocument": {}, + "tax:ListTaxRegistrations": {}, + "tax:PutTaxInheritance": {}, + "tax:PutTaxInterview": {}, + "tax:PutTaxRegistration": {}, + "tax:UpdateExemptions": {}, + "textract:AnalyzeDocument": {}, + "textract:AnalyzeExpense": {}, + "textract:AnalyzeID": {}, + "textract:CreateAdapter": {}, + "textract:DetectDocumentText": {}, + "textract:GetDocumentAnalysis": {}, + "textract:GetDocumentTextDetection": {}, + "textract:GetExpenseAnalysis": {}, + "textract:GetLendingAnalysis": {}, + "textract:GetLendingAnalysisSummary": {}, + "textract:ListAdapterVersions": {}, + "textract:ListAdapters": {}, + "textract:StartDocumentAnalysis": {}, 
+ "textract:StartDocumentTextDetection": {}, + "textract:StartExpenseAnalysis": {}, + "textract:StartLendingAnalysis": {}, + "thinclient:CreateEnvironment": {}, + "thinclient:ListDeviceSessions": {}, + "thinclient:ListDevices": {}, + "thinclient:ListEnvironments": {}, + "thinclient:ListSoftwareSets": {}, + "thinclient:ListTagsForResource": {}, + "timestream:CancelQuery": {}, + "timestream:CreateScheduledQuery": {}, + "timestream:DescribeBatchLoadTask": {}, + "timestream:DescribeEndpoints": {}, + "timestream:GetAwsBackupStatus": {}, + "timestream:GetAwsRestoreStatus": {}, + "timestream:ListBatchLoadTasks": {}, + "timestream:ListDatabases": {}, + "timestream:ListScheduledQueries": {}, + "timestream:ResumeBatchLoadTask": {}, + "timestream:SelectValues": {}, + "tiros:CreateQuery": {}, + "tiros:ExtendQuery": {}, + "tiros:GetQueryAnswer": {}, + "tiros:GetQueryExplanation": {}, + "tiros:GetQueryExtensionAccounts": {}, + "tnb:ListTagsForResource": {}, + "transcribe:CreateCallAnalyticsCategory": {}, + "transcribe:CreateLanguageModel": {}, + "transcribe:CreateMedicalVocabulary": {}, + "transcribe:CreateVocabulary": {}, + "transcribe:CreateVocabularyFilter": {}, + "transcribe:DeleteCallAnalyticsCategory": {}, + "transcribe:DeleteCallAnalyticsJob": {}, + "transcribe:GetCallAnalyticsCategory": {}, + "transcribe:GetCallAnalyticsJob": {}, + "transcribe:ListCallAnalyticsCategories": {}, + "transcribe:ListCallAnalyticsJobs": {}, + "transcribe:ListLanguageModels": {}, + "transcribe:ListMedicalScribeJobs": {}, + "transcribe:ListMedicalTranscriptionJobs": {}, + "transcribe:ListMedicalVocabularies": {}, + "transcribe:ListTagsForResource": {}, + "transcribe:ListTranscriptionJobs": {}, + "transcribe:ListVocabularies": {}, + "transcribe:ListVocabularyFilters": {}, + "transcribe:StartCallAnalyticsJob": {}, + "transcribe:StartCallAnalyticsStreamTranscription": {}, + "transcribe:StartCallAnalyticsStreamTranscriptionWebSocket": {}, + "transcribe:StartMedicalScribeJob": {}, + "transcribe:StartMedicalStreamTranscription": {}, + "transcribe:StartMedicalStreamTranscriptionWebSocket": {}, + "transcribe:StartMedicalTranscriptionJob": {}, + "transcribe:StartStreamTranscription": {}, + "transcribe:StartStreamTranscriptionWebSocket": {}, + "transcribe:StartTranscriptionJob": {}, + "transcribe:TagResource": {}, + "transcribe:UntagResource": {}, + "transcribe:UpdateCallAnalyticsCategory": {}, + "transfer:CreateConnector": {}, + "transfer:CreateProfile": {}, + "transfer:CreateServer": {}, + "transfer:CreateWorkflow": {}, + "transfer:DescribeSecurityPolicy": {}, + "transfer:ImportCertificate": {}, + "transfer:ListCertificates": {}, + "transfer:ListConnectors": {}, + "transfer:ListProfiles": {}, + "transfer:ListSecurityPolicies": {}, + "transfer:ListServers": {}, + "transfer:ListWorkflows": {}, + "transfer:UpdateAccess": {}, + "translate:DescribeTextTranslationJob": {}, + "translate:ListLanguages": {}, + "translate:ListParallelData": {}, + "translate:ListTerminologies": {}, + "translate:ListTextTranslationJobs": {}, + "translate:StopTextTranslationJob": {}, + "trustedadvisor:CreateEngagement": {}, + "trustedadvisor:CreateEngagementAttachment": {}, + "trustedadvisor:CreateEngagementCommunication": {}, + "trustedadvisor:DeleteNotificationConfigurationForDelegatedAdmin": {}, + "trustedadvisor:DescribeAccount": {}, + "trustedadvisor:DescribeAccountAccess": {}, + "trustedadvisor:DescribeChecks": {}, + "trustedadvisor:DescribeNotificationConfigurations": {}, + "trustedadvisor:DescribeNotificationPreferences": {}, + 
"trustedadvisor:DescribeOrganization": {}, + "trustedadvisor:DescribeOrganizationAccounts": {}, + "trustedadvisor:DescribeReports": {}, + "trustedadvisor:DescribeRisk": {}, + "trustedadvisor:DescribeRiskResources": {}, + "trustedadvisor:DescribeRisks": {}, + "trustedadvisor:DescribeServiceMetadata": {}, + "trustedadvisor:DownloadRisk": {}, + "trustedadvisor:GenerateReport": {}, + "trustedadvisor:GetEngagement": {}, + "trustedadvisor:GetEngagementAttachment": {}, + "trustedadvisor:GetEngagementType": {}, + "trustedadvisor:GetOrganizationRecommendation": {}, + "trustedadvisor:GetRecommendation": {}, + "trustedadvisor:ListAccountsForParent": {}, + "trustedadvisor:ListChecks": {}, + "trustedadvisor:ListEngagementCommunications": {}, + "trustedadvisor:ListEngagementTypes": {}, + "trustedadvisor:ListEngagements": {}, + "trustedadvisor:ListOrganizationRecommendationAccounts": {}, + "trustedadvisor:ListOrganizationRecommendationResources": {}, + "trustedadvisor:ListOrganizationRecommendations": {}, + "trustedadvisor:ListOrganizationalUnitsForParent": {}, + "trustedadvisor:ListRecommendationResources": {}, + "trustedadvisor:ListRecommendations": {}, + "trustedadvisor:ListRoots": {}, + "trustedadvisor:SetAccountAccess": {}, + "trustedadvisor:SetOrganizationAccess": {}, + "trustedadvisor:UpdateEngagement": {}, + "trustedadvisor:UpdateEngagementStatus": {}, + "trustedadvisor:UpdateNotificationConfigurations": {}, + "trustedadvisor:UpdateNotificationPreferences": {}, + "trustedadvisor:UpdateOrganizationRecommendationLifecycle": {}, + "trustedadvisor:UpdateRecommendationLifecycle": {}, + "trustedadvisor:UpdateRiskStatus": {}, + "ts:ListExecutions": {}, + "ts:ListTools": {}, + "ts:StartExecution": {}, + "vendor-insights:CreateDataSource": {}, + "vendor-insights:CreateSecurityProfile": {}, + "vendor-insights:GetProfileAccessTerms": {}, + "vendor-insights:ListDataSources": {}, + "vendor-insights:ListEntitledSecurityProfiles": {}, + "vendor-insights:ListSecurityProfiles": {}, + "verified-access:AllowVerifiedAccess": {}, + "verifiedpermissions:CreatePolicyStore": {}, + "verifiedpermissions:ListPolicyStores": {}, + "voiceid:CreateDomain": {}, + "voiceid:DescribeComplianceConsent": {}, + "voiceid:ListDomains": {}, + "voiceid:RegisterComplianceConsent": {}, + "vpc-lattice:ListAccessLogSubscriptions": {}, + "vpc-lattice:ListListeners": {}, + "vpc-lattice:ListRules": {}, + "vpc-lattice:ListServiceNetworkServiceAssociations": {}, + "vpc-lattice:ListServiceNetworkVpcAssociations": {}, + "vpc-lattice:ListServiceNetworks": {}, + "vpc-lattice:ListServices": {}, + "vpc-lattice:ListTagsForResource": {}, + "vpc-lattice:ListTargetGroups": {}, + "waf-regional:GetChangeToken": {}, + "waf-regional:GetChangeTokenStatus": {}, + "waf-regional:ListActivatedRulesInRuleGroup": {}, + "waf-regional:ListByteMatchSets": {}, + "waf-regional:ListGeoMatchSets": {}, + "waf-regional:ListIPSets": {}, + "waf-regional:ListLoggingConfigurations": {}, + "waf-regional:ListRateBasedRules": {}, + "waf-regional:ListRegexMatchSets": {}, + "waf-regional:ListRegexPatternSets": {}, + "waf-regional:ListRuleGroups": {}, + "waf-regional:ListRules": {}, + "waf-regional:ListSizeConstraintSets": {}, + "waf-regional:ListSqlInjectionMatchSets": {}, + "waf-regional:ListSubscribedRuleGroups": {}, + "waf-regional:ListWebACLs": {}, + "waf-regional:ListXssMatchSets": {}, + "waf:GetChangeToken": {}, + "waf:GetChangeTokenStatus": {}, + "waf:ListActivatedRulesInRuleGroup": {}, + "waf:ListByteMatchSets": {}, + "waf:ListGeoMatchSets": {}, + "waf:ListIPSets": {}, + 
"waf:ListLoggingConfigurations": {}, + "waf:ListRateBasedRules": {}, + "waf:ListRegexMatchSets": {}, + "waf:ListRegexPatternSets": {}, + "waf:ListRuleGroups": {}, + "waf:ListRules": {}, + "waf:ListSizeConstraintSets": {}, + "waf:ListSqlInjectionMatchSets": {}, + "waf:ListSubscribedRuleGroups": {}, + "waf:ListWebACLs": {}, + "waf:ListXssMatchSets": {}, + "wafv2:CheckCapacity": {}, + "wafv2:CreateAPIKey": {}, + "wafv2:DescribeAllManagedProducts": {}, + "wafv2:DescribeManagedProductsByVendor": {}, + "wafv2:DescribeManagedRuleGroup": {}, + "wafv2:GenerateMobileSdkReleaseUrl": {}, + "wafv2:GetDecryptedAPIKey": {}, + "wafv2:GetMobileSdkRelease": {}, + "wafv2:ListAPIKeys": {}, + "wafv2:ListAvailableManagedRuleGroupVersions": {}, + "wafv2:ListAvailableManagedRuleGroups": {}, + "wafv2:ListIPSets": {}, + "wafv2:ListLoggingConfigurations": {}, + "wafv2:ListManagedRuleSets": {}, + "wafv2:ListMobileSdkReleases": {}, + "wafv2:ListRegexPatternSets": {}, + "wafv2:ListRuleGroups": {}, + "wafv2:ListWebACLs": {}, + "wam:AuthenticatePackager": {}, + "wellarchitected:CreateProfile": {}, + "wellarchitected:CreateReviewTemplate": {}, + "wellarchitected:CreateWorkload": {}, + "wellarchitected:GetConsolidatedReport": {}, + "wellarchitected:GetProfileTemplate": {}, + "wellarchitected:ImportLens": {}, + "wellarchitected:ListLenses": {}, + "wellarchitected:ListNotifications": {}, + "wellarchitected:ListProfileNotifications": {}, + "wellarchitected:ListProfiles": {}, + "wellarchitected:ListReviewTemplates": {}, + "wellarchitected:ListShareInvitations": {}, + "wellarchitected:ListWorkloads": {}, + "wellarchitected:UpdateGlobalSettings": {}, + "wellarchitected:UpdateShareInvitation": {}, + "wickr:CreateNetwork": {}, + "wickr:ListNetworks": {}, + "wickr:ListTagsForResource": {}, + "wisdom:CreateAssistant": {}, + "wisdom:CreateKnowledgeBase": {}, + "wisdom:ListAssistants": {}, + "wisdom:ListKnowledgeBases": {}, + "wisdom:ListTagsForResource": {}, + "workdocs:AbortDocumentVersionUpload": {}, + "workdocs:ActivateUser": {}, + "workdocs:AddNotificationPermissions": {}, + "workdocs:AddResourcePermissions": {}, + "workdocs:AddUserToGroup": {}, + "workdocs:CheckAlias": {}, + "workdocs:CreateComment": {}, + "workdocs:CreateCustomMetadata": {}, + "workdocs:CreateFolder": {}, + "workdocs:CreateInstance": {}, + "workdocs:CreateLabels": {}, + "workdocs:CreateNotificationSubscription": {}, + "workdocs:CreateUser": {}, + "workdocs:DeactivateUser": {}, + "workdocs:DeleteComment": {}, + "workdocs:DeleteCustomMetadata": {}, + "workdocs:DeleteDocument": {}, + "workdocs:DeleteDocumentVersion": {}, + "workdocs:DeleteFolder": {}, + "workdocs:DeleteFolderContents": {}, + "workdocs:DeleteInstance": {}, + "workdocs:DeleteLabels": {}, + "workdocs:DeleteNotificationPermissions": {}, + "workdocs:DeleteNotificationSubscription": {}, + "workdocs:DeleteUser": {}, + "workdocs:DeregisterDirectory": {}, + "workdocs:DescribeActivities": {}, + "workdocs:DescribeAvailableDirectories": {}, + "workdocs:DescribeComments": {}, + "workdocs:DescribeDocumentVersions": {}, + "workdocs:DescribeFolderContents": {}, + "workdocs:DescribeGroups": {}, + "workdocs:DescribeInstances": {}, + "workdocs:DescribeNotificationPermissions": {}, + "workdocs:DescribeNotificationSubscriptions": {}, + "workdocs:DescribeResourcePermissions": {}, + "workdocs:DescribeRootFolders": {}, + "workdocs:DescribeUsers": {}, + "workdocs:DownloadDocumentVersion": {}, + "workdocs:GetCurrentUser": {}, + "workdocs:GetDocument": {}, + "workdocs:GetDocumentPath": {}, + "workdocs:GetDocumentVersion": 
{}, + "workdocs:GetFolder": {}, + "workdocs:GetFolderPath": {}, + "workdocs:GetGroup": {}, + "workdocs:GetResources": {}, + "workdocs:InitiateDocumentVersionUpload": {}, + "workdocs:RegisterDirectory": {}, + "workdocs:RemoveAllResourcePermissions": {}, + "workdocs:RemoveResourcePermission": {}, + "workdocs:RestoreDocumentVersions": {}, + "workdocs:SearchResources": {}, + "workdocs:UpdateDocument": {}, + "workdocs:UpdateDocumentVersion": {}, + "workdocs:UpdateFolder": {}, + "workdocs:UpdateInstanceAlias": {}, + "workdocs:UpdateUser": {}, + "workdocs:UpdateUserAdministrativeSettings": {}, + "worklink:CreateFleet": {}, + "worklink:ListFleets": {}, + "workmail:CreateOrganization": {}, + "workmail:DescribeDirectories": {}, + "workmail:DescribeKmsKeys": {}, + "workmail:DescribeOrganizations": {}, + "workmail:ListOrganizations": {}, + "workspaces-web:CreateBrowserSettings": {}, + "workspaces-web:CreateIpAccessSettings": {}, + "workspaces-web:CreateNetworkSettings": {}, + "workspaces-web:CreatePortal": {}, + "workspaces-web:CreateTrustStore": {}, + "workspaces-web:CreateUserAccessLoggingSettings": {}, + "workspaces-web:CreateUserSettings": {}, + "workspaces-web:ListBrowserSettings": {}, + "workspaces-web:ListIpAccessSettings": {}, + "workspaces-web:ListNetworkSettings": {}, + "workspaces-web:ListPortals": {}, + "workspaces-web:ListTagsForResource": {}, + "workspaces-web:ListTrustStoreCertificates": {}, + "workspaces-web:ListTrustStores": {}, + "workspaces-web:ListUserAccessLoggingSettings": {}, + "workspaces-web:ListUserSettings": {}, + "workspaces:CreateConnectionAlias": {}, + "workspaces:CreateIpGroup": {}, + "workspaces:CreateTags": {}, + "workspaces:DeleteTags": {}, + "workspaces:DescribeAccount": {}, + "workspaces:DescribeAccountModifications": {}, + "workspaces:DescribeApplications": {}, + "workspaces:DescribeConnectionAliases": {}, + "workspaces:DescribeTags": {}, + "workspaces:DescribeWorkspaceBundles": {}, + "workspaces:DescribeWorkspaceDirectories": {}, + "workspaces:DescribeWorkspaceImages": {}, + "workspaces:DescribeWorkspaces": {}, + "workspaces:DescribeWorkspacesConnectionStatus": {}, + "workspaces:ImportWorkspaceImage": {}, + "workspaces:ListAvailableManagementCidrRanges": {}, + "workspaces:ModifyAccount": {}, + "xray:BatchGetTraceSummaryById": {}, + "xray:BatchGetTraces": {}, + "xray:DeleteResourcePolicy": {}, + "xray:GetDistinctTraceGraphs": {}, + "xray:GetEncryptionConfig": {}, + "xray:GetGroups": {}, + "xray:GetInsight": {}, + "xray:GetInsightEvents": {}, + "xray:GetInsightImpactGraph": {}, + "xray:GetInsightSummaries": {}, + "xray:GetSamplingRules": {}, + "xray:GetSamplingStatisticSummaries": {}, + "xray:GetSamplingTargets": {}, + "xray:GetServiceGraph": {}, + "xray:GetTimeSeriesServiceStatistics": {}, + "xray:GetTraceGraph": {}, + "xray:GetTraceSummaries": {}, + "xray:Link": {}, + "xray:ListResourcePolicies": {}, + "xray:PutEncryptionConfig": {}, + "xray:PutResourcePolicy": {}, + "xray:PutTelemetryRecords": {}, + "xray:PutTraceSegments": {}, +} \ No newline at end of file diff --git a/pkg/providers/aws/iam/iam.go b/pkg/providers/aws/iam/iam.go new file mode 100755 index 000000000000..2a3ce63152be --- /dev/null +++ b/pkg/providers/aws/iam/iam.go @@ -0,0 +1,111 @@ +package iam + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + "github.com/liamg/iamgo" +) + +type IAM struct { + PasswordPolicy PasswordPolicy + Policies []Policy + Groups []Group + Users []User + Roles []Role + ServerCertificates []ServerCertificate +} + +type ServerCertificate 
diff --git a/pkg/providers/aws/iam/iam.go b/pkg/providers/aws/iam/iam.go
new file mode 100755
index 000000000000..2a3ce63152be
--- /dev/null
+++ b/pkg/providers/aws/iam/iam.go
@@ -0,0 +1,111 @@
+package iam
+
+import (
+    defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+    "github.com/liamg/iamgo"
+)
+
+type IAM struct {
+    PasswordPolicy     PasswordPolicy
+    Policies           []Policy
+    Groups             []Group
+    Users              []User
+    Roles              []Role
+    ServerCertificates []ServerCertificate
+}
+
+type ServerCertificate struct {
+    Metadata   defsecTypes.MisconfigMetadata
+    Expiration defsecTypes.TimeValue
+}
+
+type Policy struct {
+    Metadata defsecTypes.MisconfigMetadata
+    Name     defsecTypes.StringValue
+    Document Document
+    Builtin  defsecTypes.BoolValue
+}
+
+type Document struct {
+    Metadata defsecTypes.MisconfigMetadata
+    Parsed   iamgo.Document
+    IsOffset bool
+    HasRefs  bool
+}
+
+func (d Document) ToRego() interface{} {
+    m := d.Metadata
+    doc, _ := d.Parsed.MarshalJSON()
+    return map[string]interface{}{
+        "filepath":  m.Range().GetFilename(),
+        "startline": m.Range().GetStartLine(),
+        "endline":   m.Range().GetEndLine(),
+        "managed":   m.IsManaged(),
+        "explicit":  m.IsExplicit(),
+        "value":     string(doc),
+        "fskey":     defsecTypes.CreateFSKey(m.Range().GetFS()),
+    }
+}
+
+type Group struct {
+    Metadata defsecTypes.MisconfigMetadata
+    Name     defsecTypes.StringValue
+    Users    []User
+    Policies []Policy
+}
+
+type User struct {
+    Metadata   defsecTypes.MisconfigMetadata
+    Name       defsecTypes.StringValue
+    Groups     []Group
+    Policies   []Policy
+    AccessKeys []AccessKey
+    MFADevices []MFADevice
+    LastAccess defsecTypes.TimeValue
+}
+
+func (u *User) HasLoggedIn() bool {
+    return u.LastAccess.GetMetadata().IsResolvable() && !u.LastAccess.IsNever()
+}
+
+type MFADevice struct {
+    Metadata  defsecTypes.MisconfigMetadata
+    IsVirtual defsecTypes.BoolValue
+}
+
+type AccessKey struct {
+    Metadata     defsecTypes.MisconfigMetadata
+    AccessKeyId  defsecTypes.StringValue
+    Active       defsecTypes.BoolValue
+    CreationDate defsecTypes.TimeValue
+    LastAccess   defsecTypes.TimeValue
+}
+
+type Role struct {
+    Metadata defsecTypes.MisconfigMetadata
+    Name     defsecTypes.StringValue
+    Policies []Policy
+}
+
+func (d Document) MetadataFromIamGo(r ...iamgo.Range) defsecTypes.MisconfigMetadata {
+    m := d.Metadata
+    if d.HasRefs {
+        return m
+    }
+    newRange := m.Range()
+    var start int
+    if !d.IsOffset {
+        start = newRange.GetStartLine()
+    }
+    for _, rng := range r {
+        newRange := defsecTypes.NewRange(
+            newRange.GetLocalFilename(),
+            start+rng.StartLine,
+            start+rng.EndLine,
+            newRange.GetSourcePrefix(),
+            newRange.GetFS(),
+        )
+        m = defsecTypes.NewMisconfigMetadata(newRange, m.Reference()).WithParent(m)
+    }
+    return m
+}
diff --git a/pkg/providers/aws/iam/passwords.go b/pkg/providers/aws/iam/passwords.go
new file mode 100755
index 000000000000..da304df79077
--- /dev/null
+++ b/pkg/providers/aws/iam/passwords.go
@@ -0,0 +1,16 @@
+package iam
+
+import (
+    defsecTypes "github.com/aquasecurity/trivy/pkg/types"
+)
+
+type PasswordPolicy struct {
+    Metadata             defsecTypes.MisconfigMetadata
+    ReusePreventionCount defsecTypes.IntValue
+    RequireLowercase     defsecTypes.BoolValue
+    RequireUppercase     defsecTypes.BoolValue
+    RequireNumbers       defsecTypes.BoolValue
+    RequireSymbols       defsecTypes.BoolValue
+    MaxAgeDays           defsecTypes.IntValue
+    MinimumLength        defsecTypes.IntValue
+}
diff --git a/pkg/providers/aws/iam/wildcards.go b/pkg/providers/aws/iam/wildcards.go
new file mode 100755
index 000000000000..bee5c4e9637c
--- /dev/null
+++ b/pkg/providers/aws/iam/wildcards.go
@@ -0,0 +1,10 @@
+package iam
+
+func IsWildcardAllowed(actions ...string) (bool, string) {
+    for _, action := range actions {
+        if _, exist := allowedActionsForResourceWildcardsMap[action]; !exist {
+            return false, action
+        }
+    }
+    return true, ""
+}
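Document.ToRego above is the bridge between this IAM model and the Rego misconfiguration checks: it flattens a parsed policy document and its source metadata into a plain map handed over as Rego input. A hedged sketch of calling it directly, assuming defsecTypes.NewRange and NewMisconfigMetadata accept the argument shapes used by MetadataFromIamGo above; the file name, line numbers, and reference string are placeholders rather than output from a real scan:

package main

import (
    "fmt"
    "os"

    "github.com/aquasecurity/trivy/pkg/providers/aws/iam"
    defsecTypes "github.com/aquasecurity/trivy/pkg/types"
)

func main() {
    // Pretend the policy document spans lines 1-12 of main.tf.
    rng := defsecTypes.NewRange("main.tf", 1, 12, "", os.DirFS("."))
    doc := iam.Document{
        Metadata: defsecTypes.NewMisconfigMetadata(rng, "aws_iam_policy.example"),
        // Parsed is left at its zero value here; adapters normally build it
        // with iamgo from a real policy document.
    }
    input := doc.ToRego().(map[string]interface{})
    // Keys mirror ToRego: filepath, startline, endline, managed, explicit,
    // value (the marshalled policy JSON) and fskey.
    fmt.Println(input["filepath"], input["startline"], input["endline"])
}

The policy JSON travels as the value string, so downstream Rego checks can unpack it with Rego's json.unmarshal builtin instead of walking the Go structs.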
"github.com/aquasecurity/trivy/pkg/types" +) + +type Kinesis struct { + Streams []Stream +} + +type Stream struct { + Metadata defsecTypes.MisconfigMetadata + Encryption Encryption +} + +const ( + EncryptionTypeKMS = "KMS" +) + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + Type defsecTypes.StringValue + KMSKeyID defsecTypes.StringValue +} diff --git a/pkg/providers/aws/kms/kms.go b/pkg/providers/aws/kms/kms.go new file mode 100755 index 000000000000..82b524ebcd9e --- /dev/null +++ b/pkg/providers/aws/kms/kms.go @@ -0,0 +1,19 @@ +package kms + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type KMS struct { + Keys []Key +} + +const ( + KeyUsageSignAndVerify = "SIGN_VERIFY" +) + +type Key struct { + Metadata defsecTypes.MisconfigMetadata + Usage defsecTypes.StringValue + RotationEnabled defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/lambda/lambda.go b/pkg/providers/aws/lambda/lambda.go new file mode 100755 index 000000000000..1168a16f81de --- /dev/null +++ b/pkg/providers/aws/lambda/lambda.go @@ -0,0 +1,31 @@ +package lambda + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Lambda struct { + Functions []Function +} + +type Function struct { + Metadata defsecTypes.MisconfigMetadata + Tracing Tracing + Permissions []Permission +} + +const ( + TracingModePassThrough = "PassThrough" + TracingModeActive = "Active" +) + +type Tracing struct { + Metadata defsecTypes.MisconfigMetadata + Mode defsecTypes.StringValue +} + +type Permission struct { + Metadata defsecTypes.MisconfigMetadata + Principal defsecTypes.StringValue + SourceARN defsecTypes.StringValue +} diff --git a/pkg/providers/aws/mq/mq.go b/pkg/providers/aws/mq/mq.go new file mode 100755 index 000000000000..044fd5d97e34 --- /dev/null +++ b/pkg/providers/aws/mq/mq.go @@ -0,0 +1,21 @@ +package mq + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type MQ struct { + Brokers []Broker +} + +type Broker struct { + Metadata defsecTypes.MisconfigMetadata + PublicAccess defsecTypes.BoolValue + Logging Logging +} + +type Logging struct { + Metadata defsecTypes.MisconfigMetadata + General defsecTypes.BoolValue + Audit defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/msk/msk.go b/pkg/providers/aws/msk/msk.go new file mode 100755 index 000000000000..ddf2fd11c311 --- /dev/null +++ b/pkg/providers/aws/msk/msk.go @@ -0,0 +1,60 @@ +package msk + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type MSK struct { + Clusters []Cluster +} + +type Cluster struct { + Metadata defsecTypes.MisconfigMetadata + EncryptionInTransit EncryptionInTransit + EncryptionAtRest EncryptionAtRest + Logging Logging +} + +const ( + ClientBrokerEncryptionTLS = "TLS" + ClientBrokerEncryptionPlaintext = "PLAINTEXT" + ClientBrokerEncryptionTLSOrPlaintext = "TLS_PLAINTEXT" +) + +type EncryptionInTransit struct { + Metadata defsecTypes.MisconfigMetadata + ClientBroker defsecTypes.StringValue +} + +type EncryptionAtRest struct { + Metadata defsecTypes.MisconfigMetadata + KMSKeyARN defsecTypes.StringValue + Enabled defsecTypes.BoolValue +} + +type Logging struct { + Metadata defsecTypes.MisconfigMetadata + Broker BrokerLogging +} + +type BrokerLogging struct { + Metadata defsecTypes.MisconfigMetadata + S3 S3Logging + Cloudwatch CloudwatchLogging + Firehose FirehoseLogging +} + +type S3Logging struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type CloudwatchLogging struct { 
+ Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type FirehoseLogging struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/neptune/neptune.go b/pkg/providers/aws/neptune/neptune.go new file mode 100755 index 000000000000..5e9186686ad6 --- /dev/null +++ b/pkg/providers/aws/neptune/neptune.go @@ -0,0 +1,21 @@ +package neptune + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Neptune struct { + Clusters []Cluster +} + +type Cluster struct { + Metadata defsecTypes.MisconfigMetadata + Logging Logging + StorageEncrypted defsecTypes.BoolValue + KMSKeyID defsecTypes.StringValue +} + +type Logging struct { + Metadata defsecTypes.MisconfigMetadata + Audit defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/provider.go b/pkg/providers/aws/provider.go new file mode 100644 index 000000000000..c2428ff33de8 --- /dev/null +++ b/pkg/providers/aws/provider.go @@ -0,0 +1,77 @@ +package aws + +import "github.com/aquasecurity/trivy/pkg/types" + +type TerraformProvider struct { + Metadata types.MisconfigMetadata + // generic fields + Alias types.StringValue + Version types.StringValue + + // provider specific fields + AccessKey types.StringValue + AllowedAccountsIDs types.StringValueList + AssumeRole AssumeRole + AssumeRoleWithWebIdentity AssumeRoleWithWebIdentity + CustomCABundle types.StringValue + DefaultTags DefaultTags + EC2MetadataServiceEndpoint types.StringValue + EC2MetadataServiceEndpointMode types.StringValue + Endpoints types.MapValue + ForbiddenAccountIDs types.StringValueList + HttpProxy types.StringValue + IgnoreTags IgnoreTags + Insecure types.BoolValue + MaxRetries types.IntValue + Profile types.StringValue + Region types.StringValue + RetryMode types.StringValue + S3UsePathStyle types.BoolValue + S3USEast1RegionalEndpoint types.StringValue + SecretKey types.StringValue + SharedConfigFiles types.StringValueList + SharedCredentialsFiles types.StringValueList + SkipCredentialsValidation types.BoolValue + SkipMetadataAPICheck types.BoolValue + SkipRegionValidation types.BoolValue + SkipRequestingAccountID types.BoolValue + STSRegion types.StringValue + Token types.StringValue + UseDualstackEndpoint types.BoolValue + UseFIPSEndpoint types.BoolValue +} + +type AssumeRole struct { + Metadata types.MisconfigMetadata + Duration types.StringValue + ExternalID types.StringValue + Policy types.StringValue + PolicyARNs types.StringValueList + RoleARN types.StringValue + SessionName types.StringValue + SourceIdentity types.StringValue + Tags types.MapValue + TransitiveTagKeys types.StringValueList +} + +type AssumeRoleWithWebIdentity struct { + Metadata types.MisconfigMetadata + Duration types.StringValue + Policy types.StringValue + PolicyARNs types.StringValueList + RoleARN types.StringValue + SessionName types.StringValue + WebIdentityToken types.StringValue + WebIdentityTokenFile types.StringValue +} + +type IgnoreTags struct { + Metadata types.MisconfigMetadata + Keys types.StringValueList + KeyPrefixes types.StringValueList +} + +type DefaultTags struct { + Metadata types.MisconfigMetadata + Tags types.MapValue +} diff --git a/pkg/providers/aws/rds/classic.go b/pkg/providers/aws/rds/classic.go new file mode 100755 index 000000000000..77113e1eb976 --- /dev/null +++ b/pkg/providers/aws/rds/classic.go @@ -0,0 +1,13 @@ +package rds + +import ( + "github.com/aquasecurity/trivy/pkg/types" +) + +type Classic struct { + DBSecurityGroups []DBSecurityGroup +} + 
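+
+	// Reviewer sketch (illustrative only, not part of this change): Classic and
+	// DBSecurityGroup in this file model the deprecated EC2-Classic security
+	// groups for RDS, so a misconfiguration check only needs to test for their
+	// presence. Assuming just the types defined here, a minimal helper might
+	// look like:
+	//
+	//	func usesClassicSecurityGroups(c Classic) bool {
+	//		// any remaining DBSecurityGroup indicates reliance on EC2-Classic
+	//		return len(c.DBSecurityGroups) > 0
+	//	}
+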
+type DBSecurityGroup struct { + Metadata types.MisconfigMetadata +} diff --git a/pkg/providers/aws/rds/rds.go b/pkg/providers/aws/rds/rds.go new file mode 100755 index 000000000000..c37b8f597e89 --- /dev/null +++ b/pkg/providers/aws/rds/rds.go @@ -0,0 +1,127 @@ +package rds + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type RDS struct { + Instances []Instance + Clusters []Cluster + Classic Classic + Snapshots []Snapshots + ParameterGroups []ParameterGroups +} + +type Instance struct { + Metadata defsecTypes.MisconfigMetadata + BackupRetentionPeriodDays defsecTypes.IntValue + ReplicationSourceARN defsecTypes.StringValue + PerformanceInsights PerformanceInsights + Encryption Encryption + PublicAccess defsecTypes.BoolValue + Engine defsecTypes.StringValue + IAMAuthEnabled defsecTypes.BoolValue + DeletionProtection defsecTypes.BoolValue + DBInstanceArn defsecTypes.StringValue + StorageEncrypted defsecTypes.BoolValue + DBInstanceIdentifier defsecTypes.StringValue + DBParameterGroups []DBParameterGroupsList + TagList []TagList + EnabledCloudwatchLogsExports []defsecTypes.StringValue + EngineVersion defsecTypes.StringValue + AutoMinorVersionUpgrade defsecTypes.BoolValue + MultiAZ defsecTypes.BoolValue + PubliclyAccessible defsecTypes.BoolValue + LatestRestorableTime defsecTypes.TimeValue + ReadReplicaDBInstanceIdentifiers []defsecTypes.StringValue +} + +type Cluster struct { + Metadata defsecTypes.MisconfigMetadata + BackupRetentionPeriodDays defsecTypes.IntValue + ReplicationSourceARN defsecTypes.StringValue + PerformanceInsights PerformanceInsights + Instances []ClusterInstance + Encryption Encryption + PublicAccess defsecTypes.BoolValue + Engine defsecTypes.StringValue + LatestRestorableTime defsecTypes.TimeValue + AvailabilityZones []defsecTypes.StringValue + DeletionProtection defsecTypes.BoolValue + SkipFinalSnapshot defsecTypes.BoolValue +} + +type Snapshots struct { + Metadata defsecTypes.MisconfigMetadata + DBSnapshotIdentifier defsecTypes.StringValue + DBSnapshotArn defsecTypes.StringValue + Encrypted defsecTypes.BoolValue + KmsKeyId defsecTypes.StringValue + SnapshotAttributes []DBSnapshotAttributes +} + +type Parameters struct { + Metadata defsecTypes.MisconfigMetadata + ParameterName defsecTypes.StringValue + ParameterValue defsecTypes.StringValue +} + +type ParameterGroups struct { + Metadata defsecTypes.MisconfigMetadata + DBParameterGroupName defsecTypes.StringValue + DBParameterGroupFamily defsecTypes.StringValue + Parameters []Parameters +} + +type DBSnapshotAttributes struct { + Metadata defsecTypes.MisconfigMetadata + AttributeValues []defsecTypes.StringValue +} + +const ( + EngineAurora = "aurora" + EngineAuroraMysql = "aurora-mysql" + EngineAuroraPostgresql = "aurora-postgresql" + EngineMySQL = "mysql" + EnginePostgres = "postgres" + EngineCustomOracleEE = "custom-oracle-ee" + EngineOracleEE = "oracle-ee" + EngineOracleEECDB = "oracle-ee-cdb" + EngineOracleSE2 = "oracle-se2" + EngineOracleSE2CDB = "oracle-se2-cdb" + EngineSQLServerEE = "sqlserver-ee" + EngineSQLServerSE = "sqlserver-se" + EngineSQLServerEX = "sqlserver-ex" + EngineSQLServerWEB = "sqlserver-web" + EngineMariaDB = "mariadb" + EngineCustomSQLServerEE = "custom-sqlserver-ee" + EngineCustomSQLServerSE = "custom-sqlserver-se" + EngineCustomSQLServerWEB = "custom-sqlserver-web" +) + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + EncryptStorage defsecTypes.BoolValue + KMSKeyID defsecTypes.StringValue +} + +type ClusterInstance struct { + Instance + 
ClusterIdentifier defsecTypes.StringValue +} + +type PerformanceInsights struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + KMSKeyID defsecTypes.StringValue +} + +type DBParameterGroupsList struct { + Metadata defsecTypes.MisconfigMetadata + DBParameterGroupName defsecTypes.StringValue + KMSKeyID defsecTypes.StringValue +} + +type TagList struct { + Metadata defsecTypes.MisconfigMetadata +} diff --git a/pkg/providers/aws/redshift/redshift.go b/pkg/providers/aws/redshift/redshift.go new file mode 100755 index 000000000000..b9710442f406 --- /dev/null +++ b/pkg/providers/aws/redshift/redshift.go @@ -0,0 +1,55 @@ +package redshift + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Redshift struct { + Clusters []Cluster + ReservedNodes []ReservedNode + ClusterParameters []ClusterParameter + SecurityGroups []SecurityGroup +} + +type SecurityGroup struct { + Metadata defsecTypes.MisconfigMetadata + Description defsecTypes.StringValue +} + +type ReservedNode struct { + Metadata defsecTypes.MisconfigMetadata + NodeType defsecTypes.StringValue +} + +type ClusterParameter struct { + Metadata defsecTypes.MisconfigMetadata + ParameterName defsecTypes.StringValue + ParameterValue defsecTypes.StringValue +} + +type Cluster struct { + Metadata defsecTypes.MisconfigMetadata + ClusterIdentifier defsecTypes.StringValue + NodeType defsecTypes.StringValue + VpcId defsecTypes.StringValue + NumberOfNodes defsecTypes.IntValue + PubliclyAccessible defsecTypes.BoolValue + AllowVersionUpgrade defsecTypes.BoolValue + MasterUsername defsecTypes.StringValue + AutomatedSnapshotRetentionPeriod defsecTypes.IntValue + LoggingEnabled defsecTypes.BoolValue + EndPoint EndPoint + Encryption Encryption + SubnetGroupName defsecTypes.StringValue +} + +type EndPoint struct { + Metadata defsecTypes.MisconfigMetadata + Port defsecTypes.IntValue +} + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + KMSKeyID defsecTypes.StringValue +} diff --git a/pkg/providers/aws/s3/bucket.go b/pkg/providers/aws/s3/bucket.go new file mode 100755 index 000000000000..81633cf6fc44 --- /dev/null +++ b/pkg/providers/aws/s3/bucket.go @@ -0,0 +1,67 @@ +package s3 + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Bucket struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + PublicAccessBlock *PublicAccessBlock + BucketPolicies []iam.Policy + Encryption Encryption + Versioning Versioning + Logging Logging + ACL defsecTypes.StringValue + BucketLocation defsecTypes.StringValue + AccelerateConfigurationStatus defsecTypes.StringValue + LifecycleConfiguration []Rules + Objects []Contents + Website *Website +} + +func (b *Bucket) HasPublicExposureACL() bool { + for _, publicACL := range []string{"public-read", "public-read-write", "website", "authenticated-read"} { + if b.ACL.EqualTo(publicACL) { + // if there is a public access block, check the public ACL blocks + if b.PublicAccessBlock != nil && b.PublicAccessBlock.Metadata.IsManaged() { + return b.PublicAccessBlock.IgnorePublicACLs.IsFalse() && b.PublicAccessBlock.BlockPublicACLs.IsFalse() + } + return true + } + } + return false +} + +type Logging struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + TargetBucket defsecTypes.StringValue +} + +type Versioning struct { + Metadata defsecTypes.MisconfigMetadata + Enabled 
defsecTypes.BoolValue + MFADelete defsecTypes.BoolValue +} + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + Algorithm defsecTypes.StringValue + KMSKeyId defsecTypes.StringValue +} + +type Rules struct { + Metadata defsecTypes.MisconfigMetadata + Status defsecTypes.StringValue +} + +type Contents struct { + Metadata defsecTypes.MisconfigMetadata +} + +type Website struct { + Metadata defsecTypes.MisconfigMetadata +} diff --git a/pkg/providers/aws/s3/bucket_public_access_block.go b/pkg/providers/aws/s3/bucket_public_access_block.go new file mode 100755 index 000000000000..617aa39d2143 --- /dev/null +++ b/pkg/providers/aws/s3/bucket_public_access_block.go @@ -0,0 +1,23 @@ +package s3 + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type PublicAccessBlock struct { + Metadata defsecTypes.MisconfigMetadata + BlockPublicACLs defsecTypes.BoolValue + BlockPublicPolicy defsecTypes.BoolValue + IgnorePublicACLs defsecTypes.BoolValue + RestrictPublicBuckets defsecTypes.BoolValue +} + +func NewPublicAccessBlock(metadata defsecTypes.MisconfigMetadata) PublicAccessBlock { + return PublicAccessBlock{ + Metadata: metadata, + BlockPublicPolicy: defsecTypes.BoolDefault(false, metadata), + BlockPublicACLs: defsecTypes.BoolDefault(false, metadata), + IgnorePublicACLs: defsecTypes.BoolDefault(false, metadata), + RestrictPublicBuckets: defsecTypes.BoolDefault(false, metadata), + } +} diff --git a/pkg/providers/aws/s3/s3.go b/pkg/providers/aws/s3/s3.go new file mode 100755 index 000000000000..230269a9e660 --- /dev/null +++ b/pkg/providers/aws/s3/s3.go @@ -0,0 +1,5 @@ +package s3 + +type S3 struct { + Buckets []Bucket +} diff --git a/pkg/providers/aws/sam/api.go b/pkg/providers/aws/sam/api.go new file mode 100644 index 000000000000..3e8d390a4154 --- /dev/null +++ b/pkg/providers/aws/sam/api.go @@ -0,0 +1,38 @@ +package sam + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type API struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + TracingEnabled defsecTypes.BoolValue + DomainConfiguration DomainConfiguration + AccessLogging AccessLogging + RESTMethodSettings RESTMethodSettings +} + +type ApiAuth struct { + Metadata defsecTypes.MisconfigMetadata + ApiKeyRequired defsecTypes.BoolValue +} + +type AccessLogging struct { + Metadata defsecTypes.MisconfigMetadata + CloudwatchLogGroupARN defsecTypes.StringValue +} + +type DomainConfiguration struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + SecurityPolicy defsecTypes.StringValue +} + +type RESTMethodSettings struct { + Metadata defsecTypes.MisconfigMetadata + CacheDataEncrypted defsecTypes.BoolValue + LoggingEnabled defsecTypes.BoolValue + DataTraceEnabled defsecTypes.BoolValue + MetricsEnabled defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/sam/application.go b/pkg/providers/aws/sam/application.go new file mode 100644 index 000000000000..07038615aba5 --- /dev/null +++ b/pkg/providers/aws/sam/application.go @@ -0,0 +1,17 @@ +package sam + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Application struct { + Metadata defsecTypes.MisconfigMetadata + LocationPath defsecTypes.StringValue + Location Location +} + +type Location struct { + Metadata defsecTypes.MisconfigMetadata + ApplicationID defsecTypes.StringValue + SemanticVersion defsecTypes.StringValue +} diff --git a/pkg/providers/aws/sam/function.go b/pkg/providers/aws/sam/function.go new file 
mode 100644 index 000000000000..bdfc9ef5569c --- /dev/null +++ b/pkg/providers/aws/sam/function.go @@ -0,0 +1,25 @@ +package sam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Function struct { + Metadata defsecTypes.MisconfigMetadata + FunctionName defsecTypes.StringValue + Tracing defsecTypes.StringValue + ManagedPolicies []defsecTypes.StringValue + Policies []iam.Policy +} + +const ( + TracingModePassThrough = "PassThrough" + TracingModeActive = "Active" +) + +type Permission struct { + Metadata defsecTypes.MisconfigMetadata + Principal defsecTypes.StringValue + SourceARN defsecTypes.StringValue +} diff --git a/pkg/providers/aws/sam/http_api.go b/pkg/providers/aws/sam/http_api.go new file mode 100644 index 000000000000..2a26225af25b --- /dev/null +++ b/pkg/providers/aws/sam/http_api.go @@ -0,0 +1,20 @@ +package sam + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type HttpAPI struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + AccessLogging AccessLogging + DefaultRouteSettings RouteSettings + DomainConfiguration DomainConfiguration +} + +type RouteSettings struct { + Metadata defsecTypes.MisconfigMetadata + LoggingEnabled defsecTypes.BoolValue + DataTraceEnabled defsecTypes.BoolValue + DetailedMetricsEnabled defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/sam/sam.go b/pkg/providers/aws/sam/sam.go new file mode 100644 index 000000000000..ed75777d8053 --- /dev/null +++ b/pkg/providers/aws/sam/sam.go @@ -0,0 +1,10 @@ +package sam + +type SAM struct { + APIs []API + Applications []Application + Functions []Function + HttpAPIs []HttpAPI + SimpleTables []SimpleTable + StateMachines []StateMachine +} diff --git a/pkg/providers/aws/sam/state_machine.go b/pkg/providers/aws/sam/state_machine.go new file mode 100644 index 000000000000..33c73761dd64 --- /dev/null +++ b/pkg/providers/aws/sam/state_machine.go @@ -0,0 +1,25 @@ +package sam + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type StateMachine struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + LoggingConfiguration LoggingConfiguration + ManagedPolicies []defsecTypes.StringValue + Policies []iam.Policy + Tracing TracingConfiguration +} + +type LoggingConfiguration struct { + Metadata defsecTypes.MisconfigMetadata + LoggingEnabled defsecTypes.BoolValue +} + +type TracingConfiguration struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/sam/table.go b/pkg/providers/aws/sam/table.go new file mode 100644 index 000000000000..e9b0e6e6f7ea --- /dev/null +++ b/pkg/providers/aws/sam/table.go @@ -0,0 +1,18 @@ +package sam + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SimpleTable struct { + Metadata defsecTypes.MisconfigMetadata + TableName defsecTypes.StringValue + SSESpecification SSESpecification +} + +type SSESpecification struct { + Metadata defsecTypes.MisconfigMetadata + + Enabled defsecTypes.BoolValue + KMSMasterKeyID defsecTypes.StringValue +} diff --git a/pkg/providers/aws/sns/sns.go b/pkg/providers/aws/sns/sns.go new file mode 100755 index 000000000000..c11a642b73e6 --- /dev/null +++ b/pkg/providers/aws/sns/sns.go @@ -0,0 +1,31 @@ +package sns + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SNS struct 
{ + Topics []Topic +} + +func NewTopic(arn string, metadata defsecTypes.MisconfigMetadata) *Topic { + return &Topic{ + Metadata: metadata, + ARN: defsecTypes.String(arn, metadata), + Encryption: Encryption{ + Metadata: metadata, + KMSKeyID: defsecTypes.StringDefault("", metadata), + }, + } +} + +type Topic struct { + Metadata defsecTypes.MisconfigMetadata + ARN defsecTypes.StringValue + Encryption Encryption +} + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + KMSKeyID defsecTypes.StringValue +} diff --git a/pkg/providers/aws/sqs/sqs.go b/pkg/providers/aws/sqs/sqs.go new file mode 100755 index 000000000000..25ae7595f2f3 --- /dev/null +++ b/pkg/providers/aws/sqs/sqs.go @@ -0,0 +1,23 @@ +package sqs + +import ( + "github.com/aquasecurity/trivy/pkg/providers/aws/iam" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SQS struct { + Queues []Queue +} + +type Queue struct { + Metadata defsecTypes.MisconfigMetadata + QueueURL defsecTypes.StringValue + Encryption Encryption + Policies []iam.Policy +} + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + KMSKeyID defsecTypes.StringValue + ManagedEncryption defsecTypes.BoolValue +} diff --git a/pkg/providers/aws/ssm/ssm.go b/pkg/providers/aws/ssm/ssm.go new file mode 100755 index 000000000000..f14c2eecc110 --- /dev/null +++ b/pkg/providers/aws/ssm/ssm.go @@ -0,0 +1,16 @@ +package ssm + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SSM struct { + Secrets []Secret +} + +type Secret struct { + Metadata defsecTypes.MisconfigMetadata + KMSKeyID defsecTypes.StringValue +} + +const DefaultKMSKeyID = "alias/aws/secretsmanager" diff --git a/pkg/providers/aws/workspaces/workspaces.go b/pkg/providers/aws/workspaces/workspaces.go new file mode 100755 index 000000000000..2cf65f5949fa --- /dev/null +++ b/pkg/providers/aws/workspaces/workspaces.go @@ -0,0 +1,25 @@ +package workspaces + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type WorkSpaces struct { + WorkSpaces []WorkSpace +} + +type WorkSpace struct { + Metadata defsecTypes.MisconfigMetadata + RootVolume Volume + UserVolume Volume +} + +type Volume struct { + Metadata defsecTypes.MisconfigMetadata + Encryption Encryption +} + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} diff --git a/pkg/providers/azure/appservice/appservice.go b/pkg/providers/azure/appservice/appservice.go new file mode 100755 index 000000000000..2c8c7d071387 --- /dev/null +++ b/pkg/providers/azure/appservice/appservice.go @@ -0,0 +1,30 @@ +package appservice + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type AppService struct { + Services []Service + FunctionApps []FunctionApp +} + +type Service struct { + Metadata defsecTypes.MisconfigMetadata + EnableClientCert defsecTypes.BoolValue + Identity struct { + Type defsecTypes.StringValue + } + Authentication struct { + Enabled defsecTypes.BoolValue + } + Site struct { + EnableHTTP2 defsecTypes.BoolValue + MinimumTLSVersion defsecTypes.StringValue + } +} + +type FunctionApp struct { + Metadata defsecTypes.MisconfigMetadata + HTTPSOnly defsecTypes.BoolValue +} diff --git a/pkg/providers/azure/authorization/authorization.go b/pkg/providers/azure/authorization/authorization.go new file mode 100755 index 000000000000..267375a29c5c --- /dev/null +++ b/pkg/providers/azure/authorization/authorization.go @@ -0,0 +1,20 @@ +package authorization + +import ( + 
defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Authorization struct { + RoleDefinitions []RoleDefinition +} + +type RoleDefinition struct { + Metadata defsecTypes.MisconfigMetadata + Permissions []Permission + AssignableScopes []defsecTypes.StringValue +} + +type Permission struct { + Metadata defsecTypes.MisconfigMetadata + Actions []defsecTypes.StringValue +} diff --git a/pkg/providers/azure/azure.go b/pkg/providers/azure/azure.go new file mode 100755 index 000000000000..c9f6e960ebc3 --- /dev/null +++ b/pkg/providers/azure/azure.go @@ -0,0 +1,33 @@ +package azure + +import ( + "github.com/aquasecurity/trivy/pkg/providers/azure/appservice" + "github.com/aquasecurity/trivy/pkg/providers/azure/authorization" + "github.com/aquasecurity/trivy/pkg/providers/azure/compute" + "github.com/aquasecurity/trivy/pkg/providers/azure/container" + "github.com/aquasecurity/trivy/pkg/providers/azure/database" + "github.com/aquasecurity/trivy/pkg/providers/azure/datafactory" + "github.com/aquasecurity/trivy/pkg/providers/azure/datalake" + "github.com/aquasecurity/trivy/pkg/providers/azure/keyvault" + "github.com/aquasecurity/trivy/pkg/providers/azure/monitor" + "github.com/aquasecurity/trivy/pkg/providers/azure/network" + "github.com/aquasecurity/trivy/pkg/providers/azure/securitycenter" + "github.com/aquasecurity/trivy/pkg/providers/azure/storage" + "github.com/aquasecurity/trivy/pkg/providers/azure/synapse" +) + +type Azure struct { + AppService appservice.AppService + Authorization authorization.Authorization + Compute compute.Compute + Container container.Container + Database database.Database + DataFactory datafactory.DataFactory + DataLake datalake.DataLake + KeyVault keyvault.KeyVault + Monitor monitor.Monitor + Network network.Network + SecurityCenter securitycenter.SecurityCenter + Storage storage.Storage + Synapse synapse.Synapse +} diff --git a/pkg/providers/azure/compute/compute.go b/pkg/providers/azure/compute/compute.go new file mode 100755 index 000000000000..dd198e18e676 --- /dev/null +++ b/pkg/providers/azure/compute/compute.go @@ -0,0 +1,42 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Compute struct { + LinuxVirtualMachines []LinuxVirtualMachine + WindowsVirtualMachines []WindowsVirtualMachine + ManagedDisks []ManagedDisk +} + +type VirtualMachine struct { + Metadata defsecTypes.MisconfigMetadata + CustomData defsecTypes.StringValue // NOT base64 encoded +} + +type LinuxVirtualMachine struct { + Metadata defsecTypes.MisconfigMetadata + VirtualMachine + OSProfileLinuxConfig OSProfileLinuxConfig +} + +type WindowsVirtualMachine struct { + Metadata defsecTypes.MisconfigMetadata + VirtualMachine +} + +type OSProfileLinuxConfig struct { + Metadata defsecTypes.MisconfigMetadata + DisablePasswordAuthentication defsecTypes.BoolValue +} + +type ManagedDisk struct { + Metadata defsecTypes.MisconfigMetadata + Encryption Encryption +} + +type Encryption struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} diff --git a/pkg/providers/azure/container/container.go b/pkg/providers/azure/container/container.go new file mode 100755 index 000000000000..0a2cfa30704c --- /dev/null +++ b/pkg/providers/azure/container/container.go @@ -0,0 +1,38 @@ +package container + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Container struct { + KubernetesClusters 
[]KubernetesCluster +} + +type KubernetesCluster struct { + Metadata defsecTypes.MisconfigMetadata + NetworkProfile NetworkProfile + EnablePrivateCluster defsecTypes.BoolValue + APIServerAuthorizedIPRanges []defsecTypes.StringValue + AddonProfile AddonProfile + RoleBasedAccessControl RoleBasedAccessControl +} + +type RoleBasedAccessControl struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type AddonProfile struct { + Metadata defsecTypes.MisconfigMetadata + OMSAgent OMSAgent +} + +type OMSAgent struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type NetworkProfile struct { + Metadata defsecTypes.MisconfigMetadata + NetworkPolicy defsecTypes.StringValue // "", "calico", "azure" +} diff --git a/pkg/providers/azure/database/database.go b/pkg/providers/azure/database/database.go new file mode 100755 index 000000000000..d14cacdca7c8 --- /dev/null +++ b/pkg/providers/azure/database/database.go @@ -0,0 +1,68 @@ +package database + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Database struct { + MSSQLServers []MSSQLServer + MariaDBServers []MariaDBServer + MySQLServers []MySQLServer + PostgreSQLServers []PostgreSQLServer +} + +type MariaDBServer struct { + Metadata defsecTypes.MisconfigMetadata + Server +} + +type MySQLServer struct { + Metadata defsecTypes.MisconfigMetadata + Server +} + +type PostgreSQLServer struct { + Metadata defsecTypes.MisconfigMetadata + Server + Config PostgresSQLConfig +} + +type PostgresSQLConfig struct { + Metadata defsecTypes.MisconfigMetadata + LogCheckpoints defsecTypes.BoolValue + ConnectionThrottling defsecTypes.BoolValue + LogConnections defsecTypes.BoolValue +} + +type Server struct { + Metadata defsecTypes.MisconfigMetadata + EnableSSLEnforcement defsecTypes.BoolValue + MinimumTLSVersion defsecTypes.StringValue + EnablePublicNetworkAccess defsecTypes.BoolValue + FirewallRules []FirewallRule +} + +type MSSQLServer struct { + Metadata defsecTypes.MisconfigMetadata + Server + ExtendedAuditingPolicies []ExtendedAuditingPolicy + SecurityAlertPolicies []SecurityAlertPolicy +} + +type SecurityAlertPolicy struct { + Metadata defsecTypes.MisconfigMetadata + EmailAddresses []defsecTypes.StringValue + DisabledAlerts []defsecTypes.StringValue + EmailAccountAdmins defsecTypes.BoolValue +} + +type ExtendedAuditingPolicy struct { + Metadata defsecTypes.MisconfigMetadata + RetentionInDays defsecTypes.IntValue +} + +type FirewallRule struct { + Metadata defsecTypes.MisconfigMetadata + StartIP defsecTypes.StringValue + EndIP defsecTypes.StringValue +} diff --git a/pkg/providers/azure/datafactory/datafactory.go b/pkg/providers/azure/datafactory/datafactory.go new file mode 100755 index 000000000000..8416450349f7 --- /dev/null +++ b/pkg/providers/azure/datafactory/datafactory.go @@ -0,0 +1,14 @@ +package datafactory + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type DataFactory struct { + DataFactories []Factory +} + +type Factory struct { + Metadata defsecTypes.MisconfigMetadata + EnablePublicNetwork defsecTypes.BoolValue +} diff --git a/pkg/providers/azure/datalake/datalake.go b/pkg/providers/azure/datalake/datalake.go new file mode 100755 index 000000000000..720e3094b407 --- /dev/null +++ b/pkg/providers/azure/datalake/datalake.go @@ -0,0 +1,14 @@ +package datalake + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type DataLake struct { + Stores []Store +} + +type Store struct { + Metadata 
defsecTypes.MisconfigMetadata + EnableEncryption defsecTypes.BoolValue +} diff --git a/pkg/providers/azure/keyvault/keyvault.go b/pkg/providers/azure/keyvault/keyvault.go new file mode 100755 index 000000000000..c6443e1c7380 --- /dev/null +++ b/pkg/providers/azure/keyvault/keyvault.go @@ -0,0 +1,34 @@ +package keyvault + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type KeyVault struct { + Vaults []Vault +} + +type Vault struct { + Metadata defsecTypes.MisconfigMetadata + Secrets []Secret + Keys []Key + EnablePurgeProtection defsecTypes.BoolValue + SoftDeleteRetentionDays defsecTypes.IntValue + NetworkACLs NetworkACLs +} + +type NetworkACLs struct { + Metadata defsecTypes.MisconfigMetadata + DefaultAction defsecTypes.StringValue +} + +type Key struct { + Metadata defsecTypes.MisconfigMetadata + ExpiryDate defsecTypes.TimeValue +} + +type Secret struct { + Metadata defsecTypes.MisconfigMetadata + ContentType defsecTypes.StringValue + ExpiryDate defsecTypes.TimeValue +} diff --git a/pkg/providers/azure/monitor/monitor.go b/pkg/providers/azure/monitor/monitor.go new file mode 100755 index 000000000000..7af67c1c6054 --- /dev/null +++ b/pkg/providers/azure/monitor/monitor.go @@ -0,0 +1,22 @@ +package monitor + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Monitor struct { + LogProfiles []LogProfile +} + +type LogProfile struct { + Metadata defsecTypes.MisconfigMetadata + RetentionPolicy RetentionPolicy + Categories []defsecTypes.StringValue + Locations []defsecTypes.StringValue +} + +type RetentionPolicy struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + Days defsecTypes.IntValue +} diff --git a/pkg/providers/azure/network/network.go b/pkg/providers/azure/network/network.go new file mode 100755 index 000000000000..b166c2c7545e --- /dev/null +++ b/pkg/providers/azure/network/network.go @@ -0,0 +1,47 @@ +package network + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Network struct { + SecurityGroups []SecurityGroup + NetworkWatcherFlowLogs []NetworkWatcherFlowLog +} + +type SecurityGroup struct { + Metadata defsecTypes.MisconfigMetadata + Rules []SecurityGroupRule +} + +type SecurityGroupRule struct { + Metadata defsecTypes.MisconfigMetadata + Outbound defsecTypes.BoolValue + Allow defsecTypes.BoolValue + SourceAddresses []defsecTypes.StringValue + SourcePorts []PortRange + DestinationAddresses []defsecTypes.StringValue + DestinationPorts []PortRange + Protocol defsecTypes.StringValue +} + +type PortRange struct { + Metadata defsecTypes.MisconfigMetadata + Start int + End int +} + +func (r PortRange) Includes(port int) bool { + return port >= r.Start && port <= r.End +} + +type NetworkWatcherFlowLog struct { + Metadata defsecTypes.MisconfigMetadata + RetentionPolicy RetentionPolicy +} + +type RetentionPolicy struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + Days defsecTypes.IntValue +} diff --git a/pkg/providers/azure/securitycenter/securitycenter.go b/pkg/providers/azure/securitycenter/securitycenter.go new file mode 100755 index 000000000000..0e9e9a9ffafe --- /dev/null +++ b/pkg/providers/azure/securitycenter/securitycenter.go @@ -0,0 +1,26 @@ +package securitycenter + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SecurityCenter struct { + Contacts []Contact + Subscriptions []SubscriptionPricing +} + +type Contact struct { + Metadata defsecTypes.MisconfigMetadata + 
EnableAlertNotifications defsecTypes.BoolValue + Phone defsecTypes.StringValue +} + +const ( + TierFree = "Free" + TierStandard = "Standard" +) + +type SubscriptionPricing struct { + Metadata defsecTypes.MisconfigMetadata + Tier defsecTypes.StringValue +} diff --git a/pkg/providers/azure/storage/storage.go b/pkg/providers/azure/storage/storage.go new file mode 100755 index 000000000000..bcfe5fffe12b --- /dev/null +++ b/pkg/providers/azure/storage/storage.go @@ -0,0 +1,46 @@ +package storage + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Storage struct { + Accounts []Account +} + +type Account struct { + Metadata defsecTypes.MisconfigMetadata + NetworkRules []NetworkRule + EnforceHTTPS defsecTypes.BoolValue + Containers []Container + QueueProperties QueueProperties + MinimumTLSVersion defsecTypes.StringValue + Queues []Queue +} + +type Queue struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue +} + +type QueueProperties struct { + Metadata defsecTypes.MisconfigMetadata + EnableLogging defsecTypes.BoolValue +} + +type NetworkRule struct { + Metadata defsecTypes.MisconfigMetadata + Bypass []defsecTypes.StringValue + AllowByDefault defsecTypes.BoolValue +} + +const ( + PublicAccessOff = "off" + PublicAccessBlob = "blob" + PublicAccessContainer = "container" +) + +type Container struct { + Metadata defsecTypes.MisconfigMetadata + PublicAccess defsecTypes.StringValue +} diff --git a/pkg/providers/azure/synapse/synapse.go b/pkg/providers/azure/synapse/synapse.go new file mode 100755 index 000000000000..fdf1dbbea5df --- /dev/null +++ b/pkg/providers/azure/synapse/synapse.go @@ -0,0 +1,14 @@ +package synapse + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Synapse struct { + Workspaces []Workspace +} + +type Workspace struct { + Metadata defsecTypes.MisconfigMetadata + EnableManagedVirtualNetwork defsecTypes.BoolValue +} diff --git a/pkg/providers/cloudstack/cloudstack.go b/pkg/providers/cloudstack/cloudstack.go new file mode 100755 index 000000000000..eb54a4c7d6ce --- /dev/null +++ b/pkg/providers/cloudstack/cloudstack.go @@ -0,0 +1,9 @@ +package cloudstack + +import ( + "github.com/aquasecurity/trivy/pkg/providers/cloudstack/compute" +) + +type CloudStack struct { + Compute compute.Compute +} diff --git a/pkg/providers/cloudstack/compute/compute.go b/pkg/providers/cloudstack/compute/compute.go new file mode 100755 index 000000000000..e586e0673462 --- /dev/null +++ b/pkg/providers/cloudstack/compute/compute.go @@ -0,0 +1,14 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Compute struct { + Instances []Instance +} + +type Instance struct { + Metadata defsecTypes.MisconfigMetadata + UserData defsecTypes.StringValue // not b64 encoded pls +} diff --git a/pkg/providers/digitalocean/compute/compute.go b/pkg/providers/digitalocean/compute/compute.go new file mode 100755 index 000000000000..eb736c21cf32 --- /dev/null +++ b/pkg/providers/digitalocean/compute/compute.go @@ -0,0 +1,50 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Compute struct { + Firewalls []Firewall + LoadBalancers []LoadBalancer + Droplets []Droplet + KubernetesClusters []KubernetesCluster +} + +type Firewall struct { + Metadata defsecTypes.MisconfigMetadata + OutboundRules []OutboundFirewallRule + InboundRules []InboundFirewallRule +} + +type KubernetesCluster struct { + Metadata 
defsecTypes.MisconfigMetadata + SurgeUpgrade defsecTypes.BoolValue + AutoUpgrade defsecTypes.BoolValue +} + +type LoadBalancer struct { + Metadata defsecTypes.MisconfigMetadata + ForwardingRules []ForwardingRule + RedirectHttpToHttps defsecTypes.BoolValue +} + +type ForwardingRule struct { + Metadata defsecTypes.MisconfigMetadata + EntryProtocol defsecTypes.StringValue +} + +type OutboundFirewallRule struct { + Metadata defsecTypes.MisconfigMetadata + DestinationAddresses []defsecTypes.StringValue +} + +type InboundFirewallRule struct { + Metadata defsecTypes.MisconfigMetadata + SourceAddresses []defsecTypes.StringValue +} + +type Droplet struct { + Metadata defsecTypes.MisconfigMetadata + SSHKeys []defsecTypes.StringValue +} diff --git a/pkg/providers/digitalocean/digitalocean.go b/pkg/providers/digitalocean/digitalocean.go new file mode 100755 index 000000000000..7f5f7e683c78 --- /dev/null +++ b/pkg/providers/digitalocean/digitalocean.go @@ -0,0 +1,11 @@ +package digitalocean + +import ( + "github.com/aquasecurity/trivy/pkg/providers/digitalocean/compute" + "github.com/aquasecurity/trivy/pkg/providers/digitalocean/spaces" +) + +type DigitalOcean struct { + Compute compute.Compute + Spaces spaces.Spaces +} diff --git a/pkg/providers/digitalocean/spaces/spaces.go b/pkg/providers/digitalocean/spaces/spaces.go new file mode 100755 index 000000000000..f65e635d4c3a --- /dev/null +++ b/pkg/providers/digitalocean/spaces/spaces.go @@ -0,0 +1,28 @@ +package spaces + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Spaces struct { + Buckets []Bucket +} + +type Bucket struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Objects []Object + ACL defsecTypes.StringValue + ForceDestroy defsecTypes.BoolValue + Versioning Versioning +} + +type Versioning struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type Object struct { + Metadata defsecTypes.MisconfigMetadata + ACL defsecTypes.StringValue +} diff --git a/pkg/providers/dockerfile/dockerfile.go b/pkg/providers/dockerfile/dockerfile.go new file mode 100644 index 000000000000..a39b1a01044d --- /dev/null +++ b/pkg/providers/dockerfile/dockerfile.go @@ -0,0 +1,61 @@ +package dockerfile + +import ( + "reflect" + + "github.com/aquasecurity/trivy/pkg/rego/convert" +) + +// NOTE: we are currently preserving mixed case json here for backward compatibility + +// Dockerfile represents a parsed Dockerfile +type Dockerfile struct { + Stages []Stage +} + +type Stage struct { + Name string + Commands []Command +} + +func (d Dockerfile) ToRego() interface{} { + return map[string]interface{}{ + "Stages": convert.SliceToRego(reflect.ValueOf(d.Stages)), + } +} + +func (s Stage) ToRego() interface{} { + return map[string]interface{}{ + "Name": s.Name, + "Commands": convert.SliceToRego(reflect.ValueOf(s.Commands)), + } +} + +// Command is the struct for each dockerfile command +type Command struct { + Cmd string + SubCmd string + Flags []string + Value []string + Original string + JSON bool + Stage int + Path string + StartLine int + EndLine int +} + +func (c Command) ToRego() interface{} { + return map[string]interface{}{ + "Cmd": c.Cmd, + "SubCmd": c.SubCmd, + "Flags": c.Flags, + "Value": c.Value, + "Original": c.Original, + "JSON": c.JSON, + "Stage": c.Stage, + "Path": c.Path, + "StartLine": c.StartLine, + "EndLine": c.EndLine, + } +} diff --git a/pkg/providers/github/actions.go b/pkg/providers/github/actions.go new file mode 100644 index 
000000000000..513743688f5c --- /dev/null +++ b/pkg/providers/github/actions.go @@ -0,0 +1,19 @@ +package github + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Action struct { + Metadata defsecTypes.MisconfigMetadata + EnvironmentSecrets []EnvironmentSecret +} + +type EnvironmentSecret struct { + Metadata defsecTypes.MisconfigMetadata + Repository defsecTypes.StringValue + Environment defsecTypes.StringValue + SecretName defsecTypes.StringValue + PlainTextValue defsecTypes.StringValue + EncryptedValue defsecTypes.StringValue +} diff --git a/pkg/providers/github/branch_protections.go b/pkg/providers/github/branch_protections.go new file mode 100755 index 000000000000..c0fc231e20e2 --- /dev/null +++ b/pkg/providers/github/branch_protections.go @@ -0,0 +1,14 @@ +package github + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type BranchProtection struct { + Metadata defsecTypes.MisconfigMetadata + RequireSignedCommits defsecTypes.BoolValue +} + +func (b BranchProtection) RequiresSignedCommits() bool { + return b.RequireSignedCommits.IsTrue() +} diff --git a/pkg/providers/github/github.go b/pkg/providers/github/github.go new file mode 100755 index 000000000000..449f94cecc30 --- /dev/null +++ b/pkg/providers/github/github.go @@ -0,0 +1,7 @@ +package github + +type GitHub struct { + Repositories []Repository + EnvironmentSecrets []EnvironmentSecret + BranchProtections []BranchProtection +} diff --git a/pkg/providers/github/repositories.go b/pkg/providers/github/repositories.go new file mode 100755 index 000000000000..7d7388e06c80 --- /dev/null +++ b/pkg/providers/github/repositories.go @@ -0,0 +1,16 @@ +package github + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Repository struct { + Metadata defsecTypes.MisconfigMetadata + Public defsecTypes.BoolValue + VulnerabilityAlerts defsecTypes.BoolValue + Archived defsecTypes.BoolValue +} + +func (r Repository) IsArchived() bool { + return r.Archived.IsTrue() +} diff --git a/pkg/providers/google/bigquery/bigquery.go b/pkg/providers/google/bigquery/bigquery.go new file mode 100755 index 000000000000..4dbd2d21bd44 --- /dev/null +++ b/pkg/providers/google/bigquery/bigquery.go @@ -0,0 +1,26 @@ +package bigquery + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type BigQuery struct { + Datasets []Dataset +} + +type Dataset struct { + Metadata defsecTypes.MisconfigMetadata + ID defsecTypes.StringValue + AccessGrants []AccessGrant +} + +const ( + SpecialGroupAllAuthenticatedUsers = "allAuthenticatedUsers" +) + +type AccessGrant struct { + Metadata defsecTypes.MisconfigMetadata + Role defsecTypes.StringValue + Domain defsecTypes.StringValue + SpecialGroup defsecTypes.StringValue +} diff --git a/pkg/providers/google/compute/compute.go b/pkg/providers/google/compute/compute.go new file mode 100755 index 000000000000..ffa9db257bad --- /dev/null +++ b/pkg/providers/google/compute/compute.go @@ -0,0 +1,9 @@ +package compute + +type Compute struct { + Disks []Disk + Networks []Network + SSLPolicies []SSLPolicy + ProjectMetadata ProjectMetadata + Instances []Instance +} diff --git a/pkg/providers/google/compute/disk.go b/pkg/providers/google/compute/disk.go new file mode 100755 index 000000000000..86b79f971c5e --- /dev/null +++ b/pkg/providers/google/compute/disk.go @@ -0,0 +1,17 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Disk struct { + Metadata 
defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Encryption DiskEncryption +} + +type DiskEncryption struct { + Metadata defsecTypes.MisconfigMetadata + RawKey defsecTypes.BytesValue + KMSKeyLink defsecTypes.StringValue +} diff --git a/pkg/providers/google/compute/firewall.go b/pkg/providers/google/compute/firewall.go new file mode 100755 index 000000000000..7d51ef7f0636 --- /dev/null +++ b/pkg/providers/google/compute/firewall.go @@ -0,0 +1,34 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Firewall struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + IngressRules []IngressRule + EgressRules []EgressRule + SourceTags []defsecTypes.StringValue + TargetTags []defsecTypes.StringValue +} + +type FirewallRule struct { + Metadata defsecTypes.MisconfigMetadata + Enforced defsecTypes.BoolValue + IsAllow defsecTypes.BoolValue + Protocol defsecTypes.StringValue + Ports []defsecTypes.IntValue +} + +type IngressRule struct { + Metadata defsecTypes.MisconfigMetadata + FirewallRule + SourceRanges []defsecTypes.StringValue +} + +type EgressRule struct { + Metadata defsecTypes.MisconfigMetadata + FirewallRule + DestinationRanges []defsecTypes.StringValue +} diff --git a/pkg/providers/google/compute/instance.go b/pkg/providers/google/compute/instance.go new file mode 100755 index 000000000000..6c5fcc157182 --- /dev/null +++ b/pkg/providers/google/compute/instance.go @@ -0,0 +1,41 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Instance struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + NetworkInterfaces []NetworkInterface + ShieldedVM ShieldedVMConfig + ServiceAccount ServiceAccount + CanIPForward defsecTypes.BoolValue + OSLoginEnabled defsecTypes.BoolValue + EnableProjectSSHKeyBlocking defsecTypes.BoolValue + EnableSerialPort defsecTypes.BoolValue + BootDisks []Disk + AttachedDisks []Disk +} + +type ServiceAccount struct { + Metadata defsecTypes.MisconfigMetadata + Email defsecTypes.StringValue + IsDefault defsecTypes.BoolValue + Scopes []defsecTypes.StringValue +} + +type NetworkInterface struct { + Metadata defsecTypes.MisconfigMetadata + Network *Network + SubNetwork *SubNetwork + HasPublicIP defsecTypes.BoolValue + NATIP defsecTypes.StringValue +} + +type ShieldedVMConfig struct { + Metadata defsecTypes.MisconfigMetadata + SecureBootEnabled defsecTypes.BoolValue + IntegrityMonitoringEnabled defsecTypes.BoolValue + VTPMEnabled defsecTypes.BoolValue +} diff --git a/pkg/providers/google/compute/metadata.go b/pkg/providers/google/compute/metadata.go new file mode 100755 index 000000000000..cf2b9dc67127 --- /dev/null +++ b/pkg/providers/google/compute/metadata.go @@ -0,0 +1,10 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type ProjectMetadata struct { + Metadata defsecTypes.MisconfigMetadata + EnableOSLogin defsecTypes.BoolValue +} diff --git a/pkg/providers/google/compute/network.go b/pkg/providers/google/compute/network.go new file mode 100755 index 000000000000..d4a036eeaffc --- /dev/null +++ b/pkg/providers/google/compute/network.go @@ -0,0 +1,11 @@ +package compute + +import ( + "github.com/aquasecurity/trivy/pkg/types" +) + +type Network struct { + Metadata types.MisconfigMetadata + Firewall *Firewall + Subnetworks []SubNetwork +} diff --git a/pkg/providers/google/compute/ssl_policy.go b/pkg/providers/google/compute/ssl_policy.go new file mode 
100755 index 000000000000..a01dd60606a5 --- /dev/null +++ b/pkg/providers/google/compute/ssl_policy.go @@ -0,0 +1,12 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SSLPolicy struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Profile defsecTypes.StringValue + MinimumTLSVersion defsecTypes.StringValue +} diff --git a/pkg/providers/google/compute/subnetwork.go b/pkg/providers/google/compute/subnetwork.go new file mode 100755 index 000000000000..4d9fd102bf44 --- /dev/null +++ b/pkg/providers/google/compute/subnetwork.go @@ -0,0 +1,12 @@ +package compute + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SubNetwork struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Purpose defsecTypes.StringValue + EnableFlowLogs defsecTypes.BoolValue +} diff --git a/pkg/providers/google/dns/dns.go b/pkg/providers/google/dns/dns.go new file mode 100755 index 000000000000..685b2468686f --- /dev/null +++ b/pkg/providers/google/dns/dns.go @@ -0,0 +1,31 @@ +package dns + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type DNS struct { + ManagedZones []ManagedZone +} + +type ManagedZone struct { + Metadata defsecTypes.MisconfigMetadata + DNSSec DNSSec + Visibility defsecTypes.StringValue +} + +func (m ManagedZone) IsPrivate() bool { + return m.Visibility.EqualTo("private", defsecTypes.IgnoreCase) +} + +type DNSSec struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + DefaultKeySpecs []KeySpecs +} + +type KeySpecs struct { + Metadata defsecTypes.MisconfigMetadata + Algorithm defsecTypes.StringValue + KeyType defsecTypes.StringValue +} diff --git a/pkg/providers/google/gke/gke.go b/pkg/providers/google/gke/gke.go new file mode 100755 index 000000000000..4886d14592ff --- /dev/null +++ b/pkg/providers/google/gke/gke.go @@ -0,0 +1,86 @@ +package gke + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type GKE struct { + Clusters []Cluster +} + +type Cluster struct { + Metadata defsecTypes.MisconfigMetadata + NodePools []NodePool + IPAllocationPolicy IPAllocationPolicy + MasterAuthorizedNetworks MasterAuthorizedNetworks + NetworkPolicy NetworkPolicy + PrivateCluster PrivateCluster + LoggingService defsecTypes.StringValue + MonitoringService defsecTypes.StringValue + MasterAuth MasterAuth + NodeConfig NodeConfig + EnableShieldedNodes defsecTypes.BoolValue + EnableLegacyABAC defsecTypes.BoolValue + ResourceLabels defsecTypes.MapValue + RemoveDefaultNodePool defsecTypes.BoolValue + EnableAutpilot defsecTypes.BoolValue + DatapathProvider defsecTypes.StringValue +} + +type NodeConfig struct { + Metadata defsecTypes.MisconfigMetadata + ImageType defsecTypes.StringValue + WorkloadMetadataConfig WorkloadMetadataConfig + ServiceAccount defsecTypes.StringValue + EnableLegacyEndpoints defsecTypes.BoolValue +} + +type WorkloadMetadataConfig struct { + Metadata defsecTypes.MisconfigMetadata + NodeMetadata defsecTypes.StringValue +} + +type MasterAuth struct { + Metadata defsecTypes.MisconfigMetadata + ClientCertificate ClientCertificate + Username defsecTypes.StringValue + Password defsecTypes.StringValue +} + +type ClientCertificate struct { + Metadata defsecTypes.MisconfigMetadata + IssueCertificate defsecTypes.BoolValue +} + +type PrivateCluster struct { + Metadata defsecTypes.MisconfigMetadata + EnablePrivateNodes defsecTypes.BoolValue +} + +type NetworkPolicy struct { + Metadata 
defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type MasterAuthorizedNetworks struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue + CIDRs []defsecTypes.StringValue +} + +type IPAllocationPolicy struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type NodePool struct { + Metadata defsecTypes.MisconfigMetadata + Management Management + NodeConfig NodeConfig +} + +type Management struct { + Metadata defsecTypes.MisconfigMetadata + EnableAutoRepair defsecTypes.BoolValue + EnableAutoUpgrade defsecTypes.BoolValue +} diff --git a/pkg/providers/google/google.go b/pkg/providers/google/google.go new file mode 100755 index 000000000000..98bd9e0dc8db --- /dev/null +++ b/pkg/providers/google/google.go @@ -0,0 +1,23 @@ +package google + +import ( + "github.com/aquasecurity/trivy/pkg/providers/google/bigquery" + "github.com/aquasecurity/trivy/pkg/providers/google/compute" + "github.com/aquasecurity/trivy/pkg/providers/google/dns" + "github.com/aquasecurity/trivy/pkg/providers/google/gke" + "github.com/aquasecurity/trivy/pkg/providers/google/iam" + "github.com/aquasecurity/trivy/pkg/providers/google/kms" + "github.com/aquasecurity/trivy/pkg/providers/google/sql" + "github.com/aquasecurity/trivy/pkg/providers/google/storage" +) + +type Google struct { + BigQuery bigquery.BigQuery + Compute compute.Compute + DNS dns.DNS + GKE gke.GKE + KMS kms.KMS + IAM iam.IAM + SQL sql.SQL + Storage storage.Storage +} diff --git a/pkg/providers/google/iam/iam.go b/pkg/providers/google/iam/iam.go new file mode 100755 index 000000000000..593ec0e4a912 --- /dev/null +++ b/pkg/providers/google/iam/iam.go @@ -0,0 +1,88 @@ +package iam + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type IAM struct { + Organizations []Organization + WorkloadIdentityPoolProviders []WorkloadIdentityPoolProvider +} + +type Organization struct { + Metadata defsecTypes.MisconfigMetadata + Folders []Folder + Projects []Project + Members []Member + Bindings []Binding +} + +type Folder struct { + Metadata defsecTypes.MisconfigMetadata + Folders []Folder + Projects []Project + Members []Member + Bindings []Binding +} + +type Project struct { + Metadata defsecTypes.MisconfigMetadata + AutoCreateNetwork defsecTypes.BoolValue + Members []Member + Bindings []Binding +} + +type Binding struct { + Metadata defsecTypes.MisconfigMetadata + Members []defsecTypes.StringValue + Role defsecTypes.StringValue + IncludesDefaultServiceAccount defsecTypes.BoolValue +} + +type Member struct { + Metadata defsecTypes.MisconfigMetadata + Member defsecTypes.StringValue + Role defsecTypes.StringValue + DefaultServiceAccount defsecTypes.BoolValue +} + +type WorkloadIdentityPoolProvider struct { + Metadata defsecTypes.MisconfigMetadata + WorkloadIdentityPoolId defsecTypes.StringValue + WorkloadIdentityPoolProviderId defsecTypes.StringValue + AttributeCondition defsecTypes.StringValue +} + +func (p *IAM) AllProjects() []Project { + var projects []Project + for _, org := range p.Organizations { + projects = append(projects, org.Projects...) + for _, folder := range org.Folders { + projects = append(projects, folder.Projects...) + for _, desc := range folder.AllFolders() { + projects = append(projects, desc.Projects...) + } + } + } + return projects +} + +func (p *IAM) AllFolders() []Folder { + var folders []Folder + for _, org := range p.Organizations { + folders = append(folders, org.Folders...) 
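+		// org.Folders is only the first level; folder.AllFolders() (defined below)
+		// recurses to collect every nested descendant folder as well.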
+ for _, folder := range org.Folders { + folders = append(folders, folder.AllFolders()...) + } + } + return folders +} + +func (f *Folder) AllFolders() []Folder { + var folders []Folder + for _, folder := range f.Folders { + folders = append(folders, folder) + folders = append(folders, folder.AllFolders()...) + } + return folders +} diff --git a/pkg/providers/google/kms/kms.go b/pkg/providers/google/kms/kms.go new file mode 100755 index 000000000000..5991ddb211e9 --- /dev/null +++ b/pkg/providers/google/kms/kms.go @@ -0,0 +1,19 @@ +package kms + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type KMS struct { + KeyRings []KeyRing +} + +type KeyRing struct { + Metadata defsecTypes.MisconfigMetadata + Keys []Key +} + +type Key struct { + Metadata defsecTypes.MisconfigMetadata + RotationPeriodSeconds defsecTypes.IntValue +} diff --git a/pkg/providers/google/sql/sql.go b/pkg/providers/google/sql/sql.go new file mode 100755 index 000000000000..5787acc064aa --- /dev/null +++ b/pkg/providers/google/sql/sql.go @@ -0,0 +1,78 @@ +package sql + +import ( + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SQL struct { + Instances []DatabaseInstance +} + +const ( + DatabaseFamilyMySQL = "MYSQL" + DatabaseFamilyPostgres = "POSTGRES" + DatabaseFamilySQLServer = "SQLSERVER" +) + +const ( + DatabaseVersionMySQL_5_6 = "MYSQL_5_6" + DatabaseVersionMySQL_5_7 = "MYSQL_5_7" + DatabaseVersionMySQL_8_0 = "MYSQL_8_0" + DatabaseVersionPostgres_9_6 = "POSTGRES_9_6" + DatabaseVersionPostgres_10 = "POSTGRES_10" + DatabaseVersionPostgres_11 = "POSTGRES_11" + DatabaseVersionPostgres_12 = "POSTGRES_12" + DatabaseVersionPostgres_13 = "POSTGRES_13" + DatabaseVersionSQLServer_2017_STANDARD = "SQLSERVER_2017_STANDARD" + DatabaseVersionSQLServer_2017_ENTERPRISE = "SQLSERVER_2017_ENTERPRISE" + DatabaseVersionSQLServer_2017_EXPRESS = "SQLSERVER_2017_EXPRESS" + DatabaseVersionSQLServer_2017_WEB = "SQLSERVER_2017_WEB" +) + +type DatabaseInstance struct { + Metadata defsecTypes.MisconfigMetadata + DatabaseVersion defsecTypes.StringValue + Settings Settings + IsReplica defsecTypes.BoolValue +} + +type Settings struct { + Metadata defsecTypes.MisconfigMetadata + Flags Flags + Backups Backups + IPConfiguration IPConfiguration +} +type Flags struct { + Metadata defsecTypes.MisconfigMetadata + LogTempFileSize defsecTypes.IntValue + LocalInFile defsecTypes.BoolValue + ContainedDatabaseAuthentication defsecTypes.BoolValue + CrossDBOwnershipChaining defsecTypes.BoolValue + LogCheckpoints defsecTypes.BoolValue + LogConnections defsecTypes.BoolValue + LogDisconnections defsecTypes.BoolValue + LogLockWaits defsecTypes.BoolValue + LogMinMessages defsecTypes.StringValue // FATAL, PANIC, LOG, ERROR, WARN + LogMinDurationStatement defsecTypes.IntValue +} + +type Backups struct { + Metadata defsecTypes.MisconfigMetadata + Enabled defsecTypes.BoolValue +} + +type IPConfiguration struct { + Metadata defsecTypes.MisconfigMetadata + RequireTLS defsecTypes.BoolValue + EnableIPv4 defsecTypes.BoolValue + AuthorizedNetworks []struct { + Name defsecTypes.StringValue + CIDR defsecTypes.StringValue + } +} + +func (i *DatabaseInstance) DatabaseFamily() string { + return strings.Split(i.DatabaseVersion.Value(), "_")[0] +} diff --git a/pkg/providers/google/storage/storage.go b/pkg/providers/google/storage/storage.go new file mode 100755 index 000000000000..7df12ad29b16 --- /dev/null +++ b/pkg/providers/google/storage/storage.go @@ -0,0 +1,25 @@ +package storage + +import ( + 
"github.com/aquasecurity/trivy/pkg/providers/google/iam" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Storage struct { + Buckets []Bucket +} + +type Bucket struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Location defsecTypes.StringValue + EnableUniformBucketLevelAccess defsecTypes.BoolValue + Members []iam.Member + Bindings []iam.Binding + Encryption BucketEncryption +} + +type BucketEncryption struct { + Metadata defsecTypes.MisconfigMetadata + DefaultKMSKeyName defsecTypes.StringValue +} diff --git a/pkg/providers/kubernetes/kubernetes.go b/pkg/providers/kubernetes/kubernetes.go new file mode 100755 index 000000000000..7b978ec9cd4f --- /dev/null +++ b/pkg/providers/kubernetes/kubernetes.go @@ -0,0 +1,38 @@ +package kubernetes + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Kubernetes struct { + NetworkPolicies []NetworkPolicy +} + +type NetworkPolicy struct { + Metadata defsecTypes.MisconfigMetadata + Spec NetworkPolicySpec +} + +type NetworkPolicySpec struct { + Metadata defsecTypes.MisconfigMetadata + Egress Egress + Ingress Ingress +} + +type Egress struct { + Metadata defsecTypes.MisconfigMetadata + Ports []Port + DestinationCIDRs []defsecTypes.StringValue +} + +type Ingress struct { + Metadata defsecTypes.MisconfigMetadata + Ports []Port + SourceCIDRs []defsecTypes.StringValue +} + +type Port struct { + Metadata defsecTypes.MisconfigMetadata + Number defsecTypes.StringValue // e.g. "http" or "80" + Protocol defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/computing/computing.go b/pkg/providers/nifcloud/computing/computing.go new file mode 100755 index 000000000000..aaef2361bf98 --- /dev/null +++ b/pkg/providers/nifcloud/computing/computing.go @@ -0,0 +1,6 @@ +package computing + +type Computing struct { + SecurityGroups []SecurityGroup + Instances []Instance +} diff --git a/pkg/providers/nifcloud/computing/instance.go b/pkg/providers/nifcloud/computing/instance.go new file mode 100644 index 000000000000..04f9cbface62 --- /dev/null +++ b/pkg/providers/nifcloud/computing/instance.go @@ -0,0 +1,16 @@ +package computing + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Instance struct { + Metadata defsecTypes.MisconfigMetadata + SecurityGroup defsecTypes.StringValue + NetworkInterfaces []NetworkInterface +} + +type NetworkInterface struct { + Metadata defsecTypes.MisconfigMetadata + NetworkID defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/computing/security_group.go b/pkg/providers/nifcloud/computing/security_group.go new file mode 100644 index 000000000000..5dfcf449fbaf --- /dev/null +++ b/pkg/providers/nifcloud/computing/security_group.go @@ -0,0 +1,18 @@ +package computing + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SecurityGroup struct { + Metadata defsecTypes.MisconfigMetadata + Description defsecTypes.StringValue + IngressRules []SecurityGroupRule + EgressRules []SecurityGroupRule +} + +type SecurityGroupRule struct { + Metadata defsecTypes.MisconfigMetadata + Description defsecTypes.StringValue + CIDR defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/dns/dns.go b/pkg/providers/nifcloud/dns/dns.go new file mode 100755 index 000000000000..7351506d7f6f --- /dev/null +++ b/pkg/providers/nifcloud/dns/dns.go @@ -0,0 +1,5 @@ +package dns + +type DNS struct { + Records []Record +} diff --git a/pkg/providers/nifcloud/dns/record.go 
b/pkg/providers/nifcloud/dns/record.go new file mode 100644 index 000000000000..da89d8ee5d4e --- /dev/null +++ b/pkg/providers/nifcloud/dns/record.go @@ -0,0 +1,13 @@ +package dns + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +const ZoneRegistrationAuthTxt = "nifty-dns-verify=" + +type Record struct { + Metadata defsecTypes.MisconfigMetadata + Type defsecTypes.StringValue + Record defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/nas/nas.go b/pkg/providers/nifcloud/nas/nas.go new file mode 100755 index 000000000000..e73a9c9efd70 --- /dev/null +++ b/pkg/providers/nifcloud/nas/nas.go @@ -0,0 +1,6 @@ +package nas + +type NAS struct { + NASSecurityGroups []NASSecurityGroup + NASInstances []NASInstance +} diff --git a/pkg/providers/nifcloud/nas/nas_instance.go b/pkg/providers/nifcloud/nas/nas_instance.go new file mode 100644 index 000000000000..3b065110ab22 --- /dev/null +++ b/pkg/providers/nifcloud/nas/nas_instance.go @@ -0,0 +1,10 @@ +package nas + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type NASInstance struct { + Metadata defsecTypes.MisconfigMetadata + NetworkID defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/nas/nas_security_group.go b/pkg/providers/nifcloud/nas/nas_security_group.go new file mode 100644 index 000000000000..78b99967fb95 --- /dev/null +++ b/pkg/providers/nifcloud/nas/nas_security_group.go @@ -0,0 +1,11 @@ +package nas + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type NASSecurityGroup struct { + Metadata defsecTypes.MisconfigMetadata + Description defsecTypes.StringValue + CIDRs []defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/network/elastic_load_balancer.go b/pkg/providers/nifcloud/network/elastic_load_balancer.go new file mode 100644 index 000000000000..7aee92b42de4 --- /dev/null +++ b/pkg/providers/nifcloud/network/elastic_load_balancer.go @@ -0,0 +1,16 @@ +package network + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type ElasticLoadBalancer struct { + Metadata defsecTypes.MisconfigMetadata + NetworkInterfaces []NetworkInterface + Listeners []ElasticLoadBalancerListener +} + +type ElasticLoadBalancerListener struct { + Metadata defsecTypes.MisconfigMetadata + Protocol defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/network/load_balancer.go b/pkg/providers/nifcloud/network/load_balancer.go new file mode 100644 index 000000000000..c240da259d7e --- /dev/null +++ b/pkg/providers/nifcloud/network/load_balancer.go @@ -0,0 +1,16 @@ +package network + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type LoadBalancer struct { + Metadata defsecTypes.MisconfigMetadata + Listeners []LoadBalancerListener +} + +type LoadBalancerListener struct { + Metadata defsecTypes.MisconfigMetadata + Protocol defsecTypes.StringValue + TLSPolicy defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/network/network.go b/pkg/providers/nifcloud/network/network.go new file mode 100755 index 000000000000..c759124fde88 --- /dev/null +++ b/pkg/providers/nifcloud/network/network.go @@ -0,0 +1,16 @@ +package network + +import defsecTypes "github.com/aquasecurity/trivy/pkg/types" + +type Network struct { + ElasticLoadBalancers []ElasticLoadBalancer + LoadBalancers []LoadBalancer + Routers []Router + VpnGateways []VpnGateway +} + +type NetworkInterface struct { + Metadata defsecTypes.MisconfigMetadata + NetworkID defsecTypes.StringValue + IsVipNetwork 
defsecTypes.BoolValue +} diff --git a/pkg/providers/nifcloud/network/router.go b/pkg/providers/nifcloud/network/router.go new file mode 100644 index 000000000000..8a96d06724a9 --- /dev/null +++ b/pkg/providers/nifcloud/network/router.go @@ -0,0 +1,11 @@ +package network + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Router struct { + Metadata defsecTypes.MisconfigMetadata + SecurityGroup defsecTypes.StringValue + NetworkInterfaces []NetworkInterface +} diff --git a/pkg/providers/nifcloud/network/vpn_gateway.go b/pkg/providers/nifcloud/network/vpn_gateway.go new file mode 100644 index 000000000000..7e5c7ece139d --- /dev/null +++ b/pkg/providers/nifcloud/network/vpn_gateway.go @@ -0,0 +1,10 @@ +package network + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type VpnGateway struct { + Metadata defsecTypes.MisconfigMetadata + SecurityGroup defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/nifcloud.go b/pkg/providers/nifcloud/nifcloud.go new file mode 100755 index 000000000000..b730d83ffb09 --- /dev/null +++ b/pkg/providers/nifcloud/nifcloud.go @@ -0,0 +1,19 @@ +package nifcloud + +import ( + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/computing" + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/dns" + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/nas" + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/network" + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/rdb" + "github.com/aquasecurity/trivy/pkg/providers/nifcloud/sslcertificate" +) + +type Nifcloud struct { + Computing computing.Computing + DNS dns.DNS + NAS nas.NAS + Network network.Network + RDB rdb.RDB + SSLCertificate sslcertificate.SSLCertificate +} diff --git a/pkg/providers/nifcloud/rdb/db_instance.go b/pkg/providers/nifcloud/rdb/db_instance.go new file mode 100644 index 000000000000..960882133ce8 --- /dev/null +++ b/pkg/providers/nifcloud/rdb/db_instance.go @@ -0,0 +1,14 @@ +package rdb + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type DBInstance struct { + Metadata defsecTypes.MisconfigMetadata + BackupRetentionPeriodDays defsecTypes.IntValue + Engine defsecTypes.StringValue + EngineVersion defsecTypes.StringValue + NetworkID defsecTypes.StringValue + PublicAccess defsecTypes.BoolValue +} diff --git a/pkg/providers/nifcloud/rdb/db_security_group.go b/pkg/providers/nifcloud/rdb/db_security_group.go new file mode 100644 index 000000000000..fa608e66eae8 --- /dev/null +++ b/pkg/providers/nifcloud/rdb/db_security_group.go @@ -0,0 +1,11 @@ +package rdb + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type DBSecurityGroup struct { + Metadata defsecTypes.MisconfigMetadata + Description defsecTypes.StringValue + CIDRs []defsecTypes.StringValue +} diff --git a/pkg/providers/nifcloud/rdb/rdb.go b/pkg/providers/nifcloud/rdb/rdb.go new file mode 100755 index 000000000000..4aea31980708 --- /dev/null +++ b/pkg/providers/nifcloud/rdb/rdb.go @@ -0,0 +1,6 @@ +package rdb + +type RDB struct { + DBSecurityGroups []DBSecurityGroup + DBInstances []DBInstance +} diff --git a/pkg/providers/nifcloud/sslcertificate/server_certificate.go b/pkg/providers/nifcloud/sslcertificate/server_certificate.go new file mode 100644 index 000000000000..7934d8f1fd52 --- /dev/null +++ b/pkg/providers/nifcloud/sslcertificate/server_certificate.go @@ -0,0 +1,10 @@ +package sslcertificate + +import ( + defsecTypes 
"github.com/aquasecurity/trivy/pkg/types" +) + +type ServerCertificate struct { + Metadata defsecTypes.MisconfigMetadata + Expiration defsecTypes.TimeValue +} diff --git a/pkg/providers/nifcloud/sslcertificate/ssl_certificate.go b/pkg/providers/nifcloud/sslcertificate/ssl_certificate.go new file mode 100755 index 000000000000..7ab46d870b16 --- /dev/null +++ b/pkg/providers/nifcloud/sslcertificate/ssl_certificate.go @@ -0,0 +1,5 @@ +package sslcertificate + +type SSLCertificate struct { + ServerCertificates []ServerCertificate +} diff --git a/pkg/providers/openstack/networking.go b/pkg/providers/openstack/networking.go new file mode 100644 index 000000000000..0a432435507e --- /dev/null +++ b/pkg/providers/openstack/networking.go @@ -0,0 +1,27 @@ +package openstack + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Networking struct { + SecurityGroups []SecurityGroup +} + +type SecurityGroup struct { + Metadata defsecTypes.MisconfigMetadata + Name defsecTypes.StringValue + Description defsecTypes.StringValue + Rules []SecurityGroupRule +} + +// SecurityGroupRule describes https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/resources/networking_secgroup_rule_v2 +type SecurityGroupRule struct { + Metadata defsecTypes.MisconfigMetadata + IsIngress defsecTypes.BoolValue + EtherType defsecTypes.IntValue // 4 or 6 for ipv4/ipv6 + Protocol defsecTypes.StringValue // e.g. tcp + PortMin defsecTypes.IntValue + PortMax defsecTypes.IntValue + CIDR defsecTypes.StringValue +} diff --git a/pkg/providers/openstack/openstack.go b/pkg/providers/openstack/openstack.go new file mode 100755 index 000000000000..280fc5cfa8bd --- /dev/null +++ b/pkg/providers/openstack/openstack.go @@ -0,0 +1,34 @@ +package openstack + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type OpenStack struct { + Compute Compute + Networking Networking +} + +type Compute struct { + Instances []Instance + Firewall Firewall +} + +type Firewall struct { + AllowRules []FirewallRule + DenyRules []FirewallRule +} + +type FirewallRule struct { + Metadata defsecTypes.MisconfigMetadata + Source defsecTypes.StringValue + Destination defsecTypes.StringValue + SourcePort defsecTypes.StringValue + DestinationPort defsecTypes.StringValue + Enabled defsecTypes.BoolValue +} + +type Instance struct { + Metadata defsecTypes.MisconfigMetadata + AdminPassword defsecTypes.StringValue +} diff --git a/pkg/providers/oracle/oracle.go b/pkg/providers/oracle/oracle.go new file mode 100755 index 000000000000..dcda333284f3 --- /dev/null +++ b/pkg/providers/oracle/oracle.go @@ -0,0 +1,18 @@ +package oracle + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Oracle struct { + Compute Compute +} + +type Compute struct { + AddressReservations []AddressReservation +} + +type AddressReservation struct { + Metadata defsecTypes.MisconfigMetadata + Pool defsecTypes.StringValue // e.g. 
public-pool +} diff --git a/pkg/providers/provider.go b/pkg/providers/provider.go new file mode 100755 index 000000000000..cef13ee8f205 --- /dev/null +++ b/pkg/providers/provider.go @@ -0,0 +1,51 @@ +package providers + +import ( + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +// Provider is the provider that the check applies to +type Provider string + +const ( + UnknownProvider Provider = "" + AWSProvider Provider = "aws" + AzureProvider Provider = "azure" + CustomProvider Provider = "custom" + DigitalOceanProvider Provider = "digitalocean" + GeneralProvider Provider = "general" + GitHubProvider Provider = "github" + GoogleProvider Provider = "google" + KubernetesProvider Provider = "kubernetes" + OracleProvider Provider = "oracle" + OpenStackProvider Provider = "openstack" + NifcloudProvider Provider = "nifcloud" + CloudStackProvider Provider = "cloudstack" +) + +func RuleProviderToString(provider Provider) string { + return strings.ToUpper(string(provider)) +} + +func (p Provider) DisplayName() string { + switch p { + case "aws": + return strings.ToUpper(string(p)) + case "digitalocean": + return "Digital Ocean" + case "github": + return "GitHub" + case "openstack": + return "OpenStack" + case "cloudstack": + return "Cloudstack" + default: + return cases.Title(language.English).String(strings.ToLower(string(p))) + } +} +func (p Provider) ConstName() string { + return strings.ReplaceAll(p.DisplayName(), " ", "") +} diff --git a/pkg/rego/build.go b/pkg/rego/build.go new file mode 100644 index 000000000000..2f975727633e --- /dev/null +++ b/pkg/rego/build.go @@ -0,0 +1,84 @@ +package rego + +import ( + "io/fs" + "path/filepath" + "strings" + + "github.com/aquasecurity/trivy/pkg/rego/schemas" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/util" +) + +func BuildSchemaSetFromPolicies(policies map[string]*ast.Module, paths []string, fsys fs.FS) (*ast.SchemaSet, bool, error) { + schemaSet := ast.NewSchemaSet() + schemaSet.Put(ast.MustParseRef("schema.input"), map[string]interface{}{}) // for backwards compat only + var customFound bool + for _, policy := range policies { + for _, annotation := range policy.Annotations { + for _, ss := range annotation.Schemas { + schemaName, err := ss.Schema.Ptr() + if err != nil { + continue + } + if schemaName != "input" { + if schema, ok := schemas.SchemaMap[types.Source(schemaName)]; ok { + customFound = true + schemaSet.Put(ast.MustParseRef(ss.Schema.String()), util.MustUnmarshalJSON([]byte(schema))) + } else { + b, err := findSchemaInFS(paths, fsys, schemaName) + if err != nil { + return schemaSet, true, err + } + if b != nil { + customFound = true + schemaSet.Put(ast.MustParseRef(ss.Schema.String()), util.MustUnmarshalJSON(b)) + } + } + } + } + } + } + + return schemaSet, customFound, nil +} + +// findSchemaInFS tries to find the schema anywhere in the specified FS +func findSchemaInFS(paths []string, srcFS fs.FS, schemaName string) ([]byte, error) { + var schema []byte + for _, path := range paths { + if err := fs.WalkDir(srcFS, sanitisePath(path), func(path string, info fs.DirEntry, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + if !IsJSONFile(info.Name()) { + return nil + } + if info.Name() == schemaName+".json" { + schema, err = fs.ReadFile(srcFS, filepath.ToSlash(path)) + if err != nil { + return err + } + return nil + } + return nil + }); err != nil { + return nil, err 
+ } + } + return schema, nil +} + +func IsJSONFile(name string) bool { + return strings.HasSuffix(name, ".json") +} + +func sanitisePath(path string) string { + vol := filepath.VolumeName(path) + path = strings.TrimPrefix(path, vol) + return strings.TrimPrefix(strings.TrimPrefix(filepath.ToSlash(path), "./"), "/") +} diff --git a/pkg/rego/convert/anonymous.go b/pkg/rego/convert/anonymous.go new file mode 100644 index 000000000000..3563b0fccfc8 --- /dev/null +++ b/pkg/rego/convert/anonymous.go @@ -0,0 +1,47 @@ +package convert + +import ( + "reflect" +) + +var converterInterface = reflect.TypeOf((*Converter)(nil)).Elem() + +func anonymousToRego(inputValue reflect.Value) interface{} { + + if inputValue.IsZero() { + return nil + } + + for inputValue.Type().Kind() == reflect.Interface { + if inputValue.IsNil() { + return nil + } + inputValue = inputValue.Elem() + } + + if inputValue.Type().Implements(converterInterface) { + returns := inputValue.MethodByName("ToRego").Call(nil) + return returns[0].Interface() + } + + for inputValue.Type().Kind() == reflect.Ptr { + if inputValue.IsNil() { + return nil + } + inputValue = inputValue.Elem() + } + + if inputValue.Type().Implements(converterInterface) { + returns := inputValue.MethodByName("ToRego").Call(nil) + return returns[0].Interface() + } + + switch kind := inputValue.Type().Kind(); kind { + case reflect.Struct: + return StructToRego(inputValue) + case reflect.Slice: + return SliceToRego(inputValue) + } + + return nil +} diff --git a/pkg/rego/convert/converter.go b/pkg/rego/convert/converter.go new file mode 100644 index 000000000000..e132d6875aa2 --- /dev/null +++ b/pkg/rego/convert/converter.go @@ -0,0 +1,5 @@ +package convert + +type Converter interface { + ToRego() interface{} +} diff --git a/pkg/rego/convert/slice.go b/pkg/rego/convert/slice.go new file mode 100644 index 000000000000..8bb68a7fb551 --- /dev/null +++ b/pkg/rego/convert/slice.go @@ -0,0 +1,32 @@ +package convert + +import ( + "reflect" +) + +func SliceToRego(inputValue reflect.Value) []interface{} { + + // make sure we have a struct literal + for inputValue.Type().Kind() == reflect.Ptr { + if inputValue.IsNil() { + return nil + } + inputValue = inputValue.Elem() + } + if inputValue.Type().Kind() != reflect.Slice { + panic("not a slice") + } + + output := make([]interface{}, inputValue.Len()) + + for i := 0; i < inputValue.Len(); i++ { + val := inputValue.Index(i) + if val.Type().Kind() == reflect.Ptr && val.IsZero() { + output[i] = nil + continue + } + output[i] = anonymousToRego(val) + } + + return output +} diff --git a/pkg/rego/convert/slice_test.go b/pkg/rego/convert/slice_test.go new file mode 100644 index 000000000000..74f10ec1e7bf --- /dev/null +++ b/pkg/rego/convert/slice_test.go @@ -0,0 +1,57 @@ +package convert + +import ( + "reflect" + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" +) + +func Test_SliceConversion(t *testing.T) { + input := []struct { + X string + Y int + Z struct { + A float64 + } + }{ + {}, + } + input[0].Z.A = 123 + converted := SliceToRego(reflect.ValueOf(input)) + assert.Equal(t, []interface{}{map[string]interface{}{"z": map[string]interface{}{}}}, converted) +} + +func Test_SliceTypesConversion(t *testing.T) { + input := []types.StringValue{ + types.String("test1", types.NewTestMisconfigMetadata()), + types.String("test2", types.NewTestMisconfigMetadata()), + } + converted := SliceToRego(reflect.ValueOf(input)) + assert.Equal(t, []interface{}{ + map[string]interface{}{ + 
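// each StringValue is expected to flatten to its raw value plus the fields of its test metadata +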
"value": "test1", + "filepath": "test.test", + "startline": 123, + "endline": 123, + "sourceprefix": "", + "managed": true, + "explicit": false, + "fskey": "", + "resource": "", + }, + map[string]interface{}{ + "value": "test2", + "filepath": "test.test", + "startline": 123, + "endline": 123, + "sourceprefix": "", + "managed": true, + "explicit": false, + "fskey": "", + "resource": "", + }, + }, converted) +} diff --git a/pkg/rego/convert/struct.go b/pkg/rego/convert/struct.go new file mode 100644 index 000000000000..0dd5d44ab4a4 --- /dev/null +++ b/pkg/rego/convert/struct.go @@ -0,0 +1,68 @@ +package convert + +import ( + "reflect" + "strings" + + "github.com/aquasecurity/trivy/pkg/types" +) + +type metadataProvider interface { + GetMetadata() types.Metadata +} + +var metadataInterface = reflect.TypeOf((*metadataProvider)(nil)).Elem() + +func StructToRego(inputValue reflect.Value) map[string]interface{} { + + // make sure we have a struct literal + for inputValue.Type().Kind() == reflect.Ptr || inputValue.Type().Kind() == reflect.Interface { + if inputValue.IsNil() { + return nil + } + inputValue = inputValue.Elem() + } + if inputValue.Type().Kind() != reflect.Struct { + panic("not a struct") + } + + output := make(map[string]interface{}, inputValue.NumField()) + + for i := 0; i < inputValue.NumField(); i++ { + field := inputValue.Field(i) + typ := inputValue.Type().Field(i) + name := typ.Name + if !typ.IsExported() { + continue + } + if field.Interface() == nil { + continue + } + val := anonymousToRego(reflect.ValueOf(field.Interface())) + if val == nil { + continue + } + key := strings.ToLower(name) + if _, ok := field.Interface().(types.MisconfigMetadata); key == "metadata" && ok { + continue + } + output[strings.ToLower(name)] = val + } + + if inputValue.Type().Implements(metadataInterface) { + returns := inputValue.MethodByName("GetMetadata").Call(nil) + if metadata, ok := returns[0].Interface().(types.MisconfigMetadata); ok { + output["__defsec_metadata"] = metadata.ToRego() + } + } else { + metaVal := inputValue.FieldByName("Metadata") + if metaVal.Kind() == reflect.Struct { + if meta, ok := metaVal.Interface().(types.MisconfigMetadata); ok { + output["__defsec_metadata"] = meta.ToRego() + } + } + + } + + return output +} diff --git a/pkg/rego/convert/struct_test.go b/pkg/rego/convert/struct_test.go new file mode 100644 index 000000000000..ca72efabdedd --- /dev/null +++ b/pkg/rego/convert/struct_test.go @@ -0,0 +1,21 @@ +package convert + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_StructConversion(t *testing.T) { + input := struct { + X string + Y int + Z struct { + A float64 + } + }{} + input.Z.A = 123 + converted := StructToRego(reflect.ValueOf(input)) + assert.Equal(t, map[string]interface{}{"z": map[string]interface{}{}}, converted) +} diff --git a/pkg/rego/custom.go b/pkg/rego/custom.go new file mode 100644 index 000000000000..c15b05a4577f --- /dev/null +++ b/pkg/rego/custom.go @@ -0,0 +1,109 @@ +package rego + +import ( + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/rego" + "github.com/open-policy-agent/opa/types" +) + +func init() { + rego.RegisterBuiltin2(®o.Function{ + Name: "result.new", + Decl: types.NewFunction(types.Args(types.S, types.A), types.A), + }, + createResult, + ) + + rego.RegisterBuiltin1(®o.Function{ + Name: "isManaged", + Decl: types.NewFunction(types.Args(types.A), types.B), + }, + func(c rego.BuiltinContext, resource *ast.Term) 
(*ast.Term, error) { + metadata, err := createResult(c, ast.StringTerm(""), resource) + if err != nil { + return nil, err + } + return metadata.Get(ast.StringTerm("managed")), nil + }, + ) +} + +func createResult(ctx rego.BuiltinContext, msg, cause *ast.Term) (*ast.Term, error) { + + metadata := map[string]*ast.Term{ + "startline": ast.IntNumberTerm(0), + "endline": ast.IntNumberTerm(0), + "sourceprefix": ast.StringTerm(""), + "filepath": ast.StringTerm(""), + "explicit": ast.BooleanTerm(false), + "managed": ast.BooleanTerm(true), + "fskey": ast.StringTerm(""), + "resource": ast.StringTerm(""), + "parent": ast.NullTerm(), + } + if msg != nil { + metadata["msg"] = msg + } + + // universal + input := cause.Get(ast.StringTerm("__defsec_metadata")) + if input == nil { + // docker + input = cause + } + metadata = updateMetadata(metadata, input) + + if term := input.Get(ast.StringTerm("parent")); term != nil { + var err error + metadata["parent"], err = createResult(ctx, nil, term) + if err != nil { + return nil, err + } + } + + var values [][2]*ast.Term + for key, val := range metadata { + values = append(values, [2]*ast.Term{ + ast.StringTerm(key), + val, + }) + } + return ast.ObjectTerm(values...), nil +} + +func updateMetadata(metadata map[string]*ast.Term, input *ast.Term) map[string]*ast.Term { + if term := input.Get(ast.StringTerm("startline")); term != nil { + metadata["startline"] = term + } + if term := input.Get(ast.StringTerm("StartLine")); term != nil { + metadata["startline"] = term + } + if term := input.Get(ast.StringTerm("endline")); term != nil { + metadata["endline"] = term + } + if term := input.Get(ast.StringTerm("EndLine")); term != nil { + metadata["endline"] = term + } + if term := input.Get(ast.StringTerm("filepath")); term != nil { + metadata["filepath"] = term + } + if term := input.Get(ast.StringTerm("sourceprefix")); term != nil { + metadata["sourceprefix"] = term + } + if term := input.Get(ast.StringTerm("Path")); term != nil { + metadata["filepath"] = term + } + if term := input.Get(ast.StringTerm("explicit")); term != nil { + metadata["explicit"] = term + } + if term := input.Get(ast.StringTerm("managed")); term != nil { + metadata["managed"] = term + } + if term := input.Get(ast.StringTerm("fskey")); term != nil { + metadata["fskey"] = term + } + if term := input.Get(ast.StringTerm("resource")); term != nil { + metadata["resource"] = term + } + return metadata +} diff --git a/pkg/rego/embed.go b/pkg/rego/embed.go new file mode 100644 index 000000000000..521b79ee9b5f --- /dev/null +++ b/pkg/rego/embed.go @@ -0,0 +1,107 @@ +package rego + +import ( + "context" + "io/fs" + "path/filepath" + "strings" + + rules2 "github.com/aquasecurity/trivy-policies" + "github.com/aquasecurity/trivy/pkg/trules" + "github.com/open-policy-agent/opa/ast" +) + +func init() { + + modules, err := LoadEmbeddedPolicies() + if err != nil { + // we should panic as the policies were not embedded properly + panic(err) + } + loadedLibs, err := LoadEmbeddedLibraries() + if err != nil { + panic(err) + } + for name, policy := range loadedLibs { + modules[name] = policy + } + + RegisterRegoRules(modules) +} + +func RegisterRegoRules(modules map[string]*ast.Module) { + ctx := context.TODO() + + schemaSet, _, _ := BuildSchemaSetFromPolicies(modules, nil, nil) + + compiler := ast.NewCompiler(). + WithSchemas(schemaSet). + WithCapabilities(nil). 
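+ // surface a compile error when rego annotations do not type-check against the schemas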
+ WithUseTypeCheckAnnotations(true) + + compiler.Compile(modules) + if compiler.Failed() { + // we should panic as the embedded rego policies are syntactically incorrect... + panic(compiler.Errors) + } + + retriever := NewMetadataRetriever(compiler) + for _, module := range modules { + metadata, err := retriever.RetrieveMetadata(ctx, module) + if err != nil { + continue + } + if metadata.AVDID == "" { + continue + } + trules.Register( + metadata.ToRule(), + ) + } +} + +func LoadEmbeddedPolicies() (map[string]*ast.Module, error) { + return LoadPoliciesFromDirs(rules2.EmbeddedPolicyFileSystem, ".") +} + +func LoadEmbeddedLibraries() (map[string]*ast.Module, error) { + return LoadPoliciesFromDirs(rules2.EmbeddedLibraryFileSystem, ".") +} + +func LoadPoliciesFromDirs(target fs.FS, paths ...string) (map[string]*ast.Module, error) { + modules := make(map[string]*ast.Module) + for _, path := range paths { + if err := fs.WalkDir(target, sanitisePath(path), func(path string, info fs.DirEntry, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + if strings.HasSuffix(filepath.Dir(filepath.ToSlash(path)), "policies/advanced/optional") { + return fs.SkipDir + } + + if !IsRegoFile(info.Name()) || IsDotFile(info.Name()) { + return nil + } + data, err := fs.ReadFile(target, filepath.ToSlash(path)) + if err != nil { + return err + } + module, err := ast.ParseModuleWithOpts(path, string(data), ast.ParserOptions{ + ProcessAnnotation: true, + }) + if err != nil { + // s.debug.Log("Failed to load module: %s, err: %s", filepath.ToSlash(path), err.Error()) + return err + } + modules[path] = module + return nil + }); err != nil { + return nil, err + } + } + return modules, nil +} diff --git a/pkg/rego/embed_test.go b/pkg/rego/embed_test.go new file mode 100644 index 000000000000..ed82d5a28ba0 --- /dev/null +++ b/pkg/rego/embed_test.go @@ -0,0 +1,122 @@ +package rego + +import ( + "testing" + + rules2 "github.com/aquasecurity/trivy-policies" + "github.com/aquasecurity/trivy/pkg/trules" + "github.com/open-policy-agent/opa/ast" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_EmbeddedLoading(t *testing.T) { + + frameworkRules := trules.GetRegistered() + var found bool + for _, rule := range frameworkRules { + if rule.GetRule().RegoPackage != "" { + found = true + } + } + assert.True(t, found, "no embedded rego policies were registered as trules") +} + +func Test_RegisterRegoRules(t *testing.T) { + var testCases = []struct { + name string + inputPolicy string + expectedError bool + }{ + { + name: "happy path old single schema", + inputPolicy: `# METADATA +# title: "dummy title" +# description: "some description" +# scope: package +# schemas: +# - input: schema["input"] +# custom: +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS1234 +deny[res]{ + res := true +}`, + }, + { + name: "happy path new builtin single schema", + inputPolicy: `# METADATA +# title: "dummy title" +# description: "some description" +# scope: package +# schemas: +# - input: schema["dockerfile"] +# custom: +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS1234 +deny[res]{ + res := true +}`, + }, + { + name: "happy path new multiple schemas", + inputPolicy: `# METADATA +# title: "dummy title" +# description: "some description" +# scope: package +# schemas: +# - input: schema["dockerfile"] +# - input: schema["kubernetes"] +# custom: +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS1234 +deny[res]{ + 
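# body is irrelevant here; the test only checks that registration succeeds +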
res := true +}`, + }, + { + name: "sad path schema does not exist", + inputPolicy: `# METADATA +# title: "dummy title" +# description: "some description" +# scope: package +# schemas: +# - input: schema["invalid schema"] +# custom: +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS1234 +deny[res]{ + res := true +}`, + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + policies, err := LoadPoliciesFromDirs(rules2.EmbeddedLibraryFileSystem, ".") + require.NoError(t, err) + newRule, err := ast.ParseModuleWithOpts("/trules/newrule.rego", tc.inputPolicy, ast.ParserOptions{ + ProcessAnnotation: true, + }) + require.NoError(t, err) + + policies["/trules/newrule.rego"] = newRule + switch { + case tc.expectedError: + assert.Panics(t, func() { + RegisterRegoRules(policies) + }, tc.name) + default: + RegisterRegoRules(policies) + } + }) + } +} diff --git a/pkg/rego/exceptions.go b/pkg/rego/exceptions.go new file mode 100644 index 000000000000..ab202ec00d19 --- /dev/null +++ b/pkg/rego/exceptions.go @@ -0,0 +1,33 @@ +package rego + +import ( + "context" + "fmt" +) + +func (s *Scanner) isIgnored(ctx context.Context, namespace string, ruleName string, input interface{}) (bool, error) { + if ignored, err := s.isNamespaceIgnored(ctx, namespace, input); err != nil { + return false, err + } else if ignored { + return true, nil + } + return s.isRuleIgnored(ctx, namespace, ruleName, input) +} + +func (s *Scanner) isNamespaceIgnored(ctx context.Context, namespace string, input interface{}) (bool, error) { + exceptionQuery := fmt.Sprintf("data.namespace.exceptions.exception[_] == %q", namespace) + result, _, err := s.runQuery(ctx, exceptionQuery, input, true) + if err != nil { + return false, fmt.Errorf("query namespace exceptions: %w", err) + } + return result.Allowed(), nil +} + +func (s *Scanner) isRuleIgnored(ctx context.Context, namespace string, ruleName string, input interface{}) (bool, error) { + exceptionQuery := fmt.Sprintf("endswith(%q, data.%s.exception[_][_])", ruleName, namespace) + result, _, err := s.runQuery(ctx, exceptionQuery, input, true) + if err != nil { + return false, err + } + return result.Allowed(), nil +} diff --git a/pkg/rego/load.go b/pkg/rego/load.go new file mode 100644 index 000000000000..909510e8f505 --- /dev/null +++ b/pkg/rego/load.go @@ -0,0 +1,210 @@ +package rego + +import ( + "context" + "fmt" + "io" + "io/fs" + "strings" + + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/bundle" +) + +func IsRegoFile(name string) bool { + return strings.HasSuffix(name, bundle.RegoExt) && !strings.HasSuffix(name, "_test"+bundle.RegoExt) +} + +func IsDotFile(name string) bool { + return strings.HasPrefix(name, ".") +} + +func (s *Scanner) loadPoliciesFromReaders(readers []io.Reader) (map[string]*ast.Module, error) { + modules := make(map[string]*ast.Module) + for i, r := range readers { + moduleName := fmt.Sprintf("reader_%d", i) + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + module, err := ast.ParseModuleWithOpts(moduleName, string(data), ast.ParserOptions{ + ProcessAnnotation: true, + }) + if err != nil { + return nil, err + } + modules[moduleName] = module + } + return modules, nil +} + +func (s *Scanner) loadEmbedded(enableEmbeddedLibraries, enableEmbeddedPolicies bool) error { + if enableEmbeddedLibraries { + loadedLibs, errLoad := LoadEmbeddedLibraries() + if errLoad != nil { + return fmt.Errorf("failed to load embedded rego libraries: 
%w", errLoad) + } + for name, policy := range loadedLibs { + s.policies[name] = policy + } + s.debug.Log("Loaded %d embedded libraries.", len(loadedLibs)) + } + + if enableEmbeddedPolicies { + loaded, err := LoadEmbeddedPolicies() + if err != nil { + return fmt.Errorf("failed to load embedded rego policies: %w", err) + } + for name, policy := range loaded { + s.policies[name] = policy + } + s.debug.Log("Loaded %d embedded policies.", len(loaded)) + } + + return nil +} + +func (s *Scanner) LoadPolicies(enableEmbeddedLibraries, enableEmbeddedPolicies bool, srcFS fs.FS, paths []string, readers []io.Reader) error { + + if s.policies == nil { + s.policies = make(map[string]*ast.Module) + } + + if s.policyFS != nil { + s.debug.Log("Overriding filesystem for policies!") + srcFS = s.policyFS + } + + if err := s.loadEmbedded(enableEmbeddedLibraries, enableEmbeddedPolicies); err != nil { + return err + } + + var err error + if len(paths) > 0 { + loaded, err := LoadPoliciesFromDirs(srcFS, paths...) + if err != nil { + return fmt.Errorf("failed to load rego policies from %s: %w", paths, err) + } + for name, policy := range loaded { + s.policies[name] = policy + } + s.debug.Log("Loaded %d policies from disk.", len(loaded)) + } + + if len(readers) > 0 { + loaded, err := s.loadPoliciesFromReaders(readers) + if err != nil { + return fmt.Errorf("failed to load rego policies from reader(s): %w", err) + } + for name, policy := range loaded { + s.policies[name] = policy + } + s.debug.Log("Loaded %d policies from reader(s).", len(loaded)) + } + + // gather namespaces + uniq := make(map[string]struct{}) + for _, module := range s.policies { + namespace := getModuleNamespace(module) + uniq[namespace] = struct{}{} + } + var namespaces []string + for namespace := range uniq { + namespaces = append(namespaces, namespace) + } + + dataFS := srcFS + if s.dataFS != nil { + s.debug.Log("Overriding filesystem for data!") + dataFS = s.dataFS + } + store, err := initStore(dataFS, s.dataDirs, namespaces) + if err != nil { + return fmt.Errorf("unable to load data: %w", err) + } + s.store = store + + return s.compilePolicies(srcFS, paths) +} + +func (s *Scanner) prunePoliciesWithError(compiler *ast.Compiler) error { + if len(compiler.Errors) > s.regoErrorLimit { + s.debug.Log("Error(s) occurred while loading policies") + return compiler.Errors + } + + for _, e := range compiler.Errors { + s.debug.Log("Error occurred while parsing: %s, %s", e.Location.File, e.Error()) + delete(s.policies, e.Location.File) + } + return nil +} + +func (s *Scanner) compilePolicies(srcFS fs.FS, paths []string) error { + + schemaSet, custom, err := BuildSchemaSetFromPolicies(s.policies, paths, srcFS) + if err != nil { + return err + } + if custom { + s.inputSchema = nil // discard auto detected input schema in favour of policy defined schema + } + + compiler := ast.NewCompiler(). + WithUseTypeCheckAnnotations(true). + WithCapabilities(ast.CapabilitiesForThisVersion()). 
+ WithSchemas(schemaSet) + + compiler.Compile(s.policies) + if compiler.Failed() { + if err := s.prunePoliciesWithError(compiler); err != nil { + return err + } + return s.compilePolicies(srcFS, paths) + } + retriever := NewMetadataRetriever(compiler) + + if err := s.filterModules(retriever); err != nil { + return err + } + if s.inputSchema != nil { + schemaSet := ast.NewSchemaSet() + schemaSet.Put(ast.MustParseRef("schema.input"), s.inputSchema) + compiler.WithSchemas(schemaSet) + compiler.Compile(s.policies) + if compiler.Failed() { + if err := s.prunePoliciesWithError(compiler); err != nil { + return err + } + return s.compilePolicies(srcFS, paths) + } + } + s.compiler = compiler + s.retriever = retriever + return nil +} + +func (s *Scanner) filterModules(retriever *MetadataRetriever) error { + + filtered := make(map[string]*ast.Module) + for name, module := range s.policies { + meta, err := retriever.RetrieveMetadata(context.TODO(), module) + if err != nil { + return err + } + if len(meta.InputOptions.Selectors) == 0 { + s.debug.Log("WARNING: Module %s has no input selectors - it will be loaded for all inputs!", name) + filtered[name] = module + continue + } + for _, selector := range meta.InputOptions.Selectors { + if selector.Type == string(s.sourceType) { + filtered[name] = module + break + } + } + } + + s.policies = filtered + return nil +} diff --git a/pkg/rego/load_test.go b/pkg/rego/load_test.go new file mode 100644 index 000000000000..1888300fe514 --- /dev/null +++ b/pkg/rego/load_test.go @@ -0,0 +1,46 @@ +package rego + +import ( + "bytes" + "embed" + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +//go:embed all:testdata/policies +var testEmbedFS embed.FS + +func Test_RegoScanning_WithSomeInvalidPolicies(t *testing.T) { + t.Run("allow no errors", func(t *testing.T) { + var debugBuf bytes.Buffer + scanner := NewScanner(types.SourceDockerfile) + scanner.SetRegoErrorLimit(0) + scanner.SetDebugWriter(&debugBuf) + p, _ := LoadPoliciesFromDirs(testEmbedFS, ".") + require.NotNil(t, p) + + scanner.policies = p + err := scanner.compilePolicies(testEmbedFS, []string{"policies"}) + require.ErrorContains(t, err, `want (one of): ["Cmd" "EndLine" "Flags" "JSON" "Original" "Path" "Stage" "StartLine" "SubCmd" "Value"]`) + assert.Contains(t, debugBuf.String(), "Error(s) occurred while loading policies") + }) + + t.Run("allow up to max 1 error", func(t *testing.T) { + var debugBuf bytes.Buffer + scanner := NewScanner(types.SourceDockerfile) + scanner.SetRegoErrorLimit(1) + scanner.SetDebugWriter(&debugBuf) + + p, _ := LoadPoliciesFromDirs(testEmbedFS, ".") + scanner.policies = p + + err := scanner.compilePolicies(testEmbedFS, []string{"policies"}) + require.NoError(t, err) + + assert.Contains(t, debugBuf.String(), "Error occurred while parsing: testdata/policies/invalid.rego, testdata/policies/invalid.rego:7") + }) + +} diff --git a/pkg/rego/metadata.go b/pkg/rego/metadata.go new file mode 100644 index 000000000000..238e78ee618b --- /dev/null +++ b/pkg/rego/metadata.go @@ -0,0 +1,393 @@ +package rego + +import ( + "context" + "fmt" + "strings" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/severity" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + "github.com/mitchellh/mapstructure" + 
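// opa/ast and opa/rego below are used to read annotations and run metadata queries +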
"github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/rego" +) + +type StaticMetadata struct { + ID string + AVDID string + Title string + ShortCode string + Aliases []string + Description string + Severity string + RecommendedActions string + PrimaryURL string + References []string + InputOptions InputOptions + Package string + Frameworks map[framework.Framework][]string + Provider string + Service string + Library bool + CloudFormation *scan.EngineMetadata + Terraform *scan.EngineMetadata +} + +func NewStaticMetadata(pkgPath string, inputOpt InputOptions) *StaticMetadata { + return &StaticMetadata{ + ID: "N/A", + Title: "N/A", + Severity: "UNKNOWN", + Description: fmt.Sprintf("Rego module: %s", pkgPath), + Package: pkgPath, + InputOptions: inputOpt, + Frameworks: make(map[framework.Framework][]string), + } +} + +func (sm *StaticMetadata) Update(meta map[string]any) error { + + upd := func(field *string, key string) { + if raw, ok := meta[key]; ok { + *field = fmt.Sprintf("%s", raw) + } + } + + upd(&sm.ID, "id") + upd(&sm.AVDID, "avd_id") + upd(&sm.Title, "title") + upd(&sm.ShortCode, "short_code") + upd(&sm.Description, "description") + upd(&sm.Service, "service") + upd(&sm.Provider, "provider") + upd(&sm.RecommendedActions, "recommended_actions") + upd(&sm.RecommendedActions, "recommended_action") + + if raw, ok := meta["severity"]; ok { + sm.Severity = strings.ToUpper(fmt.Sprintf("%s", raw)) + } + + if raw, ok := meta["library"]; ok { + if lib, ok := raw.(bool); ok { + sm.Library = lib + } + } + + if raw, ok := meta["url"]; ok { + sm.References = append(sm.References, fmt.Sprintf("%s", raw)) + } + if raw, ok := meta["frameworks"]; ok { + frameworks, ok := raw.(map[string][]string) + if !ok { + return fmt.Errorf("failed to parse framework metadata: not an object") + } + for fw, sections := range frameworks { + sm.Frameworks[framework.Framework(fw)] = sections + } + } + if raw, ok := meta["related_resources"]; ok { + if relatedResources, ok := raw.([]map[string]any); ok { + for _, relatedResource := range relatedResources { + if raw, ok := relatedResource["ref"]; ok { + sm.References = append(sm.References, fmt.Sprintf("%s", raw)) + } + } + } else if relatedResources, ok := raw.([]string); ok { + sm.References = append(sm.References, relatedResources...) 
+ } + } + + sm.updateAliases(meta) + + var err error + if sm.CloudFormation, err = NewEngineMetadata("cloud_formation", meta); err != nil { + return err + } + + if sm.Terraform, err = NewEngineMetadata("terraform", meta); err != nil { + return err + } + + return nil +} + +func (sm *StaticMetadata) updateAliases(meta map[string]any) { + if raw, ok := meta["aliases"]; ok { + if aliases, ok := raw.([]interface{}); ok { + for _, a := range aliases { + sm.Aliases = append(sm.Aliases, fmt.Sprintf("%s", a)) + } + } + } +} + +func (sm *StaticMetadata) FromAnnotations(annotations *ast.Annotations) error { + sm.Title = annotations.Title + sm.Description = annotations.Description + for _, resource := range annotations.RelatedResources { + if !resource.Ref.IsAbs() { + continue + } + sm.References = append(sm.References, resource.Ref.String()) + } + if custom := annotations.Custom; custom != nil { + if err := sm.Update(custom); err != nil { + return err + } + } + if len(annotations.RelatedResources) > 0 { + sm.PrimaryURL = annotations.RelatedResources[0].Ref.String() + } + return nil +} + +func NewEngineMetadata(schema string, meta map[string]interface{}) (*scan.EngineMetadata, error) { + var sMap map[string]interface{} + if raw, ok := meta[schema]; ok { + sMap, ok = raw.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("failed to parse %s metadata: not an object", schema) + } + } + + var em scan.EngineMetadata + if val, ok := sMap["good_examples"].(string); ok { + em.GoodExamples = []string{val} + } + if val, ok := sMap["bad_examples"].(string); ok { + em.BadExamples = []string{val} + } + if val, ok := sMap["links"].(string); ok { + em.Links = []string{val} + } + if val, ok := sMap["remediation_markdown"].(string); ok { + em.RemediationMarkdown = val + } + + return &em, nil +} + +type InputOptions struct { + Combined bool + Selectors []Selector +} + +type Selector struct { + Type string + Subtypes []SubType +} + +type SubType struct { + Group string + Version string + Kind string + Namespace string + Service string // only for cloud + Provider string // only for cloud +} + +func (m StaticMetadata) ToRule() scan.Rule { + + provider := "generic" + if m.Provider != "" { + provider = m.Provider + } else if len(m.InputOptions.Selectors) > 0 { + provider = m.InputOptions.Selectors[0].Type + } + service := "general" + if m.Service != "" { + service = m.Service + } + + return scan.Rule{ + AVDID: m.AVDID, + Aliases: append(m.Aliases, m.ID), + ShortCode: m.ShortCode, + Summary: m.Title, + Explanation: m.Description, + Impact: "", + Resolution: m.RecommendedActions, + Provider: providers.Provider(provider), + Service: service, + Links: m.References, + Severity: severity.Severity(m.Severity), + RegoPackage: m.Package, + Frameworks: m.Frameworks, + CloudFormation: m.CloudFormation, + Terraform: m.Terraform, + } +} + +type MetadataRetriever struct { + compiler *ast.Compiler +} + +func NewMetadataRetriever(compiler *ast.Compiler) *MetadataRetriever { + return &MetadataRetriever{ + compiler: compiler, + } +} + +func (m *MetadataRetriever) findPackageAnnotations(module *ast.Module) *ast.Annotations { + annotationSet := m.compiler.GetAnnotationSet() + if annotationSet == nil { + return nil + } + for _, annotation := range annotationSet.Flatten() { + if annotation.GetPackage().Path.String() != module.Package.Path.String() || annotation.Annotations.Scope != "package" { + continue + } + return annotation.Annotations + } + return nil +} + +func (m *MetadataRetriever) RetrieveMetadata(ctx context.Context, module 
*ast.Module, contents ...any) (*StaticMetadata, error) { + + metadata := NewStaticMetadata( + module.Package.Path.String(), + m.queryInputOptions(ctx, module), + ) + + // read metadata from official rego annotations if possible + if annotations := m.findPackageAnnotations(module); annotations != nil { + if err := metadata.FromAnnotations(annotations); err != nil { + return nil, err + } + return metadata, nil + } + + // otherwise, try to read metadata from the rego module itself - we used to do this before annotations were a thing + namespace := getModuleNamespace(module) + metadataQuery := fmt.Sprintf("data.%s.__rego_metadata__", namespace) + + options := []func(*rego.Rego){ + rego.Query(metadataQuery), + rego.Compiler(m.compiler), + rego.Capabilities(nil), + } + // support dynamic metadata fields + for _, in := range contents { + options = append(options, rego.Input(in)) + } + + instance := rego.New(options...) + set, err := instance.Eval(ctx) + if err != nil { + return nil, err + } + + // no metadata supplied + if set == nil { + return metadata, nil + } + + if len(set) != 1 { + return nil, fmt.Errorf("failed to parse metadata: unexpected set length") + } + if len(set[0].Expressions) != 1 { + return nil, fmt.Errorf("failed to parse metadata: unexpected expression length") + } + expression := set[0].Expressions[0] + meta, ok := expression.Value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("failed to parse metadata: not an object") + } + + if err := metadata.Update(meta); err != nil { + return nil, err + } + + return metadata, nil +} + +// nolint: cyclop +func (m *MetadataRetriever) queryInputOptions(ctx context.Context, module *ast.Module) InputOptions { + + options := InputOptions{ + Combined: false, + Selectors: nil, + } + + var metadata map[string]interface{} + + // read metadata from official rego annotations if possible + if annotation := m.findPackageAnnotations(module); annotation != nil && annotation.Custom != nil { + if input, ok := annotation.Custom["input"]; ok { + if mapped, ok := input.(map[string]interface{}); ok { + metadata = mapped + } + } + } + + if metadata == nil { + + namespace := getModuleNamespace(module) + inputOptionQuery := fmt.Sprintf("data.%s.__rego_input__", namespace) + instance := rego.New( + rego.Query(inputOptionQuery), + rego.Compiler(m.compiler), + rego.Capabilities(nil), + ) + set, err := instance.Eval(ctx) + if err != nil { + return options + } + + if len(set) != 1 { + return options + } + if len(set[0].Expressions) != 1 { + return options + } + expression := set[0].Expressions[0] + meta, ok := expression.Value.(map[string]interface{}) + if !ok { + return options + } + metadata = meta + } + + if raw, ok := metadata["combine"]; ok { + if combine, ok := raw.(bool); ok { + options.Combined = combine + } + } + + if raw, ok := metadata["selector"]; ok { + if each, ok := raw.([]interface{}); ok { + for _, rawSelector := range each { + var selector Selector + if selectorMap, ok := rawSelector.(map[string]interface{}); ok { + if rawType, ok := selectorMap["type"]; ok { + selector.Type = fmt.Sprintf("%s", rawType) + // handle backward compatibility for "defsec" source type which is now "cloud" + if selector.Type == string(defsecTypes.SourceDefsec) { + selector.Type = string(defsecTypes.SourceCloud) + } + } + if subType, ok := selectorMap["subtypes"].([]interface{}); ok { + for _, subT := range subType { + if st, ok := subT.(map[string]interface{}); ok { + s := SubType{} + _ = mapstructure.Decode(st, &s) + selector.Subtypes = 
append(selector.Subtypes, s) + } + } + } + } + options.Selectors = append(options.Selectors, selector) + } + } + } + + return options + +} + +func getModuleNamespace(module *ast.Module) string { + return strings.TrimPrefix(module.Package.Path.String(), "data.") +} diff --git a/pkg/rego/metadata_test.go b/pkg/rego/metadata_test.go new file mode 100644 index 000000000000..056daf94ddcf --- /dev/null +++ b/pkg/rego/metadata_test.go @@ -0,0 +1,191 @@ +package rego + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_UpdateStaticMetadata(t *testing.T) { + t.Run("happy", func(t *testing.T) { + sm := StaticMetadata{ + ID: "i", + AVDID: "a", + Title: "t", + ShortCode: "sc", + Aliases: []string{"a", "b", "c"}, + Description: "d", + Severity: "s", + RecommendedActions: "ra", + PrimaryURL: "pu", + References: []string{"r"}, + Package: "pkg", + Provider: "pr", + Service: "srvc", + Library: false, + Frameworks: map[framework.Framework][]string{ + framework.Default: {"dd"}, + }, + } + + require.NoError(t, sm.Update( + map[string]any{ + "id": "i_n", + "avd_id": "a_n", + "title": "t_n", + "short_code": "sc_n", + "aliases": []any{"a_n", "b_n", "c_n"}, + "description": "d_n", + "service": "srvc_n", + "provider": "pr_n", + "recommended_actions": "ra_n", + "severity": "s_n", + "library": true, + "url": "r_n", + "frameworks": map[string][]string{ + "all": {"aa"}, + }, + }, + )) + + expected := StaticMetadata{ + ID: "i_n", + AVDID: "a_n", + Title: "t_n", + ShortCode: "sc_n", + Aliases: []string{"a", "b", "c", "a_n", "b_n", "c_n"}, + Description: "d_n", + Severity: "S_N", + RecommendedActions: "ra_n", + PrimaryURL: "pu", + References: []string{"r", "r_n"}, + Package: "pkg", + Provider: "pr_n", + Service: "srvc_n", + Library: true, + Frameworks: map[framework.Framework][]string{ + framework.Default: {"dd"}, + framework.ALL: {"aa"}, + }, + CloudFormation: &scan.EngineMetadata{}, + Terraform: &scan.EngineMetadata{}, + } + + assert.Equal(t, expected, sm) + }) + + t.Run("related resources are a map", func(t *testing.T) { + sm := StaticMetadata{ + References: []string{"r"}, + } + require.NoError(t, sm.Update(map[string]any{ + "related_resources": []map[string]any{ + { + "ref": "r1_n", + }, + { + "ref": "r2_n", + }, + }, + })) + + expected := StaticMetadata{ + References: []string{"r", "r1_n", "r2_n"}, + CloudFormation: &scan.EngineMetadata{}, + Terraform: &scan.EngineMetadata{}, + } + + assert.Equal(t, expected, sm) + }) + + t.Run("related resources are a string", func(t *testing.T) { + sm := StaticMetadata{ + References: []string{"r"}, + } + require.NoError(t, sm.Update(map[string]any{ + "related_resources": []string{"r1_n", "r2_n"}, + })) + + expected := StaticMetadata{ + References: []string{"r", "r1_n", "r2_n"}, + CloudFormation: &scan.EngineMetadata{}, + Terraform: &scan.EngineMetadata{}, + } + + assert.Equal(t, expected, sm) + }) +} + +func Test_getEngineMetadata(t *testing.T) { + inputSchema := map[string]interface{}{ + "terraform": map[string]interface{}{ + "good_examples": `resource "aws_cloudtrail" "good_example" { + is_multi_region_trail = true + + event_selector { + read_write_type = "All" + include_management_events = true + + data_resource { + type = "AWS::S3::Object" + values = ["${data.aws_s3_bucket.important-bucket.arn}/"] + } + } + }`, + }, + "cloud_formation": map[string]interface{}{"good_examples": `--- 
+Resources: + GoodExample: + Type: AWS::CloudTrail::Trail + Properties: + IsLogging: true + IsMultiRegionTrail: true + S3BucketName: "CloudtrailBucket" + S3KeyPrefix: "/trailing" + TrailName: "Cloudtrail"`, + }} + + var testCases = []struct { + schema string + want string + }{ + { + schema: "terraform", + want: `resource "aws_cloudtrail" "good_example" { + is_multi_region_trail = true + + event_selector { + read_write_type = "All" + include_management_events = true + + data_resource { + type = "AWS::S3::Object" + values = ["${data.aws_s3_bucket.important-bucket.arn}/"] + } + } + }`, + }, + {schema: "cloud_formation", + want: `--- +Resources: + GoodExample: + Type: AWS::CloudTrail::Trail + Properties: + IsLogging: true + IsMultiRegionTrail: true + S3BucketName: "CloudtrailBucket" + S3KeyPrefix: "/trailing" + TrailName: "Cloudtrail"`}, + } + + for _, tc := range testCases { + t.Run(tc.schema, func(t *testing.T) { + em, err := NewEngineMetadata(tc.schema, inputSchema) + assert.NoError(t, err) + assert.Equal(t, tc.want, em.GoodExamples[0]) + }) + } +} diff --git a/pkg/rego/result.go b/pkg/rego/result.go new file mode 100644 index 000000000000..26d63ad5061c --- /dev/null +++ b/pkg/rego/result.go @@ -0,0 +1,166 @@ +package rego + +import ( + "fmt" + "io/fs" + "strconv" + + "github.com/aquasecurity/trivy/pkg/scan" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + "github.com/open-policy-agent/opa/rego" +) + +type regoResult struct { + Filepath string + Resource string + StartLine int + EndLine int + SourcePrefix string + Message string + Explicit bool + Managed bool + FSKey string + FS fs.FS + Parent *regoResult +} + +func (r regoResult) GetMetadata() defsecTypes.MisconfigMetadata { + var m defsecTypes.MisconfigMetadata + if !r.Managed { + m = defsecTypes.NewUnmanagedMisconfigMetadata() + } else { + rng := defsecTypes.NewRangeWithFSKey(r.Filepath, r.StartLine, r.EndLine, r.SourcePrefix, r.FSKey, r.FS) + if r.Explicit { + m = defsecTypes.NewExplicitMisconfigMetadata(rng, r.Resource) + } else { + m = defsecTypes.NewMisconfigMetadata(rng, r.Resource) + } + } + if r.Parent != nil { + return m.WithParent(r.Parent.GetMetadata()) + } + return m +} + +func (r regoResult) GetRawValue() interface{} { + return nil +} + +func parseResult(raw interface{}) *regoResult { + var result regoResult + result.Managed = true + switch val := raw.(type) { + case []interface{}: + var msg string + for _, item := range val { + switch raw := item.(type) { + case map[string]interface{}: + result = parseCause(raw) + case string: + msg = raw + } + } + result.Message = msg + case string: + result.Message = val + case map[string]interface{}: + result = parseCause(val) + default: + result.Message = "Rego policy resulted in DENY" + } + return &result +} + +func parseCause(cause map[string]interface{}) regoResult { + var result regoResult + result.Managed = true + if msg, ok := cause["msg"]; ok { + result.Message = fmt.Sprintf("%s", msg) + } + if filepath, ok := cause["filepath"]; ok { + result.Filepath = fmt.Sprintf("%s", filepath) + } + if msg, ok := cause["fskey"]; ok { + result.FSKey = fmt.Sprintf("%s", msg) + } + if msg, ok := cause["resource"]; ok { + result.Resource = fmt.Sprintf("%s", msg) + } + if start, ok := cause["startline"]; ok { + result.StartLine = parseLineNumber(start) + } + if end, ok := cause["endline"]; ok { + result.EndLine = parseLineNumber(end) + } + if prefix, ok := cause["sourceprefix"]; ok { + result.SourcePrefix = fmt.Sprintf("%s", prefix) + } + if explicit, ok := 
cause["explicit"]; ok { + if set, ok := explicit.(bool); ok { + result.Explicit = set + } + } + if managed, ok := cause["managed"]; ok { + if set, ok := managed.(bool); ok { + result.Managed = set + } + } + if parent, ok := cause["parent"]; ok { + if m, ok := parent.(map[string]interface{}); ok { + parentResult := parseCause(m) + result.Parent = &parentResult + } + } + return result +} + +func parseLineNumber(raw interface{}) int { + str := fmt.Sprintf("%s", raw) + n, _ := strconv.Atoi(str) + return n +} + +func (s *Scanner) convertResults(set rego.ResultSet, input Input, namespace string, rule string, traces []string) scan.Results { + var results scan.Results + + offset := 0 + if input.Contents != nil { + if xx, ok := input.Contents.(map[string]interface{}); ok { + if md, ok := xx["__defsec_metadata"]; ok { + if md2, ok := md.(map[string]interface{}); ok { + if sl, ok := md2["offset"]; ok { + offset, _ = sl.(int) + } + } + } + } + } + for _, result := range set { + for _, expression := range result.Expressions { + values, ok := expression.Value.([]interface{}) + if !ok { + values = []interface{}{expression.Value} + } + + for _, value := range values { + regoResult := parseResult(value) + regoResult.FS = input.FS + if regoResult.Filepath == "" && input.Path != "" { + regoResult.Filepath = input.Path + } + if regoResult.Message == "" { + regoResult.Message = fmt.Sprintf("Rego policy rule: %s.%s", namespace, rule) + } + regoResult.StartLine += offset + regoResult.EndLine += offset + results.AddRego(regoResult.Message, namespace, rule, traces, regoResult) + } + } + } + return results +} + +func (s *Scanner) embellishResultsWithRuleMetadata(results scan.Results, metadata StaticMetadata) scan.Results { + results.SetRule(metadata.ToRule()) + return results +} diff --git a/pkg/rego/result_test.go b/pkg/rego/result_test.go new file mode 100644 index 000000000000..d958f7962b10 --- /dev/null +++ b/pkg/rego/result_test.go @@ -0,0 +1,104 @@ +package rego + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_parseResult(t *testing.T) { + var testCases = []struct { + name string + input interface{} + want regoResult + }{ + { + name: "unknown", + input: nil, + want: regoResult{ + Managed: true, + Message: "Rego policy resulted in DENY", + }, + }, + { + name: "string", + input: "message", + want: regoResult{ + Managed: true, + Message: "message", + }, + }, + { + name: "strings", + input: []interface{}{"message"}, + want: regoResult{ + Managed: true, + Message: "message", + }, + }, + { + name: "maps", + input: []interface{}{ + "message", + map[string]interface{}{ + "filepath": "a.out", + }, + }, + want: regoResult{ + Managed: true, + Message: "message", + Filepath: "a.out", + }, + }, + { + name: "map", + input: map[string]interface{}{ + "msg": "message", + "filepath": "a.out", + "fskey": "abcd", + "resource": "resource", + "startline": "123", + "endline": "456", + "sourceprefix": "git", + "explicit": true, + "managed": true, + }, + want: regoResult{ + Message: "message", + Filepath: "a.out", + Resource: "resource", + StartLine: 123, + EndLine: 456, + SourcePrefix: "git", + FSKey: "abcd", + Explicit: true, + Managed: true, + }, + }, + { + name: "parent", + input: map[string]interface{}{ + "msg": "child", + "parent": map[string]interface{}{ + "msg": "parent", + }, + }, + want: regoResult{ + Message: "child", + Managed: true, + Parent: ®oResult{ + Message: "parent", + Managed: true, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + 
have := parseResult(tc.input) + assert.NotNil(t, have) + assert.Equal(t, tc.want, *have) + }) + } +} diff --git a/pkg/rego/runtime.go b/pkg/rego/runtime.go new file mode 100644 index 000000000000..6e28268d9971 --- /dev/null +++ b/pkg/rego/runtime.go @@ -0,0 +1,28 @@ +package rego + +import ( + "os" + "strings" + + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/version" +) + +func addRuntimeValues() *ast.Term { + env := ast.NewObject() + for _, pair := range os.Environ() { + parts := strings.SplitN(pair, "=", 2) + if len(parts) == 1 { + env.Insert(ast.StringTerm(parts[0]), ast.NullTerm()) + } else if len(parts) > 1 { + env.Insert(ast.StringTerm(parts[0]), ast.StringTerm(parts[1])) + } + } + + obj := ast.NewObject() + obj.Insert(ast.StringTerm("env"), ast.NewTerm(env)) + obj.Insert(ast.StringTerm("version"), ast.StringTerm(version.Version)) + obj.Insert(ast.StringTerm("commit"), ast.StringTerm(version.Vcs)) + + return ast.NewTerm(obj) +} diff --git a/pkg/rego/scanner.go b/pkg/rego/scanner.go new file mode 100644 index 000000000000..11a815356b7a --- /dev/null +++ b/pkg/rego/scanner.go @@ -0,0 +1,411 @@ +package rego + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "strings" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/rego/schemas" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/open-policy-agent/opa/ast" + "github.com/open-policy-agent/opa/rego" + "github.com/open-policy-agent/opa/storage" +) + +var _ options.ConfigurableScanner = (*Scanner)(nil) + +type Scanner struct { + ruleNamespaces map[string]struct{} + policies map[string]*ast.Module + store storage.Store + dataDirs []string + runtimeValues *ast.Term + compiler *ast.Compiler + regoErrorLimit int + debug debug.Logger + traceWriter io.Writer + tracePerResult bool + retriever *MetadataRetriever + policyFS fs.FS + dataFS fs.FS + frameworks []framework.Framework + spec string + inputSchema interface{} // unmarshalled into this from a json schema document + sourceType types.Source +} + +func (s *Scanner) SetUseEmbeddedLibraries(b bool) { + // handled externally +} + +func (s *Scanner) SetSpec(spec string) { + s.spec = spec +} + +func (s *Scanner) SetRegoOnly(bool) {} + +func (s *Scanner) SetFrameworks(frameworks []framework.Framework) { + s.frameworks = frameworks +} + +func (s *Scanner) SetUseEmbeddedPolicies(b bool) { + // handled externally +} + +func (s *Scanner) trace(heading string, input interface{}) { + if s.traceWriter == nil { + return + } + data, err := json.MarshalIndent(input, "", " ") + if err != nil { + return + } + _, _ = fmt.Fprintf(s.traceWriter, "REGO %[1]s:\n%s\nEND REGO %[1]s\n\n", heading, string(data)) +} + +func (s *Scanner) SetPolicyFilesystem(fs fs.FS) { + s.policyFS = fs +} + +func (s *Scanner) SetDataFilesystem(fs fs.FS) { + s.dataFS = fs +} + +func (s *Scanner) SetPolicyReaders(_ []io.Reader) { + // NOTE: Policy readers option not applicable for rego, policies are loaded on-demand by other scanners. 
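// The same applies to SetPolicyDirs and SetSkipRequiredCheck below: this scanner only
// evaluates modules that have already been loaded and compiled, so file-loading options
// are owned by the other scanners that drive it.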
+} + +func (s *Scanner) SetDebugWriter(writer io.Writer) { + s.debug = debug.New(writer, "rego", "scanner") +} + +func (s *Scanner) SetTraceWriter(writer io.Writer) { + s.traceWriter = writer +} + +func (s *Scanner) SetPerResultTracingEnabled(b bool) { + s.tracePerResult = b +} + +func (s *Scanner) SetPolicyDirs(_ ...string) { + // NOTE: Policy dirs option not applicable for rego, policies are loaded on-demand by other scanners. +} + +func (s *Scanner) SetDataDirs(dirs ...string) { + s.dataDirs = dirs +} + +func (s *Scanner) SetPolicyNamespaces(namespaces ...string) { + for _, namespace := range namespaces { + s.ruleNamespaces[namespace] = struct{}{} + } +} + +func (s *Scanner) SetSkipRequiredCheck(_ bool) { + // NOTE: Skip required option not applicable for rego. +} + +func (s *Scanner) SetRegoErrorLimit(limit int) { + s.regoErrorLimit = limit +} + +type DynamicMetadata struct { + Warning bool + Filepath string + Message string + StartLine int + EndLine int +} + +func NewScanner(source types.Source, options ...options.ScannerOption) *Scanner { + schema, ok := schemas.SchemaMap[source] + if !ok { + schema = schemas.Anything + } + + s := &Scanner{ + regoErrorLimit: ast.CompileErrorLimitDefault, + sourceType: source, + ruleNamespaces: map[string]struct{}{ + "builtin": {}, + "appshield": {}, + "defsec": {}, + }, + runtimeValues: addRuntimeValues(), + } + for _, opt := range options { + opt(s) + } + if schema != schemas.None { + err := json.Unmarshal([]byte(schema), &s.inputSchema) + if err != nil { + panic(err) + } + } + return s +} + +func (s *Scanner) SetParentDebugLogger(l debug.Logger) { + s.debug = l.Extend("rego") +} + +func (s *Scanner) runQuery(ctx context.Context, query string, input interface{}, disableTracing bool) (rego.ResultSet, []string, error) { + + trace := (s.traceWriter != nil || s.tracePerResult) && !disableTracing + + regoOptions := []func(*rego.Rego){ + rego.Query(query), + rego.Compiler(s.compiler), + rego.Store(s.store), + rego.Runtime(s.runtimeValues), + rego.Trace(trace), + } + + if s.inputSchema != nil { + schemaSet := ast.NewSchemaSet() + schemaSet.Put(ast.MustParseRef("schema.input"), s.inputSchema) + regoOptions = append(regoOptions, rego.Schemas(schemaSet)) + } + + if input != nil { + regoOptions = append(regoOptions, rego.Input(input)) + } + + instance := rego.New(regoOptions...) 
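// A sketch of what is evaluated here, assuming typical usage (see applyRule below): the
// query is a fully-qualified ref such as "data.defsec.test.deny", the compiler already
// holds every loaded module, and the store carries any custom data documents. Tracing is
// only wired in when a trace writer or per-result tracing is active, since trace
// collection adds evaluation overhead.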
+ set, err := instance.Eval(ctx) + if err != nil { + return nil, nil, err + } + + // we also build a slice of trace lines for per-result tracing - primarily for fanal/trivy + var traces []string + + if trace { + if s.traceWriter != nil { + rego.PrintTrace(s.traceWriter, instance) + } + if s.tracePerResult { + traceBuffer := bytes.NewBuffer([]byte{}) + rego.PrintTrace(traceBuffer, instance) + traces = strings.Split(traceBuffer.String(), "\n") + } + } + return set, traces, nil +} + +type Input struct { + Path string `json:"path"` + FS fs.FS `json:"-"` + Contents interface{} `json:"contents"` +} + +func GetInputsContents(inputs []Input) []any { + results := make([]any, len(inputs)) + for i, c := range inputs { + results[i] = c.Contents + } + return results +} + +func (s *Scanner) ScanInput(ctx context.Context, inputs ...Input) (scan.Results, error) { + + s.debug.Log("Scanning %d inputs...", len(inputs)) + + var results scan.Results + + for _, module := range s.policies { + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + namespace := getModuleNamespace(module) + topLevel := strings.Split(namespace, ".")[0] + if _, ok := s.ruleNamespaces[topLevel]; !ok { + continue + } + + staticMeta, err := s.retriever.RetrieveMetadata(ctx, module, GetInputsContents(inputs)...) + if err != nil { + return nil, err + } + + if isPolicyWithSubtype(s.sourceType) { + // skip if policy isn't relevant to what is being scanned + if !isPolicyApplicable(staticMeta, inputs...) { + continue + } + } + + if len(inputs) == 0 { + continue + } + + usedRules := make(map[string]struct{}) + + // all rules + for _, rule := range module.Rules { + ruleName := rule.Head.Name.String() + if _, ok := usedRules[ruleName]; ok { + continue + } + usedRules[ruleName] = struct{}{} + if isEnforcedRule(ruleName) { + ruleResults, err := s.applyRule(ctx, namespace, ruleName, inputs, staticMeta.InputOptions.Combined) + if err != nil { + return nil, err + } + results = append(results, s.embellishResultsWithRuleMetadata(ruleResults, *staticMeta)...)
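// embellishResultsWithRuleMetadata (defined in result.go above) simply calls
// results.SetRule(metadata.ToRule()), stamping the rule's static metadata
// (ID, severity, links) onto every result this rule produced.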
+ } + } + + } + + return results, nil +} + +func isPolicyWithSubtype(sourceType types.Source) bool { + for _, s := range []types.Source{types.SourceCloud, types.SourceDefsec, types.SourceKubernetes} { + if sourceType == s { + return true + } + } + return false +} + +func checkSubtype(ii map[string]interface{}, provider string, subTypes []SubType) bool { + if len(subTypes) == 0 { + return true + } + + for _, st := range subTypes { + switch services := ii[provider].(type) { + case map[string]interface{}: // cloud + for service := range services { + if (service == st.Service) && (st.Provider == provider) { + return true + } + } + case string: // k8s - logic can be improved + if strings.EqualFold(services, st.Group) || + strings.EqualFold(services, st.Version) || + strings.EqualFold(services, st.Kind) { + return true + } + } + } + return false +} + +func isPolicyApplicable(staticMetadata *StaticMetadata, inputs ...Input) bool { + for _, input := range inputs { + if ii, ok := input.Contents.(map[string]interface{}); ok { + for provider := range ii { + // TODO(simar): Add other providers + if !strings.Contains(strings.Join([]string{"kind", "aws", "azure"}, ","), provider) { + continue + } + + if len(staticMetadata.InputOptions.Selectors) == 0 { // policy always applies if no selectors + return true + } + + // check metadata for subtype + for _, s := range staticMetadata.InputOptions.Selectors { + if checkSubtype(ii, provider, s.Subtypes) { + return true + } + } + } + } + } + return false +} + +func (s *Scanner) applyRule(ctx context.Context, namespace string, rule string, inputs []Input, combined bool) (scan.Results, error) { + + // handle combined evaluations if possible + if combined { + s.trace("INPUT", inputs) + return s.applyRuleCombined(ctx, namespace, rule, inputs) + } + + var results scan.Results + qualified := fmt.Sprintf("data.%s.%s", namespace, rule) + for _, input := range inputs { + s.trace("INPUT", input) + if ignored, err := s.isIgnored(ctx, namespace, rule, input.Contents); err != nil { + return nil, err + } else if ignored { + var result regoResult + result.FS = input.FS + result.Filepath = input.Path + result.Managed = true + results.AddIgnored(result) + continue + } + set, traces, err := s.runQuery(ctx, qualified, input.Contents, false) + if err != nil { + return nil, err + } + s.trace("RESULTSET", set) + ruleResults := s.convertResults(set, input, namespace, rule, traces) + if len(ruleResults) == 0 { // It passed because we didn't find anything wrong (NOT because it didn't exist) + var result regoResult + result.FS = input.FS + result.Filepath = input.Path + result.Managed = true + results.AddPassedRego(namespace, rule, traces, result) + continue + } + results = append(results, ruleResults...) 
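// A non-empty result set here means the deny/warn query matched, so the failures are
// kept; the empty-set branch above records an explicit pass so that passing checks
// surface in reports alongside failures.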
+ } + + return results, nil +} + +func (s *Scanner) applyRuleCombined(ctx context.Context, namespace string, rule string, inputs []Input) (scan.Results, error) { + if len(inputs) == 0 { + return nil, nil + } + var results scan.Results + qualified := fmt.Sprintf("data.%s.%s", namespace, rule) + if ignored, err := s.isIgnored(ctx, namespace, rule, inputs); err != nil { + return nil, err + } else if ignored { + for _, input := range inputs { + var result regoResult + result.FS = input.FS + result.Filepath = input.Path + result.Managed = true + results.AddIgnored(result) + } + return results, nil + } + set, traces, err := s.runQuery(ctx, qualified, inputs, false) + if err != nil { + return nil, err + } + return s.convertResults(set, inputs[0], namespace, rule, traces), nil +} + +// severity is now set with metadata, so deny/warn/violation now behave the same way +func isEnforcedRule(name string) bool { + switch { + case name == "deny", strings.HasPrefix(name, "deny_"), + name == "warn", strings.HasPrefix(name, "warn_"), + name == "violation", strings.HasPrefix(name, "violation_"): + return true + } + return false +} diff --git a/pkg/rego/scanner_test.go b/pkg/rego/scanner_test.go new file mode 100644 index 000000000000..aba44b63e71e --- /dev/null +++ b/pkg/rego/scanner_test.go @@ -0,0 +1,978 @@ +package rego + +import ( + "bytes" + "context" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/liamg/memoryfs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +func CreateFS(t *testing.T, files map[string]string) fs.FS { + memfs := memoryfs.New() + for name, content := range files { + name := strings.TrimPrefix(name, "/") + err := memfs.MkdirAll(filepath.Dir(name), 0o700) + require.NoError(t, err) + err = memfs.WriteFile(name, []byte(content), 0o644) + require.NoError(t, err) + } + return memfs +} + +func Test_RegoScanning_Deny(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny { + input.evil +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + FS: srcFS, + }) + require.NoError(t, err) + + require.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Equal(t, "/evil.lol", results.GetFailed()[0].Metadata().Range().GetFilename()) + assert.False(t, results.GetFailed()[0].IsWarning()) +} + +func Test_RegoScanning_AbsolutePolicyPath_Deny(t *testing.T) { + + tmp := t.TempDir() + require.NoError(t, os.Mkdir(filepath.Join(tmp, "policies"), 0755)) + require.NoError(t, os.WriteFile(filepath.Join(tmp, "policies", "test.rego"), []byte(`package defsec.test + +deny { + input.evil +}`), 0600)) + + srcFS := os.DirFS(tmp) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"/policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + FS: srcFS, + }) + require.NoError(t, err) + + require.Equal(t, 1, 
len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Equal(t, "/evil.lol", results.GetFailed()[0].Metadata().Range().GetFilename()) + assert.False(t, results.GetFailed()[0].IsWarning()) +} + +func Test_RegoScanning_Warn(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +warn { + input.evil +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + require.Equal(t, 1, len(results.GetFailed())) + require.Equal(t, 0, len(results.GetPassed())) + require.Equal(t, 0, len(results.GetIgnored())) + + assert.True(t, results.GetFailed()[0].IsWarning()) +} + +func Test_RegoScanning_Allow(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny { + input.evil +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": false, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 0, len(results.GetFailed())) + require.Equal(t, 1, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Equal(t, "/evil.lol", results.GetPassed()[0].Metadata().Range().GetFilename()) +} + +func Test_RegoScanning_Namespace_Exception(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny { + input.evil +} +`, + "policies/exceptions.rego": ` +package namespace.exceptions + +import data.namespaces + +exception[ns] { + ns := data.namespaces[_] + startswith(ns, "defsec") +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 0, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 1, len(results.GetIgnored())) + +} + +func Test_RegoScanning_Namespace_Exception_WithoutMatch(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny { + input.evil +} +`, "policies/something.rego": ` +package builtin.test + +deny_something { + input.something +} +`, + "policies/exceptions.rego": ` +package namespace.exceptions + +import data.namespaces + +exception[ns] { + ns := data.namespaces[_] + startswith(ns, "builtin") +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 1, len(results.GetIgnored())) + +} + +func Test_RegoScanning_Rule_Exception(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + 
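// Rule-level exceptions: an exception rule in the same package lists rule-name
// suffixes to waive, so "evil" below matches deny_evil and the finding is recorded
// as ignored rather than failed.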
"policies/test.rego": ` +package defsec.test +deny_evil { + input.evil +} +`, + "policies/exceptions.rego": ` +package defsec.test + +exception[trules] { + trules := ["evil"] +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 0, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 1, len(results.GetIgnored())) +} + +func Test_RegoScanning_Rule_Exception_WithoutMatch(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test +deny_evil { + input.evil +} +`, + "policies/exceptions.rego": ` +package defsec.test + +exception[trules] { + trules := ["good"] +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) +} + +func Test_RegoScanning_WithRuntimeValues(t *testing.T) { + + _ = os.Setenv("DEFSEC_RUNTIME_VAL", "AOK") + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny_evil { + output := opa.runtime() + output.env.DEFSEC_RUNTIME_VAL == "AOK" +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) +} + +func Test_RegoScanning_WithDenyMessage(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny[msg] { + input.evil + msg := "oh no" +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + require.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Equal(t, "oh no", results.GetFailed()[0].Description()) + assert.Equal(t, "/evil.lol", results.GetFailed()[0].Metadata().Range().GetFilename()) +} + +func Test_RegoScanning_WithDenyMetadata_ImpliedPath(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny[res] { + input.evil + res := { + "msg": "oh no", + "startline": 123, + "endline": 456, + } +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + 
require.NoError(t, err) + + require.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Equal(t, "oh no", results.GetFailed()[0].Description()) + assert.Equal(t, "/evil.lol", results.GetFailed()[0].Metadata().Range().GetFilename()) + assert.Equal(t, 123, results.GetFailed()[0].Metadata().Range().GetStartLine()) + assert.Equal(t, 456, results.GetFailed()[0].Metadata().Range().GetEndLine()) + +} + +func Test_RegoScanning_WithDenyMetadata_PersistedPath(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny[res] { + input.evil + res := { + "msg": "oh no", + "startline": 123, + "endline": 456, + "filepath": "/blah.txt", + } +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + require.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Equal(t, "oh no", results.GetFailed()[0].Description()) + assert.Equal(t, "/blah.txt", results.GetFailed()[0].Metadata().Range().GetFilename()) + assert.Equal(t, 123, results.GetFailed()[0].Metadata().Range().GetStartLine()) + assert.Equal(t, 456, results.GetFailed()[0].Metadata().Range().GetEndLine()) + +} + +func Test_RegoScanning_WithStaticMetadata(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +__rego_metadata__ := { + "id": "AA001", + "avd_id": "AVD-XX-9999", + "title": "This is a title", + "short_code": "short-code", + "severity": "LOW", + "type": "Dockerfile Security Check", + "description": "This is a description", + "recommended_actions": "This is a recommendation", + "url": "https://google.com", +} + +deny[res] { + input.evil + res := { + "msg": "oh no", + "startline": 123, + "endline": 456, + "filepath": "/blah.txt", + } +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + require.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + failure := results.GetFailed()[0] + + assert.Equal(t, "oh no", failure.Description()) + assert.Equal(t, "/blah.txt", failure.Metadata().Range().GetFilename()) + assert.Equal(t, 123, failure.Metadata().Range().GetStartLine()) + assert.Equal(t, 456, failure.Metadata().Range().GetEndLine()) + assert.Equal(t, "AVD-XX-9999", failure.Rule().AVDID) + assert.True(t, failure.Rule().HasID("AA001")) + assert.Equal(t, "This is a title", failure.Rule().Summary) + assert.Equal(t, severity.Low, failure.Rule().Severity) + assert.Equal(t, "This is a recommendation", failure.Rule().Resolution) + assert.Equal(t, "https://google.com", failure.Rule().Links[0]) + +} + +func Test_RegoScanning_WithMatchingInputSelector(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +__rego_input__ := { + "selector": [{"type": "json"}], +} + +deny { + input.evil +} + +`, + }) + + 
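// __rego_input__ selectors let a policy declare which input types it applies to;
// "json" matches the SourceJSON scanner built below, so the deny rule is evaluated.
// The following test flips the selector to a non-matching type and expects no
// results at all.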
scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) +} + +func Test_RegoScanning_WithNonMatchingInputSelector(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +__rego_input__ := { + "selector": [{"type": "testing"}], +} + +deny { + input.evil +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 0, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) +} + +func Test_RegoScanning_NoTracingByDefault(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny { + input.evil +} +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Len(t, results.GetFailed()[0].Traces(), 0) +} + +func Test_RegoScanning_GlobalTracingEnabled(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny { + input.evil +} +`, + }) + + traceBuffer := bytes.NewBuffer([]byte{}) + + scanner := NewScanner(types.SourceJSON, options.ScannerWithTrace(traceBuffer)) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Len(t, results.GetFailed()[0].Traces(), 0) + assert.Greater(t, len(traceBuffer.Bytes()), 0) +} + +func Test_RegoScanning_PerResultTracingEnabled(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +deny { + input.evil +} +`, + }) + + scanner := NewScanner(types.SourceJSON, options.ScannerWithPerResultTracing(true)) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "evil": true, + }, + }) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) + + assert.Greater(t, len(results.GetFailed()[0].Traces()), 0) +} + +func Test_dynamicMetadata(t *testing.T) { + + srcFS := CreateFS(t, 
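// Dynamic metadata: __rego_metadata__ can itself be a rule evaluated against the
// input, so the title in the policy below is built with sprintf at scan time.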
map[string]string{ + "policies/test.rego": ` +package defsec.test + +__rego_metadata__ := { + "title" : sprintf("i am %s",[input.text]) +} + +deny { + input.text +} + +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "text": "dynamic", + }, + }) + require.NoError(t, err) + assert.Equal(t, results[0].Rule().Summary, "i am dynamic") +} + +func Test_staticMetadata(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test + +__rego_metadata__ := { + "title" : "i am static" +} + +deny { + input.text +} + +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "text": "test", + }, + }) + require.NoError(t, err) + assert.Equal(t, results[0].Rule().Summary, "i am static") +} + +func Test_annotationMetadata(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": `# METADATA +# title: i am a title +# description: i am a description +# related_resources: +# - https://google.com +# custom: +# id: EG123 +# avd_id: AVD-EG-0123 +# severity: LOW +# recommended_action: have a cup of tea +package defsec.test + +deny { + input.text +} + +`, + "policies/test2.rego": `# METADATA +# title: i am another title +package defsec.test2 + +deny { + input.blah +} + +`, + }) + + scanner := NewScanner(types.SourceJSON) + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{ + Path: "/evil.lol", + Contents: map[string]interface{}{ + "text": "test", + }, + }) + require.NoError(t, err) + require.Len(t, results.GetFailed(), 1) + failure := results.GetFailed()[0].Rule() + assert.Equal(t, "i am a title", failure.Summary) + assert.Equal(t, "i am a description", failure.Explanation) + require.Len(t, failure.Links, 1) + assert.Equal(t, "https://google.com", failure.Links[0]) + assert.Equal(t, "AVD-EG-0123", failure.AVDID) + assert.Equal(t, severity.Low, failure.Severity) + assert.Equal(t, "have a cup of tea", failure.Resolution) +} + +func Test_RegoScanning_WithInvalidInputSchema(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": `# METADATA +# schemas: +# - input: schema["input"] +package defsec.test + +deny { + input.evil == "lol" +} +`, + }) + + scanner := NewScanner(types.SourceDockerfile) + scanner.SetRegoErrorLimit(0) // override to not allow any errors + assert.ErrorContains( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + "undefined ref: input.evil", + ) +} + +func Test_RegoScanning_WithValidInputSchema(t *testing.T) { + + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": `# METADATA +# schemas: +# - input: schema["input"] +package defsec.test + +deny { + input.Stages[0].Commands[0].Cmd == "lol" +} +`, + }) + + scanner := NewScanner(types.SourceDockerfile) + assert.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) +} + +func Test_RegoScanning_WithFilepathToSchema(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": `# METADATA +# schemas: +# - input: 
schema["dockerfile"] +package defsec.test + +deny { + input.evil == "lol" +} +`, + }) + scanner := NewScanner(types.SourceJSON) + scanner.SetRegoErrorLimit(0) // override to not allow any errors + assert.ErrorContains( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + "undefined ref: input.evil", + ) +} + +func Test_RegoScanning_CustomData(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test +import data.settings.DS123.foo_bar_baz + +deny { + not foo_bar_baz +} +`, + }) + + dataFS := CreateFS(t, map[string]string{ + "data/data.json": `{ + "settings": { + "DS123":{ + "foo_bar_baz":false + } + } +}`, + "data/junk.txt": "this file should be ignored", + }) + + scanner := NewScanner(types.SourceJSON) + scanner.SetDataFilesystem(dataFS) + scanner.SetDataDirs(".") + + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{}) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) +} + +func Test_RegoScanning_InvalidFS(t *testing.T) { + srcFS := CreateFS(t, map[string]string{ + "policies/test.rego": ` +package defsec.test +import data.settings.DS123.foo_bar_baz + +deny { + not foo_bar_baz +} +`, + }) + + dataFS := CreateFS(t, map[string]string{ + "data/data.json": `{ + "settings": { + "DS123":{ + "foo_bar_baz":false + } + } +}`, + "data/junk.txt": "this file should be ignored", + }) + + scanner := NewScanner(types.SourceJSON) + scanner.SetDataFilesystem(dataFS) + scanner.SetDataDirs("X://") + + require.NoError( + t, + scanner.LoadPolicies(false, false, srcFS, []string{"policies"}, nil), + ) + + results, err := scanner.ScanInput(context.TODO(), Input{}) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) + assert.Equal(t, 0, len(results.GetPassed())) + assert.Equal(t, 0, len(results.GetIgnored())) +} diff --git a/pkg/rego/schemas/00_schema.go b/pkg/rego/schemas/00_schema.go new file mode 100644 index 000000000000..e6674912fe58 --- /dev/null +++ b/pkg/rego/schemas/00_schema.go @@ -0,0 +1,22 @@ +package schemas + +import _ "embed" + +type Schema string + +var ( + None Schema = "" + Anything Schema = `{}` + + //go:embed dockerfile.json + Dockerfile Schema + + //go:embed kubernetes.json + Kubernetes Schema + + //go:embed rbac.json + RBAC Schema + + //go:embed cloud.json + Cloud Schema +) diff --git a/pkg/rego/schemas/builder.go b/pkg/rego/schemas/builder.go new file mode 100644 index 000000000000..99d44823af6e --- /dev/null +++ b/pkg/rego/schemas/builder.go @@ -0,0 +1,270 @@ +package schemas + +import ( + "fmt" + "reflect" + "strings" + + "github.com/aquasecurity/trivy/pkg/rego/convert" + "github.com/aquasecurity/trivy/pkg/state" +) + +type RawSchema struct { + Type string `json:"type"` // object + Properties map[string]Property `json:"properties,omitempty"` + Defs map[string]*Property `json:"definitions,omitempty"` +} + +type Property struct { + Type string `json:"type,omitempty"` + Ref string `json:"$ref,omitempty"` + Properties map[string]Property `json:"properties,omitempty"` + Items *Property `json:"items,omitempty"` +} + +type builder struct { + schema RawSchema +} + +func Build() (*RawSchema, error) { + + b := newBuilder() + + inputValue := reflect.ValueOf(state.State{}) + + err := b.fromInput(inputValue) + if err != nil { + return nil, err + } + + return 
&b.schema, nil +} + +func newBuilder() *builder { + return &builder{ + schema: RawSchema{ + Properties: nil, + Defs: nil, + }, + } +} + +func (b *builder) fromInput(inputValue reflect.Value) error { + + prop, err := b.readProperty("", nil, inputValue.Type(), 0) + if err != nil { + return err + } + if prop == nil { + return fmt.Errorf("property is nil") + } + b.schema.Properties = prop.Properties + b.schema.Type = prop.Type + return nil +} + +func refName(name string, parent, t reflect.Type) string { + if t.Name() == "" { // inline struct + return sanitise(parent.PkgPath() + "." + parent.Name() + "." + name) + } + return sanitise(t.PkgPath() + "." + t.Name()) +} + +func sanitise(s string) string { + return strings.ReplaceAll(s, "/", ".") +} + +func (b *builder) readProperty(name string, parent, inputType reflect.Type, indent int) (*Property, error) { + + if inputType.Kind() == reflect.Ptr { + inputType = inputType.Elem() + } + + switch inputType.String() { + case "types.Metadata", "types.Range", "types.Reference": + return nil, nil + } + + if b.schema.Defs != nil { + _, ok := b.schema.Defs[refName(name, parent, inputType)] + if ok { + return &Property{ + Type: "object", + Ref: "#/definitions/" + refName(name, parent, inputType), + }, nil + } + } + + fmt.Println(strings.Repeat(" ", indent) + name) + + switch kind := inputType.Kind(); kind { + case reflect.Struct: + return b.readStruct(name, parent, inputType, indent) + case reflect.Slice: + return b.readSlice(name, parent, inputType, indent) + case reflect.String: + return &Property{ + Type: "string", + }, nil + case reflect.Int: + return &Property{ + Type: "integer", + }, nil + case reflect.Bool: + return &Property{ + Type: "boolean", + }, nil + case reflect.Float32, reflect.Float64: + return &Property{ + Type: "number", + }, nil + } + + switch inputType.Name() { + case "BoolValue": + return &Property{ + Type: "object", + Properties: map[string]Property{ + "value": { + Type: "boolean", + }, + }, + }, nil + case "IntValue": + return &Property{ + Type: "object", + Properties: map[string]Property{ + "value": { + Type: "integer", + }, + }, + }, nil + case "StringValue", "TimeValue", "BytesValue": + return &Property{ + Type: "object", + Properties: map[string]Property{ + "value": { + Type: "string", + }, + }, + }, nil + case "MapValue": + return &Property{ + Type: "object", + Properties: map[string]Property{ + "value": { + Type: "object", + }, + }, + }, nil + + } + + fmt.Printf("WARNING: unsupported type: %s (%s)\n", inputType.Name(), inputType) + return nil, nil +} + +var converterInterface = reflect.TypeOf((*convert.Converter)(nil)).Elem() + +func (b *builder) readStruct(name string, parent, inputType reflect.Type, indent int) (*Property, error) { + + if b.schema.Defs == nil { + b.schema.Defs = map[string]*Property{} + } + + def := &Property{ + Type: "object", + Properties: map[string]Property{}, + } + + if parent != nil { + b.schema.Defs[refName(name, parent, inputType)] = def + } + + if inputType.Implements(converterInterface) { + if inputType.Kind() == reflect.Ptr { + inputType = inputType.Elem() + } + returns := reflect.New(inputType).MethodByName("ToRego").Call(nil) + if err := b.readRego(def, name, parent, returns[0].Type(), returns[0].Interface(), indent); err != nil { + return nil, err + } + } else { + + for i := 0; i < inputType.NumField(); i++ { + field := inputType.Field(i) + prop, err := b.readProperty(field.Name, inputType, field.Type, indent+1) + if err != nil { + return nil, err + } + if prop == nil { + continue + } + key := 
strings.ToLower(field.Name) + if key == "metadata" { + continue + } + def.Properties[key] = *prop + } + } + + if parent == nil { + return def, nil + } + + return &Property{ + Type: "object", + Ref: "#/definitions/" + refName(name, parent, inputType), + }, nil +} + +func (b *builder) readSlice(name string, parent, inputType reflect.Type, indent int) (*Property, error) { + + items, err := b.readProperty(name, parent, inputType.Elem(), indent+1) + if err != nil { + return nil, err + } + + prop := &Property{ + Type: "array", + Items: items, + } + return prop, nil +} + +func (b *builder) readRego(def *Property, name string, parent reflect.Type, typ reflect.Type, raw interface{}, indent int) error { + + switch cast := raw.(type) { + case map[string]interface{}: + def.Type = "object" + for k, v := range cast { + child := &Property{ + Properties: map[string]Property{}, + } + if err := b.readRego(child, k, reflect.TypeOf(raw), reflect.TypeOf(v), v, indent+1); err != nil { + return err + } + def.Properties[k] = *child + } + case map[string]string: + def.Type = "object" + for k, v := range cast { + child := &Property{ + Properties: map[string]Property{}, + } + if err := b.readRego(child, k, reflect.TypeOf(raw), reflect.TypeOf(v), v, indent+1); err != nil { + return err + } + def.Properties[k] = *child + } + default: + prop, err := b.readProperty(name, parent, typ, indent) + if err != nil { + return err + } + *def = *prop + } + + return nil + +} diff --git a/pkg/rego/schemas/cloud.json b/pkg/rego/schemas/cloud.json new file mode 100644 index 000000000000..2ebadef6f353 --- /dev/null +++ b/pkg/rego/schemas/cloud.json @@ -0,0 +1,6830 @@ +{ + "type": "object", + "properties": { + "aws": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.AWS" + }, + "azure": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.Azure" + }, + "cloudstack": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.cloudstack.CloudStack" + }, + "digitalocean": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.DigitalOcean" + }, + "github": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.GitHub" + }, + "google": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.Google" + }, + "kubernetes": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Kubernetes" + }, + "nifcloud": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.Nifcloud" + }, + "openstack": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.OpenStack" + }, + "oracle": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.oracle.Oracle" + } + }, + "definitions": { + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.AWS": { + "type": "object", + "properties": { + "accessanalyzer": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.accessanalyzer.AccessAnalyzer" + }, + "apigateway": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.APIGateway" + }, + "athena": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.Athena" + }, + "cloudfront": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.Cloudfront" + }, + "cloudtrail": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.CloudTrail" + }, + "cloudwatch": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.CloudWatch" + }, + "codebuild": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.codebuild.CodeBuild" + }, + "config": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.config.Config" + }, + "documentdb": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.documentdb.DocumentDB" + }, + "dynamodb": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.DynamoDB" + }, + "ec2": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.EC2" + }, + "ecr": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.ECR" + }, + "ecs": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.ECS" + }, + "efs": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.efs.EFS" + }, + "eks": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.EKS" + }, + "elasticache": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.ElastiCache" + }, + "elasticsearch": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.Elasticsearch" + }, + "elb": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.ELB" + }, + "emr": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.EMR" + }, + "iam": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.IAM" + }, + "kinesis": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kinesis.Kinesis" + }, + "kms": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kms.KMS" + }, + "lambda": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Lambda" + }, + "meta": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.Meta" + }, + "mq": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.mq.MQ" + }, + "msk": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.MSK" + }, + "neptune": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.neptune.Neptune" + }, + "rds": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.RDS" + }, + "redshift": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.Redshift" + }, + "s3": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.S3" + }, + "sam": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.SAM" + }, + "sns": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sns.SNS" + }, + "sqs": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sqs.SQS" + }, + "ssm": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ssm.SSM" + }, + "workspaces": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.WorkSpaces" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.AssumeRole": { + "type": "object", + "properties": { + "duration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "externalid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policyarns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "rolearn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sessionname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sourceidentity": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "tags": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.MapValue" + }, + "transitivetagkeys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.AssumeRoleWithWebIdentity": { + "type": "object", + "properties": { + "duration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policyarns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "rolearn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sessionname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "webidentitytoken": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "webidentitytokenfile": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.DefaultTags": { + "type": "object", + "properties": { + "tags": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.MapValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.IgnoreTags": { + "type": "object", + "properties": { + "keyprefixes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "keys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.Meta": { + "type": "object", + "properties": { + "tfproviders": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.TerraformProvider" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.TerraformProvider": { + "type": "object", + "properties": { + "accesskey": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "alias": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "allowedaccountsids": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "assumerole": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.AssumeRole" + }, + "assumerolewithwebidentity": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.AssumeRoleWithWebIdentity" + }, + "customcabundle": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "defaulttags": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.DefaultTags" + }, + "ec2metadataserviceendpoint": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "ec2metadataserviceendpointmode": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "endpoints": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.MapValue" + }, + "forbiddenaccountids": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "httpproxy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "ignoretags": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.IgnoreTags" + }, + "insecure": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "maxretries": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "profile": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "region": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "retrymode": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "s3useast1regionalendpoint": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "s3usepathstyle": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "secretkey": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sharedconfigfiles": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "sharedcredentialsfiles": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "skipcredentialsvalidation": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "skipmetadataapicheck": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "skipregionvalidation": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "skiprequestingaccountid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "stsregion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "token": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "usedualstackendpoint": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "usefipsendpoint": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "version": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.accessanalyzer.AccessAnalyzer": { + "type": "object", + "properties": { + "analyzers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.accessanalyzer.Analyzer" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.accessanalyzer.Analyzer": { + "type": "object", + "properties": { + "active": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "arn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "findings": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.accessanalyzer.Findings" + } + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.accessanalyzer.Findings": { + "type": "object" + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.APIGateway": { + "type": "object", + "properties": { + "v1": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.APIGateway" + }, + "v2": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.APIGateway" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.API": { + "type": 
"object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "resources": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.Resource" + } + }, + "stages": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.Stage" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.APIGateway": { + "type": "object", + "properties": { + "apis": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.API" + } + }, + "domainnames": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.DomainName" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.AccessLogging": { + "type": "object", + "properties": { + "cloudwatchloggrouparn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.DomainName": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "securitypolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.Method": { + "type": "object", + "properties": { + "apikeyrequired": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "authorizationtype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "httpmethod": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.RESTMethodSettings": { + "type": "object", + "properties": { + "cachedataencrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "cacheenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "method": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.Resource": { + "type": "object", + "properties": { + "methods": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.Method" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.Stage": { + "type": "object", + "properties": { + "accesslogging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.AccessLogging" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "restmethodsettings": { + "type": "array", + "items": { + "type": "object", + 
"$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v1.RESTMethodSettings" + } + }, + "xraytracingenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.API": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "protocoltype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "stages": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.Stage" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.APIGateway": { + "type": "object", + "properties": { + "apis": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.API" + } + }, + "domainnames": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.DomainName" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.AccessLogging": { + "type": "object", + "properties": { + "cloudwatchloggrouparn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.DomainName": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "securitypolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.Stage": { + "type": "object", + "properties": { + "accesslogging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.apigateway.v2.AccessLogging" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.Athena": { + "type": "object", + "properties": { + "databases": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.Database" + } + }, + "workgroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.Workgroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.Database": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.EncryptionConfiguration" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.EncryptionConfiguration": { + "type": "object", + "properties": { + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + 
} + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.Workgroup": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.athena.EncryptionConfiguration" + }, + "enforceconfiguration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.CacheBehaviour": { + "type": "object", + "properties": { + "viewerprotocolpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.Cloudfront": { + "type": "object", + "properties": { + "distributions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.Distribution" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.Distribution": { + "type": "object", + "properties": { + "defaultcachebehaviour": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.CacheBehaviour" + }, + "logging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.Logging" + }, + "orderercachebehaviours": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.CacheBehaviour" + } + }, + "viewercertificate": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.ViewerCertificate" + }, + "wafid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.Logging": { + "type": "object", + "properties": { + "bucket": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudfront.ViewerCertificate": { + "type": "object", + "properties": { + "cloudfrontdefaultcertificate": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "minimumprotocolversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sslsupportmethod": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.CloudTrail": { + "type": "object", + "properties": { + "trails": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.Trail" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.DataResource": { + "type": "object", + "properties": { + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "values": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.EventSelector": { + "type": "object", + "properties": { + "dataresources": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.DataResource" + } + }, + "readwritetype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.Trail": { + "type": "object", + "properties": { + "bucketname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "cloudwatchlogsloggrouparn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "enablelogfilevalidation": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "eventselectors": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudtrail.EventSelector" + } + }, + "islogging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "ismultiregion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.Alarm": { + "type": "object", + "properties": { + "alarmname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "dimensions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.AlarmDimension" + } + }, + "metricname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "metrics": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.MetricDataQuery" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.AlarmDimension": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "value": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.CloudWatch": { + "type": "object", + "properties": { + "alarms": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.Alarm" + } + }, + "loggroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.LogGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.LogGroup": { + "type": "object", + "properties": { + 
"arn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "metricfilters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.MetricFilter" + } + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "retentionindays": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.MetricDataQuery": { + "type": "object", + "properties": { + "expression": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "id": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.cloudwatch.MetricFilter": { + "type": "object", + "properties": { + "filtername": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "filterpattern": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.codebuild.ArtifactSettings": { + "type": "object", + "properties": { + "encryptionenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.codebuild.CodeBuild": { + "type": "object", + "properties": { + "projects": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.codebuild.Project" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.codebuild.Project": { + "type": "object", + "properties": { + "artifactsettings": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.codebuild.ArtifactSettings" + }, + "secondaryartifactsettings": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.codebuild.ArtifactSettings" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.config.Config": { + "type": "object", + "properties": { + "configurationaggregrator": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.config.ConfigurationAggregrator" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.config.ConfigurationAggregrator": { + "type": "object", + "properties": { + "sourceallregions": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.documentdb.Cluster": { + "type": "object", + "properties": { + "backupretentionperiod": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "enabledlogexports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" 
+ } + }, + "identifier": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.documentdb.Instance" + } + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "storageencrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.documentdb.DocumentDB": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.documentdb.Cluster" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.documentdb.Instance": { + "type": "object", + "properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.DAXCluster": { + "type": "object", + "properties": { + "pointintimerecovery": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "serversideencryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.ServerSideEncryption" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.DynamoDB": { + "type": "object", + "properties": { + "daxclusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.DAXCluster" + } + }, + "tables": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.Table" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.ServerSideEncryption": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.Table": { + "type": "object", + "properties": { + "pointintimerecovery": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "serversideencryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.dynamodb.ServerSideEncryption" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.BlockDevice": { + "type": "object", + "properties": { + "encrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.EC2": { + "type": "object", + "properties": { + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Instance" + } + }, + "launchconfigurations": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.LaunchConfiguration" + } + }, + "launchtemplates": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.LaunchTemplate" + } + }, + "networkacls": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.NetworkACL" + } + }, + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.SecurityGroup" + } + }, + "subnets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Subnet" + } + }, + "volumes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Volume" + } + }, + "vpcs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.VPC" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Encryption": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Instance": { + "type": "object", + "properties": { + "ebsblockdevices": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.BlockDevice" + } + }, + "metadataoptions": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.MetadataOptions" + }, + "rootblockdevice": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.BlockDevice" + }, + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.SecurityGroup" + } + }, + "userdata": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.LaunchConfiguration": { + "type": "object", + "properties": { + "associatepublicip": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "ebsblockdevices": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.BlockDevice" + } + }, + "metadataoptions": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.MetadataOptions" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "rootblockdevice": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.BlockDevice" + }, + "userdata": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.LaunchTemplate": { + "type": 
"object", + "properties": { + "instance": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Instance" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.MetadataOptions": { + "type": "object", + "properties": { + "httpendpoint": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "httptokens": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.NetworkACL": { + "type": "object", + "properties": { + "isdefaultrule": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.NetworkACLRule" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.NetworkACLRule": { + "type": "object", + "properties": { + "action": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "cidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.SecurityGroup": { + "type": "object", + "properties": { + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "egressrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.SecurityGroupRule" + } + }, + "ingressrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.SecurityGroupRule" + } + }, + "isdefault": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "vpcid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.SecurityGroupRule": { + "type": "object", + "properties": { + "cidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Subnet": { + "type": "object", + "properties": { + "mappubliciponlaunch": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.VPC": { + "type": "object", + "properties": { + "flowlogsenabled": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "id": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "isdefault": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.SecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Volume": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ec2.Encryption" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.ECR": { + "type": "object", + "properties": { + "repositories": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.Repository" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.Encryption": { + "type": "object", + "properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.ImageScanning": { + "type": "object", + "properties": { + "scanonpush": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.Repository": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.Encryption" + }, + "imagescanning": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecr.ImageScanning" + }, + "imagetagsimmutable": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.Cluster": { + "type": "object", + "properties": { + "settings": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.ClusterSettings" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.ClusterSettings": { + "type": "object", + "properties": { + "containerinsightsenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.ContainerDefinition": { + "type": "object", + "properties": { + "cpu": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "environment": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.EnvVar" + } + }, + "essential": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "image": { + "type": "object", + 
"$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "memory": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "portmappings": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.PortMapping" + } + }, + "privileged": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.ECS": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.Cluster" + } + }, + "taskdefinitions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.TaskDefinition" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.EFSVolumeConfiguration": { + "type": "object", + "properties": { + "transitencryptionenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.EnvVar": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.PortMapping": { + "type": "object", + "properties": { + "containerport": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "hostport": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.TaskDefinition": { + "type": "object", + "properties": { + "containerdefinitions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.ContainerDefinition" + } + }, + "volumes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.Volume" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.Volume": { + "type": "object", + "properties": { + "efsvolumeconfiguration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ecs.EFSVolumeConfiguration" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.efs.EFS": { + "type": "object", + "properties": { + "filesystems": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.efs.FileSystem" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.efs.FileSystem": { + "type": "object", + "properties": { + "encrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.Cluster": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.Encryption" + }, + "logging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.Logging" + }, + "publicaccesscidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "publicaccessenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.EKS": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.Cluster" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.Encryption": { + "type": "object", + "properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "secrets": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.eks.Logging": { + "type": "object", + "properties": { + "api": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "audit": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "authenticator": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "controllermanager": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "scheduler": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.Cluster": { + "type": "object", + "properties": { + "engine": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "nodetype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "snapshotretentionlimit": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.ElastiCache": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.Cluster" + } + }, + "replicationgroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.ReplicationGroup" + } + }, + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.SecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.ReplicationGroup": { + "type": "object", + "properties": { + "atrestencryptionenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "transitencryptionenabled": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticache.SecurityGroup": { + "type": "object", + "properties": { + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.AtRestEncryption": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.Domain": { + "type": "object", + "properties": { + "accesspolicies": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "atrestencryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.AtRestEncryption" + }, + "dedicatedmasterenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "domainname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "endpoint": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.Endpoint" + }, + "logpublishing": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.LogPublishing" + }, + "servicesoftwareoptions": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.ServiceSoftwareOptions" + }, + "transitencryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.TransitEncryption" + }, + "vpcid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.Elasticsearch": { + "type": "object", + "properties": { + "domains": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.Domain" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.Endpoint": { + "type": "object", + "properties": { + "enforcehttps": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "tlspolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.LogPublishing": { + "type": "object", + "properties": { + "auditenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "cloudwatchloggrouparn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.ServiceSoftwareOptions": { + "type": "object", + "properties": { + "currentversion": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "newversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "updateavailable": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "updatestatus": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elasticsearch.TransitEncryption": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.Action": { + "type": "object", + "properties": { + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.ELB": { + "type": "object", + "properties": { + "loadbalancers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.LoadBalancer" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.Listener": { + "type": "object", + "properties": { + "defaultactions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.Action" + } + }, + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "tlspolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.LoadBalancer": { + "type": "object", + "properties": { + "dropinvalidheaderfields": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "internal": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "listeners": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.elb.Listener" + } + }, + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.Cluster": { + "type": "object", + "properties": { + "settings": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.ClusterSettings" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.ClusterSettings": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "releaselabel": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "servicerole": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.EMR": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.Cluster" + } + }, + "securityconfiguration": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.SecurityConfiguration" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.emr.SecurityConfiguration": { + "type": "object", + "properties": { + "configuration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.AccessKey": { + "type": "object", + "properties": { + "accesskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "active": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "creationdate": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + }, + "lastaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Document": { + "type": "object", + "properties": { + "endline": { + "type": "integer" + }, + "explicit": { + "type": "boolean" + }, + "filepath": { + "type": "string" + }, + "fskey": { + "type": "string" + }, + "managed": { + "type": "boolean" + }, + "startline": { + "type": "integer" + }, + "value": { + "type": "string" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Group": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + }, + "users": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.User" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.IAM": { + "type": "object", + "properties": { + "groups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Group" + } + }, + "passwordpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.PasswordPolicy" + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + }, + "roles": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Role" + } + }, + "servercertificates": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.ServerCertificate" + } + }, + "users": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.User" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.MFADevice": { + "type": "object", + 
"properties": { + "isvirtual": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.PasswordPolicy": { + "type": "object", + "properties": { + "maxagedays": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "minimumlength": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "requirelowercase": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "requirenumbers": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "requiresymbols": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "requireuppercase": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "reusepreventioncount": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy": { + "type": "object", + "properties": { + "builtin": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "document": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Document" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Role": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.ServerCertificate": { + "type": "object", + "properties": { + "expiration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.User": { + "type": "object", + "properties": { + "accesskeys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.AccessKey" + } + }, + "groups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Group" + } + }, + "lastaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + }, + "mfadevices": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.MFADevice" + } + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kinesis.Encryption": { + "type": "object", + 
"properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kinesis.Kinesis": { + "type": "object", + "properties": { + "streams": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kinesis.Stream" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kinesis.Stream": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kinesis.Encryption" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kms.KMS": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kms.Key" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.kms.Key": { + "type": "object", + "properties": { + "rotationenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "usage": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Function": { + "type": "object", + "properties": { + "permissions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Permission" + } + }, + "tracing": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Tracing" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Lambda": { + "type": "object", + "properties": { + "functions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Function" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Permission": { + "type": "object", + "properties": { + "principal": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sourcearn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.lambda.Tracing": { + "type": "object", + "properties": { + "mode": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.mq.Broker": { + "type": "object", + "properties": { + "logging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.mq.Logging" + }, + "publicaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.mq.Logging": { + "type": "object", + "properties": { + "audit": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "general": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.mq.MQ": { + "type": "object", + "properties": { + "brokers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.mq.Broker" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.BrokerLogging": { + "type": "object", + "properties": { + "cloudwatch": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.CloudwatchLogging" + }, + "firehose": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.FirehoseLogging" + }, + "s3": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.S3Logging" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.CloudwatchLogging": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.Cluster": { + "type": "object", + "properties": { + "encryptionatrest": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.EncryptionAtRest" + }, + "encryptionintransit": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.EncryptionInTransit" + }, + "logging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.Logging" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.EncryptionAtRest": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyarn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.EncryptionInTransit": { + "type": "object", + "properties": { + "clientbroker": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.FirehoseLogging": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.Logging": { + "type": "object", + "properties": { + "broker": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.BrokerLogging" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.MSK": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.Cluster" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.msk.S3Logging": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.neptune.Cluster": { + "type": 
"object", + "properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "logging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.neptune.Logging" + }, + "storageencrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.neptune.Logging": { + "type": "object", + "properties": { + "audit": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.neptune.Neptune": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.neptune.Cluster" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Classic": { + "type": "object", + "properties": { + "dbsecuritygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.DBSecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Cluster": { + "type": "object", + "properties": { + "availabilityzones": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "backupretentionperioddays": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "deletionprotection": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Encryption" + }, + "engine": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.ClusterInstance" + } + }, + "latestrestorabletime": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + }, + "performanceinsights": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.PerformanceInsights" + }, + "publicaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "replicationsourcearn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "skipfinalsnapshot": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.ClusterInstance": { + "type": "object", + "properties": { + "clusteridentifier": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "instance": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Instance" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.DBParameterGroupsList": { + "type": "object", + "properties": { + 
"dbparametergroupname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.DBSecurityGroup": { + "type": "object" + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.DBSnapshotAttributes": { + "type": "object", + "properties": { + "attributevalues": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Encryption": { + "type": "object", + "properties": { + "encryptstorage": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Instance": { + "type": "object", + "properties": { + "autominorversionupgrade": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "backupretentionperioddays": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "dbinstancearn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "dbinstanceidentifier": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "dbparametergroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.DBParameterGroupsList" + } + }, + "deletionprotection": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "enabledcloudwatchlogsexports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Encryption" + }, + "engine": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "engineversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "iamauthenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "latestrestorabletime": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + }, + "multiaz": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "performanceinsights": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.PerformanceInsights" + }, + "publicaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "publiclyaccessible": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "readreplicadbinstanceidentifiers": { + "type": "array", + "items": { + 
"type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "replicationsourcearn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "storageencrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "taglist": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.TagList" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.ParameterGroups": { + "type": "object", + "properties": { + "dbparametergroupfamily": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "dbparametergroupname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "parameters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Parameters" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Parameters": { + "type": "object", + "properties": { + "parametername": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "parametervalue": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.PerformanceInsights": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.RDS": { + "type": "object", + "properties": { + "classic": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Classic" + }, + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Cluster" + } + }, + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Instance" + } + }, + "parametergroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.ParameterGroups" + } + }, + "snapshots": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Snapshots" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.Snapshots": { + "type": "object", + "properties": { + "dbsnapshotarn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "dbsnapshotidentifier": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "encrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + 
"snapshotattributes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.DBSnapshotAttributes" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.rds.TagList": { + "type": "object" + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.Cluster": { + "type": "object", + "properties": { + "allowversionupgrade": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "automatedsnapshotretentionperiod": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "clusteridentifier": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.Encryption" + }, + "endpoint": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.EndPoint" + }, + "loggingenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "masterusername": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "nodetype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "numberofnodes": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "publiclyaccessible": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "subnetgroupname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "vpcid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.ClusterParameter": { + "type": "object", + "properties": { + "parametername": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "parametervalue": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.Encryption": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.EndPoint": { + "type": "object", + "properties": { + "port": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.Redshift": { + "type": "object", + "properties": { + "clusterparameters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.ClusterParameter" + } + }, + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.Cluster" + } + }, + "reservednodes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.ReservedNode" + } + }, + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.SecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.ReservedNode": { + "type": "object", + "properties": { + "nodetype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.redshift.SecurityGroup": { + "type": "object", + "properties": { + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Bucket": { + "type": "object", + "properties": { + "accelerateconfigurationstatus": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "acl": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "bucketlocation": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "bucketpolicies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + }, + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Encryption" + }, + "lifecycleconfiguration": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Rules" + } + }, + "logging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Logging" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "objects": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Contents" + } + }, + "publicaccessblock": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.PublicAccessBlock" + }, + "versioning": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Versioning" + }, + "website": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Website" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Contents": { + "type": "object" + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Encryption": { + "type": "object", + "properties": { + "algorithm": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + 
"github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Logging": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "targetbucket": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.PublicAccessBlock": { + "type": "object", + "properties": { + "blockpublicacls": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "blockpublicpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "ignorepublicacls": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "restrictpublicbuckets": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Rules": { + "type": "object", + "properties": { + "status": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.S3": { + "type": "object", + "properties": { + "buckets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Bucket" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Versioning": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "mfadelete": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.s3.Website": { + "type": "object" + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.API": { + "type": "object", + "properties": { + "accesslogging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.AccessLogging" + }, + "domainconfiguration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.DomainConfiguration" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "restmethodsettings": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.RESTMethodSettings" + }, + "tracingenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.AccessLogging": { + "type": "object", + "properties": { + "cloudwatchloggrouparn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.Application": { + "type": "object", + "properties": { + "location": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.Location" + }, + "locationpath": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, 
+ "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.DomainConfiguration": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "securitypolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.Function": { + "type": "object", + "properties": { + "functionname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "managedpolicies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + }, + "tracing": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.HttpAPI": { + "type": "object", + "properties": { + "accesslogging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.AccessLogging" + }, + "defaultroutesettings": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.RouteSettings" + }, + "domainconfiguration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.DomainConfiguration" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.Location": { + "type": "object", + "properties": { + "applicationid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "semanticversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.LoggingConfiguration": { + "type": "object", + "properties": { + "loggingenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.RESTMethodSettings": { + "type": "object", + "properties": { + "cachedataencrypted": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "datatraceenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "loggingenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "metricsenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.RouteSettings": { + "type": "object", + "properties": { + "datatraceenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "detailedmetricsenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + 
"loggingenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.SAM": { + "type": "object", + "properties": { + "apis": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.API" + } + }, + "applications": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.Application" + } + }, + "functions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.Function" + } + }, + "httpapis": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.HttpAPI" + } + }, + "simpletables": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.SimpleTable" + } + }, + "statemachines": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.StateMachine" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.SSESpecification": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "kmsmasterkeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.SimpleTable": { + "type": "object", + "properties": { + "ssespecification": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.SSESpecification" + }, + "tablename": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.StateMachine": { + "type": "object", + "properties": { + "loggingconfiguration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.LoggingConfiguration" + }, + "managedpolicies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + }, + "tracing": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.TracingConfiguration" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sam.TracingConfiguration": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sns.Encryption": { + "type": "object", + "properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + 
"github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sns.SNS": { + "type": "object", + "properties": { + "topics": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sns.Topic" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sns.Topic": { + "type": "object", + "properties": { + "arn": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sns.Encryption" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sqs.Encryption": { + "type": "object", + "properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "managedencryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sqs.Queue": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sqs.Encryption" + }, + "policies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.iam.Policy" + } + }, + "queueurl": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sqs.SQS": { + "type": "object", + "properties": { + "queues": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.sqs.Queue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ssm.SSM": { + "type": "object", + "properties": { + "secrets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ssm.Secret" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.ssm.Secret": { + "type": "object", + "properties": { + "kmskeyid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.Encryption": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.Volume": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.Encryption" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.WorkSpace": { + "type": "object", + "properties": { + "rootvolume": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.Volume" + }, + "uservolume": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.Volume" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.WorkSpaces": { + "type": "object", + "properties": { + "workspaces": { + "type": "array", + 
"items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.aws.workspaces.WorkSpace" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.Azure": { + "type": "object", + "properties": { + "appservice": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.AppService" + }, + "authorization": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.authorization.Authorization" + }, + "compute": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.Compute" + }, + "container": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.Container" + }, + "database": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.Database" + }, + "datafactory": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datafactory.DataFactory" + }, + "datalake": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datalake.DataLake" + }, + "keyvault": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.KeyVault" + }, + "monitor": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.monitor.Monitor" + }, + "network": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.Network" + }, + "securitycenter": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.securitycenter.SecurityCenter" + }, + "storage": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Storage" + }, + "synapse": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.synapse.Synapse" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.AppService": { + "type": "object", + "properties": { + "functionapps": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.FunctionApp" + } + }, + "services": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.FunctionApp": { + "type": "object", + "properties": { + "httpsonly": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service": { + "type": "object", + "properties": { + "authentication": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service.Authentication" + }, + "enableclientcert": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "identity": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service.Identity" + }, + "site": { + "type": "object", + 
"$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service.Site" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service.Authentication": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service.Identity": { + "type": "object", + "properties": { + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.appservice.Service.Site": { + "type": "object", + "properties": { + "enablehttp2": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "minimumtlsversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.authorization.Authorization": { + "type": "object", + "properties": { + "roledefinitions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.authorization.RoleDefinition" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.authorization.Permission": { + "type": "object", + "properties": { + "actions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.authorization.RoleDefinition": { + "type": "object", + "properties": { + "assignablescopes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "permissions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.authorization.Permission" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.Compute": { + "type": "object", + "properties": { + "linuxvirtualmachines": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.LinuxVirtualMachine" + } + }, + "manageddisks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.ManagedDisk" + } + }, + "windowsvirtualmachines": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.WindowsVirtualMachine" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.Encryption": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.LinuxVirtualMachine": { + "type": "object", + "properties": { + "osprofilelinuxconfig": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.OSProfileLinuxConfig" + }, + "virtualmachine": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.VirtualMachine" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.ManagedDisk": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.Encryption" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.OSProfileLinuxConfig": { + "type": "object", + "properties": { + "disablepasswordauthentication": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.VirtualMachine": { + "type": "object", + "properties": { + "customdata": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.WindowsVirtualMachine": { + "type": "object", + "properties": { + "virtualmachine": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.compute.VirtualMachine" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.AddonProfile": { + "type": "object", + "properties": { + "omsagent": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.OMSAgent" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.Container": { + "type": "object", + "properties": { + "kubernetesclusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.KubernetesCluster" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.KubernetesCluster": { + "type": "object", + "properties": { + "addonprofile": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.AddonProfile" + }, + "apiserverauthorizedipranges": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "enableprivatecluster": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "networkprofile": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.NetworkProfile" + }, + "rolebasedaccesscontrol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.RoleBasedAccessControl" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.NetworkProfile": { + "type": "object", + "properties": { + "networkpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.OMSAgent": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.container.RoleBasedAccessControl": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.Database": { + "type": "object", + "properties": { + "mariadbservers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.MariaDBServer" + } + }, + "mssqlservers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.MSSQLServer" + } + }, + "mysqlservers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.MySQLServer" + } + }, + "postgresqlservers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.PostgreSQLServer" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.ExtendedAuditingPolicy": { + "type": "object", + "properties": { + "retentionindays": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.FirewallRule": { + "type": "object", + "properties": { + "endip": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "startip": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.MSSQLServer": { + "type": "object", + "properties": { + "extendedauditingpolicies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.ExtendedAuditingPolicy" + } + }, + "securityalertpolicies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.SecurityAlertPolicy" + } + }, + "server": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.Server" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.MariaDBServer": { + "type": "object", + "properties": { + "server": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.Server" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.MySQLServer": { + "type": "object", + "properties": { + "server": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.Server" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.PostgreSQLServer": { + "type": "object", + "properties": { + "config": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.PostgresSQLConfig" + }, + "server": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.Server" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.PostgresSQLConfig": { + "type": "object", + "properties": { + "connectionthrottling": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "logcheckpoints": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "logconnections": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.SecurityAlertPolicy": { + "type": "object", + "properties": { + "disabledalerts": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "emailaccountadmins": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "emailaddresses": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.Server": { + "type": "object", + "properties": { + "enablepublicnetworkaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "enablesslenforcement": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "firewallrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.database.FirewallRule" + } + }, + "minimumtlsversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datafactory.DataFactory": { + "type": "object", + "properties": { + "datafactories": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datafactory.Factory" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datafactory.Factory": { + "type": "object", + "properties": { + "enablepublicnetwork": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datalake.DataLake": { + "type": "object", + "properties": { + "stores": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datalake.Store" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.datalake.Store": { + "type": "object", + "properties": { + "enableencryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.Key": { + "type": "object", + "properties": { + "expirydate": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.KeyVault": { + "type": "object", + "properties": { + "vaults": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.Vault" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.NetworkACLs": { + "type": "object", + "properties": { + 
"defaultaction": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.Secret": { + "type": "object", + "properties": { + "contenttype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "expirydate": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.Vault": { + "type": "object", + "properties": { + "enablepurgeprotection": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "keys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.Key" + } + }, + "networkacls": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.NetworkACLs" + }, + "secrets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.keyvault.Secret" + } + }, + "softdeleteretentiondays": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.monitor.LogProfile": { + "type": "object", + "properties": { + "categories": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "locations": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "retentionpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.monitor.RetentionPolicy" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.monitor.Monitor": { + "type": "object", + "properties": { + "logprofiles": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.monitor.LogProfile" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.monitor.RetentionPolicy": { + "type": "object", + "properties": { + "days": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.Network": { + "type": "object", + "properties": { + "networkwatcherflowlogs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.NetworkWatcherFlowLog" + } + }, + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.SecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.NetworkWatcherFlowLog": { + "type": "object", + "properties": { + "retentionpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.RetentionPolicy" + 
} + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.PortRange": { + "type": "object", + "properties": { + "end": { + "type": "integer" + }, + "start": { + "type": "integer" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.RetentionPolicy": { + "type": "object", + "properties": { + "days": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.SecurityGroup": { + "type": "object", + "properties": { + "rules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.SecurityGroupRule" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.SecurityGroupRule": { + "type": "object", + "properties": { + "allow": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "destinationaddresses": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "destinationports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.PortRange" + } + }, + "outbound": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sourceaddresses": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "sourceports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.network.PortRange" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.securitycenter.Contact": { + "type": "object", + "properties": { + "enablealertnotifications": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "phone": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.securitycenter.SecurityCenter": { + "type": "object", + "properties": { + "contacts": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.securitycenter.Contact" + } + }, + "subscriptions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.securitycenter.SubscriptionPricing" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.securitycenter.SubscriptionPricing": { + "type": "object", + "properties": { + "tier": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Account": { + "type": "object", + "properties": { + "containers": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Container" + } + }, + "enforcehttps": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "minimumtlsversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "networkrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.NetworkRule" + } + }, + "queueproperties": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.QueueProperties" + }, + "queues": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Queue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Container": { + "type": "object", + "properties": { + "publicaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.NetworkRule": { + "type": "object", + "properties": { + "allowbydefault": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "bypass": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Queue": { + "type": "object", + "properties": { + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.QueueProperties": { + "type": "object", + "properties": { + "enablelogging": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Storage": { + "type": "object", + "properties": { + "accounts": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.storage.Account" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.synapse.Synapse": { + "type": "object", + "properties": { + "workspaces": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.synapse.Workspace" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.azure.synapse.Workspace": { + "type": "object", + "properties": { + "enablemanagedvirtualnetwork": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.cloudstack.CloudStack": { + "type": "object", + "properties": { + "compute": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.cloudstack.compute.Compute" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.cloudstack.compute.Compute": { + "type": "object", + "properties": { + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.cloudstack.compute.Instance" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.cloudstack.compute.Instance": { + "type": "object", + "properties": { + "userdata": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.DigitalOcean": { + "type": "object", + "properties": { + "compute": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.Compute" + }, + "spaces": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Spaces" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.Compute": { + "type": "object", + "properties": { + "droplets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.Droplet" + } + }, + "firewalls": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.Firewall" + } + }, + "kubernetesclusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.KubernetesCluster" + } + }, + "loadbalancers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.LoadBalancer" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.Droplet": { + "type": "object", + "properties": { + "sshkeys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.Firewall": { + "type": "object", + "properties": { + "inboundrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.InboundFirewallRule" + } + }, + "outboundrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.OutboundFirewallRule" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.ForwardingRule": { + "type": "object", + "properties": { + "entryprotocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.InboundFirewallRule": { + "type": "object", + "properties": { + "sourceaddresses": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.KubernetesCluster": { + "type": "object", + "properties": { + "autoupgrade": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "surgeupgrade": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + 
"github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.LoadBalancer": { + "type": "object", + "properties": { + "forwardingrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.ForwardingRule" + } + }, + "redirecthttptohttps": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.compute.OutboundFirewallRule": { + "type": "object", + "properties": { + "destinationaddresses": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Bucket": { + "type": "object", + "properties": { + "acl": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "forcedestroy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "objects": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Object" + } + }, + "versioning": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Versioning" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Object": { + "type": "object", + "properties": { + "acl": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Spaces": { + "type": "object", + "properties": { + "buckets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Bucket" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.digitalocean.spaces.Versioning": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.BranchProtection": { + "type": "object", + "properties": { + "requiresignedcommits": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.EnvironmentSecret": { + "type": "object", + "properties": { + "encryptedvalue": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "environment": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "plaintextvalue": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "repository": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "secretname": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + 
"github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.GitHub": { + "type": "object", + "properties": { + "branchprotections": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.BranchProtection" + } + }, + "environmentsecrets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.EnvironmentSecret" + } + }, + "repositories": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.Repository" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.github.Repository": { + "type": "object", + "properties": { + "archived": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "public": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "vulnerabilityalerts": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.Google": { + "type": "object", + "properties": { + "bigquery": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.bigquery.BigQuery" + }, + "compute": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Compute" + }, + "dns": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.DNS" + }, + "gke": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.GKE" + }, + "iam": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.IAM" + }, + "kms": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.kms.KMS" + }, + "sql": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.SQL" + }, + "storage": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.storage.Storage" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.bigquery.AccessGrant": { + "type": "object", + "properties": { + "domain": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "role": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "specialgroup": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.bigquery.BigQuery": { + "type": "object", + "properties": { + "datasets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.bigquery.Dataset" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.bigquery.Dataset": { + "type": "object", + "properties": { + "accessgrants": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.bigquery.AccessGrant" + } + }, + "id": { + "type": "object", + 
"$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Compute": { + "type": "object", + "properties": { + "disks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Disk" + } + }, + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Instance" + } + }, + "networks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Network" + } + }, + "projectmetadata": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.ProjectMetadata" + }, + "sslpolicies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.SSLPolicy" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Disk": { + "type": "object", + "properties": { + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.DiskEncryption" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.DiskEncryption": { + "type": "object", + "properties": { + "kmskeylink": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "rawkey": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BytesValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.EgressRule": { + "type": "object", + "properties": { + "destinationranges": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "firewallrule": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.FirewallRule" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Firewall": { + "type": "object", + "properties": { + "egressrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.EgressRule" + } + }, + "ingressrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.IngressRule" + } + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sourcetags": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "targettags": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.FirewallRule": { + "type": "object", + "properties": { + "enforced": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "isallow": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "ports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + }, + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.IngressRule": { + "type": "object", + "properties": { + "firewallrule": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.FirewallRule" + }, + "sourceranges": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Instance": { + "type": "object", + "properties": { + "attacheddisks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Disk" + } + }, + "bootdisks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Disk" + } + }, + "canipforward": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "enableprojectsshkeyblocking": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "enableserialport": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "networkinterfaces": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.NetworkInterface" + } + }, + "osloginenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "serviceaccount": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.ServiceAccount" + }, + "shieldedvm": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.ShieldedVMConfig" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Network": { + "type": "object", + "properties": { + "firewall": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Firewall" + }, + "subnetworks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.SubNetwork" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.NetworkInterface": { + "type": "object", + "properties": { + "haspublicip": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "natip": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "network": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.Network" + }, + "subnetwork": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.SubNetwork" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.ProjectMetadata": { + "type": "object", + "properties": { + "enableoslogin": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.SSLPolicy": { + "type": "object", + "properties": { + "minimumtlsversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "profile": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.ServiceAccount": { + "type": "object", + "properties": { + "email": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "isdefault": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "scopes": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.ShieldedVMConfig": { + "type": "object", + "properties": { + "integritymonitoringenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "securebootenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "vtpmenabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.compute.SubNetwork": { + "type": "object", + "properties": { + "enableflowlogs": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "purpose": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.DNS": { + "type": "object", + "properties": { + "managedzones": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.ManagedZone" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.DNSSec": { + "type": "object", + "properties": { + "defaultkeyspecs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.KeySpecs" + } + }, + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.KeySpecs": { + "type": "object", + "properties": { + "algorithm": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "keytype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.ManagedZone": { + "type": "object", + "properties": { + "dnssec": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.dns.DNSSec" + }, + "visibility": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.ClientCertificate": { + "type": "object", + "properties": { + "issuecertificate": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.Cluster": { + "type": "object", + "properties": { + "datapathprovider": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "enableautpilot": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "enablelegacyabac": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "enableshieldednodes": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "ipallocationpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.IPAllocationPolicy" + }, + "loggingservice": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "masterauth": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.MasterAuth" + }, + "masterauthorizednetworks": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.MasterAuthorizedNetworks" + }, + "monitoringservice": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "networkpolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.NetworkPolicy" + }, + "nodeconfig": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.NodeConfig" + }, + "nodepools": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.NodePool" + } + }, + "privatecluster": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.PrivateCluster" + }, + "removedefaultnodepool": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "resourcelabels": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.MapValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.GKE": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.Cluster" + } + } + } + }, + 
"github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.IPAllocationPolicy": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.Management": { + "type": "object", + "properties": { + "enableautorepair": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "enableautoupgrade": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.MasterAuth": { + "type": "object", + "properties": { + "clientcertificate": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.ClientCertificate" + }, + "password": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "username": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.MasterAuthorizedNetworks": { + "type": "object", + "properties": { + "cidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.NetworkPolicy": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.NodeConfig": { + "type": "object", + "properties": { + "enablelegacyendpoints": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "imagetype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "serviceaccount": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "workloadmetadataconfig": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.WorkloadMetadataConfig" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.NodePool": { + "type": "object", + "properties": { + "management": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.Management" + }, + "nodeconfig": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.NodeConfig" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.PrivateCluster": { + "type": "object", + "properties": { + "enableprivatenodes": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.gke.WorkloadMetadataConfig": { + "type": "object", + "properties": { + "nodemetadata": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + 
"github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Binding": { + "type": "object", + "properties": { + "includesdefaultserviceaccount": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "members": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "role": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Folder": { + "type": "object", + "properties": { + "bindings": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Binding" + } + }, + "folders": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Folder" + } + }, + "members": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Member" + } + }, + "projects": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Project" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.IAM": { + "type": "object", + "properties": { + "organizations": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Organization" + } + }, + "workloadidentitypoolproviders": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.WorkloadIdentityPoolProvider" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Member": { + "type": "object", + "properties": { + "defaultserviceaccount": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "member": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "role": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Organization": { + "type": "object", + "properties": { + "bindings": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Binding" + } + }, + "folders": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Folder" + } + }, + "members": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Member" + } + }, + "projects": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Project" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Project": { + "type": "object", + "properties": { + "autocreatenetwork": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "bindings": { + "type": "array", + "items": { + "type": 
"object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Binding" + } + }, + "members": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Member" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.WorkloadIdentityPoolProvider": { + "type": "object", + "properties": { + "attributecondition": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "workloadidentitypoolid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "workloadidentitypoolproviderid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.kms.KMS": { + "type": "object", + "properties": { + "keyrings": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.kms.KeyRing" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.kms.Key": { + "type": "object", + "properties": { + "rotationperiodseconds": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.kms.KeyRing": { + "type": "object", + "properties": { + "keys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.kms.Key" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.Backups": { + "type": "object", + "properties": { + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.DatabaseInstance": { + "type": "object", + "properties": { + "databaseversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "isreplica": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "settings": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.Settings" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.Flags": { + "type": "object", + "properties": { + "containeddatabaseauthentication": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "crossdbownershipchaining": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "localinfile": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "logcheckpoints": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "logconnections": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "logdisconnections": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "loglockwaits": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "logmindurationstatement": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "logminmessages": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "logtempfilesize": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.IPConfiguration": { + "type": "object", + "properties": { + "authorizednetworks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.IPConfiguration.AuthorizedNetworks" + } + }, + "enableipv4": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "requiretls": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.IPConfiguration.AuthorizedNetworks": { + "type": "object", + "properties": { + "cidr": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.SQL": { + "type": "object", + "properties": { + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.DatabaseInstance" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.Settings": { + "type": "object", + "properties": { + "backups": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.Backups" + }, + "flags": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.Flags" + }, + "ipconfiguration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.sql.IPConfiguration" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.storage.Bucket": { + "type": "object", + "properties": { + "bindings": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Binding" + } + }, + "enableuniformbucketlevelaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "encryption": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.storage.BucketEncryption" + }, + "location": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "members": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.iam.Member" + } + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.storage.BucketEncryption": { + "type": "object", + "properties": { + "defaultkmskeyname": { + "type": 
"object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.storage.Storage": { + "type": "object", + "properties": { + "buckets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.google.storage.Bucket" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Egress": { + "type": "object", + "properties": { + "destinationcidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "ports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Port" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Ingress": { + "type": "object", + "properties": { + "ports": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Port" + } + }, + "sourcecidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Kubernetes": { + "type": "object", + "properties": { + "networkpolicies": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.NetworkPolicy" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.NetworkPolicy": { + "type": "object", + "properties": { + "spec": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.NetworkPolicySpec" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.NetworkPolicySpec": { + "type": "object", + "properties": { + "egress": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Egress" + }, + "ingress": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Ingress" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.kubernetes.Port": { + "type": "object", + "properties": { + "number": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.Nifcloud": { + "type": "object", + "properties": { + "computing": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.Computing" + }, + "dns": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.dns.DNS" + }, + "nas": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.nas.NAS" + }, + "network": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.Network" + }, + "rdb": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.rdb.RDB" + }, + "sslcertificate": { + 
"type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.sslcertificate.SSLCertificate" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.Computing": { + "type": "object", + "properties": { + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.Instance" + } + }, + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.SecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.Instance": { + "type": "object", + "properties": { + "networkinterfaces": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.NetworkInterface" + } + }, + "securitygroup": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.NetworkInterface": { + "type": "object", + "properties": { + "networkid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.SecurityGroup": { + "type": "object", + "properties": { + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "egressrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.SecurityGroupRule" + } + }, + "ingressrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.SecurityGroupRule" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.computing.SecurityGroupRule": { + "type": "object", + "properties": { + "cidr": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.dns.DNS": { + "type": "object", + "properties": { + "records": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.dns.Record" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.dns.Record": { + "type": "object", + "properties": { + "record": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "type": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.nas.NAS": { + "type": "object", + "properties": { + "nasinstances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.nas.NASInstance" + } + }, + "nassecuritygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.nas.NASSecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.nas.NASInstance": { + "type": "object", + "properties": { + "networkid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.nas.NASSecurityGroup": { + "type": "object", + "properties": { + "cidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.ElasticLoadBalancer": { + "type": "object", + "properties": { + "listeners": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.ElasticLoadBalancerListener" + } + }, + "networkinterfaces": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.NetworkInterface" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.ElasticLoadBalancerListener": { + "type": "object", + "properties": { + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.LoadBalancer": { + "type": "object", + "properties": { + "listeners": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.LoadBalancerListener" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.LoadBalancerListener": { + "type": "object", + "properties": { + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "tlspolicy": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.Network": { + "type": "object", + "properties": { + "elasticloadbalancers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.ElasticLoadBalancer" + } + }, + "loadbalancers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.LoadBalancer" + } + }, + "routers": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.Router" + } + }, + "vpngateways": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.VpnGateway" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.NetworkInterface": { + "type": "object", + "properties": { + "isvipnetwork": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "networkid": { + "type": "object", + 
"$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.Router": { + "type": "object", + "properties": { + "networkinterfaces": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.NetworkInterface" + } + }, + "securitygroup": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.network.VpnGateway": { + "type": "object", + "properties": { + "securitygroup": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.rdb.DBInstance": { + "type": "object", + "properties": { + "backupretentionperioddays": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "engine": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "engineversion": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "networkid": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "publicaccess": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.rdb.DBSecurityGroup": { + "type": "object", + "properties": { + "cidrs": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + }, + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.rdb.RDB": { + "type": "object", + "properties": { + "dbinstances": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.rdb.DBInstance" + } + }, + "dbsecuritygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.rdb.DBSecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.sslcertificate.SSLCertificate": { + "type": "object", + "properties": { + "servercertificates": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.sslcertificate.ServerCertificate" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.nifcloud.sslcertificate.ServerCertificate": { + "type": "object", + "properties": { + "expiration": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Compute": { + "type": "object", + "properties": { + "firewall": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Firewall" + }, + "instances": { + "type": "array", + "items": { + "type": "object", + "$ref": 
"#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Instance" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Firewall": { + "type": "object", + "properties": { + "allowrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.FirewallRule" + } + }, + "denyrules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.FirewallRule" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.FirewallRule": { + "type": "object", + "properties": { + "destination": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "destinationport": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "enabled": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "source": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "sourceport": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Instance": { + "type": "object", + "properties": { + "adminpassword": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Networking": { + "type": "object", + "properties": { + "securitygroups": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.SecurityGroup" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.OpenStack": { + "type": "object", + "properties": { + "compute": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Compute" + }, + "networking": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.Networking" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.SecurityGroup": { + "type": "object", + "properties": { + "description": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "name": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.SecurityGroupRule" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.openstack.SecurityGroupRule": { + "type": "object", + "properties": { + "cidr": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + }, + "ethertype": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "isingress": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue" + }, + "portmax": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "portmin": { + 
"type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue" + }, + "protocol": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.oracle.AddressReservation": { + "type": "object", + "properties": { + "pool": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.oracle.Compute": { + "type": "object", + "properties": { + "addressreservations": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.oracle.AddressReservation" + } + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.oracle.Oracle": { + "type": "object", + "properties": { + "compute": { + "type": "object", + "$ref": "#/definitions/github.aaakk.us.kg.aquasecurity.defsec.pkg.providers.oracle.Compute" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BoolValue": { + "type": "object", + "properties": { + "endline": { + "type": "integer" + }, + "explicit": { + "type": "boolean" + }, + "filepath": { + "type": "string" + }, + "fskey": { + "type": "string" + }, + "managed": { + "type": "boolean" + }, + "resource": { + "type": "string" + }, + "sourceprefix": { + "type": "string" + }, + "startline": { + "type": "integer" + }, + "value": { + "type": "boolean" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.types.BytesValue": { + "type": "object", + "properties": { + "endline": { + "type": "integer" + }, + "explicit": { + "type": "boolean" + }, + "filepath": { + "type": "string" + }, + "fskey": { + "type": "string" + }, + "managed": { + "type": "boolean" + }, + "resource": { + "type": "string" + }, + "sourceprefix": { + "type": "string" + }, + "startline": { + "type": "integer" + }, + "value": { + "type": "string" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.types.IntValue": { + "type": "object", + "properties": { + "endline": { + "type": "integer" + }, + "explicit": { + "type": "boolean" + }, + "filepath": { + "type": "string" + }, + "fskey": { + "type": "string" + }, + "managed": { + "type": "boolean" + }, + "resource": { + "type": "string" + }, + "sourceprefix": { + "type": "string" + }, + "startline": { + "type": "integer" + }, + "value": { + "type": "integer" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.types.MapValue": { + "type": "object", + "properties": { + "endline": { + "type": "integer" + }, + "explicit": { + "type": "boolean" + }, + "filepath": { + "type": "string" + }, + "fskey": { + "type": "string" + }, + "managed": { + "type": "boolean" + }, + "resource": { + "type": "string" + }, + "sourceprefix": { + "type": "string" + }, + "startline": { + "type": "integer" + }, + "value": { + "type": "object" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.types.StringValue": { + "type": "object", + "properties": { + "endline": { + "type": "integer" + }, + "explicit": { + "type": "boolean" + }, + "filepath": { + "type": "string" + }, + "fskey": { + "type": "string" + }, + "managed": { + "type": "boolean" + }, + "resource": { + "type": "string" + }, + "sourceprefix": { + "type": "string" + }, + "startline": { + "type": "integer" + }, + "value": { + "type": "string" + } + } + }, + "github.aaakk.us.kg.aquasecurity.defsec.pkg.types.TimeValue": { + "type": "object", + "properties": { + "endline": { 
+ "type": "integer" + }, + "explicit": { + "type": "boolean" + }, + "filepath": { + "type": "string" + }, + "fskey": { + "type": "string" + }, + "managed": { + "type": "boolean" + }, + "resource": { + "type": "string" + }, + "sourceprefix": { + "type": "string" + }, + "startline": { + "type": "integer" + }, + "value": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/pkg/rego/schemas/dockerfile.json b/pkg/rego/schemas/dockerfile.json new file mode 100644 index 000000000000..d769cb195bae --- /dev/null +++ b/pkg/rego/schemas/dockerfile.json @@ -0,0 +1,70 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/aquasecurity/trivy-policies/blob/main/pkg/rego/schemas/dockerfile.json", + "type": "object", + "properties": { + "Stages": { + "type": "array", + "items": { + "$ref": "#/$defs/stage" + } + } + }, + "$defs": { + "stage": { + "type": "object", + "properties": { + "Name": { + "type": "string" + }, + "Commands": { + "type": "array", + "items": { + "$ref": "#/$defs/command" + } + } + } + }, + "command": { + "type": "object", + "properties": { + "Flags": { + "type": "array", + "items": { + "type": "string" + } + }, + "Value": { + "type": "array", + "items": { + "type": "string" + } + }, + "Cmd": { + "type": "string" + }, + "SubCmd": { + "type": "string" + }, + "Original": { + "type": "string" + }, + "Path": { + "type": "string" + }, + "JSON": { + "type": "boolean" + }, + "Stage": { + "type": "integer" + }, + "StartLine": { + "type": "integer" + }, + "EndLine": { + "type": "integer" + } + } + } + } +} \ No newline at end of file diff --git a/pkg/rego/schemas/kubernetes.json b/pkg/rego/schemas/kubernetes.json new file mode 100644 index 000000000000..1975944b7790 --- /dev/null +++ b/pkg/rego/schemas/kubernetes.json @@ -0,0 +1,51 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/aquasecurity/trivy-policies/blob/main/pkg/rego/schemas/kubernetes.json", + "type": "object", + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "spec": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "apiGroups": { + "type": "array", + "items": { + "type": "string" + } + }, + "resources": { + "type": "array", + "items": { + "type": "string" + } + }, + "resourceNames": { + "type": "array", + "items": { + "type": "string" + } + }, + "verbs": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/pkg/rego/schemas/rbac.json b/pkg/rego/schemas/rbac.json new file mode 100644 index 000000000000..c251890f91fd --- /dev/null +++ b/pkg/rego/schemas/rbac.json @@ -0,0 +1,51 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://github.com/aquasecurity/trivy-policies/blob/main/pkg/rego/schemas/rbac.json", + "type": "object", + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "type": "object" + }, + "spec": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "apiGroups": { + "type": "array", + "items": { + "type": "string" + } + }, + "resources": { + "type": "array", + "items": { + "type": "string" + } + }, + "resourceNames": { + "type": "array", + "items": { + "type": "string" + } + }, + "verbs": { + "type": "array", + "items": { + 
"type": "string" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/pkg/rego/schemas/schemas.go b/pkg/rego/schemas/schemas.go new file mode 100644 index 000000000000..ce311ce4ea6c --- /dev/null +++ b/pkg/rego/schemas/schemas.go @@ -0,0 +1,16 @@ +package schemas + +import ( + "github.com/aquasecurity/trivy/pkg/types" +) + +var SchemaMap = map[types.Source]Schema{ + types.SourceDefsec: Cloud, + types.SourceCloud: Cloud, + types.SourceKubernetes: Kubernetes, + types.SourceRbac: Kubernetes, + types.SourceDockerfile: Dockerfile, + types.SourceTOML: Anything, + types.SourceYAML: Anything, + types.SourceJSON: Anything, +} diff --git a/pkg/rego/store.go b/pkg/rego/store.go new file mode 100644 index 000000000000..127b1d8dd647 --- /dev/null +++ b/pkg/rego/store.go @@ -0,0 +1,48 @@ +package rego + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/open-policy-agent/opa/loader" + "github.com/open-policy-agent/opa/storage" +) + +// initialise a store populated with OPA data files found in dataPaths +func initStore(dataFS fs.FS, dataPaths, namespaces []string) (storage.Store, error) { + // FilteredPaths will recursively find all file paths that contain a valid document + // extension from the given list of data paths. + allDocumentPaths, _ := loader.FilteredPathsFS(dataFS, dataPaths, func(abspath string, info os.FileInfo, depth int) bool { + if info.IsDir() { + return false // filter in, include + } + ext := strings.ToLower(filepath.Ext(info.Name())) + for _, filter := range []string{ + ".yaml", + ".yml", + ".json", + } { + if filter == ext { + return false // filter in, include + } + } + return true // filter out, exclude + }) + + documents, err := loader.NewFileLoader().WithFS(dataFS).All(allDocumentPaths) + if err != nil { + return nil, fmt.Errorf("load documents: %w", err) + } + + // pass all namespaces so that rego rule can refer to namespaces as data.namespaces + documents.Documents["namespaces"] = namespaces + + store, err := documents.Store() + if err != nil { + return nil, fmt.Errorf("get documents store: %w", err) + } + return store, nil +} diff --git a/pkg/rego/testdata/policies/._sysfile.rego b/pkg/rego/testdata/policies/._sysfile.rego new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pkg/rego/testdata/policies/invalid.rego b/pkg/rego/testdata/policies/invalid.rego new file mode 100644 index 000000000000..a2ef3607bc70 --- /dev/null +++ b/pkg/rego/testdata/policies/invalid.rego @@ -0,0 +1,8 @@ +# METADATA +# schemas: +# - input: schema["input"] +package defsec.test_invalid + +deny { + input.Stages[0].Commands[0].FooBarNothingBurger == "lol" +} diff --git a/pkg/rego/testdata/policies/valid.rego b/pkg/rego/testdata/policies/valid.rego new file mode 100644 index 000000000000..74a96afeec0c --- /dev/null +++ b/pkg/rego/testdata/policies/valid.rego @@ -0,0 +1,8 @@ +# METADATA +# schemas: +# - input: schema["input"] +package defsec.test_valid + +deny { + input.Stages[0].Commands[0].Cmd == "lol" +} diff --git a/pkg/scan/code.go b/pkg/scan/code.go new file mode 100644 index 000000000000..f134111dd7dc --- /dev/null +++ b/pkg/scan/code.go @@ -0,0 +1,285 @@ +package scan + +import ( + "fmt" + "io/fs" + "path/filepath" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Code struct { + Lines []Line +} + +type Line struct { + Number int `json:"Number"` + Content string `json:"Content"` + IsCause bool `json:"IsCause"` + Annotation string `json:"Annotation"` + Truncated bool 
`json:"Truncated"` + Highlighted string `json:"Highlighted,omitempty"` + FirstCause bool `json:"FirstCause"` + LastCause bool `json:"LastCause"` +} + +func (c *Code) IsCauseMultiline() bool { + var count int + for _, line := range c.Lines { + if line.IsCause { + count++ + if count > 1 { + return true + } + } + } + return false +} + +const ( + darkTheme = "solarized-dark256" + lightTheme = "github" +) + +type codeSettings struct { + theme string + allowTruncation bool + maxLines int + includeHighlighted bool +} + +var defaultCodeSettings = codeSettings{ + theme: darkTheme, + allowTruncation: true, + maxLines: 10, + includeHighlighted: true, +} + +type CodeOption func(*codeSettings) + +func OptionCodeWithTheme(theme string) CodeOption { + return func(s *codeSettings) { + s.theme = theme + } +} + +func OptionCodeWithDarkTheme() CodeOption { + return func(s *codeSettings) { + s.theme = darkTheme + } +} + +func OptionCodeWithLightTheme() CodeOption { + return func(s *codeSettings) { + s.theme = lightTheme + } +} + +func OptionCodeWithTruncation(truncate bool) CodeOption { + return func(s *codeSettings) { + s.allowTruncation = truncate + } +} + +func OptionCodeWithMaxLines(lines int) CodeOption { + return func(s *codeSettings) { + s.maxLines = lines + } +} + +func OptionCodeWithHighlighted(include bool) CodeOption { + return func(s *codeSettings) { + s.includeHighlighted = include + } +} + +func validateRange(r defsecTypes.Range) error { + if r.GetStartLine() < 0 || r.GetStartLine() > r.GetEndLine() || r.GetEndLine() < 0 { + return fmt.Errorf("invalid range: %s", r.String()) + } + return nil +} + +// nolint +func (r *Result) GetCode(opts ...CodeOption) (*Code, error) { + + settings := defaultCodeSettings + for _, opt := range opts { + opt(&settings) + } + + srcFS := r.Metadata().Range().GetFS() + if srcFS == nil { + return nil, fmt.Errorf("code unavailable: result was not mapped to a known filesystem") + } + + innerRange := r.Range() + outerRange := innerRange + metadata := r.Metadata() + for { + if parent := metadata.Parent(); parent != nil && + parent.Range().GetFilename() == metadata.Range().GetFilename() && + parent.Range().GetStartLine() > 0 { + outerRange = parent.Range() + metadata = *parent + continue + } + break + } + + if err := validateRange(innerRange); err != nil { + return nil, err + } + if err := validateRange(outerRange); err != nil { + return nil, err + } + + slashed := filepath.ToSlash(r.fsPath) + slashed = strings.TrimPrefix(slashed, "/") + + content, err := fs.ReadFile(srcFS, slashed) + if err != nil { + return nil, fmt.Errorf("failed to read file from result filesystem (%#v): %w", srcFS, err) + } + + hasAnnotation := r.Annotation() != "" + + code := Code{ + Lines: nil, + } + + rawLines := strings.Split(string(content), "\n") + + var highlightedLines []string + if settings.includeHighlighted { + highlightedLines = highlight(defsecTypes.CreateFSKey(innerRange.GetFS()), innerRange.GetLocalFilename(), content, settings.theme) + if len(highlightedLines) < len(rawLines) { + highlightedLines = rawLines + } + } else { + highlightedLines = make([]string, len(rawLines)) + } + + if outerRange.GetEndLine()-1 >= len(rawLines) || innerRange.GetStartLine() == 0 { + return nil, fmt.Errorf("invalid line number") + } + + shrink := settings.allowTruncation && outerRange.LineCount() > (innerRange.LineCount()+10) + + if shrink { + + if outerRange.GetStartLine() < innerRange.GetStartLine() { + code.Lines = append( + code.Lines, + Line{ + Content: rawLines[outerRange.GetStartLine()-1], + 
Highlighted: highlightedLines[outerRange.GetStartLine()-1], + Number: outerRange.GetStartLine(), + }, + ) + if outerRange.GetStartLine()+1 < innerRange.GetStartLine() { + code.Lines = append( + code.Lines, + Line{ + Truncated: true, + Number: outerRange.GetStartLine() + 1, + }, + ) + } + } + + for lineNo := innerRange.GetStartLine(); lineNo <= innerRange.GetEndLine(); lineNo++ { + + if lineNo-1 >= len(rawLines) || lineNo-1 >= len(highlightedLines) { + break + } + + line := Line{ + Number: lineNo, + Content: strings.TrimSuffix(rawLines[lineNo-1], "\r"), + Highlighted: strings.TrimSuffix(highlightedLines[lineNo-1], "\r"), + IsCause: true, + } + + if hasAnnotation && lineNo == innerRange.GetStartLine() { + line.Annotation = r.Annotation() + } + + code.Lines = append(code.Lines, line) + } + + if outerRange.GetEndLine() > innerRange.GetEndLine() { + if outerRange.GetEndLine() > innerRange.GetEndLine()+1 { + code.Lines = append( + code.Lines, + Line{ + Truncated: true, + Number: outerRange.GetEndLine() - 1, + }, + ) + } + code.Lines = append( + code.Lines, + Line{ + Content: rawLines[outerRange.GetEndLine()-1], + Highlighted: highlightedLines[outerRange.GetEndLine()-1], + Number: outerRange.GetEndLine(), + }, + ) + + } + + } else { + for lineNo := outerRange.GetStartLine(); lineNo <= outerRange.GetEndLine(); lineNo++ { + + line := Line{ + Number: lineNo, + Content: strings.TrimSuffix(rawLines[lineNo-1], "\r"), + Highlighted: strings.TrimSuffix(highlightedLines[lineNo-1], "\r"), + IsCause: lineNo >= innerRange.GetStartLine() && lineNo <= innerRange.GetEndLine(), + } + + if hasAnnotation && lineNo == innerRange.GetStartLine() { + line.Annotation = r.Annotation() + } + + code.Lines = append(code.Lines, line) + } + } + + if settings.allowTruncation && len(code.Lines) > settings.maxLines && settings.maxLines > 0 { + previouslyTruncated := settings.maxLines-1 > 0 && code.Lines[settings.maxLines-2].Truncated + if settings.maxLines-1 > 0 && code.Lines[settings.maxLines-1].LastCause { + code.Lines[settings.maxLines-2].LastCause = true + } + code.Lines[settings.maxLines-1] = Line{ + Truncated: true, + Number: code.Lines[settings.maxLines-1].Number, + } + if previouslyTruncated { + code.Lines = code.Lines[:settings.maxLines-1] + } else { + code.Lines = code.Lines[:settings.maxLines] + } + } + + var first, last bool + for i, line := range code.Lines { + if line.IsCause && !first { + code.Lines[i].FirstCause = true + first = true + continue + } + if first && !line.IsCause && i > 0 { + code.Lines[i-1].LastCause = true + last = true + break + } + } + if !last && len(code.Lines) > 0 { + code.Lines[len(code.Lines)-1].LastCause = true + } + + return &code, nil +} diff --git a/pkg/scan/code_test.go b/pkg/scan/code_test.go new file mode 100644 index 000000000000..72d03e0b6691 --- /dev/null +++ b/pkg/scan/code_test.go @@ -0,0 +1,266 @@ +package scan + +import ( + "os" + "strings" + "testing" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" + + "github.com/liamg/memoryfs" +) + +func TestResult_GetCode(t *testing.T) { + + tests := []struct { + name string + source string + filename string + start int + end int + outerStart int + outerEnd int + expected []Line + options []CodeOption + wantErr bool + annotation string + }{ + { + name: "basic w/ defaults", + source: `1 +2 +3 +4`, + filename: "test.txt", + start: 2, + end: 3, + expected: []Line{ + { + Number: 2, + Content: "2", + IsCause: true, + 
Highlighted: "2", + FirstCause: true, + LastCause: false, + }, + { + Number: 3, + Content: "3", + IsCause: true, + Highlighted: "3", + FirstCause: false, + LastCause: true, + }, + }, + }, + { + name: "nested ranges", + source: `resource "aws_s3_bucket" "something" { + bucket = "something" +}`, + filename: "main.tf", + start: 2, + end: 2, + outerStart: 1, + outerEnd: 3, + options: []CodeOption{OptionCodeWithHighlighted(false)}, + expected: []Line{ + { + Number: 1, + Content: `resource "aws_s3_bucket" "something" {`, + }, + { + Number: 2, + Content: ` bucket = "something"`, + IsCause: true, + FirstCause: true, + LastCause: true, + }, + { + Number: 3, + Content: "}", + }, + }, + }, + { + name: "bad filename", + source: `1 +2 +3 +4`, + filename: "", + start: 2, + end: 3, + wantErr: true, + }, + { + name: "no line numbers", + source: `1 +2 +3 +4`, + filename: "test.txt", + start: 0, + end: 0, + wantErr: true, + }, + { + name: "negative line numbers", + source: `1 +2 +3 +4`, + filename: "test.txt", + start: -2, + end: -1, + wantErr: true, + }, + { + name: "invalid line numbers", + source: `1 +2 +3 +4`, + filename: "test.txt", + start: 5, + end: 6, + wantErr: true, + }, + { + name: "syntax highlighting", + source: `FROM ubuntu`, + filename: "Dockerfile", + start: 1, + end: 1, + expected: []Line{ + { + Number: 1, + Content: "FROM ubuntu", + IsCause: true, + Highlighted: "\x1b[38;5;64mFROM\x1b[0m\x1b[38;5;37m ubuntu\x1b[0m", + FirstCause: true, + LastCause: true, + }, + }, + }, + { + name: "truncation", + source: strings.Repeat("If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.\n", 100), + filename: "longfile.txt", + start: 1, + end: 100, + expected: []Line{ + { + Number: 1, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: true, + LastCause: false, + }, + { + Number: 2, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: false, + }, + { + Number: 3, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: false, + }, + { + Number: 4, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: false, + }, + { + Number: 5, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: false, + }, + { + Number: 6, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: false, + }, + { + Number: 7, + Content: "If you can do a 
half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: false, + }, + { + Number: 8, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: false, + }, + { + Number: 9, + Content: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + IsCause: true, + Highlighted: "If you can do a half-assed job of anything, you're a one-eyed man in a kingdom of the blind.", + FirstCause: false, + LastCause: true, + }, + { + Number: 10, + Truncated: true, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + system := memoryfs.New() + require.NoError(t, system.WriteFile(test.filename, []byte(test.source), os.ModePerm)) + meta := defsecTypes.NewMisconfigMetadata( + defsecTypes.NewRange(test.filename, test.start, test.end, "", system), + "", + ) + if test.outerStart > 0 { + meta = meta.WithParent(defsecTypes.NewMisconfigMetadata( + defsecTypes.NewRange(test.filename, test.outerStart, test.outerEnd, "", system), + "", + )) + } + result := &Result{ + annotation: test.annotation, + metadata: meta, + fsPath: test.filename, + } + code, err := result.GetCode(test.options...) + if test.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, test.expected, code.Lines) + }) + } + +} diff --git a/pkg/scan/flat.go b/pkg/scan/flat.go new file mode 100755 index 000000000000..10bf3d366a24 --- /dev/null +++ b/pkg/scan/flat.go @@ -0,0 +1,72 @@ +package scan + +import ( + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/severity" +) + +type FlatResult struct { + RuleID string `json:"rule_id"` + LongID string `json:"long_id"` + RuleSummary string `json:"rule_description"` + RuleProvider providers.Provider `json:"rule_provider"` + RuleService string `json:"rule_service"` + Impact string `json:"impact"` + Resolution string `json:"resolution"` + Links []string `json:"links"` + Description string `json:"description"` + RangeAnnotation string `json:"-"` + Severity severity.Severity `json:"severity"` + Warning bool `json:"warning"` + Status Status `json:"status"` + Resource string `json:"resource"` + Occurrences []Occurrence `json:"occurrences,omitempty"` + Location FlatRange `json:"location"` +} + +type FlatRange struct { + Filename string `json:"filename"` + StartLine int `json:"start_line"` + EndLine int `json:"end_line"` +} + +func (r Results) Flatten() []FlatResult { + var results []FlatResult + for _, original := range r { + results = append(results, original.Flatten()) + } + return results +} + +func (r *Result) Flatten() FlatResult { + rng := r.metadata.Range() + + resMetadata := r.metadata + + for resMetadata.Parent() != nil { + resMetadata = *resMetadata.Parent() + } + + return FlatResult{ + RuleID: r.rule.AVDID, + LongID: r.Rule().LongID(), + RuleSummary: r.rule.Summary, + RuleProvider: r.rule.Provider, + RuleService: r.rule.Service, + Impact: r.rule.Impact, + Resolution: r.rule.Resolution, + Links: r.rule.Links, + Description: r.Description(), + RangeAnnotation: r.Annotation(), + Severity: r.rule.Severity, + Status: r.status, + Resource: resMetadata.Reference(), + 
Occurrences: r.Occurrences(), + Warning: r.IsWarning(), + Location: FlatRange{ + Filename: rng.GetFilename(), + StartLine: rng.GetStartLine(), + EndLine: rng.GetEndLine(), + }, + } +} diff --git a/pkg/scan/highlighting.go b/pkg/scan/highlighting.go new file mode 100644 index 000000000000..7f46a29a20c7 --- /dev/null +++ b/pkg/scan/highlighting.go @@ -0,0 +1,124 @@ +package scan + +import ( + "bytes" + "fmt" + "strings" + "sync" + + "github.com/alecthomas/chroma" + "github.com/alecthomas/chroma/formatters" + "github.com/alecthomas/chroma/lexers" + "github.com/alecthomas/chroma/styles" +) + +type cache struct { + sync.RWMutex + data map[string][]string +} + +func (c *cache) Get(key string) ([]string, bool) { + c.RLock() + defer c.RUnlock() + data, ok := c.data[key] + return data, ok +} + +func (c *cache) Set(key string, data []string) { + c.Lock() + defer c.Unlock() + c.data[key] = data +} + +var globalCache = &cache{ + data: make(map[string][]string), +} + +func highlight(fsKey string, filename string, input []byte, theme string) []string { + + key := fmt.Sprintf("%s|%s", fsKey, filename) + if lines, ok := globalCache.Get(key); ok { + return lines + } + + lexer := lexers.Match(filename) + if lexer == nil { + lexer = lexers.Fallback + } + lexer = chroma.Coalesce(lexer) + + style := styles.Get(theme) + if style == nil { + style = styles.Fallback + } + formatter := formatters.Get("terminal256") + if formatter == nil { + formatter = formatters.Fallback + } + + // replace windows line endings + input = bytes.ReplaceAll(input, []byte{0x0d}, []byte{}) + iterator, err := lexer.Tokenise(nil, string(input)) + if err != nil { + return nil + } + + buffer := bytes.NewBuffer([]byte{}) + if err := formatter.Format(buffer, style, iterator); err != nil { + return nil + } + + raw := shiftANSIOverLineEndings(buffer.Bytes()) + lines := strings.Split(string(raw), "\n") + globalCache.Set(key, lines) + return lines +} + +func shiftANSIOverLineEndings(input []byte) []byte { + var output []byte + prev := byte(0) + inCSI := false + csiShouldCarry := false + var csi []byte + var skipOutput bool + for _, r := range input { + skipOutput = false + if !inCSI { + switch { + case r == '\n': + if csiShouldCarry && len(csi) > 0 { + skipOutput = true + output = append(output, '\n') + output = append(output, csi...) + csi = nil + csiShouldCarry = false + } + case r == '[' && prev == 0x1b: + inCSI = true + csi = append(csi, 0x1b, '[') + output = output[:len(output)-1] + skipOutput = true + default: + csiShouldCarry = false + if len(csi) > 0 { + output = append(output, csi...) + csi = nil + } + } + } else { + csi = append(csi, r) + skipOutput = true + switch { + case r >= 0x40 && r <= 0x7E: + csiShouldCarry = true + inCSI = false + } + } + if !skipOutput { + output = append(output, r) + } + prev = r + } + + return append(output, csi...) 
+} diff --git a/pkg/scan/result.go b/pkg/scan/result.go new file mode 100755 index 000000000000..c08b2839b058 --- /dev/null +++ b/pkg/scan/result.go @@ -0,0 +1,366 @@ +package scan + +import ( + "fmt" + "io/fs" + "path/filepath" + "reflect" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/severity" +) + +type Status uint8 + +const ( + StatusFailed Status = iota + StatusPassed + StatusIgnored +) + +type Result struct { + rule Rule + description string + annotation string + status Status + metadata defsecTypes.MisconfigMetadata + severityOverride *severity.Severity + regoNamespace string + regoRule string + warning bool + traces []string + fsPath string +} + +func (r Result) RegoNamespace() string { + return r.regoNamespace +} + +func (r Result) RegoRule() string { + return r.regoRule +} + +func (r Result) Severity() severity.Severity { + if r.severityOverride != nil { + return *r.severityOverride + } + return r.Rule().Severity +} + +func (r *Result) IsWarning() bool { + return r.warning +} + +func (r *Result) OverrideSeverity(s severity.Severity) { + r.severityOverride = &s +} + +func (r *Result) OverrideDescription(description string) { + r.description = description +} + +func (r *Result) OverrideMetadata(metadata defsecTypes.MisconfigMetadata) { + r.metadata = metadata +} + +func (r *Result) OverrideStatus(status Status) { + r.status = status +} + +func (r *Result) OverrideAnnotation(annotation string) { + r.annotation = annotation +} + +func (r *Result) SetRule(ru Rule) { + r.rule = ru +} + +func (r Result) Status() Status { + return r.status +} + +func (r Result) Rule() Rule { + return r.rule +} + +func (r Result) Description() string { + return r.description +} + +func (r Result) Annotation() string { + return r.annotation +} + +func (r Result) Metadata() defsecTypes.MisconfigMetadata { + return r.metadata +} + +func (r Result) Range() defsecTypes.Range { + return r.metadata.Range() +} + +func (r Result) Traces() []string { + return r.traces +} + +func (r *Result) AbsolutePath(fsRoot string, metadata defsecTypes.MisconfigMetadata) string { + if strings.HasSuffix(fsRoot, ":") { + fsRoot += "/" + } + + if metadata.IsUnmanaged() { + return "" + } + rng := metadata.Range() + if rng.GetSourcePrefix() != "" && !strings.HasPrefix(rng.GetSourcePrefix(), ".") { + return rng.GetFilename() + } + return filepath.Join(fsRoot, rng.GetLocalFilename()) +} + +func (r *Result) RelativePathTo(fsRoot, to string, metadata defsecTypes.MisconfigMetadata) string { + + absolute := r.AbsolutePath(fsRoot, metadata) + + if strings.HasSuffix(fsRoot, ":") { + fsRoot += "/" + } + + if metadata.IsUnmanaged() { + return absolute + } + rng := metadata.Range() + if rng.GetSourcePrefix() != "" && !strings.HasPrefix(rng.GetSourcePrefix(), ".") { + return absolute + } + if !strings.HasPrefix(rng.GetLocalFilename(), strings.TrimSuffix(fsRoot, "/")) { + return absolute + } + relative, err := filepath.Rel(to, rng.GetLocalFilename()) + if err != nil { + return absolute + } + return relative +} + +type Results []Result + +type MetadataProvider interface { + GetMetadata() defsecTypes.MisconfigMetadata + GetRawValue() interface{} +} + +func (r *Results) GetPassed() Results { + return r.filterStatus(StatusPassed) +} + +func (r *Results) GetIgnored() Results { + return r.filterStatus(StatusIgnored) +} + +func (r *Results) GetFailed() Results { + return r.filterStatus(StatusFailed) +} + +func (r *Results) filterStatus(status Status) Results { + var filtered 
Results + if r == nil { + return filtered + } + for _, res := range *r { + if res.Status() == status { + filtered = append(filtered, res) + } + } + return filtered +} + +func (r *Results) Add(description string, source interface{}) { + result := Result{ + description: description, + } + result.metadata = getMetadataFromSource(source) + if result.metadata.IsExplicit() { + result.annotation = getAnnotation(source) + } + rnge := result.metadata.Range() + result.fsPath = rnge.GetLocalFilename() + *r = append(*r, result) +} + +func (r *Results) AddRego(description string, namespace string, rule string, traces []string, source MetadataProvider) { + result := Result{ + description: description, + regoNamespace: namespace, + regoRule: rule, + warning: rule == "warn" || strings.HasPrefix(rule, "warn_"), + traces: traces, + } + result.metadata = getMetadataFromSource(source) + if result.metadata.IsExplicit() { + result.annotation = getAnnotation(source) + } + rnge := result.metadata.Range() + result.fsPath = rnge.GetLocalFilename() + *r = append(*r, result) +} + +func (r *Results) AddPassed(source interface{}, descriptions ...string) { + res := Result{ + description: strings.Join(descriptions, " "), + status: StatusPassed, + } + res.metadata = getMetadataFromSource(source) + rnge := res.metadata.Range() + res.fsPath = rnge.GetLocalFilename() + *r = append(*r, res) +} + +func getMetadataFromSource(source interface{}) defsecTypes.MisconfigMetadata { + if provider, ok := source.(MetadataProvider); ok { + return provider.GetMetadata() + } + + metaValue := reflect.ValueOf(source) + if metaValue.Kind() == reflect.Ptr { + metaValue = metaValue.Elem() + } + metaVal := metaValue.FieldByName("Metadata") + return metaVal.Interface().(defsecTypes.MisconfigMetadata) +} + +func getAnnotation(source interface{}) string { + if provider, ok := source.(MetadataProvider); ok { + return rawToString(provider.GetRawValue()) + } + return "" +} + +func (r *Results) AddPassedRego(namespace string, rule string, traces []string, source interface{}) { + res := Result{ + status: StatusPassed, + regoNamespace: namespace, + regoRule: rule, + traces: traces, + } + res.metadata = getMetadataFromSource(source) + rnge := res.metadata.Range() + res.fsPath = rnge.GetLocalFilename() + *r = append(*r, res) +} + +func (r *Results) AddIgnored(source interface{}, descriptions ...string) { + res := Result{ + description: strings.Join(descriptions, " "), + status: StatusIgnored, + } + res.metadata = getMetadataFromSource(source) + rnge := res.metadata.Range() + res.fsPath = rnge.GetLocalFilename() + *r = append(*r, res) +} + +func (r *Results) SetRule(rule Rule) { + for i := range *r { + (*r)[i].rule = rule + } +} + +func (r *Results) SetSourceAndFilesystem(source string, f fs.FS, logicalSource bool) { + for i := range *r { + m := (*r)[i].Metadata() + if m.IsUnmanaged() { + continue + } + rng := m.Range() + + newrng := defsecTypes.NewRange(rng.GetLocalFilename(), rng.GetStartLine(), rng.GetEndLine(), source, f) + if logicalSource { + newrng = defsecTypes.NewRangeWithLogicalSource(rng.GetLocalFilename(), rng.GetStartLine(), rng.GetEndLine(), + source, f) + } + parent := m.Parent() + switch { + case m.IsExplicit(): + m = defsecTypes.NewExplicitMisconfigMetadata(newrng, m.Reference()) + default: + m = defsecTypes.NewMisconfigMetadata(newrng, m.Reference()) + } + if parent != nil { + m.SetParentPtr(parent) + } + (*r)[i].OverrideMetadata(m) + } +} + +func rawToString(raw interface{}) string { + if raw == nil { + return "" + } + switch t := 
raw.(type) { + case int: + return fmt.Sprintf("%d", t) + case bool: + return fmt.Sprintf("%t", t) + case float64: + return fmt.Sprintf("%f", t) + case string: + return fmt.Sprintf("%q", t) + case []string: + var items []string + for _, item := range t { + items = append(items, rawToString(item)) + } + return fmt.Sprintf("[%s]", strings.Join(items, ", ")) + case []int: + var items []string + for _, item := range t { + items = append(items, rawToString(item)) + } + return fmt.Sprintf("[%s]", strings.Join(items, ", ")) + case []float64: + var items []string + for _, item := range t { + items = append(items, rawToString(item)) + } + return fmt.Sprintf("[%s]", strings.Join(items, ", ")) + case []bool: + var items []string + for _, item := range t { + items = append(items, rawToString(item)) + } + return fmt.Sprintf("[%s]", strings.Join(items, ", ")) + default: + return "?" + } +} + +type Occurrence struct { + Resource string `json:"resource"` + Filename string `json:"filename"` + StartLine int `json:"start_line"` + EndLine int `json:"end_line"` +} + +func (r *Result) Occurrences() []Occurrence { + var occurrences []Occurrence + + mod := &r.metadata + + for { + mod = mod.Parent() + if mod == nil { + break + } + parentRange := mod.Range() + occurrences = append(occurrences, Occurrence{ + Resource: mod.Reference(), + Filename: parentRange.GetFilename(), + StartLine: parentRange.GetStartLine(), + EndLine: parentRange.GetEndLine(), + }) + } + return occurrences +} diff --git a/pkg/scan/result_test.go b/pkg/scan/result_test.go new file mode 100644 index 000000000000..bf920f2c1b92 --- /dev/null +++ b/pkg/scan/result_test.go @@ -0,0 +1,56 @@ +package scan_test + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/assert" +) + +func Test_Occurrences(t *testing.T) { + tests := []struct { + name string + factory func() *scan.Result + expected []scan.Occurrence + }{ + { + name: "happy", + factory: func() *scan.Result { + r := scan.Result{} + causeResourceMeta := types.NewMisconfigMetadata(types.NewRange( + "main.tf", 1, 13, "", nil, + ), "module.aws-security-groups[\"db1\"]") + + parentMeta := types.NewMisconfigMetadata(types.NewRange( + "terraform-aws-modules/security-group/aws/main.tf", 191, 227, "", nil, + ), "aws_security_group_rule.ingress_with_cidr_blocks[0]").WithParent(causeResourceMeta) + + r.OverrideMetadata(types.NewMisconfigMetadata(types.NewRange( + "terraform-aws-modules/security-group/aws/main.tf", 197, 204, "", nil, + ), "aws_security_group_rule.ingress_with_cidr_blocks").WithParent(parentMeta)) + return &r + }, + expected: []scan.Occurrence{ + { + Resource: "aws_security_group_rule.ingress_with_cidr_blocks[0]", + Filename: "terraform-aws-modules/security-group/aws/main.tf", + StartLine: 191, + EndLine: 227, + }, + { + Resource: "module.aws-security-groups[\"db1\"]", + Filename: "main.tf", + StartLine: 1, + EndLine: 13, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, tt.factory().Occurrences()) + }) + } +} diff --git a/pkg/scan/rule.go b/pkg/scan/rule.go new file mode 100755 index 000000000000..5da355d807bf --- /dev/null +++ b/pkg/scan/rule.go @@ -0,0 +1,168 @@ +package scan + +import ( + "fmt" + "regexp" + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/providers" + 
"github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/state" + "github.com/aquasecurity/trivy/pkg/terraform" +) + +type CheckFunc func(s *state.State) (results Results) + +type EngineMetadata struct { + GoodExamples []string `json:"good_examples,omitempty"` + BadExamples []string `json:"bad_examples,omitempty"` + RemediationMarkdown string `json:"remediation_markdown,omitempty"` + Links []string `json:"links,omitempty"` +} + +type CustomChecks struct { + Terraform *TerraformCustomCheck +} + +type TerraformCustomCheck struct { + RequiredTypes []string + RequiredLabels []string + RequiredSources []string + Check func(*terraform.Block, *terraform.Module) Results +} + +type Rule struct { + AVDID string `json:"avd_id"` + Aliases []string `json:"aliases"` + ShortCode string `json:"short_code"` + Summary string `json:"summary"` + Explanation string `json:"explanation"` + Impact string `json:"impact"` + Resolution string `json:"resolution"` + Provider providers.Provider `json:"provider"` + Service string `json:"service"` + Links []string `json:"links"` + Severity severity.Severity `json:"severity"` + Terraform *EngineMetadata `json:"terraform,omitempty"` + CloudFormation *EngineMetadata `json:"cloud_formation,omitempty"` + CustomChecks CustomChecks `json:"-"` + RegoPackage string `json:"-"` + Frameworks map[framework.Framework][]string `json:"frameworks"` + Check CheckFunc `json:"-"` +} + +func (r Rule) HasID(id string) bool { + if r.AVDID == id || r.LongID() == id { + return true + } + for _, alias := range r.Aliases { + if alias == id { + return true + } + } + return false +} + +func (r Rule) LongID() string { + return strings.ToLower(fmt.Sprintf("%s-%s-%s", r.Provider, r.Service, r.ShortCode)) +} + +func (r Rule) ServiceDisplayName() string { + return nicify(r.Service) +} + +func (r Rule) ShortCodeDisplayName() string { + return nicify(r.ShortCode) +} + +func (r Rule) CanCheck() bool { + return r.Check != nil +} + +func (r Rule) Evaluate(s *state.State) Results { + if !r.CanCheck() { + return nil + } + results := r.Check(s) + for i := range results { + results[i].SetRule(r) + } + return results +} + +var acronyms = []string{ + "acl", + "alb", + "api", + "arn", + "aws", + "cidr", + "db", + "dns", + "ebs", + "ec2", + "ecr", + "ecs", + "efs", + "eks", + "elb", + "gke", + "http", + "http2", + "https", + "iam", + "im", + "imds", + "ip", + "ips", + "kms", + "lb", + "md5", + "mfa", + "mq", + "msk", + "rbac", + "rdp", + "rds", + "rsa", + "sam", + "sgr", + "sha1", + "sha256", + "sns", + "sql", + "sqs", + "ssh", + "ssm", + "tls", + "ubla", + "vm", + "vpc", + "vtpm", + "waf", +} + +var specials = map[string]string{ + "dynamodb": "DynamoDB", + "documentdb": "DocumentDB", + "mysql": "MySQL", + "postgresql": "PostgreSQL", + "acls": "ACLs", + "ips": "IPs", + "bigquery": "BigQuery", +} + +func nicify(input string) string { + input = strings.ToLower(input) + for replace, with := range specials { + input = regexp.MustCompile(fmt.Sprintf("\\b%s\\b", replace)).ReplaceAllString(input, with) + } + for _, acronym := range acronyms { + input = regexp.MustCompile(fmt.Sprintf("\\b%s\\b", acronym)).ReplaceAllString(input, strings.ToUpper(acronym)) + } + return cases.Title(language.English).String(strings.ReplaceAll(input, "-", " ")) +} diff --git a/pkg/scanners/azure/arm/parser/armjson/bench_test.go b/pkg/scanners/azure/arm/parser/armjson/bench_test.go new file mode 100644 index 000000000000..439181838dcc --- /dev/null +++ 
b/pkg/scanners/azure/arm/parser/armjson/bench_test.go @@ -0,0 +1,71 @@ +package armjson + +import ( + "encoding/json" + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/require" +) + +func BenchmarkUnmarshal_JFather(b *testing.B) { + target := make(map[string]interface{}) + input := []byte(`{ + "glossary": { + "title": "example glossary", + "GlossDiv": { + "title": "S", + "GlossList": { + "GlossEntry": { + "ID": "SGML", + "SortAs": "SGML", + "GlossTerm": "Standard Generalized Markup Language", + "Acronym": "SGML", + "Abbrev": "ISO 8879:1986", + "GlossDef": { + "para": "A meta-markup language, used to create markup languages such as DocBook.", + "GlossSeeAlso": ["GML", "XML"] + }, + "GlossSee": "markup" + } + } + } + } +}`) + + for n := 0; n < b.N; n++ { + metadata := types.NewTestMisconfigMetadata() + require.NoError(b, Unmarshal(input, &target, &metadata)) + } +} + +func BenchmarkUnmarshal_Traditional(b *testing.B) { + target := make(map[string]interface{}) + input := []byte(`{ + "glossary": { + "title": "example glossary", + "GlossDiv": { + "title": "S", + "GlossList": { + "GlossEntry": { + "ID": "SGML", + "SortAs": "SGML", + "GlossTerm": "Standard Generalized Markup Language", + "Acronym": "SGML", + "Abbrev": "ISO 8879:1986", + "GlossDef": { + "para": "A meta-markup language, used to create markup languages such as DocBook.", + "GlossSeeAlso": ["GML", "XML"] + }, + "GlossSee": "markup" + } + } + } + } +}`) + + for n := 0; n < b.N; n++ { + require.NoError(b, json.Unmarshal(input, &target)) + } +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode.go b/pkg/scanners/azure/arm/parser/armjson/decode.go new file mode 100644 index 000000000000..b3610721225d --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/decode.go @@ -0,0 +1,66 @@ +package armjson + +import ( + "fmt" + "reflect" + + "github.com/aquasecurity/trivy/pkg/types" +) + +func (n *node) Decode(target interface{}) error { + v := reflect.ValueOf(target) + return n.decodeToValue(v) +} + +func (n *node) Metadata() types.MisconfigMetadata { + return *n.metadata +} + +var unmarshaller = reflect.TypeOf((*Unmarshaller)(nil)).Elem() +var receiver = reflect.TypeOf((*MetadataReceiver)(nil)).Elem() + +func (n *node) decodeToValue(v reflect.Value) error { + + if v.Type().Implements(receiver) { + rec := v + defer func() { + rec.MethodByName("SetMetadata").Call([]reflect.Value{reflect.ValueOf(n.metadata)}) + }() + } + if v.Type().Implements(unmarshaller) { + returns := v.MethodByName("UnmarshalJSONWithMetadata").Call([]reflect.Value{reflect.ValueOf(n)}) + if err := returns[0].Interface(); err != nil { + return err.(error) + } + return nil + } + + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + if !v.CanSet() { + return fmt.Errorf("target is not settable") + } + + switch n.kind { + case KindObject: + return n.decodeObject(v) + case KindArray: + return n.decodeArray(v) + case KindString: + return n.decodeString(v) + case KindNumber: + return n.decodeNumber(v) + case KindBoolean: + return n.decodeBoolean(v) + case KindNull: + return n.decodeNull(v) + case KindComment: + return n.decodeString(v) + case KindUnknown: + return fmt.Errorf("cannot decode unknown kind") + default: + return fmt.Errorf("decoding of kind 0x%x is not supported", n.kind) + } +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode_array.go b/pkg/scanners/azure/arm/parser/armjson/decode_array.go new file mode 100644 index 000000000000..75faada57252 --- /dev/null +++ 
b/pkg/scanners/azure/arm/parser/armjson/decode_array.go @@ -0,0 +1,51 @@ +package armjson + +import ( + "fmt" + "reflect" +) + +func (n *node) decodeArray(v reflect.Value) error { + + length := len(n.content) + + var original reflect.Value + + switch v.Kind() { + case reflect.Array: + if v.Len() != length { + return fmt.Errorf("invalid length") + } + case reflect.Slice: + v.Set(reflect.MakeSlice(v.Type(), length, length)) + case reflect.Interface: + original = v + slice := reflect.ValueOf(make([]interface{}, length)) + v = reflect.New(slice.Type()).Elem() + v.Set(slice) + default: + return fmt.Errorf("invalid target type") + } + + elementType := v.Type().Elem() + for i, nodeElement := range n.content { + node := nodeElement.(*node) + targetElement := reflect.New(elementType).Elem() + addressable := targetElement + if targetElement.Kind() == reflect.Ptr { + targetElement.Set(reflect.New(elementType.Elem())) + } else { + addressable = targetElement.Addr() + } + if err := node.decodeToValue(addressable); err != nil { + return err + } + v.Index(i).Set(targetElement) + } + + if original.IsValid() { + original.Set(v) + } + + return nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode_boolean.go b/pkg/scanners/azure/arm/parser/armjson/decode_boolean.go new file mode 100644 index 000000000000..dbdef3a3253d --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/decode_boolean.go @@ -0,0 +1,18 @@ +package armjson + +import ( + "fmt" + "reflect" +) + +func (n *node) decodeBoolean(v reflect.Value) error { + switch v.Kind() { + case reflect.Bool: + v.SetBool(n.raw.(bool)) + case reflect.Interface: + v.Set(reflect.ValueOf(n.raw)) + default: + return fmt.Errorf("cannot decode boolean value to %s target", v.Kind()) + } + return nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode_meta_test.go b/pkg/scanners/azure/arm/parser/armjson/decode_meta_test.go new file mode 100644 index 000000000000..0834d2684085 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/decode_meta_test.go @@ -0,0 +1,40 @@ +package armjson + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type TestParent struct { + Child *TestChild `json:"child"` +} + +type TestChild struct { + Name string + Line int + Column int +} + +func (t *TestChild) UnmarshalJSONWithMetadata(node Node) error { + t.Line = node.Range().Start.Line + t.Column = node.Range().Start.Column + return node.Decode(&t.Name) +} + +func Test_DecodeWithMetadata(t *testing.T) { + example := []byte(` +{ + "child": "secret" +} +`) + var parent TestParent + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &parent, &metadata)) + assert.Equal(t, 3, parent.Child.Line) + assert.Equal(t, 12, parent.Child.Column) + assert.Equal(t, "secret", parent.Child.Name) +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode_null.go b/pkg/scanners/azure/arm/parser/armjson/decode_null.go new file mode 100644 index 000000000000..2cc86b3c1bb7 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/decode_null.go @@ -0,0 +1,10 @@ +package armjson + +import ( + "reflect" +) + +func (n *node) decodeNull(v reflect.Value) error { + v.Set(reflect.Zero(v.Type())) + return nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode_number.go b/pkg/scanners/azure/arm/parser/armjson/decode_number.go new file mode 100644 index 000000000000..653f6f1fbe06 --- /dev/null +++ 
b/pkg/scanners/azure/arm/parser/armjson/decode_number.go @@ -0,0 +1,46 @@ +package armjson + +import ( + "fmt" + "reflect" +) + +func (n *node) decodeNumber(v reflect.Value) error { + + switch v.Kind() { + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + if i64, ok := n.raw.(int64); ok { + v.SetInt(i64) + return nil + } + if f64, ok := n.raw.(float64); ok { + v.SetInt(int64(f64)) + return nil + } + case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: + if i64, ok := n.raw.(int64); ok { + v.SetUint(uint64(i64)) + return nil + } + if f64, ok := n.raw.(float64); ok { + v.SetUint(uint64(f64)) + return nil + } + case reflect.Float32, reflect.Float64: + if i64, ok := n.raw.(int64); ok { + v.SetFloat(float64(i64)) + return nil + } + if f64, ok := n.raw.(float64); ok { + v.SetFloat(f64) + return nil + } + case reflect.Interface: + v.Set(reflect.ValueOf(n.raw)) + return nil + default: + return fmt.Errorf("cannot decode number value to %s target", v.Kind()) + } + + return fmt.Errorf("internal value is not numeric") +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode_object.go b/pkg/scanners/azure/arm/parser/armjson/decode_object.go new file mode 100644 index 000000000000..516029b55deb --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/decode_object.go @@ -0,0 +1,122 @@ +package armjson + +import ( + "fmt" + "reflect" + "strings" +) + +func (n *node) decodeObject(v reflect.Value) error { + switch v.Kind() { + case reflect.Struct: + return n.decodeObjectToStruct(v) + case reflect.Map: + return n.decodeObjectToMap(v) + case reflect.Interface: + target := reflect.New(reflect.TypeOf(make(map[string]interface{}, len(n.Content())))).Elem() + if err := n.decodeObjectToMap(target); err != nil { + return err + } + v.Set(target) + return nil + default: + return fmt.Errorf("cannot set object value to target of type %s", v.Kind()) + } +} + +func (n *node) decodeObjectToMap(v reflect.Value) error { + properties, err := n.objectAsMap() + if err != nil { + return err + } + + newMap := reflect.MakeMap(v.Type()) + valueType := v.Type().Elem() + + for key, value := range properties { + target := reflect.New(valueType).Elem() + addressable := target + if target.Kind() == reflect.Ptr { + target.Set(reflect.New(valueType.Elem())) + } else { + addressable = target.Addr() + } + if err := value.(*node).decodeToValue(addressable); err != nil { + return err + } + newMap.SetMapIndex(reflect.ValueOf(key), target) + } + + v.Set(newMap) + return nil + +} + +func (n *node) objectAsMap() (map[string]Node, error) { + if n.kind != KindObject { + return nil, fmt.Errorf("not an object") + } + properties := make(map[string]Node) + contents := n.content + for i := 0; i < len(contents); i += 2 { + key := contents[i] + if key.Kind() != KindString { + return nil, fmt.Errorf("invalid object key - please report this bug") + } + keyStr := key.(*node).raw.(string) + + if i+1 >= len(contents) { + return nil, fmt.Errorf("missing object value - please report this bug") + } + properties[keyStr] = contents[i+1] + } + return properties, nil +} + +func (n *node) decodeObjectToStruct(v reflect.Value) error { + + temp := reflect.New(v.Type()).Elem() + v.Set(temp) + + properties, err := n.objectAsMap() + if err != nil { + return err + } + + t := v.Type() + for i := 0; i < t.NumField(); i++ { + fv := t.Field(i) + tags := strings.Split(fv.Tag.Get("json"), ",") + var tagName string + for _, tag := range tags { + if tag != "omitempty" && tag != "-" { + tagName = tag + } + } + if 
tagName == "" { + tagName = fv.Name + } + + value, ok := properties[tagName] + if !ok { + // TODO: should we zero this value? + continue + } + + subject := v.Field(i) + + // if fields are nil pointers, initialise them with values of the correct type + if subject.Kind() == reflect.Ptr { + if subject.IsNil() { + subject.Set(reflect.New(subject.Type().Elem())) + } + } else { + subject = subject.Addr() + } + + if err := value.(*node).decodeToValue(subject); err != nil { + return err + } + } + return nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/decode_string.go b/pkg/scanners/azure/arm/parser/armjson/decode_string.go new file mode 100644 index 000000000000..c8f734b57024 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/decode_string.go @@ -0,0 +1,19 @@ +package armjson + +import ( + "fmt" + "reflect" +) + +func (n *node) decodeString(v reflect.Value) error { + + switch v.Kind() { + case reflect.String: + v.SetString(n.raw.(string)) + case reflect.Interface: + v.Set(reflect.ValueOf(n.raw)) + default: + return fmt.Errorf("cannot decode string value to non-string target: %s", v.Kind()) + } + return nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/kind.go b/pkg/scanners/azure/arm/parser/armjson/kind.go new file mode 100644 index 000000000000..82712cc89225 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/kind.go @@ -0,0 +1,14 @@ +package armjson + +type Kind uint8 + +const ( + KindUnknown Kind = iota + KindNull + KindNumber + KindString + KindBoolean + KindArray + KindObject + KindComment +) diff --git a/pkg/scanners/azure/arm/parser/armjson/node.go b/pkg/scanners/azure/arm/parser/armjson/node.go new file mode 100644 index 000000000000..9512acc668d9 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/node.go @@ -0,0 +1,59 @@ +package armjson + +import "github.com/aquasecurity/trivy/pkg/types" + +type Node interface { + Comments() []Node + Range() Range + Decode(target interface{}) error + Kind() Kind + Content() []Node + Metadata() types.MisconfigMetadata +} + +type Range struct { + Start Position + End Position +} + +type Position struct { + Line int + Column int +} + +type node struct { + raw interface{} + start Position + end Position + kind Kind + content []Node + comments []Node + metadata *types.MisconfigMetadata + ref string +} + +func (n *node) Range() Range { + return Range{ + Start: n.start, + End: Position{ + Column: n.end.Column - 1, + Line: n.end.Line, + }, + } +} + +func (n *node) Comments() []Node { + return n.comments +} + +func (n *node) End() Position { + return n.end +} + +func (n *node) Kind() Kind { + return n.kind +} + +func (n *node) Content() []Node { + return n.content +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse.go b/pkg/scanners/azure/arm/parser/armjson/parse.go new file mode 100644 index 000000000000..66bdedd479cf --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse.go @@ -0,0 +1,150 @@ +package armjson + +import ( + "fmt" + "strings" + + "github.com/aquasecurity/trivy/pkg/types" +) + +type parser struct { + position Position + size int + peeker *PeekReader +} + +func newParser(p *PeekReader, pos Position) *parser { + return &parser{ + position: pos, + peeker: p, + } +} + +func (p *parser) parse(rootMetadata *types.MisconfigMetadata) (Node, error) { + root, err := p.parseElement(rootMetadata) + if err != nil { + return nil, err + } + root.(*node).updateMetadata("") + return root, nil +} + +func (p *parser) parseElement(parentMetadata *types.MisconfigMetadata) (Node, error) { + if err := 
p.parseWhitespace(); err != nil { + return nil, err + } + n, err := p.parseValue(parentMetadata) + if err != nil { + return nil, err + } + if err := p.parseWhitespace(); err != nil { + return nil, err + } + return n, nil +} + +func (p *parser) parseValue(parentMetadata *types.MisconfigMetadata) (Node, error) { + c, err := p.peeker.Peek() + if err != nil { + return nil, err + } + + switch c { + case '/': + return p.parseComment(parentMetadata) + case '"': + return p.parseString(parentMetadata) + case '{': + return p.parseObject(parentMetadata) + case '[': + return p.parseArray(parentMetadata) + case 'n': + return p.parseNull(parentMetadata) + case 't', 'f': + return p.parseBoolean(parentMetadata) + default: + if c == '-' || (c >= '0' && c <= '9') { + return p.parseNumber(parentMetadata) + } + return nil, fmt.Errorf("unexpected character '%c'", c) + } +} + +func (p *parser) next() (rune, error) { + b, err := p.peeker.Next() + if err != nil { + return 0, err + } + p.position.Column++ + p.size++ + return b, nil +} + +func (p *parser) undo() error { + if err := p.peeker.Undo(); err != nil { + return err + } + p.position.Column-- + p.size-- + return nil +} + +func (p *parser) makeError(format string, args ...interface{}) error { + return fmt.Errorf( + "error at line %d, column %d: %s", + p.position.Line, + p.position.Column, + fmt.Sprintf(format, args...), + ) +} + +func (p *parser) newNode(k Kind, parentMetadata *types.MisconfigMetadata) (*node, *types.MisconfigMetadata) { + n := &node{ + start: p.position, + kind: k, + } + metadata := types.NewMisconfigMetadata( + types.NewRange(parentMetadata.Range().GetFilename(), n.start.Line, n.end.Line, "", parentMetadata.Range().GetFS()), + n.ref, + ) + metadata.SetParentPtr(parentMetadata) + n.metadata = &metadata + return n, n.metadata +} + +func (n *node) updateMetadata(prefix string) { + + var full string + // nolint:gocritic + if strings.HasPrefix(n.ref, "[") { + full = prefix + n.ref + } else if prefix != "" { + full = prefix + "." 
+ n.ref + } else { + full = n.ref + } + + n.metadata.SetRange(types.NewRange(n.Metadata().Range().GetFilename(), + n.start.Line, + n.end.Line, + "", + n.Metadata().Range().GetFS())) + + n.metadata.SetReference(full) + + for i := range n.content { + n.content[i].(*node).updateMetadata(full) + } +} + +func (p *parser) swallowIfEqual(r rune) bool { + c, err := p.peeker.Peek() + if err != nil { + return false + } + if c != r { + return false + } + _, _ = p.next() + return true +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_array.go b/pkg/scanners/azure/arm/parser/armjson/parse_array.go new file mode 100644 index 000000000000..22191ba08adc --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_array.go @@ -0,0 +1,54 @@ +package armjson + +import ( + "fmt" + + "github.com/aquasecurity/trivy/pkg/types" +) + +func (p *parser) parseArray(parentMetadata *types.MisconfigMetadata) (Node, error) { + n, metadata := p.newNode(KindArray, parentMetadata) + + c, err := p.next() + if err != nil { + return nil, err + } + + if c != '[' { + return nil, p.makeError("expecting array delimiter") + } + if err := p.parseWhitespace(); err != nil { + return nil, err + } + // we've hit the end of the array + if p.swallowIfEqual(']') { + n.end = p.position + return n, nil + } + + // for each element + for { + + if err := p.parseWhitespace(); err != nil { + return nil, err + } + + val, err := p.parseElement(metadata) + if err != nil { + return nil, err + } + val.(*node).ref = fmt.Sprintf("[%d]", len(n.content)) + + n.content = append(n.content, val) + + // we've hit the end of the array + if p.swallowIfEqual(']') { + n.end = p.position + return n, nil + } + + if !p.swallowIfEqual(',') { + return nil, p.makeError("unexpected character - expecting , or ]") + } + } +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_array_test.go b/pkg/scanners/azure/arm/parser/armjson/parse_array_test.go new file mode 100644 index 000000000000..9d512bea4366 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_array_test.go @@ -0,0 +1,46 @@ +package armjson + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Array_Empty(t *testing.T) { + example := []byte(`[]`) + target := []int{} + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &target, &metadata)) + assert.Len(t, target, 0) +} + +func Test_Array_ToSlice(t *testing.T) { + example := []byte(`[1, 2, 3]`) + target := []int{} + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &target, &metadata)) + assert.Len(t, target, 3) + assert.EqualValues(t, []int{1, 2, 3}, target) +} + +func Test_Array_ToArray(t *testing.T) { + example := []byte(`[3, 2, 1]`) + target := [3]int{6, 6, 6} + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &target, &metadata)) + assert.Len(t, target, 3) + assert.EqualValues(t, [3]int{3, 2, 1}, target) +} + +func Test_Array_ToInterface(t *testing.T) { + example := []byte(`{ "List": [1, 2, 3] }`) + target := struct { + List interface{} + }{} + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &target, &metadata)) + assert.Len(t, target.List, 3) +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_boolean.go b/pkg/scanners/azure/arm/parser/armjson/parse_boolean.go new file mode 100644 index 000000000000..e327a4856b48 --- /dev/null +++ 
diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_boolean.go b/pkg/scanners/azure/arm/parser/armjson/parse_boolean.go new file mode 100644 index 000000000000..e327a4856b48 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_boolean.go @@ -0,0 +1,40 @@ +package armjson + +import ( + "fmt" + + "github.com/aquasecurity/trivy/pkg/types" +) + +var trueRunes = []rune("true") +var falseRunes = []rune("false") + +func (p *parser) parseBoolean(parentMetadata *types.MisconfigMetadata) (Node, error) { + + n, _ := p.newNode(KindBoolean, parentMetadata) + + r, err := p.peeker.Peek() + if err != nil { + return nil, err + } + + if r == 't' { + for _, expected := range trueRunes { + if !p.swallowIfEqual(expected) { + return nil, fmt.Errorf("unexpected character in boolean value") + } + } + n.raw = true + n.end = p.position + return n, nil + } + + for _, expected := range falseRunes { + if !p.swallowIfEqual(expected) { + return nil, fmt.Errorf("unexpected character in boolean value") + } + } + n.raw = false + n.end = p.position + return n, nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_boolean_test.go b/pkg/scanners/azure/arm/parser/armjson/parse_boolean_test.go new file mode 100644 index 000000000000..37f592e62b06 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_boolean_test.go @@ -0,0 +1,54 @@ +package armjson + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Boolean_True(t *testing.T) { + example := []byte(`true`) + var output bool + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.True(t, output) +} + +func Test_Boolean_False(t *testing.T) { + example := []byte(`false`) + var output bool + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.False(t, output) +} + +func Test_Boolean_ToNonBoolPointer(t *testing.T) { + example := []byte(`false`) + var output string + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.Error(t, err) +} + +func Test_Bool_ToUninitialisedPointer(t *testing.T) { + example := []byte(`true`) + var str *string + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, str, &metadata) + require.Error(t, err) + assert.Nil(t, str) +} + +func Test_Bool_ToInterface(t *testing.T) { + example := []byte(`true`) + var output interface{} + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.True(t, output.(bool)) +}
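The comment parser that follows is what lets ARM templates carry JSONC-style // and /* */ comments, which encoding/json rejects outright. A minimal usage sketch (assuming it sits in this package next to the tests above, with fmt imported; the Example function and its input are invented for illustration):

func Example_commentedInput() {
	src := []byte(`{
	  // a line comment
	  "name": "example" /* a block comment */
	}`)
	var target struct {
		Name string `json:"name"`
	}
	metadata := types.NewTestMisconfigMetadata()
	if err := Unmarshal(src, &target, &metadata); err != nil {
		panic(err)
	}
	fmt.Println(target.Name)
	// Output: example
}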
diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_comment.go b/pkg/scanners/azure/arm/parser/armjson/parse_comment.go new file mode 100644 index 000000000000..feb87676fdad --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_comment.go @@ -0,0 +1,101 @@ +package armjson + +import ( + "strings" + + "github.com/aquasecurity/trivy/pkg/types" +) + +func (p *parser) parseComment(parentMetadata *types.MisconfigMetadata) (Node, error) { + + if err := p.parseWhitespace(); err != nil { + return nil, err + } + + _, err := p.next() + if err != nil { + return nil, err + } + + b, err := p.next() + if err != nil { + return nil, err + } + + switch b { + case '/': + return p.parseLineComment(parentMetadata) + case '*': + return p.parseBlockComment(parentMetadata) + default: + return nil, p.makeError("expecting comment delimiter") + } +} + +func (p *parser) parseLineComment(parentMetadata *types.MisconfigMetadata) (Node, error) { + + n, _ := p.newNode(KindComment, parentMetadata) + + var sb strings.Builder + for { + c, err := p.next() + if err != nil { + return nil, err + } + if c == '\n' { + p.position.Column = 1 + p.position.Line++ + break + } + sb.WriteRune(c) + } + + n.raw = sb.String() + n.end = p.position + + if err := p.parseWhitespace(); err != nil { + return nil, err + } + return n, nil +} + +func (p *parser) parseBlockComment(parentMetadata *types.MisconfigMetadata) (Node, error) { + + n, _ := p.newNode(KindComment, parentMetadata) + + var sb strings.Builder + + for { + c, err := p.next() + if err != nil { + return nil, err + } + if c == '*' { + c, err := p.peeker.Peek() + if err != nil { + return nil, err + } + if c == '/' { + // consume the closing '/' so it is not re-read as the start of another comment + _, _ = p.next() + break + } + sb.WriteRune('*') + } else { + if c == '\n' { + p.position.Column = 1 + p.position.Line++ + } + sb.WriteRune(c) + } + } + + n.raw = sb.String() + n.end = p.position + + if err := p.parseWhitespace(); err != nil { + return nil, err + } + + return n, nil +}
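The test file that follows drives decoding through types implementing UnmarshalJSONWithMetadata, the hook declared later in unmarshal.go. Condensed to its essence (locatedString is invented for illustration; Node, Range and Decode are this package's API):

// locatedString records the line at which its value appeared in the source.
type locatedString struct {
	Line  int
	Value string
}

func (l *locatedString) UnmarshalJSONWithMetadata(node Node) error {
	// capture where this value was parsed, then delegate the actual decoding
	l.Line = node.Range().Start.Line
	return node.Decode(&l.Value)
}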
"BucketName" + }, + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "BucketKeyEnabled": { + "Ref": "BucketKeyEnabled" + } + } + ] + } + } + } + } +} +` + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal([]byte(input), &target, &metadata)) +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_null.go b/pkg/scanners/azure/arm/parser/armjson/parse_null.go new file mode 100644 index 000000000000..89209254813b --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_null.go @@ -0,0 +1,23 @@ +package armjson + +import ( + "fmt" + + "github.com/aquasecurity/trivy/pkg/types" +) + +var nullRunes = []rune("null") + +func (p *parser) parseNull(parentMetadata *types.MisconfigMetadata) (Node, error) { + + n, _ := p.newNode(KindNull, parentMetadata) + + for _, expected := range nullRunes { + if !p.swallowIfEqual(expected) { + return nil, fmt.Errorf("unexpected character") + } + } + n.raw = nil + n.end = p.position + return n, nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_null_test.go b/pkg/scanners/azure/arm/parser/armjson/parse_null_test.go new file mode 100644 index 000000000000..0b7d50d6342c --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_null_test.go @@ -0,0 +1,18 @@ +package armjson + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/require" +) + +func Test_Null(t *testing.T) { + example := []byte(`null`) + var output string + ref := &output + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &ref, &metadata) + require.NoError(t, err) +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_number.go b/pkg/scanners/azure/arm/parser/armjson/parse_number.go new file mode 100644 index 000000000000..a6e561b2c3cb --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_number.go @@ -0,0 +1,163 @@ +package armjson + +import ( + "fmt" + "strconv" + "strings" + + "github.com/aquasecurity/trivy/pkg/types" +) + +func (p *parser) parseNumber(parentMetadata *types.MisconfigMetadata) (Node, error) { + + n, _ := p.newNode(KindNumber, parentMetadata) + + var str string + + if p.swallowIfEqual('-') { + str = "-" + } + + integral, err := p.parseIntegral() + if err != nil { + return nil, err + } + fraction, err := p.parseFraction() + if err != nil { + return nil, err + } + exponent, err := p.parseExponent() + if err != nil { + return nil, err + } + + str = fmt.Sprintf("%s%s%s%s", str, integral, fraction, exponent) + n.end = p.position + + if fraction != "" || exponent != "" { + f, err := strconv.ParseFloat(str, 64) + if err != nil { + return nil, p.makeError("%s", err) + } + n.raw = f + return n, nil + } + + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, p.makeError("%s", err) + } + n.raw = i + + return n, nil +} + +func (p *parser) parseIntegral() (string, error) { + r, err := p.next() + if err != nil { + return "", err + } + if r == '0' { + r, _ := p.peeker.Peek() + if r >= '0' && r <= '9' { + return "", p.makeError("invalid number") + } + return "0", nil + } + + var sb strings.Builder + if r < '1' || r > '9' { + return "", p.makeError("invalid number") + } + sb.WriteRune(r) + + for { + r, err := p.next() + if err != nil { + return sb.String(), nil + } + if r < '0' || r > '9' { + return sb.String(), p.undo() + } + sb.WriteRune(r) + } +} + +func (p *parser) parseFraction() (string, error) { + r, err := p.next() + if err != nil { + return "", nil + } + if r != '.' 
{ + return "", p.undo() + } + + var sb strings.Builder + sb.WriteRune('.') + + for { + r, err := p.next() + if err != nil { + break + } + if r < '0' || r > '9' { + if err := p.undo(); err != nil { + return "", err + } + break + } + sb.WriteRune(r) + } + + str := sb.String() + if str == "." { + return "", p.makeError("invalid number - missing digits after decimal point") + } + + return str, nil +} + +func (p *parser) parseExponent() (string, error) { + r, err := p.next() + if err != nil { + return "", nil + } + if r != 'e' && r != 'E' { + return "", p.undo() + } + + var sb strings.Builder + sb.WriteRune(r) + + r, err = p.next() + if err != nil { + return "", nil + } + hasDigits := r >= '0' && r <= '9' + if r != '-' && r != '+' && !hasDigits { + return "", p.undo() + } + + sb.WriteRune(r) + + for { + r, err := p.next() + if err != nil { + break + } + if r < '0' || r > '9' { + if err := p.undo(); err != nil { + return "", err + } + break + } + hasDigits = true + sb.WriteRune(r) + } + + if !hasDigits { + return "", p.makeError("invalid number - no digits in exponent") + } + + return sb.String(), nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_number_test.go b/pkg/scanners/azure/arm/parser/armjson/parse_number_test.go new file mode 100644 index 000000000000..aae2959e5e92 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_number_test.go @@ -0,0 +1,178 @@ +package armjson + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Number_IntToInt(t *testing.T) { + example := []byte(`123`) + var output int + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.Equal(t, 123, output) +} + +func Test_Number_IntToFloat(t *testing.T) { + example := []byte(`123`) + var output float64 + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.Equal(t, 123.0, output) +} + +func Test_Number_FloatToFloat(t *testing.T) { + example := []byte(`123.456`) + var output float64 + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.Equal(t, 123.456, output) +} + +func Test_Number_FloatToInt(t *testing.T) { + example := []byte(`123.456`) + var output int + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.Equal(t, 123, output) +} + +func Test_Number_FloatWithExponent(t *testing.T) { + cases := []struct { + in string + out float64 + }{ + { + in: `123.456e10`, + out: 123.456e+10, + }, + { + in: `123e+1`, + out: 123e+1, + }, + { + in: `123e-2`, + out: 123e-2, + }, + } + for _, test := range cases { + t.Run(test.in, func(t *testing.T) { + example := []byte(test.in) + var output float64 + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.Equal(t, test.out, output) + + }) + } +} + +func Test_Number_IntWithExponent(t *testing.T) { + cases := []struct { + in string + out int64 + }{ + { + in: `123e10`, + out: 123e+10, + }, + { + in: `123e+1`, + out: 123e+1, + }, + } + for _, test := range cases { + t.Run(test.in, func(t *testing.T) { + example := []byte(test.in) + var output int64 + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, 
err) + assert.Equal(t, test.out, output) + + }) + } +} + +func Test_Number_Ints(t *testing.T) { + cases := []struct { + in string + out int64 + err bool + }{ + { + in: `123e10`, + out: 123e+10, + }, + { + in: `-1`, + out: -1, + }, + { + in: `1.0123`, + out: 1, + }, + { + in: `0`, + out: 0, + }, + { + in: `01`, + err: true, + }, + { + in: ``, + err: true, + }, + { + in: `+1`, + err: true, + }, + { + in: `e`, + err: true, + }, + + { + in: `.123`, + err: true, + }, + + { + in: `.`, + err: true, + }, + + { + in: `00`, + err: true, + }, + { + in: `-`, + err: true, + }, + } + for _, test := range cases { + t.Run(test.in, func(t *testing.T) { + example := []byte(test.in) + var output int64 + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + if test.err { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, test.out, output) + }) + } +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_object.go b/pkg/scanners/azure/arm/parser/armjson/parse_object.go new file mode 100644 index 000000000000..bc815116b174 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_object.go @@ -0,0 +1,143 @@ +package armjson + +import ( + "github.com/aquasecurity/trivy/pkg/types" +) + +func (p *parser) parseObject(parentMetadata *types.MisconfigMetadata) (Node, error) { + + n, metadata := p.newNode(KindObject, parentMetadata) + + c, err := p.next() + if err != nil { + return nil, err + } + + if c != '{' { + return nil, p.makeError("expecting object delimiter") + } + + if err := p.parseWhitespace(); err != nil { + return nil, err + } + + // we've hit the end of the object + if p.swallowIfEqual('}') { + n.end = p.position + return n, nil + } + + var nextComments []Node + return p.iterateObject(nextComments, metadata, n) + +} + +// nolint: cyclop +func (p *parser) iterateObject(nextComments []Node, metadata *types.MisconfigMetadata, n *node) (Node, error) { + for { + + if err := p.parseWhitespace(); err != nil { + return nil, err + } + + comments := make([]Node, len(nextComments)) + copy(comments, nextComments) + nextComments = nil + for { + peeked, err := p.peeker.Peek() + if err != nil { + return nil, err + } + if peeked != '/' { + break + } + comment, err := p.parseComment(metadata) + if err != nil { + return nil, err + } + comments = append(comments, comment) + } + + if comments != nil { + if err := p.parseWhitespace(); err != nil { + return nil, err + } + } + + key, err := p.parseString(metadata) + if err != nil { + return nil, err + } + + if err := p.parseWhitespace(); err != nil { + return nil, err + } + + if !p.swallowIfEqual(':') { + return nil, p.makeError("invalid character, expecting ':'") + } + + val, err := p.parseElement(metadata) + if err != nil { + return nil, err + } + ref := key.(*node).raw.(string) + key.(*node).ref = ref + val.(*node).ref = ref + + for { + peeked, err := p.peeker.Peek() + if err != nil { + return nil, err + } + if peeked != '/' { + break + } + comment, err := p.parseComment(metadata) + if err != nil { + return nil, err + } + comments = append(comments, comment) + } + + // we've hit the end of the object + if p.swallowIfEqual('}') { + key.(*node).comments = comments + val.(*node).comments = comments + n.content = append(n.content, key, val) + n.end = p.position + return n, nil + } + + if !p.swallowIfEqual(',') { + return nil, p.makeError("unexpected character - expecting , or }") + } + + for { + if err := p.parseWhitespace(); err != nil { + return nil, err + } + peeked, err := p.peeker.Peek() 
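+ // a '/' here begins a trailing comment; comments that start on a later line than the value are deferred to the next key/value pair via nextComments, the rest stay attached to this one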
+ if err != nil { + return nil, err + } + if peeked != '/' { + break + } + comment, err := p.parseComment(metadata) + if err != nil { + return nil, err + } + if comment.Range().Start.Line > val.Range().End.Line { + nextComments = append(nextComments, comment) + } else { + comments = append(comments, comment) + } + } + + key.(*node).comments = comments + val.(*node).comments = comments + n.content = append(n.content, key, val) + + } +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_object_test.go b/pkg/scanners/azure/arm/parser/armjson/parse_object_test.go new file mode 100644 index 000000000000..9559cd3d1361 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_object_test.go @@ -0,0 +1,115 @@ +package armjson + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Object(t *testing.T) { + example := []byte(`{ + "name": "testing", + "balance": 3.14 +}`) + target := struct { + Name string `json:"name"` + Balance float64 `json:"balance"` + }{} + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &target, &metadata)) + assert.Equal(t, "testing", target.Name) + assert.Equal(t, 3.14, target.Balance) +} + +func Test_ObjectWithPointers(t *testing.T) { + example := []byte(`{ + "name": "testing", + "balance": 3.14 +}`) + target := struct { + Name *string `json:"name"` + Balance *float64 `json:"balance"` + }{} + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &target, &metadata)) + assert.Equal(t, "testing", *target.Name) + assert.Equal(t, 3.14, *target.Balance) +} + +type nestedParent struct { + Child *nestedChild + Name string +} + +type nestedChild struct { + Blah string `json:"secret"` +} + +func Test_ObjectWithPointerToNestedStruct(t *testing.T) { + example := []byte(`{ + "Child": { + "secret": "password" + }, + "Name": "testing" +}`) + + var parent nestedParent + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &parent, &metadata)) + assert.Equal(t, "testing", parent.Name) + assert.Equal(t, "password", parent.Child.Blah) +} + +func Test_Object_ToMapStringInterface(t *testing.T) { + example := []byte(`{ + "Name": "testing" +}`) + + parent := make(map[string]interface{}) + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &parent, &metadata)) + assert.Equal(t, "testing", parent["Name"]) +} + +func Test_Object_ToNestedMapStringInterfaceFromIAM(t *testing.T) { + example := []byte(` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "ec2:*", + "Resource": "*", + "Condition": { + "Bool": { + "aws:MultiFactorAuthPresent": ["true"] + } + } + } + ] +}`) + + parent := make(map[string]interface{}) + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &parent, &metadata)) +} + +func Test_Object_ToNestedMapStringInterface(t *testing.T) { + example := []byte(`{ + "Child": { + "secret": "password" + }, + "Name": "testing" +}`) + + parent := make(map[string]interface{}) + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, Unmarshal(example, &parent, &metadata)) + assert.Equal(t, "testing", parent["Name"]) + child := parent["Child"].(map[string]interface{}) + assert.Equal(t, "password", child["secret"]) +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_string.go b/pkg/scanners/azure/arm/parser/armjson/parse_string.go 
new file mode 100644 index 000000000000..fea67bfe6a0f --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_string.go @@ -0,0 +1,91 @@ +package armjson + +import ( + "strconv" + "strings" + + "github.com/aquasecurity/trivy/pkg/types" +) + +var escapes = map[rune]string{ + '\\': "\\", + '/': "/", + '"': "\"", + 'n': "\n", + 'r': "\r", + 'b': "\b", + 'f': "\f", + 't': "\t", +} + +// nolint: cyclop +func (p *parser) parseString(parentMetadata *types.MisconfigMetadata) (Node, error) { + + n, _ := p.newNode(KindString, parentMetadata) + + b, err := p.next() + if err != nil { + return nil, err + } + + if b != '"' { + return nil, p.makeError("expecting string delimiter") + } + + var sb strings.Builder + + var inEscape bool + var inHex bool + var hex []rune + + for { + c, err := p.next() + if err != nil { + return nil, err + } + // nolint: gocritic + if inHex { + switch { + case c >= 'a' && c <= 'f', c >= 'A' && c <= 'F', c >= '0' && c <= '9': + hex = append(hex, c) + if len(hex) == 4 { + inHex = false + char, err := strconv.Unquote("\\u" + string(hex)) + if err != nil { + return nil, p.makeError("invalid unicode character '%s'", err) + } + sb.WriteString(char) + hex = nil + } + default: + return nil, p.makeError("invalid hexadecimal escape sequence '\\%s%c'", string(hex), c) + } + } else if inEscape { + inEscape = false + if c == 'u' { + inHex = true + continue + } + seq, ok := escapes[c] + if !ok { + return nil, p.makeError("invalid escape sequence '\\%c'", c) + } + sb.WriteString(seq) + } else { + switch c { + case '\\': + inEscape = true + case '"': + n.raw = sb.String() + n.end = p.position + return n, nil + default: + if c < 0x20 || c > 0x10FFFF { + return nil, p.makeError("invalid unescaped character '0x%X'", c) + } + sb.WriteRune(c) + } + } + + } +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_string_test.go b/pkg/scanners/azure/arm/parser/armjson/parse_string_test.go new file mode 100644 index 000000000000..a906058bf4e4 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_string_test.go @@ -0,0 +1,37 @@ +package armjson + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_String(t *testing.T) { + example := []byte(`"hello"`) + var output string + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.Equal(t, "hello", output) +} + +func Test_StringToUninitialisedPointer(t *testing.T) { + example := []byte(`"hello"`) + var str *string + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, str, &metadata) + require.Error(t, err) + assert.Nil(t, str) +} + +func Test_String_ToInterface(t *testing.T) { + example := []byte(`"hello"`) + var output interface{} + metadata := types.NewTestMisconfigMetadata() + err := Unmarshal(example, &output, &metadata) + require.NoError(t, err) + assert.Equal(t, "hello", output) +} diff --git a/pkg/scanners/azure/arm/parser/armjson/parse_whitespace.go b/pkg/scanners/azure/arm/parser/armjson/parse_whitespace.go new file mode 100644 index 000000000000..ad5751147d3e --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/parse_whitespace.go @@ -0,0 +1,29 @@ +package armjson + +import ( + "errors" + "io" +) + +func (p *parser) parseWhitespace() error { + for { + b, err := p.peeker.Peek() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + return err + } + switch b { + case 0x0d,
0x20, 0x09: + case 0x0a: + p.position.Column = 1 + p.position.Line++ + default: + return nil + } + if _, err := p.next(); err != nil { + return err + } + } +} diff --git a/pkg/scanners/azure/arm/parser/armjson/reader.go b/pkg/scanners/azure/arm/parser/armjson/reader.go new file mode 100644 index 000000000000..e05769f02da9 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/reader.go @@ -0,0 +1,36 @@ +package armjson + +import ( + "bufio" + "io" +) + +type PeekReader struct { + underlying *bufio.Reader +} + +func NewPeekReader(reader io.Reader) *PeekReader { + return &PeekReader{ + underlying: bufio.NewReader(reader), + } +} + +func (r *PeekReader) Next() (rune, error) { + c, _, err := r.underlying.ReadRune() + return c, err +} + +func (r *PeekReader) Undo() error { + return r.underlying.UnreadRune() +} + +func (r *PeekReader) Peek() (rune, error) { + c, _, err := r.underlying.ReadRune() + if err != nil { + return 0, err + } + if err := r.underlying.UnreadRune(); err != nil { + return 0, err + } + return c, nil +} diff --git a/pkg/scanners/azure/arm/parser/armjson/reader_test.go b/pkg/scanners/azure/arm/parser/armjson/reader_test.go new file mode 100644 index 000000000000..8017f30f9f98 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/reader_test.go @@ -0,0 +1,62 @@ +package armjson + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var input = `abcdefghijklmnopqrstuvwxyz` + +func Test_Peeker(t *testing.T) { + peeker := NewPeekReader(strings.NewReader(input)) + + var b rune + var err error + + for i := 0; i < 30; i++ { + b, err = peeker.Peek() + require.NoError(t, err) + assert.Equal(t, ('a'), b) + } + + b, err = peeker.Next() + require.NoError(t, err) + assert.Equal(t, ('a'), b) + + b, err = peeker.Next() + require.NoError(t, err) + assert.Equal(t, ('b'), b) + + b, err = peeker.Peek() + require.NoError(t, err) + assert.Equal(t, ('c'), b) + + for i := 0; i < 5; i++ { + b, err = peeker.Next() + require.NoError(t, err) + assert.Equal(t, []rune(input)[2+i], b) + } + + b, err = peeker.Peek() + require.NoError(t, err) + assert.Equal(t, ('h'), b) + + b, err = peeker.Next() + require.NoError(t, err) + assert.Equal(t, ('h'), b) + for i := 0; i < 18; i++ { + b, err = peeker.Next() + require.NoError(t, err) + assert.Equal(t, []rune(input)[8+i], b) + } + + _, err = peeker.Peek() + require.Error(t, err) + + _, err = peeker.Next() + require.Error(t, err) + +} diff --git a/pkg/scanners/azure/arm/parser/armjson/unmarshal.go b/pkg/scanners/azure/arm/parser/armjson/unmarshal.go new file mode 100644 index 000000000000..a0cf229ceee8 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/armjson/unmarshal.go @@ -0,0 +1,40 @@ +package armjson + +import ( + "bytes" + "io" + + "github.com/aquasecurity/trivy/pkg/types" +) + +type Unmarshaller interface { + UnmarshalJSONWithMetadata(node Node) error +} + +type MetadataReceiver interface { + SetMetadata(m *types.MisconfigMetadata) +} + +func Unmarshal(data []byte, target interface{}, metadata *types.MisconfigMetadata) error { + node, err := newParser(NewPeekReader(bytes.NewReader(data)), Position{1, 1}).parse(metadata) + if err != nil { + return err + } + if err := node.Decode(target); err != nil { + return err + } + + return nil +} + +func UnmarshalFromReader(r io.ReadSeeker, target interface{}, metadata *types.MisconfigMetadata) error { + node, err := newParser(NewPeekReader(r), Position{1, 1}).parse(metadata) + if err != nil { + return err + } + if err := 
node.Decode(target); err != nil { + return err + } + + return nil +} diff --git a/pkg/scanners/azure/arm/parser/parser.go b/pkg/scanners/azure/arm/parser/parser.go new file mode 100644 index 000000000000..97c9a4e037b7 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/parser.go @@ -0,0 +1,194 @@ +package parser + +import ( + "context" + "fmt" + "io" + "io/fs" + "path/filepath" + "strings" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/scanners/azure/arm/parser/armjson" + "github.com/aquasecurity/trivy/pkg/scanners/azure/resolver" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +type Parser struct { + targetFS fs.FS + skipRequired bool + debug debug.Logger +} + +func (p *Parser) SetDebugWriter(writer io.Writer) { + p.debug = debug.New(writer, "azure", "arm") +} + +func (p *Parser) SetSkipRequiredCheck(b bool) { + p.skipRequired = b +} + +func New(targetFS fs.FS, opts ...options.ParserOption) *Parser { + p := &Parser{ + targetFS: targetFS, + } + for _, opt := range opts { + opt(p) + } + return p +} + +func (p *Parser) ParseFS(ctx context.Context, dir string) ([]azure.Deployment, error) { + + var deployments []azure.Deployment + + if err := fs.WalkDir(p.targetFS, dir, func(path string, entry fs.DirEntry, err error) error { + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if entry.IsDir() { + return nil + } + if !p.Required(path) { + return nil + } + f, err := p.targetFS.Open(path) + if err != nil { + return err + } + defer f.Close() + deployment, err := p.parseFile(f, path) + if err != nil { + return err + } + deployments = append(deployments, *deployment) + return nil + }); err != nil { + return nil, err + } + + return deployments, nil +} + +func (p *Parser) Required(path string) bool { + if p.skipRequired { + return true + } + if !strings.HasSuffix(path, ".json") { + return false + } + data, err := fs.ReadFile(p.targetFS, path) + if err != nil { + return false + } + var template Template + root := types.NewMisconfigMetadata( + types.NewRange(filepath.Base(path), 0, 0, "", p.targetFS), + "", + ) + if err := armjson.Unmarshal(data, &template, &root); err != nil { + p.debug.Log("Error scanning %s: %s", path, err) + return false + } + + if template.Schema.Kind != azure.KindString { + return false + } + + return strings.HasPrefix(template.Schema.AsString(), "https://schema.management.azure.com") +} + +func (p *Parser) parseFile(r io.Reader, filename string) (*azure.Deployment, error) { + var template Template + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + root := types.NewMisconfigMetadata( + types.NewRange(filename, 0, 0, "", p.targetFS), + "", + ).WithInternal(resolver.NewResolver()) + + if err := armjson.Unmarshal(data, &template, &root); err != nil { + return nil, fmt.Errorf("failed to parse template: %w", err) + } + return p.convertTemplate(template), nil +} + +func (p *Parser) convertTemplate(template Template) *azure.Deployment { + + deployment := azure.Deployment{ + Metadata: template.Metadata, + TargetScope: azure.ScopeResourceGroup, // TODO: override from --resource-group? 
+ Parameters: nil, + Variables: nil, + Resources: nil, + Outputs: nil, + } + + if r, ok := template.Metadata.Internal().(resolver.Resolver); ok { + r.SetDeployment(&deployment) + } + + // TODO: the references passed here should probably not be the name - maybe params.NAME.DefaultValue? + for name, param := range template.Parameters { + deployment.Parameters = append(deployment.Parameters, azure.Parameter{ + Variable: azure.Variable{ + Name: name, + Value: param.DefaultValue, + }, + Default: param.DefaultValue, + Decorators: nil, + }) + } + + for name, variable := range template.Variables { + deployment.Variables = append(deployment.Variables, azure.Variable{ + Name: name, + Value: variable, + }) + } + + for name, output := range template.Outputs { + deployment.Outputs = append(deployment.Outputs, azure.Output{ + Name: name, + Value: output, + }) + } + + for _, resource := range template.Resources { + deployment.Resources = append(deployment.Resources, p.convertResource(resource)) + } + + return &deployment +} + +func (p *Parser) convertResource(input Resource) azure.Resource { + + var children []azure.Resource + + for _, child := range input.Resources { + children = append(children, p.convertResource(child)) + } + + resource := azure.Resource{ + Metadata: input.Metadata, + APIVersion: input.APIVersion, + Type: input.Type, + Kind: input.Kind, + Name: input.Name, + Location: input.Location, + Properties: input.Properties, + Resources: children, + } + + return resource +} diff --git a/pkg/scanners/azure/arm/parser/parser_test.go b/pkg/scanners/azure/arm/parser/parser_test.go new file mode 100644 index 000000000000..651df2c71554 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/parser_test.go @@ -0,0 +1,338 @@ +package parser + +import ( + "context" + "io/fs" + "os" + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + "github.com/liamg/memoryfs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/scanners/azure/resolver" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +func createMetadata(targetFS fs.FS, filename string, start, end int, ref string, parent *types.MisconfigMetadata) types.MisconfigMetadata { + child := types.NewMisconfigMetadata(types.NewRange(filename, start, end, "", targetFS), ref) + if parent != nil { + child.SetParentPtr(parent) + } + return child +} + +func TestParser_Parse(t *testing.T) { + + filename := "example.json" + + targetFS := memoryfs.New() + + tests := []struct { + name string + input string + want func() azure.Deployment + wantDeployment bool + }{ + { + name: "invalid code", + input: `blah`, + wantDeployment: false, + }, + { + name: "basic param", + input: `{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", // another one + "contentVersion": "1.0.0.0", + "parameters": { + "storagePrefix": { + "type": "string", + "defaultValue": "x", + "maxLength": 11, + "minLength": 3 + } + }, + "resources": [] +}`, + want: func() azure.Deployment { + + root := createMetadata(targetFS, filename, 0, 0, "", nil).WithInternal(resolver.NewResolver()) + metadata := createMetadata(targetFS, filename, 1, 13, "", &root) + parametersMetadata := createMetadata(targetFS, filename, 4, 11, "parameters", &metadata) + storageMetadata := createMetadata(targetFS, filename, 5, 10, "parameters.storagePrefix", ¶metersMetadata) + + return azure.Deployment{ + 
Metadata: metadata, + TargetScope: azure.ScopeResourceGroup, + Parameters: []azure.Parameter{ + { + Variable: azure.Variable{ + Name: "storagePrefix", + Value: azure.NewValue("x", createMetadata(targetFS, filename, 7, 7, "parameters.storagePrefix.defaultValue", &storageMetadata)), + }, + Default: azure.NewValue("x", createMetadata(targetFS, filename, 7, 7, "parameters.storagePrefix.defaultValue", &storageMetadata)), + Decorators: nil, + }, + }, + } + }, + wantDeployment: true, + }, + { + name: "storageAccount", + input: `{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", // another one + "contentVersion": "1.0.0.0", + "parameters": {}, + "resources": [ +{ + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2022-05-01", + "name": "myResource", + "location": "string", + "tags": { + "tagName1": "tagValue1", + "tagName2": "tagValue2" + }, + "sku": { + "name": "string" + }, + "kind": "string", + "extendedLocation": { + "name": "string", + "type": "EdgeZone" + }, + "identity": { + "type": "string", + "userAssignedIdentities": {} + }, + "properties": { + "allowSharedKeyAccess":false, + "customDomain": { + "name": "string", + "useSubDomainName":false, + "number": 123 + }, + "networkAcls": [ + { + "bypass": "AzureServices1" + }, + { + "bypass": "AzureServices2" + } + ] + } +} +] +}`, + want: func() azure.Deployment { + + rootMetadata := createMetadata(targetFS, filename, 0, 0, "", nil).WithInternal(resolver.NewResolver()) + fileMetadata := createMetadata(targetFS, filename, 1, 45, "", &rootMetadata) + resourcesMetadata := createMetadata(targetFS, filename, 5, 44, "resources", &fileMetadata) + + resourceMetadata := createMetadata(targetFS, filename, 6, 43, "resources[0]", &resourcesMetadata) + + propertiesMetadata := createMetadata(targetFS, filename, 27, 42, "resources[0].properties", &resourceMetadata) + + customDomainMetadata := createMetadata(targetFS, filename, 29, 33, "resources[0].properties.customDomain", &propertiesMetadata) + networkACLListMetadata := createMetadata(targetFS, filename, 34, 41, "resources[0].properties.networkAcls", &propertiesMetadata) + + networkACL0Metadata := createMetadata(targetFS, filename, 35, 37, "resources[0].properties.networkAcls[0]", &networkACLListMetadata) + networkACL1Metadata := createMetadata(targetFS, filename, 38, 40, "resources[0].properties.networkAcls[1]", &networkACLListMetadata) + + return azure.Deployment{ + Metadata: fileMetadata, + TargetScope: azure.ScopeResourceGroup, + Resources: []azure.Resource{ + { + Metadata: resourceMetadata, + APIVersion: azure.NewValue( + "2022-05-01", + createMetadata(targetFS, filename, 8, 8, "resources[0].apiVersion", &resourceMetadata), + ), + Type: azure.NewValue( + "Microsoft.Storage/storageAccounts", + createMetadata(targetFS, filename, 7, 7, "resources[0].type", &resourceMetadata), + ), + Kind: azure.NewValue( + "string", + createMetadata(targetFS, filename, 18, 18, "resources[0].kind", &resourceMetadata), + ), + Name: azure.NewValue( + "myResource", + createMetadata(targetFS, filename, 9, 9, "resources[0].name", &resourceMetadata), + ), + Location: azure.NewValue( + "string", + createMetadata(targetFS, filename, 10, 10, "resources[0].location", &resourceMetadata), + ), + Properties: azure.NewValue( + map[string]azure.Value{ + "allowSharedKeyAccess": azure.NewValue(false, createMetadata(targetFS, filename, 28, 28, "resources[0].properties.allowSharedKeyAccess", &propertiesMetadata)), + "customDomain": azure.NewValue( + map[string]azure.Value{ + "name": 
azure.NewValue("string", createMetadata(targetFS, filename, 30, 30, "resources[0].properties.customDomain.name", &customDomainMetadata)), + "useSubDomainName": azure.NewValue(false, createMetadata(targetFS, filename, 31, 31, "resources[0].properties.customDomain.useSubDomainName", &customDomainMetadata)), + "number": azure.NewValue(int64(123), createMetadata(targetFS, filename, 32, 32, "resources[0].properties.customDomain.number", &customDomainMetadata)), + }, customDomainMetadata), + "networkAcls": azure.NewValue( + []azure.Value{ + azure.NewValue( + map[string]azure.Value{ + "bypass": azure.NewValue("AzureServices1", createMetadata(targetFS, filename, 36, 36, "resources[0].properties.networkAcls[0].bypass", &networkACL0Metadata)), + }, + networkACL0Metadata, + ), + azure.NewValue( + map[string]azure.Value{ + "bypass": azure.NewValue("AzureServices2", createMetadata(targetFS, filename, 39, 39, "resources[0].properties.networkAcls[1].bypass", &networkACL1Metadata)), + }, + networkACL1Metadata, + ), + }, networkACLListMetadata), + }, + propertiesMetadata, + ), + }, + }, + } + }, + + wantDeployment: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + require.NoError(t, targetFS.WriteFile(filename, []byte(tt.input), 0644)) + + p := New(targetFS, options.ParserWithDebug(os.Stderr)) + got, err := p.ParseFS(context.Background(), ".") + require.NoError(t, err) + + if !tt.wantDeployment { + assert.Len(t, got, 0) + return + } + + require.Len(t, got, 1) + want := tt.want() + g := got[0] + + require.Equal(t, want, g) + }) + } +} + +func Test_NestedResourceParsing(t *testing.T) { + + input := ` +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "environment": { + "type": "string", + "allowedValues": [ + "dev", + "test", + "prod" + ] + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "Location for all resources." 
+ } + }, + "storageAccountSkuName": { + "type": "string", + "defaultValue": "Standard_LRS" + }, + "storageAccountSkuTier": { + "type": "string", + "defaultValue": "Standard" + } + }, + "variables": { + "uniquePart": "[take(uniqueString(resourceGroup().id), 4)]", + "storageAccountName": "[concat('mystorageaccount', variables('uniquePart'), parameters('environment'))]", + "queueName": "myqueue" + }, + "resources": [ + { + "type": "Microsoft.Storage/storageAccounts", + "name": "[variables('storageAccountName')]", + "location": "[parameters('location')]", + "apiVersion": "2019-06-01", + "sku": { + "name": "[parameters('storageAccountSkuName')]", + "tier": "[parameters('storageAccountSkuTier')]" + }, + "kind": "StorageV2", + "properties": {}, + "resources": [ + { + "name": "[concat('default/', variables('queueName'))]", + "type": "queueServices/queues", + "apiVersion": "2019-06-01", + "dependsOn": [ + "[variables('storageAccountName')]" + ], + "properties": { + "metadata": {} + } + } + ] + } + ] +} +` + + targetFS := memoryfs.New() + + require.NoError(t, targetFS.WriteFile("nested.json", []byte(input), 0644)) + + p := New(targetFS, options.ParserWithDebug(os.Stderr)) + got, err := p.ParseFS(context.Background(), ".") + require.NoError(t, err) + require.Len(t, got, 1) + + deployment := got[0] + + require.Len(t, deployment.Resources, 1) + + storageAccountResource := deployment.Resources[0] + + require.Len(t, storageAccountResource.Resources, 1) + + queue := storageAccountResource.Resources[0] + + assert.Equal(t, "queueServices/queues", queue.Type.AsString()) +} + +// +// func Test_JsonFile(t *testing.T) { +// +// input, err := os.ReadFile("testdata/postgres.json") +// require.NoError(t, err) +// +// targetFS := memoryfs.New() +// +// require.NoError(t, targetFS.WriteFile("postgres.json", input, 0644)) +// +// p := New(targetFS, options.ParserWithDebug(os.Stderr)) +// got, err := p.ParseFS(context.Background(), ".") +// require.NoError(t, err) +// +// got[0].Resources[3].Name.Resolve() +// +// name := got[0].Resources[3].Name.AsString() +// assert.Equal(t, "myserver", name) +// +// } diff --git a/pkg/scanners/azure/arm/parser/template.go b/pkg/scanners/azure/arm/parser/template.go new file mode 100644 index 000000000000..0308ff1886ca --- /dev/null +++ b/pkg/scanners/azure/arm/parser/template.go @@ -0,0 +1,78 @@ +package parser + +import ( + types2 "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/scanners/azure/arm/parser/armjson" + "github.com/aquasecurity/trivy/pkg/types" +) + +type Template struct { + Metadata types.MisconfigMetadata `json:"-"` + Schema types2.Value `json:"$schema"` + ContentVersion types2.Value `json:"contentVersion"` + APIProfile types2.Value `json:"apiProfile"` + Parameters map[string]Parameter `json:"parameters"` + Variables map[string]types2.Value `json:"variables"` + Functions []Function `json:"functions"` + Resources []Resource `json:"resources"` + Outputs map[string]types2.Value `json:"outputs"` +} + +type Parameter struct { + Metadata types.MisconfigMetadata + Type types2.Value `json:"type"` + DefaultValue types2.Value `json:"defaultValue"` + MaxLength types2.Value `json:"maxLength"` + MinLength types2.Value `json:"minLength"` +} + +type Function struct{} + +type Resource struct { + Metadata types.MisconfigMetadata `json:"-"` + innerResource +} + +func (t *Template) SetMetadata(m *types.MisconfigMetadata) { + t.Metadata = *m +} + +func (r *Resource) SetMetadata(m *types.MisconfigMetadata) { + r.Metadata = *m 
+} + +func (p *Parameter) SetMetadata(m *types.MisconfigMetadata) { + p.Metadata = *m +} + +type innerResource struct { + APIVersion types2.Value `json:"apiVersion"` + Type types2.Value `json:"type"` + Kind types2.Value `json:"kind"` + Name types2.Value `json:"name"` + Location types2.Value `json:"location"` + Tags types2.Value `json:"tags"` + Sku types2.Value `json:"sku"` + Properties types2.Value `json:"properties"` + Resources []Resource `json:"resources"` +} + +func (v *Resource) UnmarshalJSONWithMetadata(node armjson.Node) error { + + if err := node.Decode(&v.innerResource); err != nil { + return err + } + + v.Metadata = node.Metadata() + + for _, comment := range node.Comments() { + var str string + if err := comment.Decode(&str); err != nil { + return err + } + // TODO + // v.Metadata.Comments = append(v.Metadata.Comments, str) + } + + return nil +} diff --git a/pkg/scanners/azure/arm/parser/template_test.go b/pkg/scanners/azure/arm/parser/template_test.go new file mode 100644 index 000000000000..7dde519b1546 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/template_test.go @@ -0,0 +1,60 @@ +package parser + +import ( + "os" + "path/filepath" + "testing" + + types2 "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/scanners/azure/arm/parser/armjson" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_JSONUnmarshal(t *testing.T) { + data, err := os.ReadFile(filepath.Join("testdata", "example.json")) + require.NoError(t, err) + var target Template + metadata := types.NewTestMisconfigMetadata() + require.NoError(t, armjson.Unmarshal(data, &target, &metadata)) + assert.Equal(t, + "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + target.Schema.AsString(), + ) + require.Len(t, target.Schema.Comments, 2) + assert.Equal(t, " wow this is a comment", target.Schema.Comments[0]) + assert.Equal(t, " another one", target.Schema.Comments[1]) + + assert.Equal(t, "1.0.0.0", target.ContentVersion.Raw()) + require.Len(t, target.ContentVersion.Comments, 1) + assert.Equal(t, " this version is great", target.ContentVersion.Comments[0]) + + require.Contains(t, target.Parameters, "storagePrefix") + prefix := target.Parameters["storagePrefix"] + /* + "type": "string", + "defaultValue": "x", + "maxLength": 11, + "minLength": 3 + */ + assert.Equal(t, "string", prefix.Type.Raw()) + assert.Equal(t, types2.KindString, prefix.Type.Kind) + assert.Equal(t, 8, prefix.Type.MisconfigMetadata.Range().GetStartLine()) + assert.Equal(t, 8, prefix.Type.MisconfigMetadata.Range().GetEndLine()) + + assert.Equal(t, "x", prefix.DefaultValue.Raw()) + assert.Equal(t, types2.KindString, prefix.DefaultValue.Kind) + assert.Equal(t, 9, prefix.DefaultValue.MisconfigMetadata.Range().GetStartLine()) + assert.Equal(t, 9, prefix.DefaultValue.MisconfigMetadata.Range().GetEndLine()) + + assert.Equal(t, int64(11), prefix.MaxLength.Raw()) + assert.Equal(t, types2.KindNumber, prefix.MaxLength.Kind) + assert.Equal(t, 10, prefix.MaxLength.MisconfigMetadata.Range().GetStartLine()) + assert.Equal(t, 10, prefix.MaxLength.MisconfigMetadata.Range().GetEndLine()) + + assert.Equal(t, int64(3), prefix.MinLength.Raw()) + assert.Equal(t, types2.KindNumber, prefix.MinLength.Kind) + assert.Equal(t, 11, prefix.MinLength.MisconfigMetadata.Range().GetStartLine()) + assert.Equal(t, 11, prefix.MinLength.MisconfigMetadata.Range().GetEndLine()) +} diff --git 
a/pkg/scanners/azure/arm/parser/testdata/example.json b/pkg/scanners/azure/arm/parser/testdata/example.json new file mode 100644 index 000000000000..9698ed1a0583 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/testdata/example.json @@ -0,0 +1,15 @@ +{ + // wow this is a comment + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", // another one + // this version is great + "contentVersion": "1.0.0.0", + "parameters": { + "storagePrefix": { + "type": "string", + "defaultValue": "x", + "maxLength": 11, + "minLength": 3 + } + }, + "resources": [] +} \ No newline at end of file diff --git a/pkg/scanners/azure/arm/parser/testdata/postgres.json b/pkg/scanners/azure/arm/parser/testdata/postgres.json new file mode 100644 index 000000000000..670733fdd308 --- /dev/null +++ b/pkg/scanners/azure/arm/parser/testdata/postgres.json @@ -0,0 +1,73 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "metadata": { + "_generator": { + "name": "bicep", + "version": "0.10.61.36676", + "templateHash": "8074447630975889785" + } + }, + "resources": [ + { + "type": "Microsoft.DBforPostgreSQL/servers", + "apiVersion": "2017-12-01", + "name": "myPostgreSQLServer", + "location": "westus", + "identity": { + "type": "SystemAssigned" + }, + "properties": { + "administratorLogin": "myadmin", + "administratorLoginPassword": "myadminpassword", + "version": "9.6", + "sslEnforcement": "Enabled", + "storageProfile": { + "storageMB": 5120 + }, + "createMode": "Default", + "minimalTlsVersion": "1.2", + "publicNetworkAccess": "Enabled", + "FirewallRules": [ + { + "name": "AllowAllAzureIps", + "startIpAddress": "0.0.0.0/0" + } + ] + } + }, + { + "type": "Microsoft.DBforPostgreSQL/servers/configurations", + "apiVersion": "2017-12-01", + "name": "[format('{0}/{1}', 'myPostgreSQLServer', 'connection_throttling')]", + "properties": { + "value": "OFF" + }, + "dependsOn": [ + "[resourceId('Microsoft.DBforPostgreSQL/servers', 'myPostgreSQLServer')]" + ] + }, + { + "type": "Microsoft.DBforPostgreSQL/servers/configurations", + "apiVersion": "2017-12-01", + "name": "[format('{0}/{1}', 'myPostgreSQLServer', 'log_checkpoints')]", + "properties": { + "value": "OFF" + }, + "dependsOn": [ + "[resourceId('Microsoft.DBforPostgreSQL/servers', 'myPostgreSQLServer')]" + ] + }, + { + "type": "Microsoft.DBforPostgreSQL/servers/configurations", + "apiVersion": "2017-12-01", + "name": "[format('{0}/{1}', 'myPostgreSQLServer', 'log_connections')]", + "properties": { + "value": "OFF" + }, + "dependsOn": [ + "[resourceId('Microsoft.DBforPostgreSQL/servers', 'myPostgreSQLServer')]" + ] + } + ] +} \ No newline at end of file diff --git a/pkg/scanners/azure/arm/scanner.go b/pkg/scanners/azure/arm/scanner.go new file mode 100644 index 000000000000..b2a6e26273b5 --- /dev/null +++ b/pkg/scanners/azure/arm/scanner.go @@ -0,0 +1,187 @@ +package arm + +import ( + "context" + "fmt" + + "io" + "io/fs" + "sync" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/state" + "github.com/aquasecurity/trivy/pkg/trules" + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/internal/adapters/arm" + 
"github.com/aquasecurity/trivy/pkg/scanners" + "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/scanners/azure/arm/parser" +) + +var _ scanners.FSScanner = (*Scanner)(nil) +var _ options.ConfigurableScanner = (*Scanner)(nil) + +type Scanner struct { + scannerOptions []options.ScannerOption + parserOptions []options.ParserOption + debug debug.Logger + frameworks []framework.Framework + skipRequired bool + regoOnly bool + loadEmbeddedPolicies bool + loadEmbeddedLibraries bool + policyDirs []string + policyReaders []io.Reader + regoScanner *rego.Scanner + spec string + sync.Mutex +} + +func (s *Scanner) SetSpec(spec string) { + s.spec = spec +} + +func (s *Scanner) SetRegoOnly(regoOnly bool) { + s.regoOnly = regoOnly +} + +func New(opts ...options.ScannerOption) *Scanner { + scanner := &Scanner{ + scannerOptions: opts, + } + for _, opt := range opts { + opt(scanner) + } + return scanner +} + +func (s *Scanner) Name() string { + return "Azure ARM" +} + +func (s *Scanner) SetDebugWriter(writer io.Writer) { + s.debug = debug.New(writer, "azure", "arm") + s.parserOptions = append(s.parserOptions, options.ParserWithDebug(writer)) +} + +func (s *Scanner) SetPolicyDirs(dirs ...string) { + s.policyDirs = dirs +} + +func (s *Scanner) SetSkipRequiredCheck(skipRequired bool) { + s.skipRequired = skipRequired +} +func (s *Scanner) SetPolicyReaders(readers []io.Reader) { + s.policyReaders = readers +} + +func (s *Scanner) SetPolicyFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} +func (s *Scanner) SetDataFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} + +func (s *Scanner) SetUseEmbeddedPolicies(b bool) { + s.loadEmbeddedPolicies = b +} + +func (s *Scanner) SetUseEmbeddedLibraries(b bool) { + s.loadEmbeddedLibraries = b +} + +func (s *Scanner) SetFrameworks(frameworks []framework.Framework) { + s.frameworks = frameworks +} + +func (s *Scanner) SetTraceWriter(io.Writer) {} +func (s *Scanner) SetPerResultTracingEnabled(bool) {} +func (s *Scanner) SetDataDirs(...string) {} +func (s *Scanner) SetPolicyNamespaces(...string) {} +func (s *Scanner) SetRegoErrorLimit(_ int) {} + +func (s *Scanner) initRegoScanner(srcFS fs.FS) error { + s.Lock() + defer s.Unlock() + if s.regoScanner != nil { + return nil + } + regoScanner := rego.NewScanner(types.SourceCloud, s.scannerOptions...) + regoScanner.SetParentDebugLogger(s.debug) + if err := regoScanner.LoadPolicies(s.loadEmbeddedLibraries, s.loadEmbeddedPolicies, srcFS, s.policyDirs, s.policyReaders); err != nil { + return err + } + s.regoScanner = regoScanner + return nil +} + +func (s *Scanner) ScanFS(ctx context.Context, fs fs.FS, dir string) (scan.Results, error) { + p := parser.New(fs, s.parserOptions...) + deployments, err := p.ParseFS(ctx, dir) + if err != nil { + return nil, err + } + if err := s.initRegoScanner(fs); err != nil { + return nil, err + } + + return s.scanDeployments(ctx, deployments, fs) +} + +func (s *Scanner) scanDeployments(ctx context.Context, deployments []azure.Deployment, f fs.FS) (scan.Results, error) { + + var results scan.Results + + for _, deployment := range deployments { + + result, err := s.scanDeployment(ctx, deployment, f) + if err != nil { + return nil, err + } + results = append(results, result...) 
+ } + + return results, nil +} + +func (s *Scanner) scanDeployment(ctx context.Context, deployment azure.Deployment, fs fs.FS) (scan.Results, error) { + var results scan.Results + deploymentState := s.adaptDeployment(ctx, deployment) + if !s.regoOnly { + for _, rule := range trules.GetRegistered(s.frameworks...) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + if rule.GetRule().RegoPackage != "" { + continue + } + ruleResults := rule.Evaluate(deploymentState) + s.debug.Log("Found %d results for %s", len(ruleResults), rule.GetRule().AVDID) + if len(ruleResults) > 0 { + results = append(results, ruleResults...) + } + } + } + + regoResults, err := s.regoScanner.ScanInput(ctx, rego.Input{ + Path: deployment.Metadata.Range().GetFilename(), + FS: fs, + Contents: deploymentState.ToRego(), + }) + if err != nil { + return nil, fmt.Errorf("rego scan error: %w", err) + } + + return append(results, regoResults...), nil +} + +func (s *Scanner) adaptDeployment(ctx context.Context, deployment azure.Deployment) *state.State { + return arm.Adapt(ctx, deployment) +} diff --git a/pkg/scanners/azure/deployment.go b/pkg/scanners/azure/deployment.go new file mode 100644 index 000000000000..ab658316a6be --- /dev/null +++ b/pkg/scanners/azure/deployment.go @@ -0,0 +1,179 @@ +package azure + +import ( + "os" + + "github.com/aquasecurity/trivy/pkg/types" +) + +type Deployment struct { + Metadata types.MisconfigMetadata + TargetScope Scope + Parameters []Parameter + Variables []Variable + Resources []Resource + Outputs []Output +} + +type Parameter struct { + Variable + Default Value + Decorators []Decorator +} + +type Variable struct { + Name string + Value Value +} + +type Output Variable + +type Resource struct { + Metadata types.MisconfigMetadata + APIVersion Value + Type Value + Kind Value + Name Value + Location Value + Tags Value + Sku Value + Properties Value + Resources []Resource +} + +type PropertyBag struct { + Metadata types.MisconfigMetadata + Data map[string]Value +} + +type Decorator struct { + Name string + Args []Value +} + +type Scope string + +const ( + ScopeResourceGroup Scope = "resourceGroup" +) + +func (d *Deployment) GetResourcesByType(t string) []Resource { + var resources []Resource + for _, r := range d.Resources { + if r.Type.AsString() == t { + resources = append(resources, r) + } + } + return resources +} + +func (r *Resource) GetResourcesByType(t string) []Resource { + var resources []Resource + for _, res := range r.Resources { + if res.Type.AsString() == t { + resources = append(resources, res) + } + } + return resources +} + +func (d *Deployment) GetParameter(parameterName string) interface{} { + + for _, parameter := range d.Parameters { + if parameter.Name == parameterName { + return parameter.Value.Raw() + } + } + return nil +} + +func (d *Deployment) GetVariable(variableName string) interface{} { + + for _, variable := range d.Variables { + if variable.Name == variableName { + return variable.Value.Raw() + } + } + return nil +} + +func (d *Deployment) GetEnvVariable(envVariableName string) interface{} { + + if envVariable, exists := os.LookupEnv(envVariableName); exists { + return envVariable + } + return nil +} + +func (d *Deployment) GetOutput(outputName string) interface{} { + + for _, output := range d.Outputs { + if output.Name == outputName { + return output.Value.Raw() + } + } + return nil +} + +func (d *Deployment) GetDeployment() interface{} { + + type template struct { + Schema string `json:"$schema"` + ContentVersion string
`json:"contentVersion"` + Parameters map[string]interface{} `json:"parameters"` + Variables map[string]interface{} `json:"variables"` + Resources []interface{} `json:"resources"` + Outputs map[string]interface{} `json:"outputs"` + } + + type templateLink struct { + URI string `json:"uri"` + } + + type properties struct { + TemplateLink templateLink `json:"templateLink"` + Template template `json:"template"` + TemplateHash string `json:"templateHash"` + Parameters map[string]interface{} `json:"parameters"` + Mode string `json:"mode"` + ProvisioningState string `json:"provisioningState"` + } + + deploymentShell := struct { + Name string `json:"name"` + Properties properties `json:"properties"` + }{ + Name: "Placeholder Deployment", + Properties: properties{ + TemplateLink: templateLink{ + URI: "https://placeholder.com", + }, + Template: template{ + Schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + ContentVersion: "", + Parameters: make(map[string]interface{}), + Variables: make(map[string]interface{}), + Resources: make([]interface{}, 0), + Outputs: make(map[string]interface{}), + }, + }, + } + + for _, parameter := range d.Parameters { + deploymentShell.Properties.Template.Parameters[parameter.Name] = parameter.Value.Raw() + } + + for _, variable := range d.Variables { + deploymentShell.Properties.Template.Variables[variable.Name] = variable.Value.Raw() + } + + for _, resource := range d.Resources { + deploymentShell.Properties.Template.Resources = append(deploymentShell.Properties.Template.Resources, resource) + } + + for _, output := range d.Outputs { + deploymentShell.Properties.Template.Outputs[output.Name] = output.Value.Raw() + } + + return deploymentShell +} diff --git a/pkg/scanners/azure/expressions/lex.go b/pkg/scanners/azure/expressions/lex.go new file mode 100644 index 000000000000..09eb7b819eff --- /dev/null +++ b/pkg/scanners/azure/expressions/lex.go @@ -0,0 +1,203 @@ +package expressions + +import ( + "bufio" + "fmt" + "strconv" + "strings" +) + +type TokenType uint16 + +const ( + TokenName TokenType = iota + TokenOpenParen + TokenCloseParen + TokenComma + TokenDot + TokenLiteralString + TokenLiteralInteger + TokenLiteralFloat + TokenNewLine +) + +type Token struct { + Type TokenType + Data interface{} +} + +type lexer struct { + reader *bufio.Reader +} + +func lex(expression string) ([]Token, error) { + lexer := &lexer{ + reader: bufio.NewReader(strings.NewReader(expression)), + } + return lexer.Lex() +} + +func (l *lexer) unread() { + _ = l.reader.UnreadRune() +} + +func (l *lexer) read() (rune, error) { + r, _, err := l.reader.ReadRune() + return r, err +} + +func (l *lexer) Lex() ([]Token, error) { + var tokens []Token + + for { + r, err := l.read() + if err != nil { + break + } + + switch r { + case ' ', '\t', '\r': + continue + case '\n': + tokens = append(tokens, Token{Type: TokenNewLine}) + case '(': + tokens = append(tokens, Token{Type: TokenOpenParen}) + case ')': + tokens = append(tokens, Token{Type: TokenCloseParen}) + case ',': + tokens = append(tokens, Token{Type: TokenComma}) + case '.': + tokens = append(tokens, Token{Type: TokenDot}) + case '"', '\'': + token, err := l.lexString(r) + if err != nil { + return nil, fmt.Errorf("string parse error: %w", err) + } + tokens = append(tokens, token) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + l.unread() + token, err := l.lexNumber() + if err != nil { + return nil, fmt.Errorf("number parse error: %w", err) + } + tokens = append(tokens, token) + default: + 
l.unread() + tokens = append(tokens, l.lexKeyword()) + } + } + + return tokens, nil +} + +func (l *lexer) lexString(terminator rune) (Token, error) { + var sb strings.Builder + for { + r, err := l.read() + if err != nil { + break + } + if r == '\\' { + r, err := l.readEscapedChar() + if err != nil { + return Token{}, fmt.Errorf("bad escape: %w", err) + } + sb.WriteRune(r) + continue + } + if r == terminator { + break + } + sb.WriteRune(r) + } + return Token{ + Type: TokenLiteralString, + Data: sb.String(), + }, nil +} + +func (l *lexer) readEscapedChar() (rune, error) { + r, err := l.read() + if err != nil { + return 0, fmt.Errorf("unexpected EOF") + } + switch r { + case 'n': + return '\n', nil + case 'r': + return '\r', nil + case 't': + return '\t', nil + case '"', '\'': + return r, nil + default: + return 0, fmt.Errorf("'%c' is not a supported escape sequence", r) + } +} + +func (l *lexer) lexNumber() (Token, error) { + + var sb strings.Builder + var decimal bool + +LOOP: + for { + r, err := l.read() + if err != nil { + break + } + switch r { + case '.': + decimal = true + sb.WriteRune('.') + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + sb.WriteRune(r) + default: + l.unread() + break LOOP + } + } + + raw := sb.String() + if decimal { + fl, err := strconv.ParseFloat(raw, 64) + if err != nil { + return Token{}, err + } + return Token{ + Type: TokenLiteralFloat, + Data: fl, + }, nil + } + + i, err := strconv.ParseInt(raw, 10, 64) + if err != nil { + return Token{}, err + } + return Token{ + Type: TokenLiteralInteger, + Data: i, + }, nil +} + +func (l *lexer) lexKeyword() Token { + var sb strings.Builder +LOOP: + for { + r, err := l.read() + if err != nil { + break + } + switch { + case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9', r == '_': + sb.WriteRune(r) + default: + l.unread() + break LOOP + } + } + return Token{ + Type: TokenName, + Data: sb.String(), + } +} diff --git a/pkg/scanners/azure/expressions/node.go b/pkg/scanners/azure/expressions/node.go new file mode 100644 index 000000000000..3257e127033d --- /dev/null +++ b/pkg/scanners/azure/expressions/node.go @@ -0,0 +1,75 @@ +package expressions + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/azure/functions" +) + +type Node interface { + Evaluate(deploymentProvider functions.DeploymentData) interface{} +} + +type expressionValue struct { + val interface{} +} + +func (e expressionValue) Evaluate(deploymentProvider functions.DeploymentData) interface{} { + if f, ok := e.val.(expression); ok { + return f.Evaluate(deploymentProvider) + } + return e.val +} + +type expression struct { + name string + args []Node +} + +func (f expression) Evaluate(deploymentProvider functions.DeploymentData) interface{} { + args := make([]interface{}, len(f.args)) + for i, arg := range f.args { + args[i] = arg.Evaluate(deploymentProvider) + } + + return functions.Evaluate(deploymentProvider, f.name, args...) 
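+
+    // Editor note: arguments are evaluated depth-first, so for a tree built
+    // from "concat(parameters('prefix'), '-suffix')" (illustrative input) the
+    // parameters lookup resolves first and its result is handed to the
+    // registered concat implementation via functions.Evaluate.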
+} + +func NewExpressionTree(code string) (Node, error) { + tokens, err := lex(code) + if err != nil { + return nil, err + } + + // create a walker for the nodes + tw := newTokenWalker(tokens) + + // generate the root function + return newFunctionNode(tw), nil +} + +func newFunctionNode(tw *tokenWalker) Node { + funcNode := &expression{ + name: tw.pop().Data.(string), + } + + for tw.hasNext() { + token := tw.pop() + if token == nil { + break + } + + switch token.Type { + case TokenCloseParen: + return funcNode + case TokenName: + if tw.peek().Type == TokenOpenParen { + // this is a function, unwind 1 + tw.unPop() + funcNode.args = append(funcNode.args, newFunctionNode(tw)) + } + case TokenLiteralString, TokenLiteralInteger, TokenLiteralFloat: + funcNode.args = append(funcNode.args, expressionValue{token.Data}) + } + + } + return funcNode +} diff --git a/pkg/scanners/azure/expressions/token_walker.go b/pkg/scanners/azure/expressions/token_walker.go new file mode 100644 index 000000000000..d07a238d1bd9 --- /dev/null +++ b/pkg/scanners/azure/expressions/token_walker.go @@ -0,0 +1,40 @@ +package expressions + +type tokenWalker struct { + tokens []Token + currentPosition int +} + +func newTokenWalker(tokens []Token) *tokenWalker { + return &tokenWalker{ + tokens: tokens, + currentPosition: 0, + } +} + +func (t *tokenWalker) peek() Token { + if t.currentPosition >= len(t.tokens) { + return Token{} + } + return t.tokens[t.currentPosition] +} + +func (t *tokenWalker) hasNext() bool { + return t.currentPosition+1 < len(t.tokens) +} + +func (t *tokenWalker) unPop() { + if t.currentPosition > 0 { + t.currentPosition-- + } +} + +func (t *tokenWalker) pop() *Token { + if !t.hasNext() { + return nil + } + + token := t.tokens[t.currentPosition] + t.currentPosition++ + return &token +} diff --git a/pkg/scanners/azure/functions/add.go b/pkg/scanners/azure/functions/add.go new file mode 100644 index 000000000000..9eb699e2eb9b --- /dev/null +++ b/pkg/scanners/azure/functions/add.go @@ -0,0 +1,15 @@ +package functions + +func Add(args ...interface{}) interface{} { + + if len(args) != 2 { + return nil + } + + if a, ok := args[0].(int); ok { + if b, ok := args[1].(int); ok { + return a + b + } + } + return nil +} diff --git a/pkg/scanners/azure/functions/add_test.go b/pkg/scanners/azure/functions/add_test.go new file mode 100644 index 000000000000..b88e9b8ee1cc --- /dev/null +++ b/pkg/scanners/azure/functions/add_test.go @@ -0,0 +1,38 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Add(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "Add with 1 and 2", + args: []interface{}{1, 2}, + expected: 3, + }, + { + name: "Add with 2 and 3", + args: []interface{}{2, 3}, + expected: 5, + }, + { + name: "Add with 3 and -4", + args: []interface{}{3, -4}, + expected: -1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Add(tt.args...) 
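+            // Editor note: Add returns nil unless both arguments are ints, so
+            // e.g. Add(1.5, 2) yields nil rather than 3.5; these cases only
+            // exercise the int path.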
+ assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/scanners/azure/functions/and.go b/pkg/scanners/azure/functions/and.go new file mode 100644 index 000000000000..67070b5c2cb0 --- /dev/null +++ b/pkg/scanners/azure/functions/and.go @@ -0,0 +1,27 @@ +package functions + +func And(args ...interface{}) interface{} { + + if len(args) <= 1 { + return false + } + + arg0, ok := args[0].(bool) + if !ok { + return false + } + + benchmark := arg0 + + for _, arg := range args[1:] { + arg1, ok := arg.(bool) + if !ok { + return false + } + if benchmark != arg1 { + return false + } + + } + return true +} diff --git a/pkg/scanners/azure/functions/and_test.go b/pkg/scanners/azure/functions/and_test.go new file mode 100644 index 000000000000..6814e9288ca0 --- /dev/null +++ b/pkg/scanners/azure/functions/and_test.go @@ -0,0 +1,39 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_And(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected bool + }{ + { + name: "And with same 2 bools", + args: []interface{}{true, true}, + expected: true, + }, + { + name: "And with same 3 bools", + args: []interface{}{true, true, true}, + expected: true, + }, + { + name: "And with different 4 bools", + args: []interface{}{true, true, false, true}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := And(tt.args...) + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/scanners/azure/functions/array.go b/pkg/scanners/azure/functions/array.go new file mode 100644 index 000000000000..a1da05ef4fdc --- /dev/null +++ b/pkg/scanners/azure/functions/array.go @@ -0,0 +1,29 @@ +package functions + +func Array(args ...interface{}) interface{} { + + if len(args) != 1 { + return "" + } + + switch ctype := args[0].(type) { + case int: + return []int{ctype} + case string: + return []string{ctype} + case map[string]interface{}: + var result []interface{} + for k, v := range ctype { + result = append(result, k, v) + } + return result + case interface{}: + switch ctype := ctype.(type) { + case []string: + return ctype + case []interface{}: + return ctype + } + } + return []interface{}{} +} diff --git a/pkg/scanners/azure/functions/array_test.go b/pkg/scanners/azure/functions/array_test.go new file mode 100644 index 000000000000..c4a376ea6080 --- /dev/null +++ b/pkg/scanners/azure/functions/array_test.go @@ -0,0 +1,44 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Array(t *testing.T) { + test := []struct { + name string + input []interface{} + expected interface{} + }{ + { + name: "array from an int", + input: []interface{}{1}, + expected: []int{1}, + }, + { + name: "array from a string", + input: []interface{}{"hello"}, + expected: []string{"hello"}, + }, + { + name: "array from a map", + input: []interface{}{map[string]interface{}{"hello": "world"}}, + expected: []interface{}{"hello", "world"}, + }, + { + name: "array from an slice", + input: []interface{}{ + []string{"hello", "world"}, + }, + expected: []string{"hello", "world"}, + }, + } + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + actual := Array(tt.input...) 
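+            // Editor note: the map case in Array relies on Go's range over a
+            // map, whose iteration order is unspecified; the fixture above stays
+            // deterministic only because it has a single key.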
+ assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/base64.go b/pkg/scanners/azure/functions/base64.go new file mode 100644 index 000000000000..c3222e7675ec --- /dev/null +++ b/pkg/scanners/azure/functions/base64.go @@ -0,0 +1,52 @@ +package functions + +import ( + "encoding/base64" + "encoding/json" +) + +func Base64(args ...interface{}) interface{} { + + if len(args) == 0 { + return nil + } + + input := args[0].(string) + + return base64.StdEncoding.EncodeToString([]byte(input)) +} + +func Base64ToString(args ...interface{}) interface{} { + if len(args) == 0 { + return nil + } + + input := args[0].(string) + + result, err := base64.StdEncoding.DecodeString(input) + if err != nil { + return "" + } + return string(result) +} + +func Base64ToJson(args ...interface{}) interface{} { + + if len(args) == 0 { + return nil + } + + input := args[0].(string) + + decoded, err := base64.StdEncoding.DecodeString(input) + if err != nil { + return nil + } + + var result map[string]interface{} + + if err := json.Unmarshal(decoded, &result); err != nil { + return nil + } + return result +} diff --git a/pkg/scanners/azure/functions/base64_test.go b/pkg/scanners/azure/functions/base64_test.go new file mode 100644 index 000000000000..f557b277930c --- /dev/null +++ b/pkg/scanners/azure/functions/base64_test.go @@ -0,0 +1,85 @@ +package functions + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Base64Call(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "simple base64 call", + args: []interface{}{ + "hello, world", + }, + expected: "aGVsbG8sIHdvcmxk", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Base64(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } + +} + +func Test_Base64ToStringCall(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "simple base64ToString call", + args: []interface{}{ + "aGVsbG8sIHdvcmxk", + }, + expected: "hello, world", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Base64ToString(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } + +} + +func Test_Base64ToJsonCall(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "simple base64ToJson call", + args: []interface{}{ + "eyJoZWxsbyI6ICJ3b3JsZCJ9", + }, + expected: `{"hello":"world"}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Base64ToJson(tt.args...) 
+ + actualContent, err := json.Marshal(actual) + require.NoError(t, err) + + assert.Equal(t, tt.expected, string(actualContent)) + }) + } +} diff --git a/pkg/scanners/azure/functions/bool.go b/pkg/scanners/azure/functions/bool.go new file mode 100644 index 000000000000..0221a5a4b8ee --- /dev/null +++ b/pkg/scanners/azure/functions/bool.go @@ -0,0 +1,20 @@ +package functions + +import "strings" + +func Bool(args ...interface{}) interface{} { + if len(args) != 1 { + return false + } + + switch input := args[0].(type) { + case bool: + return input + case string: + input = strings.ToLower(input) + return input == "true" || input == "1" || input == "yes" || input == "on" + case int: + return input == 1 + } + return false +} diff --git a/pkg/scanners/azure/functions/bool_test.go b/pkg/scanners/azure/functions/bool_test.go new file mode 100644 index 000000000000..6c520a9380f8 --- /dev/null +++ b/pkg/scanners/azure/functions/bool_test.go @@ -0,0 +1,63 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Bool(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected bool + }{ + { + name: "Bool with true", + args: []interface{}{true}, + expected: true, + }, + { + name: "Bool with false", + args: []interface{}{false}, + expected: false, + }, + { + name: "Bool with 1", + args: []interface{}{1}, + expected: true, + }, + { + name: "Bool with 0", + args: []interface{}{0}, + expected: false, + }, + { + name: "Bool with true string", + args: []interface{}{"true"}, + expected: true, + }, + { + name: "Bool with false string", + args: []interface{}{"false"}, + expected: false, + }, + { + name: "Bool with 1 string", + args: []interface{}{"1"}, + expected: true, + }, + { + name: "Bool with 0 string", + args: []interface{}{"0"}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Bool(tt.args...) + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/scanners/azure/functions/casing.go b/pkg/scanners/azure/functions/casing.go new file mode 100644 index 000000000000..56a93bbd7a4b --- /dev/null +++ b/pkg/scanners/azure/functions/casing.go @@ -0,0 +1,29 @@ +package functions + +import "strings" + +func ToLower(args ...interface{}) interface{} { + if len(args) != 1 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + + return strings.ToLower(input) +} + +func ToUpper(args ...interface{}) interface{} { + if len(args) != 1 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + + return strings.ToUpper(input) +} diff --git a/pkg/scanners/azure/functions/casing_test.go b/pkg/scanners/azure/functions/casing_test.go new file mode 100644 index 000000000000..51c970e1765e --- /dev/null +++ b/pkg/scanners/azure/functions/casing_test.go @@ -0,0 +1,71 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_ToLower(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "lowercase a string", + args: []interface{}{ + "HELLO", + }, + expected: "hello", + }, + { + name: "lowercase a string with a non-string input", + args: []interface{}{ + 10, + }, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := ToLower(tt.args...) 
+ assert.Equal(t, tt.expected, actual) + }) + } + +} + +func Test_ToUpper(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "uppercase a string", + args: []interface{}{ + "hello", + }, + expected: "HELLO", + }, + { + name: "uppercase a string with a non-string input", + args: []interface{}{ + 10, + }, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := ToUpper(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/coalesce.go b/pkg/scanners/azure/functions/coalesce.go new file mode 100644 index 000000000000..b7ec261450f7 --- /dev/null +++ b/pkg/scanners/azure/functions/coalesce.go @@ -0,0 +1,10 @@ +package functions + +func Coalesce(args ...interface{}) interface{} { + for _, arg := range args { + if arg != nil { + return arg + } + } + return nil +} diff --git a/pkg/scanners/azure/functions/coalesce_test.go b/pkg/scanners/azure/functions/coalesce_test.go new file mode 100644 index 000000000000..361914df64cd --- /dev/null +++ b/pkg/scanners/azure/functions/coalesce_test.go @@ -0,0 +1,56 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Coalesce(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "coalesce with nil", + args: []interface{}{ + nil, + }, + expected: nil, + }, + { + name: "coalesce with nil and string", + args: []interface{}{ + nil, + "test", + }, + expected: "test", + }, + { + name: "coalesce with nil and string and int", + args: []interface{}{ + nil, + "test", + 1, + }, + expected: "test", + }, + { + name: "coalesce with nil and nil and array", + args: []interface{}{ + nil, + nil, + []interface{}{"a", "b", "c"}, + }, + expected: []interface{}{"a", "b", "c"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Coalesce(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/concat.go b/pkg/scanners/azure/functions/concat.go new file mode 100644 index 000000000000..800db04be77d --- /dev/null +++ b/pkg/scanners/azure/functions/concat.go @@ -0,0 +1,28 @@ +package functions + +import ( + "fmt" +) + +func Concat(args ...interface{}) interface{} { + + switch args[0].(type) { + case string: + var result string + for _, arg := range args { + result += fmt.Sprintf("%v", arg) + } + return result + case interface{}: + var result []interface{} + for _, arg := range args { + argArr, ok := arg.([]interface{}) + if !ok { + continue + } + result = append(result, argArr...) 
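+
+        // Editor note: the mode is picked from args[0]'s type. The string branch
+        // stringifies every argument with %v, while this array branch silently
+        // skips non-slice arguments, e.g. concat([1,2,3], 4, [5,6,7]) yields
+        // [1,2,3,5,6,7] (see the tests that follow).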
+ } + return result + } + return "" +} diff --git a/pkg/scanners/azure/functions/concat_test.go b/pkg/scanners/azure/functions/concat_test.go new file mode 100644 index 000000000000..7b0c461c960d --- /dev/null +++ b/pkg/scanners/azure/functions/concat_test.go @@ -0,0 +1,94 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_StringConcatenation(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "simple string concatenation", + args: []interface{}{ + "hello", + ", ", + "world", + "!", + }, + expected: "hello, world!", + }, + { + name: "string concatenation with non strings", + args: []interface{}{ + "pi to 3 decimal places is ", + 3.142, + }, + expected: "pi to 3 decimal places is 3.142", + }, + { + name: "string concatenation with multiple primitives", + args: []interface{}{ + "to say that ", + 3, + " is greater than ", + 5, + " would be ", + false, + }, + expected: "to say that 3 is greater than 5 would be false", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + concatenated := Concat(tt.args...) + require.Equal(t, tt.expected, concatenated) + }) + } +} + +func Test_ArrayConcatenation(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected []interface{} + }{ + { + name: "simple array concatenation", + args: []interface{}{ + []interface{}{1, 2, 3}, + []interface{}{4, 5, 6}, + }, + expected: []interface{}{1, 2, 3, 4, 5, 6}, + }, + { + name: "array concatenation with non arrays", + args: []interface{}{ + []interface{}{1, 2, 3}, + 4, + }, + expected: []interface{}{1, 2, 3}, + }, + { + name: "array concatenation with multiple primitives", + args: []interface{}{ + []interface{}{1, 2, 3}, + 4, + []interface{}{5, 6, 7}, + }, + expected: []interface{}{1, 2, 3, 5, 6, 7}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + concatenated := Concat(tt.args...) 
+ require.Equal(t, tt.expected, concatenated) + }) + } +} diff --git a/pkg/scanners/azure/functions/contains.go b/pkg/scanners/azure/functions/contains.go new file mode 100644 index 000000000000..a067d63dfa85 --- /dev/null +++ b/pkg/scanners/azure/functions/contains.go @@ -0,0 +1,40 @@ +package functions + +import ( + "fmt" + "strings" +) + +func Contains(args ...interface{}) interface{} { + + if len(args) != 2 { + return false + } + + container := args[0] + itemToFind := args[1] + + switch cType := container.(type) { + case string: + switch iType := itemToFind.(type) { + case string: + return strings.Contains(strings.ToLower(cType), strings.ToLower(iType)) + case int, int32, int64, uint, uint32, uint64: + return strings.Contains(strings.ToLower(cType), fmt.Sprintf("%d", iType)) + } + case []interface{}: + for _, item := range cType { + if item == itemToFind { + return true + } + } + case map[string]interface{}: + for key := range cType { + if key == itemToFind { + return true + } + } + } + + return false +} diff --git a/pkg/scanners/azure/functions/contains_test.go b/pkg/scanners/azure/functions/contains_test.go new file mode 100644 index 000000000000..e92f08fd5462 --- /dev/null +++ b/pkg/scanners/azure/functions/contains_test.go @@ -0,0 +1,95 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_Contains(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected bool + }{ + { + name: "simple true string contains", + args: []interface{}{ + "hello, world", + "hell", + }, + expected: true, + }, + { + name: "simple false string contains", + args: []interface{}{ + "hello, world", + "help", + }, + expected: false, + }, + { + name: "simple true string contains with case sensitivity", + args: []interface{}{ + "hello, world", + "HELL", + }, + expected: true, + }, + { + name: "simple true string contains with number", + args: []interface{}{ + "You're my number 1", + 1, + }, + expected: true, + }, + { + name: "true object contains key", + args: []interface{}{ + map[string]interface{}{ + "hello": "world", + }, + "hello", + }, + expected: true, + }, + { + name: "false object contains key", + args: []interface{}{ + map[string]interface{}{ + "hello": "world", + }, + "world", + }, + expected: false, + }, + { + name: "true array contains value", + args: []interface{}{ + []interface{}{ + "hello", "world", + }, + "hello", + }, + expected: true, + }, + { + name: "false array contains value", + args: []interface{}{ + []interface{}{ + "hello", "world", + }, + "help", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + doesContain := Contains(tt.args...) 
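+            // Editor note: Contains lowers both operands in the string case, so
+            // string containment is case-insensitive, while the slice and
+            // map-key branches use exact == comparison.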
+ require.Equal(t, tt.expected, doesContain) + }) + } +} diff --git a/pkg/scanners/azure/functions/copy_index.go b/pkg/scanners/azure/functions/copy_index.go new file mode 100644 index 000000000000..aee090e79466 --- /dev/null +++ b/pkg/scanners/azure/functions/copy_index.go @@ -0,0 +1,25 @@ +package functions + +var loopCounter = map[string]int{} + +func CopyIndex(args ...interface{}) interface{} { + loopName := "default" + offset := 1 + if len(args) > 0 { + if providedLoopName, ok := args[0].(string); ok { + loopName = providedLoopName + } + } + if len(args) > 1 { + if providedOffset, ok := args[1].(int); ok { + offset = providedOffset + } + } + + if _, ok := loopCounter[loopName]; !ok { + loopCounter[loopName] = 0 + } + + loopCounter[loopName] += offset + return loopCounter[loopName] +} diff --git a/pkg/scanners/azure/functions/copy_index_test.go b/pkg/scanners/azure/functions/copy_index_test.go new file mode 100644 index 000000000000..041b258ca8cf --- /dev/null +++ b/pkg/scanners/azure/functions/copy_index_test.go @@ -0,0 +1,52 @@ +package functions + +import "testing" + +func Test_CopyIndex(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "CopyIndex with 1", + args: []interface{}{}, + expected: 1, + }, + { + name: "CopyIndex with 2", + args: []interface{}{}, + expected: 2, + }, + { + name: "CopyIndex with 3", + args: []interface{}{}, + expected: 3, + }, + { + name: "CopyIndex with loopName", + args: []interface{}{"loop1"}, + expected: 1, + }, + { + name: "CopyIndex with same lo" + + "opName", + args: []interface{}{"loop1"}, + expected: 2, + }, + { + name: "CopyIndex with loopName", + args: []interface{}{"loop2", 10}, + expected: 10, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := CopyIndex(tt.args...) + if got != tt.expected { + t.Errorf("CopyIndex() = %v, want %v", got, tt.expected) + } + }) + } +} diff --git a/pkg/scanners/azure/functions/create_array.go b/pkg/scanners/azure/functions/create_array.go new file mode 100644 index 000000000000..99f3558847a1 --- /dev/null +++ b/pkg/scanners/azure/functions/create_array.go @@ -0,0 +1,11 @@ +package functions + +func CreateArray(args ...interface{}) interface{} { + var result []interface{} + if len(args) == 0 { + return result + } + + result = append(result, args...) 
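+
+    // Editor note: CopyIndex (above) keeps its counters in the package-level
+    // loopCounter map, so values persist across calls and concurrent use shares
+    // unsynchronised state; the sequential expectations in Test_CopyIndex
+    // depend on that persistence.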
+ return result +} diff --git a/pkg/scanners/azure/functions/create_array_test.go b/pkg/scanners/azure/functions/create_array_test.go new file mode 100644 index 000000000000..5e63074888cb --- /dev/null +++ b/pkg/scanners/azure/functions/create_array_test.go @@ -0,0 +1,68 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_CreateArray(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "create array with strings", + args: []interface{}{ + "Hello", + "World", + }, + expected: []interface{}{"Hello", "World"}, + }, + { + name: "create array with ints", + + args: []interface{}{ + 1, 2, 3, + }, + expected: []interface{}{1, 2, 3}, + }, + { + name: "create array with arrays", + args: []interface{}{ + []interface{}{1, 2, 3}, + []interface{}{4, 5, 6}, + }, + expected: []interface{}{[]interface{}{1, 2, 3}, []interface{}{4, 5, 6}}, + }, + { + name: "create arrau with maps", + args: []interface{}{ + map[string]interface{}{ + "one": "a", + }, + map[string]interface{}{ + "two": "b", + }, + }, + expected: []interface{}{ + map[string]interface{}{ + "one": "a", + }, + map[string]interface{}{ + "two": "b", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := CreateArray(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/create_object.go b/pkg/scanners/azure/functions/create_object.go new file mode 100644 index 000000000000..b9fe2d29f6d2 --- /dev/null +++ b/pkg/scanners/azure/functions/create_object.go @@ -0,0 +1,21 @@ +package functions + +func CreateObject(args ...interface{}) interface{} { + obj := map[string]interface{}{} + if len(args) == 0 { + return obj + } + + // if there aren't even pairs then return an empty object + if len(args)%2 != 0 { + return obj + } + + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + value := args[i+1] + obj[key] = value + } + + return obj +} diff --git a/pkg/scanners/azure/functions/create_object_test.go b/pkg/scanners/azure/functions/create_object_test.go new file mode 100644 index 000000000000..f695e38410fe --- /dev/null +++ b/pkg/scanners/azure/functions/create_object_test.go @@ -0,0 +1,60 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_CreateObject(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "CreateObject with no args", + args: []interface{}{}, + expected: map[string]interface{}{}, + }, + { + name: "CreateObject with one arg", + args: []interface{}{"foo", "bar"}, + expected: map[string]interface{}{"foo": "bar"}, + }, + { + name: "CreateObject with two args", + args: []interface{}{"foo", "bar", "baz", "qux"}, + expected: map[string]interface{}{"foo": "bar", "baz": "qux"}, + }, + { + name: "CreateObject with three args", + args: []interface{}{"foo", "bar", "baz", 1, "quux", true}, + expected: map[string]interface{}{"foo": "bar", "baz": 1, "quux": true}, + }, + { + name: "CreateObject with odd number of args", + args: []interface{}{"foo", "bar", "baz"}, + expected: map[string]interface{}{}, + }, + { + name: "CreateObject with odd number of args", + args: []interface{}{"foo", "bar", "baz", []string{"Hello", "World"}}, + expected: map[string]interface{}{ + "foo": "bar", + "baz": []string{ + "Hello", "World", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := 
CreateObject(tt.args...) + assert.Equal(t, tt.expected, got) + }) + } + +} diff --git a/pkg/scanners/azure/functions/data_uri.go b/pkg/scanners/azure/functions/data_uri.go new file mode 100644 index 000000000000..50f0835ee6ad --- /dev/null +++ b/pkg/scanners/azure/functions/data_uri.go @@ -0,0 +1,36 @@ +package functions + +import ( + "fmt" + "strings" +) + +func DataUri(args ...interface{}) interface{} { + if len(args) == 0 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + + return fmt.Sprintf("data:text/plain;charset=utf8;base64,%s", Base64(input)) +} + +func DataUriToString(args ...interface{}) interface{} { + if len(args) == 0 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + parts := strings.Split(input, "base64,") + if len(parts) != 2 { + return "" + } + + return Base64ToString(parts[1]) +} diff --git a/pkg/scanners/azure/functions/data_uri_test.go b/pkg/scanners/azure/functions/data_uri_test.go new file mode 100644 index 000000000000..04f92249e093 --- /dev/null +++ b/pkg/scanners/azure/functions/data_uri_test.go @@ -0,0 +1,53 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_data_uri_from_string(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "data uri from string", + args: []interface{}{ + "Hello", + }, + expected: "data:text/plain;charset=utf8;base64,SGVsbG8=", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dataUri := DataUri(tt.args...) + require.Equal(t, tt.expected, dataUri) + }) + } +} + +func Test_string_from_data_uri(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "data uri to string", + args: []interface{}{ + "data:;base64,SGVsbG8sIFdvcmxkIQ==", + }, + expected: "Hello, World!", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dataUri := DataUriToString(tt.args...) 
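+            // Editor note: DataUriToString only splits on the "base64," marker,
+            // so the media-type prefix (empty in this fixture) is ignored
+            // entirely and a URI without that marker yields "".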
+            require.Equal(t, tt.expected, dataUri)
+        })
+    }
+}
diff --git a/pkg/scanners/azure/functions/date_time_add.go b/pkg/scanners/azure/functions/date_time_add.go
new file mode 100644
index 000000000000..c3b902b08965
--- /dev/null
+++ b/pkg/scanners/azure/functions/date_time_add.go
@@ -0,0 +1,115 @@
+package functions
+
+import (
+    "fmt"
+    "regexp"
+    "strconv"
+    "time"
+)
+
+var pattern = regexp.MustCompile(`^P((?P<year>\d+)Y)?((?P<month>\d+)M)?((?P<week>\d+)W)?((?P<day>\d+)D)?(T((?P<hour>\d+)H)?((?P<minute>\d+)M)?((?P<second>\d+)S)?)?$`)
+
+func DateTimeAdd(args ...interface{}) interface{} {
+    if len(args) < 2 {
+        return nil
+    }
+
+    base, ok := args[0].(string)
+    if !ok {
+        return nil
+    }
+
+    format := time.RFC3339
+    if len(args) == 3 {
+        if providedFormat, ok := args[2].(string); ok {
+            format = convertFormat(providedFormat)
+        }
+
+    }
+
+    baseTime, err := time.Parse(format, base)
+    if err != nil {
+        return nil
+    }
+
+    duration, err := parseISO8601(args[1].(string))
+    if err != nil {
+        return nil
+    }
+
+    timeDuration := duration.timeDuration()
+    baseTime = baseTime.Add(timeDuration)
+
+    if ok {
+        return baseTime.Format(format)
+    }
+
+    return baseTime.Format(time.RFC3339)
+}
+
+type Iso8601Duration struct {
+    Y int
+    M int
+    W int
+    D int
+    // Time Component
+    TH int
+    TM int
+    TS int
+}
+
+func parseISO8601(from string) (Iso8601Duration, error) {
+    var match []string
+    var d Iso8601Duration
+
+    if pattern.MatchString(from) {
+        match = pattern.FindStringSubmatch(from)
+    } else {
+        return d, fmt.Errorf("could not parse duration string")
+    }
+
+    for i, name := range pattern.SubexpNames() {
+        part := match[i]
+        if i == 0 || name == "" || part == "" {
+            continue
+        }
+
+        val, err := strconv.Atoi(part)
+        if err != nil {
+            return d, err
+        }
+        switch name {
+        case "year":
+            d.Y = val
+        case "month":
+            d.M = val
+        case "week":
+            d.W = val
+        case "day":
+            d.D = val
+        case "hour":
+            d.TH = val
+        case "minute":
+            d.TM = val
+        case "second":
+            d.TS = val
+        default:
+            return d, fmt.Errorf("unknown field %s", name)
+        }
+    }
+
+    return d, nil
+}
+
+func (d Iso8601Duration) timeDuration() time.Duration {
+    var dur time.Duration
+    dur += time.Duration(d.TH) * time.Hour
+    dur += time.Duration(d.TM) * time.Minute
+    dur += time.Duration(d.TS) * time.Second
+    dur += time.Duration(d.D) * 24 * time.Hour
+    dur += time.Duration(d.W) * 7 * 24 * time.Hour
+    dur += time.Duration(d.M) * 30 * 24 * time.Hour
+    dur += time.Duration(d.Y) * 365 * 24 * time.Hour
+
+    return dur
+}
diff --git a/pkg/scanners/azure/functions/date_time_epoch.go b/pkg/scanners/azure/functions/date_time_epoch.go
new file mode 100644
index 000000000000..9b1802573269
--- /dev/null
+++ b/pkg/scanners/azure/functions/date_time_epoch.go
@@ -0,0 +1,38 @@
+package functions
+
+import (
+    "time"
+
+    smithyTime "github.com/aws/smithy-go/time"
+)
+
+func DateTimeFromEpoch(args ...interface{}) interface{} {
+    if len(args) != 1 {
+        return nil
+    }
+
+    epoch, ok := args[0].(int)
+    if !ok {
+        return nil
+    }
+
+    return smithyTime.ParseEpochSeconds(float64(epoch)).Format(time.RFC3339)
+}
+
+func DateTimeToEpoch(args ...interface{}) interface{} {
+    if len(args) != 1 {
+        return nil
+    }
+
+    dateTime, ok := args[0].(string)
+    if !ok {
+        return nil
+    }
+
+    parsed, err := time.Parse(time.RFC3339, dateTime)
+    if err != nil {
+        return nil
+    }
+
+    return int(parsed.Unix())
+}
diff --git a/pkg/scanners/azure/functions/date_time_epoch_test.go b/pkg/scanners/azure/functions/date_time_epoch_test.go
new file mode 100644
index 000000000000..6cdf7a0442bd
--- /dev/null
+++ 
b/pkg/scanners/azure/functions/date_time_epoch_test.go @@ -0,0 +1,51 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_DateTimeFromEpoch(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "datetime from epoch", + args: []interface{}{ + 1683040573, + }, + expected: "2023-05-02T15:16:13Z", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := DateTimeFromEpoch(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} + +func Test_DateTimeToEpoch(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "datetime to epoch", + args: []interface{}{ + "2023-05-02T15:16:13Z", + }, + expected: 1683040573, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := DateTimeToEpoch(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/datetime_add_test.go b/pkg/scanners/azure/functions/datetime_add_test.go new file mode 100644 index 000000000000..b5c09d04a742 --- /dev/null +++ b/pkg/scanners/azure/functions/datetime_add_test.go @@ -0,0 +1,72 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_DateTimeAdd(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + + { + name: "datetime add 1 years", + args: []interface{}{ + "2010-01-01T00:00:00Z", + "P1Y", + }, + expected: "2011-01-01T00:00:00Z", + }, + { + name: "datetime add 3 months", + args: []interface{}{ + "2010-01-01T00:00:00Z", + "P3M", + }, + expected: "2010-04-01T00:00:00Z", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := DateTimeAdd(tt.args...) 
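+            // Editor note: timeDuration approximates a month as 30 days and a
+            // year as 365 days. The fixtures above still land on exact calendar
+            // dates: 2010-01-01 + 365 days is 2011-01-01 (2010 is not a leap
+            // year) and + 90 days is 2010-04-01 (31 + 28 + 31).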
+ assert.Equal(t, tt.expected, actual) + }) + } +} + +func Test_ISO8601DurationParse(t *testing.T) { + tests := []struct { + name string + args string + expected Iso8601Duration + }{ + + { + name: "parse 1 year", + args: "P1Y", + expected: Iso8601Duration{Y: 1}, + }, + { + name: "parse 3 months", + args: "P3M", + expected: Iso8601Duration{M: 3}, + }, + { + name: "parse 12 hours", + args: "PT12H", + expected: Iso8601Duration{TH: 12}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual, err := parseISO8601(tt.args) + require.NoError(t, err) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/deployment.go b/pkg/scanners/azure/functions/deployment.go new file mode 100644 index 000000000000..afafb2b3587c --- /dev/null +++ b/pkg/scanners/azure/functions/deployment.go @@ -0,0 +1,75 @@ +package functions + +type DeploymentData interface { + GetParameter(name string) interface{} + GetVariable(variableName string) interface{} + GetEnvVariable(envVariableName string) interface{} +} + +func Deployment(deploymentProvider DeploymentData, args ...interface{}) interface{} { + + /* + + { + "name": "", + "properties": { + "templateLink": { + "uri": "" + }, + "template": { + "$schema": "", + "contentVersion": "", + "parameters": {}, + "variables": {}, + "resources": [], + "outputs": {} + }, + "templateHash": "", + "parameters": {}, + "mode": "", + "provisioningState": "" + } + } + + */ + + return nil +} + +func Environment(envProvider DeploymentData, args ...interface{}) interface{} { + if len(args) == 0 { + return nil + } + + envVarName, ok := args[0].(string) + if !ok { + return nil + } + return envProvider.GetEnvVariable(envVarName) +} + +func Variables(varProvider DeploymentData, args ...interface{}) interface{} { + if len(args) == 0 { + return nil + } + + varName, ok := args[0].(string) + if !ok { + return nil + } + return varProvider.GetVariable(varName) +} + +func Parameters(paramProvider DeploymentData, args ...interface{}) interface{} { + if len(args) == 0 { + return nil + } + + paramName, ok := args[0].(string) + if !ok { + return nil + } + + return paramProvider.GetParameter(paramName) + +} diff --git a/pkg/scanners/azure/functions/div.go b/pkg/scanners/azure/functions/div.go new file mode 100644 index 000000000000..9de0dfb05f73 --- /dev/null +++ b/pkg/scanners/azure/functions/div.go @@ -0,0 +1,15 @@ +package functions + +func Div(args ...interface{}) interface{} { + + if len(args) != 2 { + return nil + } + + if a, ok := args[0].(int); ok { + if b, ok := args[1].(int); ok { + return a / b + } + } + return nil +} diff --git a/pkg/scanners/azure/functions/div_test.go b/pkg/scanners/azure/functions/div_test.go new file mode 100644 index 000000000000..49166190fb5d --- /dev/null +++ b/pkg/scanners/azure/functions/div_test.go @@ -0,0 +1,38 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Div(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "Div 2 by 1", + args: []interface{}{2, 1}, + expected: 2, + }, + { + name: "Div 4 by 2", + args: []interface{}{4, 2}, + expected: 2, + }, + { + name: "Div 6 by 2", + args: []interface{}{6, 2}, + expected: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Div(tt.args...) 
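+            // Editor note: Div performs Go integer division on ints only, so
+            // Div(7, 2) would yield 3 and a zero divisor would panic; non-int
+            // arguments return nil.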
+ assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/scanners/azure/functions/empty.go b/pkg/scanners/azure/functions/empty.go new file mode 100644 index 000000000000..1dbe8396f7c3 --- /dev/null +++ b/pkg/scanners/azure/functions/empty.go @@ -0,0 +1,33 @@ +package functions + +func Empty(args ...interface{}) interface{} { + + if len(args) != 1 { + return false + } + + container := args[0] + + switch cType := container.(type) { + case string: + return cType == "" + case map[string]interface{}: + return len(cType) == 0 + case interface{}: + switch iType := cType.(type) { + case []string: + return len(iType) == 0 + case []bool: + return len(iType) == 0 + case []int: + return len(iType) == 0 + case []float64: + return len(iType) == 0 + case map[string]interface{}: + return len(iType) == 0 + } + + } + + return false +} diff --git a/pkg/scanners/azure/functions/empty_test.go b/pkg/scanners/azure/functions/empty_test.go new file mode 100644 index 000000000000..a21fb96cd8cd --- /dev/null +++ b/pkg/scanners/azure/functions/empty_test.go @@ -0,0 +1,68 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_Empty(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected bool + }{ + { + name: "string is empty", + args: []interface{}{ + "", + }, + expected: true, + }, + { + name: "string is not empty", + args: []interface{}{ + "hello, world", + }, + expected: false, + }, + { + name: "array is empty", + args: []interface{}{ + []string{}, + }, + expected: true, + }, + { + name: "array is not empty", + args: []interface{}{ + []string{"Hello", "World"}, + }, + expected: false, + }, + { + name: "map is empty", + args: []interface{}{ + map[string]interface{}{}, + }, + expected: true, + }, + { + name: "map is not empty", + args: []interface{}{ + map[string]interface{}{ + "hello": "world", + }, + "world", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + doesContain := Empty(tt.args...) + require.Equal(t, tt.expected, doesContain) + }) + } +} diff --git a/pkg/scanners/azure/functions/ends_with.go b/pkg/scanners/azure/functions/ends_with.go new file mode 100644 index 000000000000..2bcd66217ecb --- /dev/null +++ b/pkg/scanners/azure/functions/ends_with.go @@ -0,0 +1,22 @@ +package functions + +import "strings" + +func EndsWith(args ...interface{}) interface{} { + + if len(args) != 2 { + return false + } + + stringToSearch, ok := args[0].(string) + if !ok { + return false + } + + stringToFind, ok := args[1].(string) + if !ok { + return false + } + + return strings.HasSuffix(stringToSearch, stringToFind) +} diff --git a/pkg/scanners/azure/functions/ends_with_test.go b/pkg/scanners/azure/functions/ends_with_test.go new file mode 100644 index 000000000000..b1d1900ba0d2 --- /dev/null +++ b/pkg/scanners/azure/functions/ends_with_test.go @@ -0,0 +1,41 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_EndsWith(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected bool + }{ + { + name: "string ends with", + args: []interface{}{ + "Hello world!", + "world!", + }, + expected: true, + }, + { + name: "string does not end with", + args: []interface{}{ + "Hello world!", + "world", + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := EndsWith(tt.args...) 
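+            // Editor note: EndsWith here is case-sensitive, whereas the
+            // documented ARM endsWith builtin compares case-insensitively, so
+            // mixed-case templates may evaluate differently (an observation
+            // about the code above, not a verified design decision).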
+ assert.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/equals.go b/pkg/scanners/azure/functions/equals.go new file mode 100644 index 000000000000..ca5174144cb8 --- /dev/null +++ b/pkg/scanners/azure/functions/equals.go @@ -0,0 +1,25 @@ +package functions + +func Equals(args ...interface{}) interface{} { + if len(args) != 2 { + return false + } + + slice1, ok := args[0].([]interface{}) + if ok { + slice2, ok := args[1].([]interface{}) + if ok { + if len(slice1) != len(slice2) { + return false + } + for i := 0; i < len(slice1); i++ { + if slice1[i] != slice2[i] { + return false + } + } + return true + } + } + + return args[0] == args[1] +} diff --git a/pkg/scanners/azure/functions/equals_test.go b/pkg/scanners/azure/functions/equals_test.go new file mode 100644 index 000000000000..e9ad7f03f7c7 --- /dev/null +++ b/pkg/scanners/azure/functions/equals_test.go @@ -0,0 +1,111 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Equals(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "equals with nil", + args: []interface{}{ + nil, + }, + expected: false, + }, + { + name: "equals with nil and string", + args: []interface{}{ + nil, + "test", + }, + expected: false, + }, + { + name: "equals with nil and string and int", + args: []interface{}{ + nil, + "test", + 1, + }, + expected: false, + }, + { + name: "equals with nil and nil and array", + args: []interface{}{ + nil, + nil, + []interface{}{"a", "b", "c"}, + }, + expected: false, + }, + { + name: "equals with nil and nil", + args: []interface{}{ + nil, + nil, + }, + expected: true, + }, + { + name: "equals with string and string", + args: []interface{}{ + "test", + "test", + }, + expected: true, + }, + { + name: "equals with string and string", + args: []interface{}{ + "test", + "test1", + }, + expected: false, + }, + { + name: "equals with int and int", + args: []interface{}{ + 1, + 1, + }, + expected: true, + }, + { + name: "equals with int and int", + args: []interface{}{ + 1, + 2, + }, + expected: false, + }, + { + name: "equals with array and array", + args: []interface{}{ + []interface{}{"a", "b", "c"}, + []interface{}{"a", "b", "c"}, + }, + expected: true, + }, + { + name: "equals with array and array", + args: []interface{}{ + []interface{}{"a", "b", "c"}, + []interface{}{"a", "b", "d"}, + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Equals(tt.args...) 
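+            // Editor note: Equals compares []interface{} element-wise and falls
+            // back to == for everything else; two maps reaching the == fallback
+            // would panic at runtime, so object equality is effectively
+            // unsupported here.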
+ assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/false.go b/pkg/scanners/azure/functions/false.go new file mode 100644 index 000000000000..26309e333812 --- /dev/null +++ b/pkg/scanners/azure/functions/false.go @@ -0,0 +1,5 @@ +package functions + +func False(args ...interface{}) interface{} { + return false +} diff --git a/pkg/scanners/azure/functions/first.go b/pkg/scanners/azure/functions/first.go new file mode 100644 index 000000000000..3415b453ffe3 --- /dev/null +++ b/pkg/scanners/azure/functions/first.go @@ -0,0 +1,37 @@ +package functions + +func First(args ...interface{}) interface{} { + if len(args) != 1 { + return "" + } + + container := args[0] + + switch cType := container.(type) { + case string: + if len(cType) > 0 { + return string(cType[0]) + } + case interface{}: + switch iType := cType.(type) { + case []string: + if len(iType) > 0 { + return iType[0] + } + case []bool: + if len(iType) > 0 { + return iType[0] + } + case []int: + if len(iType) > 0 { + return iType[0] + } + case []float64: + if len(iType) > 0 { + return iType[0] + } + } + } + + return "" +} diff --git a/pkg/scanners/azure/functions/first_test.go b/pkg/scanners/azure/functions/first_test.go new file mode 100644 index 000000000000..5ce059750184 --- /dev/null +++ b/pkg/scanners/azure/functions/first_test.go @@ -0,0 +1,51 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_First(t *testing.T) { + test := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "first in empty string", + args: []interface{}{ + "", + }, + expected: "", + }, + { + name: "first in string", + args: []interface{}{ + "Hello", + }, + expected: "H", + }, + { + name: "first in empty slice", + args: []interface{}{ + []string{}, + }, + expected: "", + }, + { + name: "first in slice", + args: []interface{}{ + []string{"Hello", "World"}, + }, + expected: "Hello", + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + actual := First(tt.args...) + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/float.go b/pkg/scanners/azure/functions/float.go new file mode 100644 index 000000000000..512b471b9421 --- /dev/null +++ b/pkg/scanners/azure/functions/float.go @@ -0,0 +1,20 @@ +package functions + +import "strconv" + +func Float(args ...interface{}) interface{} { + if len(args) != 1 { + return 0.0 + } + if a, ok := args[0].(int); ok { + return float64(a) + } + if a, ok := args[0].(string); ok { + f, err := strconv.ParseFloat(a, 64) + if err != nil { + return 0.0 + } + return f + } + return 0.0 +} diff --git a/pkg/scanners/azure/functions/float_test.go b/pkg/scanners/azure/functions/float_test.go new file mode 100644 index 000000000000..a7f5f84a8c20 --- /dev/null +++ b/pkg/scanners/azure/functions/float_test.go @@ -0,0 +1,36 @@ +package functions + +import "testing" + +func Test_Float(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected float64 + }{ + { + name: "Float with 1", + args: []interface{}{1}, + expected: 1.0, + }, + { + name: "Float with 2", + args: []interface{}{"2"}, + expected: 2.0, + }, + { + name: "Float with 3", + args: []interface{}{"2.3"}, + expected: 2.3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Float(tt.args...) 
+ if got != tt.expected { + t.Errorf("Float() = %v, want %v", got, tt.expected) + } + }) + } +} diff --git a/pkg/scanners/azure/functions/format.go b/pkg/scanners/azure/functions/format.go new file mode 100644 index 000000000000..207b9ebfdda7 --- /dev/null +++ b/pkg/scanners/azure/functions/format.go @@ -0,0 +1,31 @@ +package functions + +import ( + "fmt" + "strings" +) + +func Format(args ...interface{}) interface{} { + formatter := generateFormatterString(args...) + + return fmt.Sprintf(formatter, args[1:]...) +} + +func generateFormatterString(args ...interface{}) string { + + formatter, ok := args[0].(string) + if !ok { + return "" + } + for i, arg := range args[1:] { + switch arg.(type) { + case string: + formatter = strings.ReplaceAll(formatter, fmt.Sprintf("{%d}", i), "%s") + case int, int32, int64, uint, uint32, uint64: + formatter = strings.ReplaceAll(formatter, fmt.Sprintf("{%d}", i), "%d") + case float64, float32: + formatter = strings.ReplaceAll(formatter, fmt.Sprintf("{%d}", i), "%f") + } + } + return formatter +} diff --git a/pkg/scanners/azure/functions/format_test.go b/pkg/scanners/azure/functions/format_test.go new file mode 100644 index 000000000000..8d5e840c61a6 --- /dev/null +++ b/pkg/scanners/azure/functions/format_test.go @@ -0,0 +1,42 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_FormatCall(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "simple format call", + args: []interface{}{ + "{0}/{1}", + "myPostgreSQLServer", + "log_checkpoints", + }, + expected: "myPostgreSQLServer/log_checkpoints", + }, + { + name: "complex format call", + args: []interface{}{ + "{0} + {1} = {2}", + 1, 2, 3, + }, + expected: "1 + 2 = 3", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Format(tt.args...) 
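+            // Editor note: generateFormatterString only rewrites {n}
+            // placeholders for string, integer and float arguments; any other
+            // argument type leaves the {n} literal in place and surfaces as a
+            // Sprintf EXTRA artefact in the output.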
+ assert.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/functions.go b/pkg/scanners/azure/functions/functions.go new file mode 100644 index 000000000000..f4ed7815f485 --- /dev/null +++ b/pkg/scanners/azure/functions/functions.go @@ -0,0 +1,99 @@ +package functions + +var deploymentFuncs = map[string]func(dp DeploymentData, args ...interface{}) interface{}{ + "parameters": Parameters, + "deployment": Deployment, + "environment": Environment, + "variables": Variables, +} +var generalFuncs = map[string]func(...interface{}) interface{}{ + + "add": Add, + "and": And, + "array": Array, + "base64": Base64, + "base64ToJson": Base64ToJson, + "bool": Bool, + "coalesce": Coalesce, + "concat": Concat, + "contains": Contains, + "copyIndex": CopyIndex, + "createArray": CreateArray, + "createObject": CreateObject, + "dataUri": DataUri, + "dataUriToString": DataUriToString, + "dateTimeAdd": DateTimeAdd, + "dateTimeFromEpoch": DateTimeFromEpoch, + "dateTimeToEpoch": DateTimeToEpoch, + "div": Div, + "empty": Empty, + "endsWith": EndsWith, + "equals": Equals, + "extensionResourceId": ExtensionResourceID, + "false": False, + "float": Float, + "format": Format, + "greater": Greater, + "greaterOrEquals": GreaterOrEquals, + "guid": Guid, + "if": If, + "indexOf": IndexOf, + "int": Int, + "intersection": Intersection, + "items": Items, + "join": Join, + "lastIndexOf": LastIndexOf, + "length": Length, + "less": Less, + "lessOrEquals": LessOrEquals, + // "list": List, + "managementGroup": ManagementGroup, + "managementGroupResourceId": ManagementGroupResourceID, + "max": Max, + "min": Min, + "mod": Mod, + "mul": Mul, + "newGuid": NewGuid, + "not": Not, + "null": Null, + "or": Or, + "padLeft": PadLeft, + "pickZones": PickZones, + "range": Range, + "reference": Reference, + "replace": Replace, + "resourceGroup": ResourceGroup, + "resourceId": ResourceID, + "skip": Skip, + "split": Split, + "startsWith": StartsWith, + "string": String, + "sub": Sub, + "subscription": Subscription, + "subscriptionResourceId": SubscriptionResourceID, + "substring": SubString, + "tenant": Tenant, + "tenantResourceId": TenantResourceID, + "toLower": ToLower, + "toUpper": ToUpper, + "trim": Trim, + "true": True, + "union": Union, + "union:": Union, + "uniqueString": UniqueString, + "uri": Uri, + "utcNow": UTCNow, +} + +func Evaluate(deploymentProvider DeploymentData, name string, args ...interface{}) interface{} { + + if f, ok := deploymentFuncs[name]; ok { + return f(deploymentProvider, args...) + } + + if f, ok := generalFuncs[name]; ok { + return f(args...) 
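+
+    // Editor note: deployment-scoped functions are resolved before general
+    // ones, and unknown names fall through to the nil return below, so a
+    // template that references an unimplemented builtin evaluates to nil
+    // instead of failing the scan.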
+ } + + return nil +} diff --git a/pkg/scanners/azure/functions/greater.go b/pkg/scanners/azure/functions/greater.go new file mode 100644 index 000000000000..24bf79834641 --- /dev/null +++ b/pkg/scanners/azure/functions/greater.go @@ -0,0 +1,47 @@ +package functions + +func Greater(args ...interface{}) interface{} { + + if len(args) != 2 { + return false + } + + switch arg0 := args[0].(type) { + case int: + arg1, ok := args[1].(int) + if ok { + return arg0 > arg1 + } + case string: + arg1, ok := args[1].(string) + if ok { + return arg0 > arg1 + } + } + + return false +} + +func GreaterOrEquals(args ...interface{}) interface{} { + + if len(args) != 2 { + return false + } + + switch arg0 := args[0].(type) { + case nil: + return args[1] == nil + case int: + arg1, ok := args[1].(int) + if ok { + return arg0 >= arg1 + } + case string: + arg1, ok := args[1].(string) + if ok { + return arg0 >= arg1 + } + } + + return false +} diff --git a/pkg/scanners/azure/functions/greater_test.go b/pkg/scanners/azure/functions/greater_test.go new file mode 100644 index 000000000000..8d3e1b21b25e --- /dev/null +++ b/pkg/scanners/azure/functions/greater_test.go @@ -0,0 +1,119 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Greater(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + + { + name: "greater with nil and string", + args: []interface{}{ + nil, + "test", + }, + expected: false, + }, + { + name: "greater with nil and nil", + args: []interface{}{ + nil, + nil, + }, + expected: false, + }, + { + name: "greater with string and string", + args: []interface{}{ + "test", + "test", + }, + expected: false, + }, + { + name: "greater with string and int", + args: []interface{}{ + "test", + 1, + }, + expected: false, + }, + { + name: "greater with int and int", + args: []interface{}{ + 1, + 1, + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Greater(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} + +func Test_GreaterThanOrEqual(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + + { + name: "greater with nil and string", + args: []interface{}{ + nil, + "test", + }, + expected: false, + }, + { + name: "greater with nil and nil", + args: []interface{}{ + nil, + nil, + }, + expected: true, + }, + { + name: "greater with string and string", + args: []interface{}{ + "test", + "test", + }, + expected: true, + }, + { + name: "greater with string and int", + args: []interface{}{ + "test", + 1, + }, + expected: false, + }, + { + name: "greater with int and int", + args: []interface{}{ + 1, + 1, + }, + expected: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := GreaterOrEquals(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/guid.go b/pkg/scanners/azure/functions/guid.go new file mode 100644 index 000000000000..d54bbacb1beb --- /dev/null +++ b/pkg/scanners/azure/functions/guid.go @@ -0,0 +1,44 @@ +package functions + +import ( + "crypto/sha256" + "strings" + + "github.com/google/uuid" +) + +func Guid(args ...interface{}) interface{} { + + if len(args) == 0 { + return "" + } + + hashParts := make([]string, len(args)) + for i, str := range args { + hashParts[i] = str.(string) + } + + guid, err := generateSeededGUID(hashParts...) 
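+ // Determinism sketch: generateSeededGUID hashes the joined args with
+ // SHA-256 and then overlays the UUID version/variant bits, so repeated
+ // calls such as Guid("hello") always return the same UUID-shaped string.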
+ if err != nil { + return "" + } + + return guid.String() +} + +func generateSeededGUID(seedParts ...string) (uuid.UUID, error) { + var id uuid.UUID + + stringToHash := strings.Join(seedParts, "") + + hsha2 := sha256.Sum256([]byte(stringToHash)) + + copy(id[:], hsha2[:16]) + id[6] = (id[6] & 0x0f) | 0x40 // Version 4 + id[8] = (id[8] & 0x3f) | 0x80 // Variant is 10 + return id, nil +} + +func NewGuid(args ...interface{}) interface{} { + return uuid.NewString() +} diff --git a/pkg/scanners/azure/functions/guid_test.go b/pkg/scanners/azure/functions/guid_test.go new file mode 100644 index 000000000000..0e47e5383a54 --- /dev/null +++ b/pkg/scanners/azure/functions/guid_test.go @@ -0,0 +1,35 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_Guid(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "guid from a string", + args: []interface{}{ + "hello", + }, + expected: "2cf24dba-5fb0-430e-a6e8-3b2ac5b9e29e", + }, + { + name: "guid from an string", + args: []interface{}{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + guid := Guid(tt.args...) + require.Equal(t, tt.expected, guid) + }) + } +} diff --git a/pkg/scanners/azure/functions/if.go b/pkg/scanners/azure/functions/if.go new file mode 100644 index 000000000000..03fd35e360ff --- /dev/null +++ b/pkg/scanners/azure/functions/if.go @@ -0,0 +1,15 @@ +package functions + +func If(args ...interface{}) interface{} { + + if len(args) != 3 { + return nil + } + + if condition, ok := args[0].(bool); ok { + if condition { + return args[1] + } + } + return args[2] +} diff --git a/pkg/scanners/azure/functions/if_test.go b/pkg/scanners/azure/functions/if_test.go new file mode 100644 index 000000000000..52c645fb30aa --- /dev/null +++ b/pkg/scanners/azure/functions/if_test.go @@ -0,0 +1,44 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_If(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "If with true", + args: []interface{}{true, "true", "false"}, + expected: "true", + }, + { + name: "If with false", + args: []interface{}{false, "true", "false"}, + expected: "false", + }, + { + name: "If with true and slice returned", + args: []interface{}{ + true, + []interface{}{"Hello", "World"}, + []interface{}{"Goodbye", "World"}, + }, + expected: []interface{}{"Hello", "World"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := If(tt.args...) 
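+ // Behaviour sketch: If returns args[1] only when args[0] is the bool
+ // true; a false, missing or non-bool condition falls through to
+ // args[2], e.g. If("yes", 1, 2) == 2.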
+ assert.Equal(t, tt.expected, got) + }) + } + +} diff --git a/pkg/scanners/azure/functions/index_of.go b/pkg/scanners/azure/functions/index_of.go new file mode 100644 index 000000000000..93896e21e897 --- /dev/null +++ b/pkg/scanners/azure/functions/index_of.go @@ -0,0 +1,22 @@ +package functions + +import "strings" + +func IndexOf(args ...interface{}) interface{} { + + if len(args) != 2 { + return -1 + } + + stringToSearch, ok := args[0].(string) + if !ok { + return -1 + } + + stringToFind, ok := args[1].(string) + if !ok { + return -1 + } + + return strings.Index(stringToSearch, stringToFind) +} diff --git a/pkg/scanners/azure/functions/index_of_test.go b/pkg/scanners/azure/functions/index_of_test.go new file mode 100644 index 000000000000..c35d59279942 --- /dev/null +++ b/pkg/scanners/azure/functions/index_of_test.go @@ -0,0 +1,48 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_IndexOf(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "get index of string that is there", + args: []interface{}{ + "Hello world!", + "Hell", + }, + expected: 0, + }, + { + name: "get index of string that is there as well", + args: []interface{}{ + "Hello world!", + "world", + }, + expected: 6, + }, + { + name: "get index of string that isn't there", + args: []interface{}{ + "Hello world!", + "planet!", + }, + expected: -1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := IndexOf(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/int.go b/pkg/scanners/azure/functions/int.go new file mode 100644 index 000000000000..f873a29fb0bf --- /dev/null +++ b/pkg/scanners/azure/functions/int.go @@ -0,0 +1,20 @@ +package functions + +import "strconv" + +func Int(args ...interface{}) interface{} { + if len(args) != 1 { + return 0 + } + if a, ok := args[0].(int); ok { + return a + } + if a, ok := args[0].(string); ok { + i, err := strconv.Atoi(a) + if err != nil { + return 0 + } + return i + } + return 0 +} diff --git a/pkg/scanners/azure/functions/int_test.go b/pkg/scanners/azure/functions/int_test.go new file mode 100644 index 000000000000..0834ecdd6fc2 --- /dev/null +++ b/pkg/scanners/azure/functions/int_test.go @@ -0,0 +1,36 @@ +package functions + +import "testing" + +func Test_Int(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "Int with 1", + args: []interface{}{1}, + expected: 1, + }, + { + name: "Int with 2", + args: []interface{}{"2"}, + expected: 2, + }, + { + name: "Int with 2.3", + args: []interface{}{"2.3"}, + expected: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Int(tt.args...) + if got != tt.expected { + t.Errorf("Int() = %v, want %v", got, tt.expected) + } + }) + } +} diff --git a/pkg/scanners/azure/functions/intersection.go b/pkg/scanners/azure/functions/intersection.go new file mode 100644 index 000000000000..5eace2fe0bc7 --- /dev/null +++ b/pkg/scanners/azure/functions/intersection.go @@ -0,0 +1,76 @@ +package functions + +import "sort" + +func Intersection(args ...interface{}) interface{} { + + if args == nil || len(args) < 2 { + return []interface{}{} + } + + switch args[0].(type) { + case map[string]interface{}: + return intersectionMap(args...) + case interface{}: + return intersectionArray(args...) 
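+ // The interface{} case matches any remaining non-nil argument, so array
+ // inputs are routed here; e.g. Intersection([]interface{}{"a", "b"},
+ // []interface{}{"b", "c"}) yields []interface{}{"b"}.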
+ } + + return []interface{}{} +} + +func intersectionArray(args ...interface{}) interface{} { + result := []interface{}{} + hash := make(map[interface{}]bool) + + for _, arg := range args[0].([]interface{}) { + hash[arg] = true + } + + for i := 1; i < len(args); i++ { + workingHash := make(map[interface{}]bool) + argArr, ok := args[i].([]interface{}) + if !ok { + continue + } + for _, item := range argArr { + if _, ok := hash[item]; ok { + workingHash[item] = true + } + } + hash = workingHash + } + + for k := range hash { + result = append(result, k) + } + + sort.Slice(result, func(i, j int) bool { + return result[i].(string) < result[j].(string) + }) + + return result +} + +func intersectionMap(args ...interface{}) interface{} { + hash := make(map[string]interface{}) + + for k, v := range args[0].(map[string]interface{}) { + hash[k] = v + } + + for i := 1; i < len(args); i++ { + workingHash := make(map[string]interface{}) + argArr, ok := args[i].(map[string]interface{}) + if !ok { + continue + } + for k, v := range argArr { + if ev, ok := hash[k]; ok && ev == v { + workingHash[k] = v + } + } + hash = workingHash + } + + return hash +} diff --git a/pkg/scanners/azure/functions/intersection_test.go b/pkg/scanners/azure/functions/intersection_test.go new file mode 100644 index 000000000000..98630fa9687c --- /dev/null +++ b/pkg/scanners/azure/functions/intersection_test.go @@ -0,0 +1,106 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Intersect(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "intersect two arrays", + args: []interface{}{ + []interface{}{"a", "b", "c"}, + []interface{}{"b", "c", "d"}, + }, + expected: []interface{}{"b", "c"}, + }, + { + name: "intersect three arrays", + args: []interface{}{ + []interface{}{"a", "b", "c", "d"}, + []interface{}{"b", "c", "d"}, + []interface{}{"b", "c"}, + }, + expected: []interface{}{"b", "c"}, + }, + { + name: "intersect two arrays with one empty", + args: []interface{}{ + []interface{}{"a", "b", "c"}, + []interface{}{}, + }, + expected: []interface{}{}, + }, + { + name: "intersect two arrays with both empty", + args: []interface{}{ + []interface{}{}, + []interface{}{}, + }, + expected: []interface{}{}, + }, + { + name: "intersect two arrays with both nil", + args: []interface{}{ + nil, + nil, + }, + expected: []interface{}{}, + }, + { + name: "intersect two maps", + args: []interface{}{ + map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + }, + map[string]interface{}{ + "b": "b", + "c": "c", + "d": "d", + }, + }, + expected: map[string]interface{}{ + "b": "b", + "c": "c", + }, + }, + { + name: "intersect three maps", + args: []interface{}{ + map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + }, + map[string]interface{}{ + "b": "b", + "c": "c", + "d": "d", + }, + map[string]interface{}{ + "b": "b", + "d": "d", + }, + }, + expected: map[string]interface{}{ + "b": "b", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Intersection(tt.args...) 
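+ // Note on the map cases above: intersectionMap keeps a key only when
+ // every argument carries the same value for it, not merely the same key.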
+ assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/items.go b/pkg/scanners/azure/functions/items.go new file mode 100644 index 000000000000..2b40a369ea46 --- /dev/null +++ b/pkg/scanners/azure/functions/items.go @@ -0,0 +1,6 @@ +package functions + +func Items(args ...interface{}) interface{} { + + return nil +} diff --git a/pkg/scanners/azure/functions/join.go b/pkg/scanners/azure/functions/join.go new file mode 100644 index 000000000000..cdefa43fdad0 --- /dev/null +++ b/pkg/scanners/azure/functions/join.go @@ -0,0 +1,22 @@ +package functions + +import "strings" + +func Join(args ...interface{}) interface{} { + + if len(args) != 2 { + return "" + } + + container, ok := args[0].([]string) + if !ok { + return "" + } + + separator, ok := args[1].(string) + if !ok { + return "" + } + + return strings.Join(container, separator) +} diff --git a/pkg/scanners/azure/functions/join_test.go b/pkg/scanners/azure/functions/join_test.go new file mode 100644 index 000000000000..fab50a4e1e90 --- /dev/null +++ b/pkg/scanners/azure/functions/join_test.go @@ -0,0 +1,39 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Join(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "join strings with no items", + args: []interface{}{ + []string{}, + " ", + }, + expected: "", + }, + { + name: "join strings", + args: []interface{}{ + []string{"Hello", "World"}, + " ", + }, + expected: "Hello World", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Join(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/json.go b/pkg/scanners/azure/functions/json.go new file mode 100644 index 000000000000..7694b358737b --- /dev/null +++ b/pkg/scanners/azure/functions/json.go @@ -0,0 +1,20 @@ +package functions + +import "encoding/json" + +func JSON(args ...interface{}) interface{} { + if len(args) != 1 { + return "" + } + + value, ok := args[0].(string) + if !ok { + return "" + } + + var jsonType map[string]interface{} + if err := json.Unmarshal([]byte(value), &jsonType); err != nil { + return "" + } + return jsonType +} diff --git a/pkg/scanners/azure/functions/json_test.go b/pkg/scanners/azure/functions/json_test.go new file mode 100644 index 000000000000..1f04cd65026f --- /dev/null +++ b/pkg/scanners/azure/functions/json_test.go @@ -0,0 +1,42 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_JSON(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected map[string]interface{} + }{ + { + name: "simple json string to json type", + args: []interface{}{ + `{"hello": "world"}`, + }, + expected: map[string]interface{}{ + "hello": "world", + }, + }, + { + name: "more complex json string to json type", + args: []interface{}{ + `{"hello": ["world", "world2"]}`, + }, + expected: map[string]interface{}{ + "hello": []interface{}{"world", "world2"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := JSON(tt.args...) 
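+ // Limitation worth noting: JSON unmarshals into map[string]interface{},
+ // so only top-level objects survive; a top-level array such as `[1, 2]`
+ // fails to unmarshal and the function returns "".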
+ assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/last.go b/pkg/scanners/azure/functions/last.go new file mode 100644 index 000000000000..8466ec6b669f --- /dev/null +++ b/pkg/scanners/azure/functions/last.go @@ -0,0 +1,37 @@ +package functions + +func Last(args ...interface{}) interface{} { + if len(args) != 1 { + return "" + } + + container := args[0] + + switch cType := container.(type) { + case string: + if len(cType) > 0 { + return string(cType[len(cType)-1]) + } + case interface{}: + switch iType := cType.(type) { + case []string: + if len(iType) > 0 { + return iType[len(iType)-1] + } + case []bool: + if len(iType) > 0 { + return iType[len(iType)-1] + } + case []int: + if len(iType) > 0 { + return iType[len(iType)-1] + } + case []float64: + if len(iType) > 0 { + return iType[len(iType)-1] + } + } + } + + return "" +} diff --git a/pkg/scanners/azure/functions/last_index_of.go b/pkg/scanners/azure/functions/last_index_of.go new file mode 100644 index 000000000000..7dce6320d8fb --- /dev/null +++ b/pkg/scanners/azure/functions/last_index_of.go @@ -0,0 +1,22 @@ +package functions + +import "strings" + +func LastIndexOf(args ...interface{}) interface{} { + + if len(args) != 2 { + return -1 + } + + stringToSearch, ok := args[0].(string) + if !ok { + return -1 + } + + stringToFind, ok := args[1].(string) + if !ok { + return -1 + } + + return strings.LastIndex(stringToSearch, stringToFind) +} diff --git a/pkg/scanners/azure/functions/last_index_of_test.go b/pkg/scanners/azure/functions/last_index_of_test.go new file mode 100644 index 000000000000..96b78d72dc5f --- /dev/null +++ b/pkg/scanners/azure/functions/last_index_of_test.go @@ -0,0 +1,48 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_LastIndexOf(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "get last index of string that is there", + args: []interface{}{ + "Hello world!", + "l", + }, + expected: 9, + }, + { + name: "get last index of string that is there as well", + args: []interface{}{ + "Hello world!", + "world", + }, + expected: 6, + }, + { + name: "get last index of string that isn't there", + args: []interface{}{ + "Hello world!", + "planet!", + }, + expected: -1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := LastIndexOf(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/last_test.go b/pkg/scanners/azure/functions/last_test.go new file mode 100644 index 000000000000..2ceafbf8a69a --- /dev/null +++ b/pkg/scanners/azure/functions/last_test.go @@ -0,0 +1,51 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_Last(t *testing.T) { + test := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "last in empty string", + args: []interface{}{ + "", + }, + expected: "", + }, + { + name: "last in string", + args: []interface{}{ + "Hello", + }, + expected: "o", + }, + { + name: "last in empty slice", + args: []interface{}{ + []string{}, + }, + expected: "", + }, + { + name: "last in slice", + args: []interface{}{ + []string{"Hello", "World"}, + }, + expected: "World", + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + actual := Last(tt.args...) 
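+ // Sketch of Last's contract: a string yields its final character as a
+ // string (Last("Hello") == "o"), the supported slice types yield their
+ // final element, and any other or empty input yields "".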
+ require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/length.go b/pkg/scanners/azure/functions/length.go new file mode 100644 index 000000000000..d74bfb2553bf --- /dev/null +++ b/pkg/scanners/azure/functions/length.go @@ -0,0 +1,29 @@ +package functions + +func Length(args ...interface{}) interface{} { + + if len(args) != 1 { + return 0 + } + + switch ctype := args[0].(type) { + case string: + return len(ctype) + case map[string]interface{}: + return len(ctype) + case interface{}: + switch iType := ctype.(type) { + case []string: + return len(iType) + case []bool: + return len(iType) + case []int: + return len(iType) + case []float64: + return len(iType) + case []interface{}: + return len(iType) + } + } + return 0 +} diff --git a/pkg/scanners/azure/functions/length_test.go b/pkg/scanners/azure/functions/length_test.go new file mode 100644 index 000000000000..2d15ba4968cf --- /dev/null +++ b/pkg/scanners/azure/functions/length_test.go @@ -0,0 +1,53 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Length(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "length of a string", + args: []interface{}{ + "hello", + }, + expected: 5, + }, + { + name: "length of an empty string", + args: []interface{}{ + "", + }, + expected: 0, + }, + { + name: "length of an empty slice", + args: []interface{}{ + []string{}, + }, + expected: 0, + }, + { + name: "length of an slice with items", + args: []interface{}{ + []string{ + "hello", "world", + }, + }, + expected: 2, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Length(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/less.go b/pkg/scanners/azure/functions/less.go new file mode 100644 index 000000000000..e25b3662c5c9 --- /dev/null +++ b/pkg/scanners/azure/functions/less.go @@ -0,0 +1,47 @@ +package functions + +func Less(args ...interface{}) interface{} { + + if len(args) != 2 { + return false + } + + switch arg0 := args[0].(type) { + case int: + arg1, ok := args[1].(int) + if ok { + return arg0 < arg1 + } + case string: + arg1, ok := args[1].(string) + if ok { + return arg0 < arg1 + } + } + + return false +} + +func LessOrEquals(args ...interface{}) interface{} { + + if len(args) != 2 { + return false + } + + switch arg0 := args[0].(type) { + case nil: + return args[1] == nil + case int: + arg1, ok := args[1].(int) + if ok { + return arg0 <= arg1 + } + case string: + arg1, ok := args[1].(string) + if ok { + return arg0 <= arg1 + } + } + + return false +} diff --git a/pkg/scanners/azure/functions/less_test.go b/pkg/scanners/azure/functions/less_test.go new file mode 100644 index 000000000000..706ee89db33f --- /dev/null +++ b/pkg/scanners/azure/functions/less_test.go @@ -0,0 +1,119 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Less(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + + { + name: "less with nil and string", + args: []interface{}{ + nil, + "test", + }, + expected: false, + }, + { + name: "less with nil and nil", + args: []interface{}{ + nil, + nil, + }, + expected: false, + }, + { + name: "less with string and string", + args: []interface{}{ + "test", + "test", + }, + expected: false, + }, + { + name: "less with string and int", + args: []interface{}{ + "test", + 1, + }, + expected: 
false, + }, + { + name: "less with int and int", + args: []interface{}{ + 1, + 1, + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Less(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} + +func Test_LessThanOrEqual(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + + { + name: "less with nil and string", + args: []interface{}{ + nil, + "test", + }, + expected: false, + }, + { + name: "less with nil and nil", + args: []interface{}{ + nil, + nil, + }, + expected: true, + }, + { + name: "less with string and string", + args: []interface{}{ + "test", + "test", + }, + expected: true, + }, + { + name: "less with string and int", + args: []interface{}{ + "test", + 1, + }, + expected: false, + }, + { + name: "less with int and int", + args: []interface{}{ + 1, + 1, + }, + expected: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := LessOrEquals(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/max.go b/pkg/scanners/azure/functions/max.go new file mode 100644 index 000000000000..6cbfd5ba25bc --- /dev/null +++ b/pkg/scanners/azure/functions/max.go @@ -0,0 +1,33 @@ +package functions + +func Max(args ...interface{}) interface{} { + switch args[0].(type) { + case int: + var ints []int + for _, arg := range args { + ints = append(ints, arg.(int)) + } + return maxInt(ints) + case interface{}: + switch iType := args[0].(type) { + case []int: + return maxInt(iType) + } + } + return 0 +} + +func maxInt(args []int) int { + if len(args) == 0 { + return 0 + } + + max := args[0] + + for i := 1; i < len(args); i++ { + if args[i] > max { + max = args[i] + } + } + return max +} diff --git a/pkg/scanners/azure/functions/max_test.go b/pkg/scanners/azure/functions/max_test.go new file mode 100644 index 000000000000..942fad7e9e59 --- /dev/null +++ b/pkg/scanners/azure/functions/max_test.go @@ -0,0 +1,58 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Max(t *testing.T) { + test := []struct { + name string + args []interface{} + expected int + }{ + { + name: "max of empty slice", + args: []interface{}{ + []int{}, + }, + expected: 0, + }, + { + name: "max of slice", + args: []interface{}{ + []int{1, 2, 3}, + }, + expected: 3, + }, + { + name: "max of slice with negative numbers", + args: []interface{}{ + []int{-1, -2, -3}, + }, + expected: -1, + }, + { + name: "max of slice with negative and positive numbers", + args: []interface{}{ + []int{-1, 2, -3}, + }, + expected: 2, + }, + { + name: "max of comma separated numbers", + args: []interface{}{ + 1, 2, 3, 4, 5, + }, + expected: 5, + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + actual := Max(tt.args...) 
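+ // Max accepts either a single []int argument or a variadic list of
+ // ints; an empty slice (or an unsupported first argument) collapses to
+ // 0, which is what the empty-slice case above relies on.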
+ assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/min.go b/pkg/scanners/azure/functions/min.go new file mode 100644 index 000000000000..35900e26d33d --- /dev/null +++ b/pkg/scanners/azure/functions/min.go @@ -0,0 +1,33 @@ +package functions + +func Min(args ...interface{}) interface{} { + switch args[0].(type) { + case int: + var ints []int + for _, arg := range args { + ints = append(ints, arg.(int)) + } + return minInt(ints) + case interface{}: + switch iType := args[0].(type) { + case []int: + return minInt(iType) + } + } + return 0 +} + +func minInt(args []int) int { + if len(args) == 0 { + return 0 + } + + min := args[0] + + for i := 1; i < len(args); i++ { + if args[i] < min { + min = args[i] + } + } + return min +} diff --git a/pkg/scanners/azure/functions/min_test.go b/pkg/scanners/azure/functions/min_test.go new file mode 100644 index 000000000000..28e12ef69de8 --- /dev/null +++ b/pkg/scanners/azure/functions/min_test.go @@ -0,0 +1,58 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Min(t *testing.T) { + test := []struct { + name string + args []interface{} + expected int + }{ + { + name: "min of empty slice", + args: []interface{}{ + []int{}, + }, + expected: 0, + }, + { + name: "min of slice", + args: []interface{}{ + []int{1, 2, 3}, + }, + expected: 1, + }, + { + name: "min of slice with negative numbers", + args: []interface{}{ + []int{-1, -2, -3}, + }, + expected: -3, + }, + { + name: "min of slice with negative and positive numbers", + args: []interface{}{ + []int{-1, 2, -3}, + }, + expected: -3, + }, + { + name: "min of comma separated numbers", + args: []interface{}{ + 1, 2, 3, 4, 5, + }, + expected: 1, + }, + } + + for _, tt := range test { + t.Run(tt.name, func(t *testing.T) { + actual := Min(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/mod.go b/pkg/scanners/azure/functions/mod.go new file mode 100644 index 000000000000..34fb12b7a356 --- /dev/null +++ b/pkg/scanners/azure/functions/mod.go @@ -0,0 +1,14 @@ +package functions + +func Mod(args ...interface{}) interface{} { + if len(args) != 2 { + return 0 + } + + if a, ok := args[0].(int); ok { + if b, ok := args[1].(int); ok { + return a % b + } + } + return 0 +} diff --git a/pkg/scanners/azure/functions/mod_test.go b/pkg/scanners/azure/functions/mod_test.go new file mode 100644 index 000000000000..656e77e9aae3 --- /dev/null +++ b/pkg/scanners/azure/functions/mod_test.go @@ -0,0 +1,41 @@ +package functions + +import "testing" + +func Test_Mod(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "Mod with 1 and 2", + args: []interface{}{1, 2}, + expected: 1, + }, + { + name: "Mod with 2 and 3", + args: []interface{}{2, 3}, + expected: 2, + }, + { + name: "Mod with 3 and -4", + args: []interface{}{3, -4}, + expected: 3, + }, + { + name: "Mod with 7 and 3", + args: []interface{}{7, 3}, + expected: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Mod(tt.args...) 
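+ // Go's % operator truncates toward zero, so the remainder keeps the
+ // sign of the dividend; that is why Mod(3, -4) == 3 in the table above.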
+ if got != tt.expected { + t.Errorf("Mod() = %v, want %v", got, tt.expected) + } + }) + } +} diff --git a/pkg/scanners/azure/functions/mul.go b/pkg/scanners/azure/functions/mul.go new file mode 100644 index 000000000000..9d079728107f --- /dev/null +++ b/pkg/scanners/azure/functions/mul.go @@ -0,0 +1,15 @@ +package functions + +func Mul(args ...interface{}) interface{} { + + if len(args) != 2 { + return nil + } + + if a, ok := args[0].(int); ok { + if b, ok := args[1].(int); ok { + return a * b + } + } + return nil +} diff --git a/pkg/scanners/azure/functions/mul_test.go b/pkg/scanners/azure/functions/mul_test.go new file mode 100644 index 000000000000..cf4ff57607b2 --- /dev/null +++ b/pkg/scanners/azure/functions/mul_test.go @@ -0,0 +1,38 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Mul(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "multiply -2 by 1", + args: []interface{}{-2, 1}, + expected: -2, + }, + { + name: "multiply 4 by 2", + args: []interface{}{4, 2}, + expected: 8, + }, + { + name: "multiply 6 by 3", + args: []interface{}{6, 3}, + expected: 18, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Mul(tt.args...) + assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/scanners/azure/functions/not.go b/pkg/scanners/azure/functions/not.go new file mode 100644 index 000000000000..5de10af5dffa --- /dev/null +++ b/pkg/scanners/azure/functions/not.go @@ -0,0 +1,13 @@ +package functions + +func Not(args ...interface{}) interface{} { + + if len(args) != 1 { + return false + } + + if condition, ok := args[0].(bool); ok { + return !condition + } + return false +} diff --git a/pkg/scanners/azure/functions/not_test.go b/pkg/scanners/azure/functions/not_test.go new file mode 100644 index 000000000000..b1a209768f36 --- /dev/null +++ b/pkg/scanners/azure/functions/not_test.go @@ -0,0 +1,33 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Not(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected bool + }{ + { + name: "Not with true", + args: []interface{}{true}, + expected: false, + }, + { + name: "Not with false", + args: []interface{}{false}, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Not(tt.args...) 
+ assert.Equal(t, tt.expected, got)
+ })
+ }
+}
diff --git a/pkg/scanners/azure/functions/null.go b/pkg/scanners/azure/functions/null.go
new file mode 100644
index 000000000000..597c5485e9f5
--- /dev/null
+++ b/pkg/scanners/azure/functions/null.go
@@ -0,0 +1,5 @@
+package functions
+
+func Null(args ...interface{}) interface{} {
+ return nil
+}
diff --git a/pkg/scanners/azure/functions/null_test.go b/pkg/scanners/azure/functions/null_test.go
new file mode 100644
index 000000000000..3394193415fb
--- /dev/null
+++ b/pkg/scanners/azure/functions/null_test.go
@@ -0,0 +1,12 @@
+package functions
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_Null(t *testing.T) {
+
+ assert.Nil(t, Null())
+}
diff --git a/pkg/scanners/azure/functions/or.go b/pkg/scanners/azure/functions/or.go
new file mode 100644
index 000000000000..87e6f8627ed4
--- /dev/null
+++ b/pkg/scanners/azure/functions/or.go
@@ -0,0 +1,20 @@
+package functions
+
+func Or(args ...interface{}) interface{} {
+
+ if len(args) <= 1 {
+ return false
+ }
+
+ for _, arg := range args {
+ arg1, ok := arg.(bool)
+ if !ok {
+ return false
+ }
+ if arg1 {
+ return true
+ }
+
+ }
+ return false
+}
diff --git a/pkg/scanners/azure/functions/or_test.go b/pkg/scanners/azure/functions/or_test.go
new file mode 100644
index 000000000000..2361c858a82a
--- /dev/null
+++ b/pkg/scanners/azure/functions/or_test.go
@@ -0,0 +1,44 @@
+package functions
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_Or(t *testing.T) {
+
+ tests := []struct {
+ name string
+ args []interface{}
+ expected bool
+ }{
+ {
+ name: "Or with same 2 bools",
+ args: []interface{}{true, true},
+ expected: true,
+ },
+ {
+ name: "Or with same 3 bools",
+ args: []interface{}{true, true, true},
+ expected: true,
+ },
+ {
+ name: "Or with different 4 bools",
+ args: []interface{}{true, true, false, true},
+ expected: true,
+ },
+ {
+ name: "Or with same false 4 bools",
+ args: []interface{}{false, false, false, false},
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := Or(tt.args...)
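+ // Or short-circuits: it returns true at the first true bool, requires
+ // at least two arguments, and bails out with false as soon as it meets
+ // a non-bool argument.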
+ assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/scanners/azure/functions/pad.go b/pkg/scanners/azure/functions/pad.go new file mode 100644 index 000000000000..9d668210b11c --- /dev/null +++ b/pkg/scanners/azure/functions/pad.go @@ -0,0 +1,32 @@ +package functions + +import "strings" + +func PadLeft(args ...interface{}) interface{} { + if len(args) != 3 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + + length, ok := args[1].(int) + if !ok { + return "" + } + + pad, ok := args[2].(string) + if !ok { + return "" + } + + if len(input) >= length { + return input + } + + repeat := (length - len(input)) / len(pad) + + return strings.Repeat(pad, repeat) + input +} diff --git a/pkg/scanners/azure/functions/pad_test.go b/pkg/scanners/azure/functions/pad_test.go new file mode 100644 index 000000000000..e7d274504298 --- /dev/null +++ b/pkg/scanners/azure/functions/pad_test.go @@ -0,0 +1,61 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_PadLeft(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "pad left with a input smaller than length", + args: []interface{}{ + "1234", + 8, + "0", + }, + expected: "00001234", + }, + { + name: "pad left with a input larger than length", + args: []interface{}{ + "1234", + 2, + "0", + }, + expected: "1234", + }, + { + name: "pad left with a input same as than length", + args: []interface{}{ + "1234", + 4, + "0", + }, + expected: "1234", + }, + { + name: "pad left with larger padding character", + args: []interface{}{ + "1234", + 8, + "00", + }, + expected: "00001234", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := PadLeft(tt.args...) 
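+ // Caveat: the pad width uses integer division, so a multi-character pad
+ // only fills completely when the missing width is an exact multiple of
+ // len(pad); ARM's padLeft expects a single padding character, which the
+ // cases above respect.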
+ assert.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/parameters.go b/pkg/scanners/azure/functions/parameters.go new file mode 100644 index 000000000000..b13ee3d60e4e --- /dev/null +++ b/pkg/scanners/azure/functions/parameters.go @@ -0,0 +1 @@ +package functions diff --git a/pkg/scanners/azure/functions/pick_zones.go b/pkg/scanners/azure/functions/pick_zones.go new file mode 100644 index 000000000000..982936633dbe --- /dev/null +++ b/pkg/scanners/azure/functions/pick_zones.go @@ -0,0 +1,23 @@ +package functions + +func PickZones(args ...interface{}) interface{} { + if len(args) < 3 { + return nil + } + numOfZones := 1 + + if len(args) > 3 { + numOfZones = args[3].(int) + if numOfZones > 3 { + numOfZones = 3 + } + } + + var zones []int + + for i := 1; i <= numOfZones; i++ { + zones = append(zones, i) + } + + return zones +} diff --git a/pkg/scanners/azure/functions/pick_zones_test.go b/pkg/scanners/azure/functions/pick_zones_test.go new file mode 100644 index 000000000000..19db480f9b0d --- /dev/null +++ b/pkg/scanners/azure/functions/pick_zones_test.go @@ -0,0 +1,14 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_PickZones(t *testing.T) { + assert.Equal(t, []int{1}, PickZones("Microsoft.Compute", "virtualmachines", "eu-west-1")) + assert.Equal(t, []int{1, 2}, PickZones("Microsoft.Compute", "virtualmachines", "eu-west-1", 2)) + assert.Equal(t, []int{1, 2, 3}, PickZones("Microsoft.Compute", "virtualmachines", "eu-west-1", 3)) + assert.Equal(t, []int{1, 2, 3}, PickZones("Microsoft.Compute", "virtualmachines", "eu-west-1", 4)) +} diff --git a/pkg/scanners/azure/functions/range.go b/pkg/scanners/azure/functions/range.go new file mode 100644 index 000000000000..12a3526957d8 --- /dev/null +++ b/pkg/scanners/azure/functions/range.go @@ -0,0 +1,30 @@ +package functions + +func Range(args ...interface{}) interface{} { + + if len(args) != 2 { + return []interface{}{} + } + + start, ok := args[0].(int) + if !ok { + return []int{} + } + + count, ok := args[1].(int) + if !ok { + return []int{} + } + + if count > 10000 { + count = 10000 + } + + result := make([]int, count) + + for i := 0; i < count; i++ { + result[i] = start + i + } + + return result +} diff --git a/pkg/scanners/azure/functions/range_test.go b/pkg/scanners/azure/functions/range_test.go new file mode 100644 index 000000000000..9c0c6a084b6b --- /dev/null +++ b/pkg/scanners/azure/functions/range_test.go @@ -0,0 +1,47 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Range(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "range for 3 from 1", + args: []interface{}{ + 1, + 3, + }, + expected: []int{1, 2, 3}, + }, + { + name: "range with for 10 from 3", + args: []interface{}{ + 3, + 10, + }, + expected: []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, + }, + { + name: "range with for 10 from -10", + args: []interface{}{ + -10, + 10, + }, + expected: []int{-10, -9, -8, -7, -6, -5, -4, -3, -2, -1}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Range(tt.args...) 
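+ // Range(start, count) builds the ints start..start+count-1, with count
+ // capped at 10000 to mirror the element limit ARM places on range().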
+ assert.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/reference.go b/pkg/scanners/azure/functions/reference.go new file mode 100644 index 000000000000..2f7b38ccf741 --- /dev/null +++ b/pkg/scanners/azure/functions/reference.go @@ -0,0 +1,12 @@ +package functions + +import "fmt" + +// Reference function can't work as per Azure because it requires Azure ARM logic +// best effort is to return the resourcename with a suffix to try and make it unique +func Reference(args ...interface{}) interface{} { + if len(args) < 1 { + return nil + } + return fmt.Sprintf("%v-reference", args[0]) +} diff --git a/pkg/scanners/azure/functions/reference_test.go b/pkg/scanners/azure/functions/reference_test.go new file mode 100644 index 000000000000..c669fe98d3f0 --- /dev/null +++ b/pkg/scanners/azure/functions/reference_test.go @@ -0,0 +1,12 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Reference(t *testing.T) { + assert.Equal(t, "test-reference", Reference("test")) + assert.Equal(t, "123-reference", Reference(123)) +} diff --git a/pkg/scanners/azure/functions/replace.go b/pkg/scanners/azure/functions/replace.go new file mode 100644 index 000000000000..00a7a8a4560f --- /dev/null +++ b/pkg/scanners/azure/functions/replace.go @@ -0,0 +1,26 @@ +package functions + +import "strings" + +func Replace(args ...interface{}) interface{} { + if len(args) != 3 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + + old, ok := args[1].(string) + if !ok { + return "" + } + + new, ok := args[2].(string) + if !ok { + return "" + } + + return strings.ReplaceAll(input, old, new) +} diff --git a/pkg/scanners/azure/functions/replace_test.go b/pkg/scanners/azure/functions/replace_test.go new file mode 100644 index 000000000000..fe8fb40994cd --- /dev/null +++ b/pkg/scanners/azure/functions/replace_test.go @@ -0,0 +1,41 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Replace(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "replace a string", + args: []interface{}{ + "hello", + "l", + "p", + }, + expected: "heppo", + }, + { + name: "replace a string with invalid replacement", + args: []interface{}{ + "hello", + "q", + "p", + }, + expected: "hello", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Replace(tt.args...) 
+ assert.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/pkg/scanners/azure/functions/resource.go b/pkg/scanners/azure/functions/resource.go
new file mode 100644
index 000000000000..7eacfaeccff1
--- /dev/null
+++ b/pkg/scanners/azure/functions/resource.go
@@ -0,0 +1,48 @@
+package functions
+
+import (
+ "fmt"
+)
+
+func ResourceID(args ...interface{}) interface{} {
+ if len(args) < 2 {
+ return nil
+ }
+
+ var resourceID string
+
+ for _, arg := range args {
+ resourceID += "/" + fmt.Sprintf("%v", arg)
+ }
+
+ return resourceID
+}
+
+func ExtensionResourceID(args ...interface{}) interface{} {
+ if len(args) < 3 {
+ return nil
+ }
+
+ var resourceID string
+
+ for _, arg := range args {
+ resourceID += "/" + fmt.Sprintf("%v", arg)
+ }
+
+ return resourceID
+}
+
+func ResourceGroup(args ...interface{}) interface{} {
+ return fmt.Sprintf(`{
+"id": "/subscriptions/%s/resourceGroups/PlaceHolderResourceGroup",
+"name": "Placeholder Resource Group",
+"type":"Microsoft.Resources/resourceGroups",
+"location": "westus",
+"managedBy": "%s",
+"tags": {
+},
+"properties": {
+ "provisioningState": "Succeeded"
+}
+}`, subscriptionID, managingResourceID)
+}
diff --git a/pkg/scanners/azure/functions/resource_test.go b/pkg/scanners/azure/functions/resource_test.go
new file mode 100644
index 000000000000..d6dac14b4184
--- /dev/null
+++ b/pkg/scanners/azure/functions/resource_test.go
@@ -0,0 +1,12 @@
+package functions
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_ResourceID(t *testing.T) {
+ assert.Equal(t, "/test1/test2", ResourceID("test1", "test2"))
+ assert.Equal(t, "/test1/123", ResourceID("test1", 123))
+}
diff --git a/pkg/scanners/azure/functions/scope.go b/pkg/scanners/azure/functions/scope.go
new file mode 100644
index 000000000000..dcd1676b1945
--- /dev/null
+++ b/pkg/scanners/azure/functions/scope.go
@@ -0,0 +1,106 @@
+package functions
+
+import (
+ "fmt"
+
+ "github.com/google/uuid"
+)
+
+var (
+ tenantID = uuid.NewString()
+ groupID = uuid.NewString()
+ updaterID = uuid.NewString()
+ subscriptionID = uuid.NewString()
+ managingResourceID = uuid.NewString()
+)
+
+func ManagementGroup(_ ...interface{}) interface{} {
+
+ return fmt.Sprintf(`{
+ "id": "/providers/Microsoft.Management/managementGroups/mgPlaceholder",
+ "name": "mgPlaceholder",
+ "properties": {
+ "details": {
+ "parent": {
+ "displayName": "Tenant Root Group",
+ "id": "/providers/Microsoft.Management/managementGroups/%[1]s",
+ "name": "%[1]s"
+ },
+ "updatedBy": "%[2]s",
+ "updatedTime": "2020-07-23T21:05:52.661306Z",
+ "version": "1"
+ },
+ "displayName": "Management PlaceHolder Group",
+ "tenantId": "%[3]s"
+ },
+ "type": "/providers/Microsoft.Management/managementGroups"
+ }
+`, groupID, updaterID, tenantID)
+}
+
+func ManagementGroupResourceID(args ...interface{}) interface{} {
+ if len(args) < 2 {
+ return ""
+ }
+
+ switch len(args) {
+ case 3:
+ return fmt.Sprintf("/providers/Microsoft.Management/managementGroups/%s/providers/%s/%s/%s", groupID, args[0], args[1], args[2])
+ case 4:
+ return fmt.Sprintf("/providers/Microsoft.Management/managementGroups/%s/providers/%s/%s/%s", args[0], args[1], args[2], args[3])
+ default:
+ return fmt.Sprintf("/providers/Microsoft.Management/managementGroups/%s/providers/%s/%s", groupID, args[0], args[1])
+ }
+
+}
+
+func Subscription(_ ...interface{}) interface{} {
+ return fmt.Sprintf(`{
+ "id": "/subscriptions/%[1]s",
+ "subscriptionId": "%[1]s",
+ "tenantId": "%[2]s",
+ "displayName": "Placeholder Subscription"
+}`, subscriptionID,
tenantID) +} + +func SubscriptionResourceID(args ...interface{}) interface{} { + if len(args) < 2 { + return nil + } + + switch len(args) { + + case 3: + return fmt.Sprintf("/subscriptions/%s/providers/%s/%s/%s", subscriptionID, args[0], args[1], args[2]) + case 4: + // subscription ID has been provided so use that + return fmt.Sprintf("/subscriptions/%s/providers/%s/%s/%s", args[0], args[1], args[2], args[3]) + default: + + return fmt.Sprintf("/subscriptions/%s/providers/%s/%s", subscriptionID, args[0], args[1]) + } +} + +func Tenant(_ ...interface{}) interface{} { + return fmt.Sprintf(`{ + "countryCode": "US", + "displayName": "Placeholder Tenant Name", + "id": "/tenants/%[1]s", + "tenantId": "%[1]s" + }`, tenantID) +} + +func TenantResourceID(args ...interface{}) interface{} { + if len(args) < 2 { + return nil + } + + switch len(args) { + case 3: + return fmt.Sprintf("/providers/%s/%s/%s", args[0], args[1], args[2]) + + default: + return fmt.Sprintf("/providers/%s/%s", args[0], args[1]) + } + +} diff --git a/pkg/scanners/azure/functions/scope_test.go b/pkg/scanners/azure/functions/scope_test.go new file mode 100644 index 000000000000..af84119e350e --- /dev/null +++ b/pkg/scanners/azure/functions/scope_test.go @@ -0,0 +1,34 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_SubscriptionResourceID(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "scope resource id with subscription ID", + args: []interface{}{ + "4ec875a5-41a2-4837-88cf-4266466e65ed", + "Microsoft.Authorization/roleDefinitions", + "8e3af657-a8ff-443c-a75c-2fe8c4bcb635", + "b34282f6-5e3c-4306-8741-ebd7a871d187", + }, + expected: "/subscriptions/4ec875a5-41a2-4837-88cf-4266466e65ed/providers/Microsoft.Authorization/roleDefinitions/8e3af657-a8ff-443c-a75c-2fe8c4bcb635/b34282f6-5e3c-4306-8741-ebd7a871d187", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := SubscriptionResourceID(tt.args...) 
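+ // With four arguments, as above, the first is treated as an explicit
+ // subscription ID; with two or three, the function falls back to the
+ // package-level placeholder subscriptionID declared in scope.go.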
+ assert.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/pkg/scanners/azure/functions/skip.go b/pkg/scanners/azure/functions/skip.go
new file mode 100644
index 000000000000..b68296fff66d
--- /dev/null
+++ b/pkg/scanners/azure/functions/skip.go
@@ -0,0 +1,51 @@
+package functions
+
+func Skip(args ...interface{}) interface{} {
+ if len(args) != 2 {
+ return ""
+ }
+
+ count, ok := args[1].(int)
+ if !ok {
+ return ""
+ }
+ // a count beyond the end of the input yields an empty result rather
+ // than panicking on an out-of-range reslice
+ switch input := args[0].(type) {
+ case string:
+ if count > len(input) {
+ return ""
+ }
+ return input[count:]
+ case interface{}:
+ switch iType := input.(type) {
+ case []int:
+ if count > len(iType) {
+ return []int{}
+ }
+ return iType[count:]
+ case []string:
+ if count > len(iType) {
+ return []string{}
+ }
+ return iType[count:]
+ case []bool:
+ if count > len(iType) {
+ return []bool{}
+ }
+ return iType[count:]
+ case []float64:
+ if count > len(iType) {
+ return []float64{}
+ }
+ return iType[count:]
+ case []interface{}:
+ if count > len(iType) {
+ return []interface{}{}
+ }
+ return iType[count:]
+ }
+ }
+
+ return ""
+}
diff --git a/pkg/scanners/azure/functions/skip_test.go b/pkg/scanners/azure/functions/skip_test.go
new file mode 100644
index 000000000000..692e6508f7f1
--- /dev/null
+++ b/pkg/scanners/azure/functions/skip_test.go
@@ -0,0 +1,65 @@
+package functions
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_Skip(t *testing.T) {
+
+ tests := []struct {
+ name string
+ args []interface{}
+ expected interface{}
+ }{
+ {
+ name: "skip a string",
+ args: []interface{}{
+ "hello",
+ 1,
+ },
+ expected: "ello",
+ },
+ {
+ name: "skip beyond the length a string",
+ args: []interface{}{
+ "hello",
+ 6,
+ },
+ expected: "",
+ },
+ {
+ name: "skip with a zero count on a string",
+ args: []interface{}{
+ "hello",
+ 0,
+ },
+ expected: "hello",
+ },
+ {
+ name: "skip with slice of ints",
+ args: []interface{}{
+ []int{1, 2, 3, 4, 5},
+ 2,
+ },
+ expected: []int{3, 4, 5},
+ },
+ {
+ name: "skip with slice of strings",
+ args: []interface{}{
+ []string{"hello", "world"},
+ 1,
+ },
+ expected: []string{"world"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := Skip(tt.args...)
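+ // Skip semantics: a zero count returns the input unchanged, and a
+ // count past the end of a string or slice yields an empty result
+ // instead of an out-of-range reslice.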
+ assert.Equal(t, tt.expected, actual)
+ })
+ }
+
+}
diff --git a/pkg/scanners/azure/functions/split.go b/pkg/scanners/azure/functions/split.go
new file mode 100644
index 000000000000..04b7f5779d33
--- /dev/null
+++ b/pkg/scanners/azure/functions/split.go
@@ -0,0 +1,40 @@
+package functions
+
+import "strings"
+
+func Split(args ...interface{}) interface{} {
+ if len(args) != 2 {
+ return ""
+ }
+
+ input, ok := args[0].(string)
+ if !ok {
+ return ""
+ }
+
+ switch separator := args[1].(type) {
+ case string:
+ return strings.Split(input, separator)
+ case interface{}:
+ switch separator := separator.(type) {
+ case []string:
+ m := make(map[rune]int)
+ for _, s := range separator {
+ // only the first byte of each separator is considered;
+ // skip empty strings so s[0] cannot panic
+ if s == "" {
+ continue
+ }
+ m[rune(s[0])] = 1
+ }
+
+ splitter := func(r rune) bool {
+ return m[r] == 1
+ }
+
+ return strings.FieldsFunc(input, splitter)
+ }
+
+ }
+ return []string{}
+}
diff --git a/pkg/scanners/azure/functions/split_test.go b/pkg/scanners/azure/functions/split_test.go
new file mode 100644
index 000000000000..e40df07526aa
--- /dev/null
+++ b/pkg/scanners/azure/functions/split_test.go
@@ -0,0 +1,38 @@
+package functions
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_Split(t *testing.T) {
+ tests := []struct {
+ name string
+ args []interface{}
+ expected []string
+ }{
+ {
+ name: "split a string",
+ args: []interface{}{
+ "hello, world",
+ ",",
+ },
+ expected: []string{"hello", " world"},
+ },
+ {
+ name: "split a string with multiple separators",
+ args: []interface{}{
+ "one;two,three",
+ []string{",", ";"},
+ },
+ expected: []string{"one", "two", "three"},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := Split(tt.args...)
+ assert.Equal(t, tt.expected, actual)
+ })
+ }
+}
diff --git a/pkg/scanners/azure/functions/starts_with.go b/pkg/scanners/azure/functions/starts_with.go
new file mode 100644
index 000000000000..a4eb398cea3d
--- /dev/null
+++ b/pkg/scanners/azure/functions/starts_with.go
@@ -0,0 +1,22 @@
+package functions
+
+import "strings"
+
+func StartsWith(args ...interface{}) interface{} {
+
+ if len(args) != 2 {
+ return false
+ }
+
+ stringToSearch, ok := args[0].(string)
+ if !ok {
+ return false
+ }
+
+ stringToFind, ok := args[1].(string)
+ if !ok {
+ return false
+ }
+
+ return strings.HasPrefix(stringToSearch, stringToFind)
+}
diff --git a/pkg/scanners/azure/functions/starts_with_test.go b/pkg/scanners/azure/functions/starts_with_test.go
new file mode 100644
index 000000000000..4a745478ee51
--- /dev/null
+++ b/pkg/scanners/azure/functions/starts_with_test.go
@@ -0,0 +1,41 @@
+package functions
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_StartsWith(t *testing.T) {
+
+ tests := []struct {
+ name string
+ args []interface{}
+ expected bool
+ }{
+ {
+ name: "string starts with",
+ args: []interface{}{
+ "Hello, world!",
+ "Hello,",
+ },
+ expected: true,
+ },
+ {
+ name: "string does not start with",
+ args: []interface{}{
+ "Hello world!",
+ "Hello,",
+ },
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ actual := StartsWith(tt.args...)
+ assert.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/string.go b/pkg/scanners/azure/functions/string.go new file mode 100644 index 000000000000..cba9997d9e9c --- /dev/null +++ b/pkg/scanners/azure/functions/string.go @@ -0,0 +1,16 @@ +package functions + +import "fmt" + +func String(args ...interface{}) interface{} { + if len(args) != 1 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return fmt.Sprintf("%v", args[0]) + } + + return input +} diff --git a/pkg/scanners/azure/functions/string_test.go b/pkg/scanners/azure/functions/string_test.go new file mode 100644 index 000000000000..ecab50ea8b65 --- /dev/null +++ b/pkg/scanners/azure/functions/string_test.go @@ -0,0 +1,44 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_String(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "string from a string", + args: []interface{}{ + "hello", + }, + expected: "hello", + }, + { + name: "string from a bool", + args: []interface{}{ + false, + }, + expected: "false", + }, + { + name: "string from an int", + args: []interface{}{ + 10, + }, + expected: "10", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := String(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/sub.go b/pkg/scanners/azure/functions/sub.go new file mode 100644 index 000000000000..6013a8c0d509 --- /dev/null +++ b/pkg/scanners/azure/functions/sub.go @@ -0,0 +1,15 @@ +package functions + +func Sub(args ...interface{}) interface{} { + + if len(args) != 2 { + return nil + } + + if a, ok := args[0].(int); ok { + if b, ok := args[1].(int); ok { + return a - b + } + } + return nil +} diff --git a/pkg/scanners/azure/functions/sub_test.go b/pkg/scanners/azure/functions/sub_test.go new file mode 100644 index 000000000000..a3f9308a2710 --- /dev/null +++ b/pkg/scanners/azure/functions/sub_test.go @@ -0,0 +1,43 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Sub(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected int + }{ + { + name: "subtract 2 from 5", + args: []interface{}{5, 2}, + expected: 3, + }, + { + name: "subtract 2 from 1", + args: []interface{}{1, 2}, + expected: -1, + }, + { + name: "subtract 3 from 2", + args: []interface{}{2, 3}, + expected: -1, + }, + { + name: "subtract -4 from 3", + args: []interface{}{3, -4}, + expected: 7, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Sub(tt.args...) 
+ assert.Equal(t, tt.expected, got) + }) + } +} diff --git a/pkg/scanners/azure/functions/substring.go b/pkg/scanners/azure/functions/substring.go new file mode 100644 index 000000000000..fed22f0d14a6 --- /dev/null +++ b/pkg/scanners/azure/functions/substring.go @@ -0,0 +1,36 @@ +package functions + +func SubString(args ...interface{}) interface{} { + if len(args) < 2 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + + start, ok := args[1].(int) + if !ok { + return "" + } + + if len(args) == 2 { + args = append(args, len(input)) + } + + length, ok := args[2].(int) + if !ok { + return "" + } + + if start > len(input) { + return "" + } + + if start+length > len(input) { + return input[start:] + } + + return input[start : start+length] +} diff --git a/pkg/scanners/azure/functions/substring_test.go b/pkg/scanners/azure/functions/substring_test.go new file mode 100644 index 000000000000..56e2ea107c73 --- /dev/null +++ b/pkg/scanners/azure/functions/substring_test.go @@ -0,0 +1,49 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_SubString(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "substring a string", + args: []interface{}{ + "hello", + 1, + 3, + }, + expected: "ell", + }, + { + name: "substring a string with no upper bound", + args: []interface{}{ + "hello", + 1, + }, + expected: "ello", + }, + { + name: "substring a string with start higher than the length", + args: []interface{}{ + "hello", + 10, + }, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := SubString(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/take.go b/pkg/scanners/azure/functions/take.go new file mode 100644 index 000000000000..738c9d7d8064 --- /dev/null +++ b/pkg/scanners/azure/functions/take.go @@ -0,0 +1,49 @@ +package functions + +func Take(args ...interface{}) interface{} { + if len(args) != 2 { + return "" + } + + count, ok := args[1].(int) + if !ok { + return "" + } + switch input := args[0].(type) { + case string: + if count > len(input) { + return input + } + return input[:count] + case interface{}: + switch iType := input.(type) { + case []int: + if count > len(iType) { + return iType + } + return iType[:count] + case []string: + if count > len(iType) { + return iType + } + return iType[:count] + case []bool: + if count > len(iType) { + return iType + } + return iType[:count] + case []float64: + if count > len(iType) { + return iType + } + return iType[:count] + case []interface{}: + if count > len(iType) { + return iType + } + return iType[:count] + } + } + + return "" +} diff --git a/pkg/scanners/azure/functions/take_test.go b/pkg/scanners/azure/functions/take_test.go new file mode 100644 index 000000000000..68c19070a6e9 --- /dev/null +++ b/pkg/scanners/azure/functions/take_test.go @@ -0,0 +1,63 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Take(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "take a string", + args: []interface{}{ + "hello", + 2, + }, + expected: "he", + }, + { + name: "take a string with invalid count", + args: []interface{}{ + "hello", + 10, + }, + expected: "hello", + }, + { + name: "take a string from slice", + args: []interface{}{ + []string{"a", "b", "c"}, + 2, + }, + expected: []string{"a", 
"b"}, + }, + { + name: "take a string from a slice", + args: []interface{}{ + []string{"a", "b", "c"}, + 2, + }, + expected: []string{"a", "b"}, + }, + { + name: "take a string from a slice with invalid count", + args: []interface{}{ + []string{"a", "b", "c"}, + 10, + }, + expected: []string{"a", "b", "c"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Take(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/trim.go b/pkg/scanners/azure/functions/trim.go new file mode 100644 index 000000000000..5215bbe7f43d --- /dev/null +++ b/pkg/scanners/azure/functions/trim.go @@ -0,0 +1,16 @@ +package functions + +import "strings" + +func Trim(args ...interface{}) interface{} { + if len(args) != 1 { + return "" + } + + input, ok := args[0].(string) + if !ok { + return "" + } + + return strings.TrimSpace(input) +} diff --git a/pkg/scanners/azure/functions/trim_test.go b/pkg/scanners/azure/functions/trim_test.go new file mode 100644 index 000000000000..44a787b0f268 --- /dev/null +++ b/pkg/scanners/azure/functions/trim_test.go @@ -0,0 +1,71 @@ +package functions + +import "testing" + +func Test_Trim(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "trim a string", + args: []interface{}{ + " hello ", + }, + expected: "hello", + }, + { + name: "trim a string with multiple spaces", + args: []interface{}{ + " hello ", + }, + expected: "hello", + }, + { + name: "trim a string with tabs", + args: []interface{}{ + " hello ", + }, + expected: "hello", + }, + { + name: "trim a string with new lines", + args: []interface{}{ + ` + +hello + +`, + }, + expected: "hello", + }, + { + name: "trim a string with tabs, spaces and new lines", + args: []interface{}{ + ` + +hello + +`, + }, + expected: "hello", + }, + { + name: "trim a string with non string input", + args: []interface{}{ + 10, + }, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Trim(tt.args...) + if actual != tt.expected { + t.Errorf("Trim(%v) = %v, expected %v", tt.args, actual, tt.expected) + } + }) + } +} diff --git a/pkg/scanners/azure/functions/true.go b/pkg/scanners/azure/functions/true.go new file mode 100644 index 000000000000..9f13af580757 --- /dev/null +++ b/pkg/scanners/azure/functions/true.go @@ -0,0 +1,5 @@ +package functions + +func True(args ...interface{}) interface{} { + return true +} diff --git a/pkg/scanners/azure/functions/union.go b/pkg/scanners/azure/functions/union.go new file mode 100644 index 000000000000..07bb98f28eeb --- /dev/null +++ b/pkg/scanners/azure/functions/union.go @@ -0,0 +1,60 @@ +package functions + +import "sort" + +func Union(args ...interface{}) interface{} { + if len(args) == 0 { + return []interface{}{} + } + if len(args) == 1 { + return args[0] + } + + switch args[0].(type) { + case map[string]interface{}: + return unionMap(args...) + case interface{}: + return unionArray(args...) 
+ } + + return []interface{}{} + +} + +func unionMap(args ...interface{}) interface{} { + result := make(map[string]interface{}) + + for _, arg := range args { + switch iType := arg.(type) { + case map[string]interface{}: + for k, v := range iType { + result[k] = v + } + } + } + + return result +} + +func unionArray(args ...interface{}) interface{} { + result := []interface{}{} + union := make(map[interface{}]bool) + + for _, arg := range args { + switch iType := arg.(type) { + case []interface{}: + for _, item := range iType { + union[item] = true + } + } + } + + for k := range union { + result = append(result, k) + } + sort.Slice(result, func(i, j int) bool { + return result[i].(string) < result[j].(string) + }) + + return result +} diff --git a/pkg/scanners/azure/functions/union_test.go b/pkg/scanners/azure/functions/union_test.go new file mode 100644 index 000000000000..56d5bf809088 --- /dev/null +++ b/pkg/scanners/azure/functions/union_test.go @@ -0,0 +1,110 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Union(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected interface{} + }{ + { + name: "union single array", + args: []interface{}{ + []interface{}{"a", "b", "c"}, + }, + expected: []interface{}{"a", "b", "c"}, + }, + { + name: "union two arrays", + args: []interface{}{ + []interface{}{"a", "b", "c"}, + []interface{}{"b", "c", "d"}, + }, + expected: []interface{}{"a", "b", "c", "d"}, + }, + { + name: "union two arrays", + args: []interface{}{ + []interface{}{"a", "b", "c"}, + []interface{}{"b", "c", "d"}, + []interface{}{"b", "c", "d", "e"}, + }, + expected: []interface{}{"a", "b", "c", "d", "e"}, + }, + { + name: "union single maps", + args: []interface{}{ + map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + }, + }, + expected: map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + }, + }, + { + name: "union two maps", + args: []interface{}{ + map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + }, + map[string]interface{}{ + "b": "b", + "c": "c", + "d": "d", + }, + }, + expected: map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + "d": "d", + }, + }, + { + name: "union three maps", + args: []interface{}{ + map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + }, + map[string]interface{}{ + "b": "b", + "c": "c", + "d": "d", + }, + map[string]interface{}{ + "b": "b", + "c": "c", + "e": "e", + }, + }, + expected: map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + "d": "d", + "e": "e", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Union(tt.args...) 
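+			// unionArray sorts its merged result, so the expected slices
+			// can be asserted in a deterministic order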
+ assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/unique_string.go b/pkg/scanners/azure/functions/unique_string.go new file mode 100644 index 000000000000..fba35c6459ac --- /dev/null +++ b/pkg/scanners/azure/functions/unique_string.go @@ -0,0 +1,21 @@ +package functions + +import ( + "crypto/sha256" + "fmt" + "strings" +) + +func UniqueString(args ...interface{}) interface{} { + if len(args) == 0 { + return "" + } + + hashParts := make([]string, len(args)) + for i, str := range args { + hashParts[i] = str.(string) + } + + hash := sha256.New().Sum([]byte(strings.Join(hashParts, ""))) + return fmt.Sprintf("%x", hash)[:13] +} diff --git a/pkg/scanners/azure/functions/unique_string_test.go b/pkg/scanners/azure/functions/unique_string_test.go new file mode 100644 index 000000000000..035591eb46aa --- /dev/null +++ b/pkg/scanners/azure/functions/unique_string_test.go @@ -0,0 +1,38 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_UniqueString(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "unique string from a string", + args: []interface{}{ + "hello", + }, + expected: "68656c6c6fe3b", + }, + { + name: "unique string from a string", + args: []interface{}{ + "hello", + "world", + }, + expected: "68656c6c6f776", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := UniqueString(tt.args...) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/functions/uri.go b/pkg/scanners/azure/functions/uri.go new file mode 100644 index 000000000000..949e12235dea --- /dev/null +++ b/pkg/scanners/azure/functions/uri.go @@ -0,0 +1,29 @@ +package functions + +import ( + "net/url" + "path" +) + +func Uri(args ...interface{}) interface{} { + if len(args) != 2 { + return "" + } + + result, err := joinPath(args[0].(string), args[1].(string)) + if err != nil { + return "" + } + return result +} + +// Backport url.JoinPath until we're ready for Go 1.19 +func joinPath(base string, elem ...string) (string, error) { + u, err := url.Parse(base) + if err != nil { + return "", err + } + elem = append([]string{u.EscapedPath()}, elem...) + u.Path = path.Join(elem...) + return u.String(), nil +} diff --git a/pkg/scanners/azure/functions/uri_test.go b/pkg/scanners/azure/functions/uri_test.go new file mode 100644 index 000000000000..1a63fe6bbd01 --- /dev/null +++ b/pkg/scanners/azure/functions/uri_test.go @@ -0,0 +1,48 @@ +package functions + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_Uri(t *testing.T) { + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "uri from a base and relative with no trailing slash", + args: []interface{}{ + "http://contoso.org/firstpath", + "myscript.sh", + }, + expected: "http://contoso.org/firstpath/myscript.sh", + }, + { + name: "uri from a base and relative with trailing slash", + args: []interface{}{ + "http://contoso.org/firstpath/", + "myscript.sh", + }, + expected: "http://contoso.org/firstpath/myscript.sh", + }, + { + name: "uri from a base with trailing slash and relative with ../", + args: []interface{}{ + "http://contoso.org/firstpath/", + "../myscript.sh", + }, + expected: "http://contoso.org/myscript.sh", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := Uri(tt.args...) 
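+			// Uri delegates to the joinPath backport in uri.go, which also
+			// normalises "../" segments via path.Join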
+ require.Equal(t, tt.expected, actual) + }) + } + +} diff --git a/pkg/scanners/azure/functions/utc_now.go b/pkg/scanners/azure/functions/utc_now.go new file mode 100644 index 000000000000..68c93bd58fee --- /dev/null +++ b/pkg/scanners/azure/functions/utc_now.go @@ -0,0 +1,47 @@ +package functions + +import ( + "strings" + "time" +) + +func UTCNow(args ...interface{}) interface{} { + if len(args) > 1 { + return nil + } + + if len(args) == 1 { + format, ok := args[0].(string) + if ok { + goFormat := convertFormat(format) + return time.Now().UTC().Format(goFormat) + } + } + + return time.Now().UTC().Format(time.RFC3339) +} + +// don't look directly at this code +func convertFormat(format string) string { + goFormat := format + goFormat = strings.ReplaceAll(goFormat, "yyyy", "2006") + goFormat = strings.ReplaceAll(goFormat, "yy", "06") + goFormat = strings.ReplaceAll(goFormat, "MMMM", "January") + goFormat = strings.ReplaceAll(goFormat, "MMM", "Jan") + goFormat = strings.ReplaceAll(goFormat, "MM", "01") + goFormat = strings.ReplaceAll(goFormat, "M", "1") + goFormat = strings.ReplaceAll(goFormat, "dd", "02") + goFormat = strings.ReplaceAll(goFormat, "d", "2") + goFormat = strings.ReplaceAll(goFormat, "HH", "15") + goFormat = strings.ReplaceAll(goFormat, "H", "3") + goFormat = strings.ReplaceAll(goFormat, "hh", "03") + goFormat = strings.ReplaceAll(goFormat, "h", "3") + goFormat = strings.ReplaceAll(goFormat, "mm", "04") + goFormat = strings.ReplaceAll(goFormat, "m", "4") + goFormat = strings.ReplaceAll(goFormat, "ss", "05") + goFormat = strings.ReplaceAll(goFormat, "s", "5") + goFormat = strings.ReplaceAll(goFormat, "tt", "PM") + goFormat = strings.ReplaceAll(goFormat, "t", "PM") + return goFormat + +} diff --git a/pkg/scanners/azure/functions/utc_now_test.go b/pkg/scanners/azure/functions/utc_now_test.go new file mode 100644 index 000000000000..c203c3e70a0a --- /dev/null +++ b/pkg/scanners/azure/functions/utc_now_test.go @@ -0,0 +1,40 @@ +package functions + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_UTCNow(t *testing.T) { + + tests := []struct { + name string + args []interface{} + expected string + }{ + { + name: "utc now day", + args: []interface{}{ + "d", + }, + expected: fmt.Sprintf("%d", time.Now().UTC().Day()), + }, + { + name: "utc now date", + args: []interface{}{ + "yyyy-M-d", + }, + expected: fmt.Sprintf("%d-%d-%d", time.Now().UTC().Year(), time.Now().UTC().Month(), time.Now().UTC().Day()), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := UTCNow(tt.args...) 
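+			// the expected value is recomputed from time.Now() above, so this
+			// assertion can in principle flake if the test straddles midnight UTC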
+ assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/pkg/scanners/azure/resolver/resolver.go b/pkg/scanners/azure/resolver/resolver.go new file mode 100644 index 000000000000..55d2f949ba2a --- /dev/null +++ b/pkg/scanners/azure/resolver/resolver.go @@ -0,0 +1,51 @@ +package resolver + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/scanners/azure/expressions" + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type Resolver interface { + ResolveExpression(expression azure.Value) azure.Value + SetDeployment(d *azure.Deployment) +} + +func NewResolver() Resolver { + return &resolver{} +} + +type resolver struct { + deployment *azure.Deployment +} + +func (r *resolver) SetDeployment(d *azure.Deployment) { + r.deployment = d +} + +func (r *resolver) ResolveExpression(expression azure.Value) azure.Value { + if expression.Kind != azure.KindExpression { + return expression + } + if r.deployment == nil { + panic("cannot resolve expression on nil deployment") + } + code := expression.AsString() + + resolved, err := r.resolveExpressionString(code, expression.GetMetadata()) + if err != nil { + expression.Kind = azure.KindUnresolvable + return expression + } + return resolved +} + +func (r *resolver) resolveExpressionString(code string, metadata defsecTypes.MisconfigMetadata) (azure.Value, error) { + et, err := expressions.NewExpressionTree(code) + if err != nil { + return azure.NullValue, err + } + + evaluatedValue := et.Evaluate(r.deployment) + return azure.NewValue(evaluatedValue, metadata), nil +} diff --git a/pkg/scanners/azure/resolver/resolver_test.go b/pkg/scanners/azure/resolver/resolver_test.go new file mode 100644 index 000000000000..07b9655357f2 --- /dev/null +++ b/pkg/scanners/azure/resolver/resolver_test.go @@ -0,0 +1,101 @@ +package resolver + +import ( + "testing" + "time" + + "github.com/aquasecurity/trivy/pkg/scanners/azure" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/require" +) + +func Test_resolveFunc(t *testing.T) { + + tests := []struct { + name string + expr string + expected string + }{ + { + name: "simple format call", + expr: "format('{0}/{1}', 'myPostgreSQLServer', 'log_checkpoints')", + expected: "myPostgreSQLServer/log_checkpoints", + }, + { + name: "simple format call with numbers", + expr: "format('{0} + {1} = {2}', 1, 2, 3)", + expected: "1 + 2 = 3", + }, + { + name: "format with nested format", + expr: "format('{0} + {1} = {2}', format('{0}', 1), 2, 3)", + expected: "1 + 2 = 3", + }, + { + name: "format with multiple nested format", + expr: "format('{0} + {1} = {2}', format('{0}', 1), 2, format('{0}', 3))", + expected: "1 + 2 = 3", + }, + { + name: "format with nested base64", + expr: "format('the base64 of \"hello, world\" is {0}', base64('hello, world'))", + expected: "the base64 of \"hello, world\" is aGVsbG8sIHdvcmxk", + }, + { + name: "dateTimeAdd with add a day", + expr: "dateTimeAdd(utcNow('yyyy-MM-dd'), 'P1D', 'yyyy-MM-dd')", + expected: time.Now().UTC().AddDate(0, 0, 1).Format("2006-01-02"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resolver := resolver{} + + resolvedValue, err := resolver.resolveExpressionString(tt.expr, types.NewTestMisconfigMetadata()) + require.NoError(t, err) + require.Equal(t, azure.KindString, resolvedValue.Kind) + + require.Equal(t, tt.expected, resolvedValue.AsString()) + }) + } +} + +func Test_resolveParameter(t *testing.T) { + tests := []struct 
{ + name string + deployment *azure.Deployment + expr string + expected string + }{ + { + name: "format call with parameter", + deployment: &azure.Deployment{ + Parameters: []azure.Parameter{ + { + Variable: azure.Variable{ + Name: "dbName", + Value: azure.NewValue("myPostgreSQLServer", types.NewTestMisconfigMetadata()), + }, + }, + }, + }, + expr: "format('{0}/{1}', parameters('dbName'), 'log_checkpoints')", + expected: "myPostgreSQLServer/log_checkpoints", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resolver := resolver{ + deployment: tt.deployment, + } + + resolvedValue, err := resolver.resolveExpressionString(tt.expr, types.NewTestMisconfigMetadata()) + require.NoError(t, err) + require.Equal(t, azure.KindString, resolvedValue.Kind) + + require.Equal(t, tt.expected, resolvedValue.AsString()) + }) + } + +} diff --git a/pkg/scanners/azure/value.go b/pkg/scanners/azure/value.go new file mode 100644 index 000000000000..ff5ece2259e0 --- /dev/null +++ b/pkg/scanners/azure/value.go @@ -0,0 +1,358 @@ +package azure + +import ( + "strings" + "time" + + "golang.org/x/exp/slices" + + "github.com/aquasecurity/trivy/pkg/scanners/azure/arm/parser/armjson" + "github.com/aquasecurity/trivy/pkg/types" +) + +type EvalContext struct{} + +type Kind string + +const ( + KindUnresolvable Kind = "unresolvable" + KindNull Kind = "null" + KindBoolean Kind = "boolean" + KindString Kind = "string" + KindNumber Kind = "number" + KindObject Kind = "object" + KindArray Kind = "array" + KindExpression Kind = "expression" +) + +type Value struct { + types.MisconfigMetadata + rLit interface{} + rMap map[string]Value + rArr []Value + Kind Kind + Comments []string +} + +var NullValue = Value{ + Kind: KindNull, +} + +func NewValue(value interface{}, metadata types.MisconfigMetadata) Value { + + v := Value{ + MisconfigMetadata: metadata, + } + + switch ty := value.(type) { + case []interface{}: + v.Kind = KindArray + for _, child := range ty { + if internal, ok := child.(Value); ok { + v.rArr = append(v.rArr, internal) + } else { + v.rArr = append(v.rArr, NewValue(child, metadata)) + } + } + case []Value: + v.Kind = KindArray + v.rArr = append(v.rArr, ty...) 
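+		// elements of a []Value are assumed to already carry their own
+		// metadata, so they are appended without re-wrapping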
+ + case map[string]interface{}: + v.Kind = KindObject + v.rMap = make(map[string]Value) + for key, val := range ty { + if internal, ok := val.(Value); ok { + v.rMap[key] = internal + } else { + v.rMap[key] = NewValue(val, metadata) + } + } + case map[string]Value: + v.Kind = KindObject + v.rMap = make(map[string]Value) + for key, val := range ty { + v.rMap[key] = val + } + case string: + v.Kind = KindString + v.rLit = ty + case int, int64, int32, float32, float64, int8, int16, uint8, uint16, uint32, uint64: + v.Kind = KindNumber + v.rLit = ty + case bool: + v.Kind = KindBoolean + v.rLit = ty + case nil: + v.Kind = KindNull + v.rLit = ty + default: + v.Kind = KindUnresolvable + v.rLit = ty + } + + return v +} + +func (v *Value) GetMetadata() types.MisconfigMetadata { + return v.MisconfigMetadata +} + +func (v *Value) UnmarshalJSONWithMetadata(node armjson.Node) error { + + v.updateValueKind(node) + + v.MisconfigMetadata = node.Metadata() + + switch node.Kind() { + case armjson.KindArray: + err := v.unmarshallArray(node) + if err != nil { + return err + } + case armjson.KindObject: + err := v.unmarshalObject(node) + if err != nil { + return err + } + case armjson.KindString: + err := v.unmarshalString(node) + if err != nil { + return err + } + default: + if err := node.Decode(&v.rLit); err != nil { + return err + } + } + + for _, comment := range node.Comments() { + var str string + if err := comment.Decode(&str); err != nil { + return err + } + // remove `\r` from comment when running windows + str = strings.ReplaceAll(str, "\r", "") + + v.Comments = append(v.Comments, str) + } + return nil +} + +func (v *Value) unmarshalString(node armjson.Node) error { + var str string + if err := node.Decode(&str); err != nil { + return err + } + if strings.HasPrefix(str, "[") && !strings.HasPrefix(str, "[[") && strings.HasSuffix(str, "]") { + // function! 
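+		// ARM expression strings take the form "[expression]"; a leading
+		// "[[" escapes a literal bracket, which is why it is excluded above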
+ v.Kind = KindExpression + v.rLit = str[1 : len(str)-1] + } else { + v.rLit = str + } + return nil +} + +func (v *Value) unmarshalObject(node armjson.Node) error { + obj := make(map[string]Value) + for i := 0; i < len(node.Content()); i += 2 { + var key string + if err := node.Content()[i].Decode(&key); err != nil { + return err + } + var val Value + if err := val.UnmarshalJSONWithMetadata(node.Content()[i+1]); err != nil { + return err + } + obj[key] = val + } + v.rMap = obj + return nil +} + +func (v *Value) unmarshallArray(node armjson.Node) error { + var arr []Value + for _, child := range node.Content() { + var val Value + if err := val.UnmarshalJSONWithMetadata(child); err != nil { + return err + } + arr = append(arr, val) + } + v.rArr = arr + return nil +} + +func (v *Value) updateValueKind(node armjson.Node) { + switch node.Kind() { + case armjson.KindString: + v.Kind = KindString + case armjson.KindNumber: + v.Kind = KindNumber + case armjson.KindBoolean: + v.Kind = KindBoolean + case armjson.KindObject: + v.Kind = KindObject + case armjson.KindNull: + v.Kind = KindNull + case armjson.KindArray: + v.Kind = KindArray + default: + panic(node.Kind()) + } +} + +func (v Value) AsString() string { + v.Resolve() + + if v.Kind != KindString { + return "" + } + + return v.rLit.(string) +} + +func (v Value) AsBool() bool { + v.Resolve() + if v.Kind != KindBoolean { + return false + } + return v.rLit.(bool) +} + +func (v Value) AsInt() int { + v.Resolve() + if v.Kind != KindNumber { + return 0 + } + return int(v.rLit.(int64)) +} + +func (v Value) AsFloat() float64 { + v.Resolve() + if v.Kind != KindNumber { + return 0 + } + return v.rLit.(float64) +} + +func (v Value) AsIntValue(defaultValue int, metadata types.MisconfigMetadata) types.IntValue { + v.Resolve() + if v.Kind != KindNumber { + return types.Int(defaultValue, metadata) + } + return types.Int(v.AsInt(), metadata) +} + +func (v Value) AsBoolValue(defaultValue bool, metadata types.MisconfigMetadata) types.BoolValue { + v.Resolve() + if v.Kind == KindString { + possibleValue := strings.ToLower(v.rLit.(string)) + if slices.Contains([]string{"true", "1", "yes", "on", "enabled"}, possibleValue) { + return types.Bool(true, metadata) + } + } + + if v.Kind != KindBoolean { + return types.Bool(defaultValue, metadata) + } + + return types.Bool(v.rLit.(bool), v.GetMetadata()) +} + +func (v Value) EqualTo(value interface{}) bool { + switch ty := value.(type) { + case string: + return v.AsString() == ty + default: + panic("not supported") + } +} + +func (v Value) AsStringValue(defaultValue string, metadata types.MisconfigMetadata) types.StringValue { + v.Resolve() + if v.Kind != KindString { + return types.StringDefault(defaultValue, metadata) + } + return types.String(v.rLit.(string), v.MisconfigMetadata) +} + +func (v Value) GetMapValue(key string) Value { + v.Resolve() + if v.Kind != KindObject { + return NullValue + } + return v.rMap[key] +} + +func (v Value) AsMap() map[string]Value { + v.Resolve() + if v.Kind != KindObject { + return nil + } + return v.rMap +} + +func (v Value) AsList() []Value { + v.Resolve() + if v.Kind != KindArray { + return nil + } + return v.rArr +} + +func (v Value) Raw() interface{} { + switch v.Kind { + case KindArray: + // TODO: recursively build raw array + return nil + case KindObject: + // TODO: recursively build raw object + return nil + default: + return v.rLit + } +} + +func (v *Value) Resolve() { + if v.Kind != KindExpression { + return + } + // if resolver, ok := v.Metadata.Internal().(Resolver); ok { + 
// *v = resolver.ResolveExpression(*v)
+	// }
+}
+
+func (v Value) HasKey(key string) bool {
+	v.Resolve()
+	_, ok := v.rMap[key]
+	return ok
+}
+
+func (v Value) AsTimeValue(metadata types.MisconfigMetadata) types.TimeValue {
+	v.Resolve()
+	// numbers are treated as unix timestamps; this check must come before the
+	// string check, otherwise it is unreachable
+	if v.Kind == KindNumber {
+		switch n := v.rLit.(type) {
+		case int64:
+			return types.Time(time.Unix(n, 0), metadata)
+		case float64:
+			return types.Time(time.Unix(int64(n), 0), metadata)
+		}
+		return types.Time(time.Time{}, metadata)
+	}
+	if v.Kind != KindString {
+		return types.Time(time.Time{}, metadata)
+	}
+	t, err := time.Parse(time.RFC3339, v.rLit.(string))
+	if err != nil {
+		return types.Time(time.Time{}, metadata)
+	}
+	return types.Time(t, metadata)
+}
+
+func (v Value) AsStringValuesList(defaultValue string) (stringValues []types.StringValue) {
+	v.Resolve()
+	if v.Kind != KindArray {
+		return
+	}
+	for _, item := range v.rArr {
+		stringValues = append(stringValues, item.AsStringValue(defaultValue, item.MisconfigMetadata))
+	}
+
+	return stringValues
+}
diff --git a/pkg/scanners/azure/value_test.go b/pkg/scanners/azure/value_test.go
new file mode 100644
index 000000000000..8422a9abe0af
--- /dev/null
+++ b/pkg/scanners/azure/value_test.go
@@ -0,0 +1,13 @@
+package azure
+
+import (
+	"testing"
+
+	"github.com/aquasecurity/trivy/pkg/types"
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_ValueAsInt(t *testing.T) {
+	val := NewValue(int64(10), types.NewTestMisconfigMetadata())
+	assert.Equal(t, 10, val.AsInt())
+}
diff --git a/pkg/scanners/cloudformation/cftypes/types.go b/pkg/scanners/cloudformation/cftypes/types.go
new file mode 100644
index 000000000000..44d9c1fd2a93
--- /dev/null
+++ b/pkg/scanners/cloudformation/cftypes/types.go
@@ -0,0 +1,12 @@
+package cftypes
+
+type CfType string
+
+const (
+	String  CfType = "string"
+	Int     CfType = "int"
+	Float64 CfType = "float64"
+	Bool    CfType = "bool"
+	Map     CfType = "map"
+	List    CfType = "list"
+)
diff --git a/pkg/scanners/cloudformation/parser/errors.go b/pkg/scanners/cloudformation/parser/errors.go
new file mode 100644
index 000000000000..655f137cd271
--- /dev/null
+++ b/pkg/scanners/cloudformation/parser/errors.go
@@ -0,0 +1,24 @@
+package parser
+
+import (
+	"fmt"
+)
+
+type InvalidContentError struct {
+	source string
+	err    error
+}
+
+func NewErrInvalidContent(source string, err error) *InvalidContentError {
+	return &InvalidContentError{
+		source: source,
+		err:    err,
+	}
+}
+func (e *InvalidContentError) Error() string {
+	return fmt.Sprintf("Invalid content in file: %s. 
Error: %v", e.source, e.err) +} + +func (e *InvalidContentError) Reason() error { + return e.err +} diff --git a/pkg/scanners/cloudformation/parser/file_context.go b/pkg/scanners/cloudformation/parser/file_context.go new file mode 100644 index 000000000000..f0313ee0202e --- /dev/null +++ b/pkg/scanners/cloudformation/parser/file_context.go @@ -0,0 +1,61 @@ +package parser + +import ( + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type SourceFormat string + +const ( + YamlSourceFormat SourceFormat = "yaml" + JsonSourceFormat SourceFormat = "json" +) + +type FileContexts []*FileContext + +type FileContext struct { + filepath string + lines []string + SourceFormat SourceFormat + Parameters map[string]*Parameter `json:"Parameters" yaml:"Parameters"` + Resources map[string]*Resource `json:"Resources" yaml:"Resources"` + Globals map[string]*Resource `json:"Globals" yaml:"Globals"` + Mappings map[string]interface{} `json:"Mappings,omitempty" yaml:"Mappings"` + Conditions map[string]Property `json:"Conditions,omitempty" yaml:"Conditions"` +} + +func (t *FileContext) GetResourceByLogicalID(name string) *Resource { + for n, r := range t.Resources { + if name == n { + return r + } + } + return nil +} + +func (t *FileContext) GetResourcesByType(names ...string) []*Resource { + var resources []*Resource + for _, r := range t.Resources { + for _, name := range names { + if name == r.Type() { + // + resources = append(resources, r) + } + } + } + return resources +} + +func (t *FileContext) Metadata() defsecTypes.MisconfigMetadata { + rng := defsecTypes.NewRange(t.filepath, 1, len(t.lines), "", nil) + + return defsecTypes.NewMisconfigMetadata(rng, NewCFReference("Template", rng).String()) +} + +func (t *FileContext) OverrideParameters(params map[string]any) { + for key := range t.Parameters { + if val, ok := params[key]; ok { + t.Parameters[key].UpdateDefault(val) + } + } +} diff --git a/pkg/scanners/cloudformation/parser/file_context_test.go b/pkg/scanners/cloudformation/parser/file_context_test.go new file mode 100644 index 000000000000..bbf5db4ddc39 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/file_context_test.go @@ -0,0 +1,61 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFileContext_OverrideParameters(t *testing.T) { + tests := []struct { + name string + ctx FileContext + arg map[string]any + expected map[string]*Parameter + }{ + { + name: "happy", + ctx: FileContext{ + Parameters: map[string]*Parameter{ + "BucketName": { + inner: parameterInner{ + Type: "String", + Default: "test", + }, + }, + "QueueName": { + inner: parameterInner{ + Type: "String", + }, + }, + }, + }, + arg: map[string]any{ + "BucketName": "test2", + "QueueName": "test", + "SomeKey": "some_value", + }, + expected: map[string]*Parameter{ + "BucketName": { + inner: parameterInner{ + Type: "String", + Default: "test2", + }, + }, + "QueueName": { + inner: parameterInner{ + Type: "String", + Default: "test", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.ctx.OverrideParameters(tt.arg) + assert.Equal(t, tt.expected, tt.ctx.Parameters) + }) + } +} diff --git a/pkg/scanners/cloudformation/parser/fn_and.go b/pkg/scanners/cloudformation/parser/fn_and.go new file mode 100644 index 000000000000..82a9f7bdcb19 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_and.go @@ -0,0 +1,38 @@ +package parser + +import "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + +func 
ResolveAnd(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::And"].AsList() + + if len(refValue) < 2 { + return abortIntrinsic(property, "Fn::And should have at least 2 values, returning original Property") + } + + results := make([]bool, len(refValue)) + for i := 0; i < len(refValue); i++ { + + r := false + if refValue[i].IsBool() { + r = refValue[i].AsBool() + } + + results[i] = r + } + + theSame := allSameStrings(results) + return property.deriveResolved(cftypes.Bool, theSame), true +} + +func allSameStrings(a []bool) bool { + for i := 1; i < len(a); i++ { + if a[i] != a[0] { + return false + } + } + return true +} diff --git a/pkg/scanners/cloudformation/parser/fn_and_test.go b/pkg/scanners/cloudformation/parser/fn_and_test.go new file mode 100644 index 000000000000..ab26f8315708 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_and_test.go @@ -0,0 +1,186 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_and_value(t *testing.T) { + + property1 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }, + } + + property2 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }, + } + andProperty := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::And": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + property1, + property2, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(andProperty) + require.True(t, success) + + assert.True(t, resolvedProperty.IsTrue()) +} + +func Test_resolve_and_value_not_the_same(t *testing.T) { + + property1 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "bar", + }, + }, + }, + }, + }, + }, + }, + } + + property2 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: 
PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }, + } + andProperty := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::And": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + property1, + property2, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(andProperty) + require.True(t, success) + + assert.False(t, resolvedProperty.IsTrue()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_base64.go b/pkg/scanners/cloudformation/parser/fn_base64.go new file mode 100644 index 000000000000..e1b8bcbeca6e --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_base64.go @@ -0,0 +1,19 @@ +package parser + +import ( + "encoding/base64" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func ResolveBase64(property *Property) (*Property, bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::Base64"].AsString() + + retVal := base64.StdEncoding.EncodeToString([]byte(refValue)) + + return property.deriveResolved(cftypes.String, retVal), true +} diff --git a/pkg/scanners/cloudformation/parser/fn_base64_test.go b/pkg/scanners/cloudformation/parser/fn_base64_test.go new file mode 100644 index 000000000000..efacf63eeefe --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_base64_test.go @@ -0,0 +1,35 @@ +package parser + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "testing" +) + +func Test_resolve_base64_value(t *testing.T) { + + property := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Base64": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "HelloWorld", + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.Equal(t, "SGVsbG9Xb3JsZA==", resolvedProperty.AsString()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_builtin.go b/pkg/scanners/cloudformation/parser/fn_builtin.go new file mode 100644 index 000000000000..f20011618889 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_builtin.go @@ -0,0 +1,65 @@ +package parser + +import ( + "fmt" + "net" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/apparentlymart/go-cidr/cidr" +) + +func GetAzs(property *Property) (*Property, bool) { + return property.deriveResolved(cftypes.List, []*Property{ + property.deriveResolved(cftypes.String, "us-east-1a"), + property.deriveResolved(cftypes.String, "us-east-1a"), + property.deriveResolved(cftypes.String, "us-east-1a"), + }), true +} + +func GetCidr(property *Property) (*Property, bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::Cidr"] + if refValue.IsNotList() || len(refValue.AsList()) != 3 { + return abortIntrinsic(property, "Fn::Cidr expects a list of 3 attributes") + } + + listParts := refValue.AsList() + ipaddressProp := listParts[0] + ipAddress := "10.0.0.0/2" + if 
ipaddressProp.IsString() { + ipAddress = ipaddressProp.AsString() + } + count := listParts[1].AsInt() + bit := listParts[2].AsInt() + + ranges, err := calculateCidrs(ipAddress, count, bit, property) + if err != nil { + return abortIntrinsic(property, "Could not calculate the required ranges") + } + return property.deriveResolved(cftypes.List, ranges), true +} + +func calculateCidrs(ipaddress string, count int, bit int, original *Property) ([]*Property, error) { + + var cidrProperties []*Property + + _, network, err := net.ParseCIDR(ipaddress) + if err != nil { + return nil, err + } + + for i := 0; i < count; i++ { + next, err := cidr.Subnet(network, bit, i) + if err != nil { + return nil, fmt.Errorf("failed to create cidr blocks") + } + + cidrProperties = append(cidrProperties, original.deriveResolved(cftypes.String, next.String())) + } + + return cidrProperties, nil +} diff --git a/pkg/scanners/cloudformation/parser/fn_builtin_test.go b/pkg/scanners/cloudformation/parser/fn_builtin_test.go new file mode 100644 index 000000000000..9a14029344a8 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_builtin_test.go @@ -0,0 +1,63 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_cidr_generator(t *testing.T) { + + original := &Property{ + ctx: nil, + name: "cidr", + comment: "", + Inner: PropertyInner{ + Type: "", + Value: nil, + }, + } + + ranges, err := calculateCidrs("10.1.0.0/16", 4, 4, original) + require.Nil(t, err) + require.Len(t, ranges, 4) + + results := make(map[int]string) + for i, property := range ranges { + value := property.AsString() + results[i] = value + } + + assert.Equal(t, "10.1.0.0/20", results[0]) + assert.Equal(t, "10.1.16.0/20", results[1]) + assert.Equal(t, "10.1.32.0/20", results[2]) + assert.Equal(t, "10.1.48.0/20", results[3]) +} + +func Test_cidr_generator_8_bits(t *testing.T) { + original := &Property{ + ctx: nil, + name: "cidr", + comment: "", + Inner: PropertyInner{ + Type: "", + Value: nil, + }, + } + + ranges, err := calculateCidrs("10.1.0.0/16", 4, 8, original) + require.Nil(t, err) + require.Len(t, ranges, 4) + + results := make(map[int]string) + for i, property := range ranges { + value := property.AsString() + results[i] = value + } + + assert.Equal(t, "10.1.0.0/24", results[0]) + assert.Equal(t, "10.1.1.0/24", results[1]) + assert.Equal(t, "10.1.2.0/24", results[2]) + assert.Equal(t, "10.1.3.0/24", results[3]) +} diff --git a/pkg/scanners/cloudformation/parser/fn_condition.go b/pkg/scanners/cloudformation/parser/fn_condition.go new file mode 100644 index 000000000000..8d5c923936ab --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_condition.go @@ -0,0 +1,21 @@ +package parser + +func ResolveCondition(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refProp := property.AsMap()["Condition"] + if refProp.IsNotString() { + return nil, false + } + refValue := refProp.AsString() + + for k, prop := range property.ctx.Conditions { + if k == refValue { + return prop.resolveValue() + } + } + + return nil, false +} diff --git a/pkg/scanners/cloudformation/parser/fn_condition_test.go b/pkg/scanners/cloudformation/parser/fn_condition_test.go new file mode 100644 index 000000000000..bb8f78e751e5 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_condition_test.go @@ -0,0 +1,98 @@ +package parser + +import ( + "testing" + + 
"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_condition_value(t *testing.T) { + + fctx := new(FileContext) + fctx.Conditions = map[string]Property{ + "SomeCondition": { + ctx: fctx, + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + ctx: fctx, + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "some val", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "some val", + }, + }, + }, + }, + }, + }, + }, + }, + "EnableVersioning": { + ctx: fctx, + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Condition": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "SomeCondition", + }, + }, + }, + }, + }, + } + + property := &Property{ + ctx: fctx, + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::If": { + ctx: fctx, + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "EnableVersioning", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "Enabled", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "Suspended", + }, + }, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.Equal(t, "Enabled", resolvedProperty.AsString()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_equals.go b/pkg/scanners/cloudformation/parser/fn_equals.go new file mode 100644 index 000000000000..b476342c9a8f --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_equals.go @@ -0,0 +1,21 @@ +package parser + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func ResolveEquals(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::Equals"].AsList() + + if len(refValue) != 2 { + return abortIntrinsic(property, "Fn::Equals should have exactly 2 values, returning original Property") + } + + propA, _ := refValue[0].resolveValue() + propB, _ := refValue[1].resolveValue() + return property.deriveResolved(cftypes.Bool, propA.EqualTo(propB.RawValue())), true +} diff --git a/pkg/scanners/cloudformation/parser/fn_equals_test.go b/pkg/scanners/cloudformation/parser/fn_equals_test.go new file mode 100644 index 000000000000..340ef7aaab4f --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_equals_test.go @@ -0,0 +1,180 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_equals_value(t *testing.T) { + + property := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := 
ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.True(t, resolvedProperty.IsTrue()) +} + +func Test_resolve_equals_value_to_false(t *testing.T) { + + property := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "bar", + }, + }, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.False(t, resolvedProperty.IsTrue()) +} + +func Test_resolve_equals_value_to_true_when_boolean(t *testing.T) { + + property := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.Bool, + Value: true, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.Bool, + Value: true, + }, + }, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + assert.True(t, resolvedProperty.IsTrue()) +} + +func Test_resolve_equals_value_when_one_is_a_reference(t *testing.T) { + + property := &Property{ + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "staging", + }, + }, + { + ctx: &FileContext{ + filepath: "", + Parameters: map[string]*Parameter{ + "Environment": { + inner: parameterInner{ + Type: "string", + Default: "staging", + }, + }, + }, + }, + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Ref": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "Environment", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.True(t, resolvedProperty.IsTrue()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_find_in_map.go b/pkg/scanners/cloudformation/parser/fn_find_in_map.go new file mode 100644 index 000000000000..3c9a0da29f7b --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_find_in_map.go @@ -0,0 +1,45 @@ +package parser + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func ResolveFindInMap(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::FindInMap"].AsList() + + if len(refValue) != 3 { + return abortIntrinsic(property, "Fn::FindInMap should have exactly 3 values, returning original Property") + } + + mapName := refValue[0].AsString() + topLevelKey := refValue[1].AsString() + secondaryLevelKey := refValue[2].AsString() + + if property.ctx == nil { + return abortIntrinsic(property, "the property does not have an attached context, returning original Property") + } + + m, ok := property.ctx.Mappings[mapName] + if !ok { + return abortIntrinsic(property, "could not find map %s, returning original Property") + } + + mapContents := 
m.(map[string]interface{}) + + k, ok := mapContents[topLevelKey] + if !ok { + return abortIntrinsic(property, "could not find %s in the %s map, returning original Property", topLevelKey, mapName) + } + + mapValues := k.(map[string]interface{}) + + if prop, ok := mapValues[secondaryLevelKey]; !ok { + return abortIntrinsic(property, "could not find a value for %s in %s, returning original Property", secondaryLevelKey, topLevelKey) + } else { + return property.deriveResolved(cftypes.String, prop), true + } +} diff --git a/pkg/scanners/cloudformation/parser/fn_find_in_map_test.go b/pkg/scanners/cloudformation/parser/fn_find_in_map_test.go new file mode 100644 index 000000000000..bbfa372b7121 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_find_in_map_test.go @@ -0,0 +1,100 @@ +package parser + +import ( + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "testing" +) + +func Test_resolve_find_in_map_value(t *testing.T) { + + source := `--- +Parameters: + Environment: + Type: String + Default: production +Mappings: + CacheNodeTypes: + production: + NodeType: cache.t2.large + test: + NodeType: cache.t2.small + dev: + NodeType: cache.t2.micro +Resources: + ElasticacheSecurityGroup: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: Elasticache Security Group + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 11211 + ToPort: 11211 + SourceSecurityGroupName: !Ref InstanceSecurityGroup + ElasticacheCluster: + Type: 'AWS::ElastiCache::CacheCluster' + Properties: + Engine: memcached + CacheNodeType: !FindInMap [ CacheNodeTypes, production, NodeType ] + NumCacheNodes: '1' + VpcSecurityGroupIds: + - !GetAtt + - ElasticacheSecurityGroup + - GroupId +` + ctx := createTestFileContext(t, source) + require.NotNil(t, ctx) + + testRes := ctx.GetResourceByLogicalID("ElasticacheCluster") + assert.NotNil(t, testRes) + + nodeTypeProp := testRes.GetStringProperty("CacheNodeType", "") + assert.Equal(t, "cache.t2.large", nodeTypeProp.Value()) +} + +func Test_resolve_find_in_map_with_nested_intrinsic_value(t *testing.T) { + + source := `--- +Parameters: + Environment: + Type: String + Default: dev +Mappings: + CacheNodeTypes: + production: + NodeType: cache.t2.large + test: + NodeType: cache.t2.small + dev: + NodeType: cache.t2.micro +Resources: + ElasticacheSecurityGroup: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: Elasticache Security Group + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 11211 + ToPort: 11211 + SourceSecurityGroupName: !Ref InstanceSecurityGroup + ElasticacheCluster: + Type: 'AWS::ElastiCache::CacheCluster' + Properties: + Engine: memcached + CacheNodeType: !FindInMap [ CacheNodeTypes, !Ref Environment, NodeType ] + NumCacheNodes: '1' + VpcSecurityGroupIds: + - !GetAtt + - ElasticacheSecurityGroup + - GroupId +` + ctx := createTestFileContext(t, source) + require.NotNil(t, ctx) + + testRes := ctx.GetResourceByLogicalID("ElasticacheCluster") + assert.NotNil(t, testRes) + + nodeTypeProp := testRes.GetStringProperty("CacheNodeType", "") + assert.Equal(t, "cache.t2.micro", nodeTypeProp.Value()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_get_attr.go b/pkg/scanners/cloudformation/parser/fn_get_attr.go new file mode 100644 index 000000000000..53a7891e0252 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_get_attr.go @@ -0,0 +1,46 @@ +package parser + +import ( + "strings" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func 
ResolveGetAtt(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValueProp := property.AsMap()["Fn::GetAtt"] + + var refValue []string + + if refValueProp.IsString() { + refValue = strings.Split(refValueProp.AsString(), ".") + } + + if refValueProp.IsList() { + for _, p := range refValueProp.AsList() { + refValue = append(refValue, p.AsString()) + } + } + + if len(refValue) != 2 { + return abortIntrinsic(property, "Fn::GetAtt should have exactly 2 values, returning original Property") + } + + logicalId := refValue[0] + attribute := refValue[1] + + referencedResource := property.ctx.GetResourceByLogicalID(logicalId) + if referencedResource == nil || referencedResource.IsNil() { + return property.deriveResolved(cftypes.String, ""), true + } + + referencedProperty := referencedResource.GetProperty(attribute) + if referencedProperty.IsNil() { + return property.deriveResolved(cftypes.String, referencedResource.ID()), true + } + + return property.deriveResolved(referencedProperty.Type(), referencedProperty.RawValue()), true +} diff --git a/pkg/scanners/cloudformation/parser/fn_get_attr_test.go b/pkg/scanners/cloudformation/parser/fn_get_attr_test.go new file mode 100644 index 000000000000..ebd52da035b0 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_get_attr_test.go @@ -0,0 +1,50 @@ +package parser + +import ( + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "testing" +) + +func Test_resolve_get_attr_value(t *testing.T) { + + source := `--- +Resources: + ElasticacheSecurityGroup: + Type: 'AWS::EC2::SecurityGroup' + Properties: + GroupDescription: Elasticache Security Group + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 11211 + ToPort: 11211 + SourceSecurityGroupName: !Ref InstanceSecurityGroup + ElasticacheCluster: + Type: 'AWS::ElastiCache::CacheCluster' + Properties: + Engine: memcached + CacheNodeType: cache.t2.micro + NumCacheNodes: '1' + VpcSecurityGroupIds: + - !GetAtt + - ElasticacheSecurityGroup + - GroupId +` + ctx := createTestFileContext(t, source) + require.NotNil(t, ctx) + + testRes := ctx.GetResourceByLogicalID("ElasticacheCluster") + assert.NotNil(t, testRes) + + sgProp := testRes.GetProperty("VpcSecurityGroupIds") + require.True(t, sgProp.IsNotNil()) + require.True(t, sgProp.IsList()) + + for _, property := range sgProp.AsList() { + resolved, success := ResolveIntrinsicFunc(property) + require.True(t, success) + assert.True(t, resolved.IsNotNil()) + } + +} diff --git a/pkg/scanners/cloudformation/parser/fn_if.go b/pkg/scanners/cloudformation/parser/fn_if.go new file mode 100644 index 000000000000..d444952ff38a --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_if.go @@ -0,0 +1,40 @@ +package parser + +func ResolveIf(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::If"].AsList() + + if len(refValue) != 3 { + return abortIntrinsic(property, "Fn::If should have exactly 3 values, returning original Property") + } + + condition, _ := refValue[0].resolveValue() + trueState, _ := refValue[1].resolveValue() + falseState, _ := refValue[2].resolveValue() + + conditionMet := false + + con, _ := condition.resolveValue() + if con.IsBool() { + conditionMet = con.AsBool() + } else if property.ctx.Conditions != nil && + condition.IsString() { + + condition := property.ctx.Conditions[condition.AsString()] + if condition.isFunction() { + con, _ := 
condition.resolveValue() + if con.IsBool() { + conditionMet = con.AsBool() + } + } + } + + if conditionMet { + return trueState, true + } else { + return falseState, true + } +} diff --git a/pkg/scanners/cloudformation/parser/fn_if_test.go b/pkg/scanners/cloudformation/parser/fn_if_test.go new file mode 100644 index 000000000000..d7f5fbf8e160 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_if_test.go @@ -0,0 +1,56 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_if_value(t *testing.T) { + + property := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::If": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.Bool, + Value: true, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "bar", + }, + }, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.Equal(t, "foo", resolvedProperty.String()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_join.go b/pkg/scanners/cloudformation/parser/fn_join.go new file mode 100644 index 000000000000..961248a997f2 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_join.go @@ -0,0 +1,34 @@ +package parser + +import ( + "strings" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func ResolveJoin(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::Join"].AsList() + + if len(refValue) != 2 { + return abortIntrinsic(property, "Fn::Join should have exactly 2 values, returning original Property") + } + + joiner := refValue[0].AsString() + items := refValue[1].AsList() + + var itemValues []string + for _, item := range items { + resolved, success := item.resolveValue() + if success { + itemValues = append(itemValues, resolved.AsString()) + } + } + + joined := strings.Join(itemValues, joiner) + + return property.deriveResolved(cftypes.String, joined), true +} diff --git a/pkg/scanners/cloudformation/parser/fn_join_test.go b/pkg/scanners/cloudformation/parser/fn_join_test.go new file mode 100644 index 000000000000..7dd3bf8746d7 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_join_test.go @@ -0,0 +1,152 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_join_value(t *testing.T) { + + property := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Join": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "::", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: 
cftypes.String, + Value: "s3", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "part1", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "part2", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.Equal(t, "s3::part1::part2", resolvedProperty.AsString()) +} + +func Test_resolve_join_value_with_reference(t *testing.T) { + + property := &Property{ + ctx: &FileContext{ + filepath: "", + Parameters: map[string]*Parameter{ + "Environment": { + inner: parameterInner{ + Type: "string", + Default: "staging", + }, + }, + }, + }, + name: "EnvironmentBucket", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Join": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "::", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "s3", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "part1", + }, + }, + { + ctx: &FileContext{ + filepath: "", + Parameters: map[string]*Parameter{ + "Environment": { + inner: parameterInner{ + Type: "string", + Default: "staging", + }, + }, + }, + }, + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Ref": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "Environment", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.Equal(t, "s3::part1::staging", resolvedProperty.AsString()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_length.go b/pkg/scanners/cloudformation/parser/fn_length.go new file mode 100644 index 000000000000..664bc933c158 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_length.go @@ -0,0 +1,24 @@ +package parser + +import "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + +func ResolveLength(property *Property) (*Property, bool) { + if !property.isFunction() { + return property, true + } + + val := property.AsMap()["Fn::Length"] + if val.IsList() { + return property.deriveResolved(cftypes.Int, val.Len()), true + } else if val.IsMap() { + resolved, _ := val.resolveValue() + + if resolved.IsList() { + return property.deriveResolved(cftypes.Int, resolved.Len()), true + } + return resolved, false + } + + return property, false + +} diff --git a/pkg/scanners/cloudformation/parser/fn_length_test.go b/pkg/scanners/cloudformation/parser/fn_length_test.go new file mode 100644 index 000000000000..af9d842dd339 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_length_test.go @@ -0,0 +1,99 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + "github.com/stretchr/testify/require" +) + +func Test_ResolveLength_WhenPropIsArray(t *testing.T) { + prop := &Property{ + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Length": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.Int, + Value: 1, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "IntParameter", + }, + }, + }, + }, + }, + }, + }, + } + resolved, ok := ResolveIntrinsicFunc(prop) + require.True(t, ok) + 
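+	// the literal two-element list should resolve straight to its length
+	// via the cftypes.List branch of ResolveLength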
require.True(t, resolved.IsInt())
+	require.Equal(t, 2, resolved.AsInt())
+}
+
+func Test_ResolveLength_WhenPropIsIntrinsicFunction(t *testing.T) {
+	fctx := &FileContext{
+		Parameters: map[string]*Parameter{
+			"SomeParameter": {
+				inner: parameterInner{
+					Type:    "string",
+					Default: "a|b|c|d",
+				},
+			},
+		},
+	}
+	prop := &Property{
+		Inner: PropertyInner{
+			Type: cftypes.Map,
+			Value: map[string]*Property{
+				"Fn::Length": {
+					Inner: PropertyInner{
+						Type: cftypes.Map,
+						Value: map[string]*Property{
+							"Fn::Split": {
+								Inner: PropertyInner{
+									Type: cftypes.List,
+									Value: []*Property{
+										{
+											Inner: PropertyInner{
+												Type:  cftypes.String,
+												Value: "|",
+											},
+										},
+										{
+											ctx: fctx,
+											Inner: PropertyInner{
+												Type: cftypes.Map,
+												Value: map[string]*Property{
+													"Ref": {
+														Inner: PropertyInner{
+															Type:  cftypes.String,
+															Value: "SomeParameter",
+														},
+													},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	resolved, ok := ResolveIntrinsicFunc(prop)
+	require.True(t, ok)
+	require.True(t, resolved.IsInt())
+	require.Equal(t, 4, resolved.AsInt())
+}
diff --git a/pkg/scanners/cloudformation/parser/fn_not.go b/pkg/scanners/cloudformation/parser/fn_not.go
new file mode 100644
index 000000000000..a61390d26cf3
--- /dev/null
+++ b/pkg/scanners/cloudformation/parser/fn_not.go
@@ -0,0 +1,23 @@
+package parser
+
+import "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes"
+
+func ResolveNot(property *Property) (resolved *Property, success bool) {
+	if !property.isFunction() {
+		return property, true
+	}
+
+	refValue := property.AsMap()["Fn::Not"].AsList()
+
+	if len(refValue) != 1 {
+		return abortIntrinsic(property, "Fn::Not should have exactly 1 value, returning original Property")
+	}
+
+	funcToInvert, _ := refValue[0].resolveValue()
+
+	if funcToInvert.IsBool() {
+		return property.deriveResolved(cftypes.Bool, !funcToInvert.AsBool()), true
+	}
+
+	return property, false
+}
diff --git a/pkg/scanners/cloudformation/parser/fn_not_test.go b/pkg/scanners/cloudformation/parser/fn_not_test.go
new file mode 100644
index 000000000000..9ef79b9b8b78
--- /dev/null
+++ b/pkg/scanners/cloudformation/parser/fn_not_test.go
@@ -0,0 +1,124 @@
+package parser
+
+import (
+	"testing"
+
+	"github.com/aquasecurity/trivy/pkg/types"
+
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func Test_resolve_not_value(t *testing.T) {
+	property1 := &Property{
+		ctx:  &FileContext{},
+		name: "BucketName",
+		rng:  types.NewRange("testfile", 1, 1, "", nil),
+		Inner: PropertyInner{
+			Type: cftypes.Map,
+			Value: map[string]*Property{
+				"Fn::Equals": {
+					Inner: PropertyInner{
+						Type: cftypes.List,
+						Value: []*Property{
+							{
+								Inner: PropertyInner{
+									Type:  cftypes.String,
+									Value: "foo",
+								},
+							},
+							{
+								Inner: PropertyInner{
+									Type:  cftypes.String,
+									Value: "bar",
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	notProperty := &Property{
+		ctx:  &FileContext{},
+		name: "BucketName",
+		rng:  types.NewRange("testfile", 1, 1, "", nil),
+		Inner: PropertyInner{
+			Type: cftypes.Map,
+			Value: map[string]*Property{
+				"Fn::Not": {
+					Inner: PropertyInner{
+						Type: cftypes.List,
+						Value: []*Property{
+							property1,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	resolvedProperty, success := ResolveIntrinsicFunc(notProperty)
+	require.True(t, success)
+
+	assert.True(t, resolvedProperty.IsTrue())
+}
+
+func Test_resolve_not_value_when_true(t *testing.T) {
+	property1 := &Property{
+		ctx:  &FileContext{},
+		name:
"BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }, + } + + notProperty := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Not": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + property1, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(notProperty) + require.True(t, success) + + assert.False(t, resolvedProperty.IsTrue()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_or.go b/pkg/scanners/cloudformation/parser/fn_or.go new file mode 100644 index 000000000000..0da432b350bf --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_or.go @@ -0,0 +1,39 @@ +package parser + +import "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + +func ResolveOr(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::Or"].AsList() + + if len(refValue) < 2 { + return abortIntrinsic(property, "Fn::Or should have at least 2 values, returning original Property") + } + + results := make([]bool, len(refValue)) + for i := 0; i < len(refValue); i++ { + + r := false + if refValue[i].IsBool() { + r = refValue[i].AsBool() + } + + results[i] = r + } + + atleastOne := atleastOne(results) + return property.deriveResolved(cftypes.Bool, atleastOne), true +} + +func atleastOne(a []bool) bool { + for _, b := range a { + if b { + return true + } + } + + return false +} diff --git a/pkg/scanners/cloudformation/parser/fn_or_test.go b/pkg/scanners/cloudformation/parser/fn_or_test.go new file mode 100644 index 000000000000..0fa1a222f7ca --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_or_test.go @@ -0,0 +1,184 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_or_value(t *testing.T) { + property1 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "bar", + }, + }, + }, + }, + }, + }, + }, + } + + property2 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }, + } + orProperty := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 
1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Or": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + property1, + property2, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(orProperty) + require.True(t, success) + + assert.True(t, resolvedProperty.IsTrue()) +} + +func Test_resolve_or_value_when_neither_true(t *testing.T) { + property1 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "bar", + }, + }, + }, + }, + }, + }, + }, + } + + property2 := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "bar", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }, + } + orProperty := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Or": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + property1, + property2, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(orProperty) + require.True(t, success) + + assert.False(t, resolvedProperty.IsTrue()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_ref.go b/pkg/scanners/cloudformation/parser/fn_ref.go new file mode 100644 index 000000000000..d2f2ed6eeca4 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_ref.go @@ -0,0 +1,54 @@ +package parser + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func ResolveReference(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refProp := property.AsMap()["Ref"] + if refProp.IsNotString() { + return property, false + } + refValue := refProp.AsString() + + if pseudo, ok := pseudoParameters[refValue]; ok { + return property.deriveResolved(pseudo.t, pseudo.val), true + } + + if property.ctx == nil { + return property, false + } + + var param *Parameter + for k := range property.ctx.Parameters { + if k == refValue { + param = property.ctx.Parameters[k] + resolvedType := param.Type() + + switch param.Default().(type) { + case bool: + resolvedType = cftypes.Bool + case string: + resolvedType = cftypes.String + case int: + resolvedType = cftypes.Int + } + + resolved = property.deriveResolved(resolvedType, param.Default()) + return resolved, true + } + } + + for k := range property.ctx.Resources { + if k == refValue { + res := property.ctx.Resources[k] + resolved = property.deriveResolved(cftypes.String, res.ID()) + break + } + } + return resolved, true +} diff --git a/pkg/scanners/cloudformation/parser/fn_ref_test.go b/pkg/scanners/cloudformation/parser/fn_ref_test.go new file mode 100644 index 000000000000..bdde45857ed5 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_ref_test.go @@ -0,0 +1,89 @@ +package 
parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_referenced_value(t *testing.T) { + + property := &Property{ + ctx: &FileContext{ + filepath: "", + Parameters: map[string]*Parameter{ + "BucketName": { + inner: parameterInner{ + Type: "string", + Default: "someBucketName", + }, + }, + }, + }, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Ref": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "BucketName", + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + + assert.Equal(t, "someBucketName", resolvedProperty.AsString()) +} + +func Test_property_value_correct_when_not_reference(t *testing.T) { + + property := &Property{ + ctx: &FileContext{ + filepath: "", + }, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.String, + Value: "someBucketName", + }, + } + + // should fail when trying to resolve function that is not in fact a function + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.False(t, success) + + assert.Equal(t, "someBucketName", resolvedProperty.AsString()) +} + +func Test_resolve_ref_with_pseudo_value(t *testing.T) { + source := `--- +Resources: + TestInstance: + Type: AWS::EC2::Instance + Properties: + ImageId: "ami-79fd7eee" + KeyName: !Join [":", ["aws", !Ref AWS::Region, "key" ]] +` + ctx := createTestFileContext(t, source) + require.NotNil(t, ctx) + + testRes := ctx.GetResourceByLogicalID("TestInstance") + require.NotNil(t, testRes) + + keyNameProp := testRes.GetProperty("KeyName") + require.NotNil(t, keyNameProp) + + assert.Equal(t, "aws:eu-west-1:key", keyNameProp.AsString()) +} diff --git a/pkg/scanners/cloudformation/parser/fn_select.go b/pkg/scanners/cloudformation/parser/fn_select.go new file mode 100644 index 000000000000..3289004847c8 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_select.go @@ -0,0 +1,41 @@ +package parser + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func ResolveSelect(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::Select"].AsList() + + if len(refValue) != 2 { + return abortIntrinsic(property, "Fn::Select should have exactly 2 values, returning original Property") + } + + index := refValue[0] + list := refValue[1] + + if index.IsNotInt() { + if index.IsConvertableTo(cftypes.Int) { + // + index = index.ConvertTo(cftypes.Int) + } else { + return abortIntrinsic(property, "index on property [%s] should be an int, returning original Property", property.name) + } + } + + if list.IsNotList() { + return abortIntrinsic(property, "list on property [%s] should be a list, returning original Property", property.name) + } + + listItems := list.AsList() + + if len(listItems) <= index.AsInt() { + return nil, false + } + + return listItems[index.AsInt()], true +} diff --git a/pkg/scanners/cloudformation/parser/fn_select_test.go b/pkg/scanners/cloudformation/parser/fn_select_test.go new file mode 100644 index 000000000000..92b634457b2d --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_select_test.go @@ 
-0,0 +1,77 @@
+package parser
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func Test_resolve_select_value(t *testing.T) {
+
+	source := `---
+Parameters:
+  EngineIndex:
+    Type: Integer
+    Default: 1
+Resources:
+  ElasticacheCluster:
+    Type: 'AWS::ElastiCache::CacheCluster'
+    Properties:
+      Engine: !Select [ !Ref EngineIndex, [memcached, redis ]]
+      CacheNodeType: cache.t2.micro
+      NumCacheNodes: '1'
+`
+	ctx := createTestFileContext(t, source)
+	require.NotNil(t, ctx)
+
+	testRes := ctx.GetResourceByLogicalID("ElasticacheCluster")
+	assert.NotNil(t, testRes)
+
+	engineProp := testRes.GetProperty("Engine")
+	require.True(t, engineProp.IsNotNil())
+	require.True(t, engineProp.IsString())
+
+	require.Equal(t, "redis", engineProp.AsString())
+}
+
+func Test_SelectPseudoListParam(t *testing.T) {
+	src := `---
+Resources:
+  myASGrpOne:
+    Type: AWS::AutoScaling::AutoScalingGroup
+    Version: "2009-05-15"
+    Properties:
+      AvailabilityZones:
+        - "us-east-1a"
+      LaunchConfigurationName:
+        Ref: MyLaunchConfiguration
+      MinSize: "0"
+      MaxSize: "0"
+      NotificationConfigurations:
+        - TopicARN:
+            Fn::Select:
+              - "1"
+              - Ref: AWS::NotificationARNs
+          NotificationTypes:
+            - autoscaling:EC2_INSTANCE_LAUNCH
+            - autoscaling:EC2_INSTANCE_LAUNCH_ERROR
+
+`
+
+	ctx := createTestFileContext(t, src)
+	require.NotNil(t, ctx)
+
+	resource := ctx.GetResourceByLogicalID("myASGrpOne")
+	require.NotNil(t, resource)
+
+	notification := resource.GetProperty("NotificationConfigurations")
+	require.True(t, notification.IsNotNil())
+	require.True(t, notification.IsList())
+	first := notification.AsList()[0]
+	require.True(t, first.IsMap())
+	topic, ok := first.AsMap()["TopicARN"]
+	require.True(t, ok)
+	require.Equal(t, "notification::arn::2", topic.AsString())
+
+}
diff --git a/pkg/scanners/cloudformation/parser/fn_split.go b/pkg/scanners/cloudformation/parser/fn_split.go
new file mode 100644
index 000000000000..6facab992ea7
--- /dev/null
+++ b/pkg/scanners/cloudformation/parser/fn_split.go
@@ -0,0 +1,44 @@
+package parser
+
+import (
+	"strings"
+
+	"github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes"
+)
+
+func ResolveSplit(property *Property) (resolved *Property, success bool) {
+	if !property.isFunction() {
+		return property, true
+	}
+
+	refValue := property.AsMap()["Fn::Split"].AsList()
+
+	if len(refValue) != 2 {
+		return abortIntrinsic(property, "Fn::Split should have exactly 2 values, returning original Property")
+	}
+
+	delimiterProp := refValue[0]
+	splitProp := refValue[1]
+
+	if !splitProp.IsString() || !delimiterProp.IsString() {
+		return abortIntrinsic(property, "Fn::Split requires two strings as input, returning original Property")
+	}
+
+	propertyList := createPropertyList(splitProp, delimiterProp, property)
+
+	return property.deriveResolved(cftypes.List, propertyList), true
+}
+
+func createPropertyList(splitProp *Property, delimiterProp *Property, parent *Property) []*Property {
+
+	splitString := splitProp.AsString()
+	delimiter := delimiterProp.AsString()
+
+	splits := strings.Split(splitString, delimiter)
+	var props []*Property
+	for _, split := range splits {
+		props = append(props, parent.deriveResolved(cftypes.String, split))
+	}
+	return props
+}
diff --git a/pkg/scanners/cloudformation/parser/fn_split_test.go b/pkg/scanners/cloudformation/parser/fn_split_test.go
new file mode 100644
index 000000000000..0e038c440219
--- /dev/null
+++ b/pkg/scanners/cloudformation/parser/fn_split_test.go
@@ -0,0 +1,56 @@
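+// Note: each split segment becomes its own Property derived from the parent
+// via deriveResolved, so range metadata is preserved per list element.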
+package parser + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "testing" +) + +/* + Fn::Split: ["::", "s3::bucket::to::split"] + +*/ + +func Test_resolve_split_value(t *testing.T) { + + property := &Property{ + ctx: &FileContext{}, + name: "BucketName", + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Split": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "::", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "s3::bucket::to::split", + }, + }, + }, + }, + }, + }, + }, + } + + resolvedProperty, success := ResolveIntrinsicFunc(property) + require.True(t, success) + assert.True(t, resolvedProperty.IsNotNil()) + assert.True(t, resolvedProperty.IsList()) + listContents := resolvedProperty.AsList() + assert.Len(t, listContents, 4) + +} diff --git a/pkg/scanners/cloudformation/parser/fn_sub.go b/pkg/scanners/cloudformation/parser/fn_sub.go new file mode 100644 index 000000000000..81e8401bcfa7 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_sub.go @@ -0,0 +1,71 @@ +package parser + +import ( + "fmt" + "strconv" + "strings" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func ResolveSub(property *Property) (resolved *Property, success bool) { + if !property.isFunction() { + return property, true + } + + refValue := property.AsMap()["Fn::Sub"] + + if refValue.IsString() { + return resolveStringSub(refValue, property), true + } + + if refValue.IsList() { + return resolveMapSub(refValue, property) + } + + return property, false +} + +func resolveMapSub(refValue *Property, original *Property) (*Property, bool) { + refValues := refValue.AsList() + if len(refValues) != 2 { + return abortIntrinsic(original, "Fn::Sub with list expects 2 values, returning original property") + } + + workingString := refValues[0].AsString() + components := refValues[1].AsMap() + + for k, v := range components { + replacement := "[failed to resolve]" + switch v.Type() { + case cftypes.Map: + resolved, _ := ResolveIntrinsicFunc(v) + replacement = resolved.AsString() + case cftypes.String: + replacement = v.AsString() + case cftypes.Int: + replacement = strconv.Itoa(v.AsInt()) + case cftypes.Bool: + replacement = fmt.Sprintf("%v", v.AsBool()) + case cftypes.List: + var parts []string + for _, p := range v.AsList() { + parts = append(parts, p.String()) + } + replacement = fmt.Sprintf("[%s]", strings.Join(parts, ", ")) + } + workingString = strings.ReplaceAll(workingString, fmt.Sprintf("${%s}", k), replacement) + } + + return original.deriveResolved(cftypes.String, workingString), true +} + +func resolveStringSub(refValue *Property, original *Property) *Property { + workingString := refValue.AsString() + + for k, param := range pseudoParameters { + workingString = strings.ReplaceAll(workingString, fmt.Sprintf("${%s}", k), fmt.Sprintf("%v", param.getRawValue())) + } + + return original.deriveResolved(cftypes.String, workingString) +} diff --git a/pkg/scanners/cloudformation/parser/fn_sub_test.go b/pkg/scanners/cloudformation/parser/fn_sub_test.go new file mode 100644 index 000000000000..5ab98a59692b --- /dev/null +++ b/pkg/scanners/cloudformation/parser/fn_sub_test.go @@ -0,0 +1,103 @@ +package parser + 
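+
+// These tests exercise ResolveSub: the plain-string form substitutes
+// ${AWS::*} pseudo-parameter placeholders via the pseudoParameters table
+// (see resolveStringSub), while the two-element list form resolves each
+// entry of the supplied map, including nested Refs, before interpolating
+// it into the template string (see resolveMapSub).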
+import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_resolve_sub_value(t *testing.T) { + source := `--- +Resources: + TestInstance: + Type: AWS::EC2::Instance + Properties: + ImageId: "ami-79fd7eee" + KeyName: "testkey" + UserData: + !Sub | + #!/bin/bash -xe + yum update -y aws-cfn-bootstrap + /opt/aws/bin/cfn-init -v --stack ${AWS::StackName} --resource LaunchConfig --configsets wordpress_install --region ${AWS::Region} + /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource WebServerGroup --region ${AWS::Region} +` + ctx := createTestFileContext(t, source) + require.NotNil(t, ctx) + + testRes := ctx.GetResourceByLogicalID("TestInstance") + require.NotNil(t, testRes) + + userDataProp := testRes.GetProperty("UserData") + require.NotNil(t, userDataProp) + + assert.Equal(t, "#!/bin/bash -xe\nyum update -y aws-cfn-bootstrap\n/opt/aws/bin/cfn-init -v --stack cfsec-test-stack --resource LaunchConfig --configsets wordpress_install --region eu-west-1\n/opt/aws/bin/cfn-signal -e $? --stack cfsec-test-stack --resource WebServerGroup --region eu-west-1\n", userDataProp.AsString()) +} + +func Test_resolve_sub_value_with_base64(t *testing.T) { + + source := `--- +Resources: + TestInstance: + Type: AWS::EC2::Instance + Properties: + ImageId: "ami-79fd7eee" + KeyName: "testkey" + UserData: + Fn::Base64: + !Sub | + #!/bin/bash -xe + yum update -y aws-cfn-bootstrap + /opt/aws/bin/cfn-init -v --stack ${AWS::StackName} --resource LaunchConfig --configsets wordpress_install --region ${AWS::Region} + /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource WebServerGroup --region ${AWS::Region}` + ctx := createTestFileContext(t, source) + require.NotNil(t, ctx) + + testRes := ctx.GetResourceByLogicalID("TestInstance") + require.NotNil(t, testRes) + + userDataProp := testRes.GetProperty("UserData") + require.NotNil(t, userDataProp) + + assert.Equal(t, "IyEvYmluL2Jhc2ggLXhlCnl1bSB1cGRhdGUgLXkgYXdzLWNmbi1ib290c3RyYXAKL29wdC9hd3MvYmluL2Nmbi1pbml0IC12IC0tc3RhY2sgY2ZzZWMtdGVzdC1zdGFjayAtLXJlc291cmNlIExhdW5jaENvbmZpZyAtLWNvbmZpZ3NldHMgd29yZHByZXNzX2luc3RhbGwgLS1yZWdpb24gZXUtd2VzdC0xCi9vcHQvYXdzL2Jpbi9jZm4tc2lnbmFsIC1lICQ/IC0tc3RhY2sgY2ZzZWMtdGVzdC1zdGFjayAtLXJlc291cmNlIFdlYlNlcnZlckdyb3VwIC0tcmVnaW9uIGV1LXdlc3QtMQ==", userDataProp.AsString()) +} + +func Test_resolve_sub_value_with_map(t *testing.T) { + + source := `--- +Parameters: + RootDomainName: + Type: String + Default: somedomain.com +Resources: + TestDistribution: + Type: AWS::CloudFront::Distribution + Properties: + DistributionConfig: + DefaultCacheBehavior: + TargetOriginId: target + ViewerProtocolPolicy: https-only + Enabled: true + Origins: + - DomainName: + !Sub + - www.${Domain} + - { Domain: !Ref RootDomainName } + Id: somedomain1 + + +` + ctx := createTestFileContext(t, source) + require.NotNil(t, ctx) + + testRes := ctx.GetResourceByLogicalID("TestDistribution") + require.NotNil(t, testRes) + + originsList := testRes.GetProperty("DistributionConfig.Origins") + + domainNameProp := originsList.AsList()[0].GetProperty("DomainName") + require.NotNil(t, domainNameProp) + + assert.Equal(t, "www.somedomain.com", domainNameProp.AsString()) + +} diff --git a/pkg/scanners/cloudformation/parser/intrinsics.go b/pkg/scanners/cloudformation/parser/intrinsics.go new file mode 100644 index 000000000000..d455fd3d5c6e --- /dev/null +++ b/pkg/scanners/cloudformation/parser/intrinsics.go @@ -0,0 +1,101 @@ +package parser + +import ( + "fmt" + "strings" + + 
"gopkg.in/yaml.v3" +) + +var intrinsicFuncs map[string]func(property *Property) (*Property, bool) + +func init() { + intrinsicFuncs = map[string]func(property *Property) (*Property, bool){ + "Ref": ResolveReference, + "Fn::Base64": ResolveBase64, + "Fn::Equals": ResolveEquals, + "Fn::Join": ResolveJoin, + "Fn::Split": ResolveSplit, + "Fn::Sub": ResolveSub, + "Fn::FindInMap": ResolveFindInMap, + "Fn::Select": ResolveSelect, + "Fn::GetAtt": ResolveGetAtt, + "Fn::GetAZs": GetAzs, + "Fn::Cidr": GetCidr, + "Fn::ImportValue": ImportPlaceholder, + "Fn::If": ResolveIf, + "Fn::And": ResolveAnd, + "Fn::Or": ResolveOr, + "Fn::Not": ResolveNot, + "Fn::Length": ResolveLength, + "Condition": ResolveCondition, + } +} + +func ImportPlaceholder(property *Property) (*Property, bool) { + property.unresolved = true + return property, false +} + +func PassthroughResolution(property *Property) (*Property, bool) { + return property, false +} + +func IsIntrinsicFunc(node *yaml.Node) bool { + if node == nil || node.Tag == "" { + return false + } + + nodeTag := strings.TrimPrefix(node.Tag, "!") + if nodeTag != "Ref" && nodeTag != "Condition" { + nodeTag = fmt.Sprintf("Fn::%s", nodeTag) + } + for tag := range intrinsicFuncs { + + if nodeTag == tag { + return true + } + } + return false +} + +func IsIntrinsic(key string) bool { + for tag := range intrinsicFuncs { + if tag == key { + return true + } + } + return false +} + +func ResolveIntrinsicFunc(property *Property) (*Property, bool) { + if property == nil { + return nil, false + } + if !property.IsMap() { + return property, false + } + + for funcName := range property.AsMap() { + if fn := intrinsicFuncs[funcName]; fn != nil { + // + return fn(property) + } + } + return property, false +} + +func getIntrinsicTag(tag string) string { + tag = strings.TrimPrefix(tag, "!") + switch tag { + case "Ref", "Contains": + return tag + default: + return fmt.Sprintf("Fn::%s", tag) + } +} + +func abortIntrinsic(property *Property, msg string, components ...string) (*Property, bool) { + // + return property, false +} diff --git a/pkg/scanners/cloudformation/parser/intrinsics_test.go b/pkg/scanners/cloudformation/parser/intrinsics_test.go new file mode 100644 index 000000000000..a69e04dd0fba --- /dev/null +++ b/pkg/scanners/cloudformation/parser/intrinsics_test.go @@ -0,0 +1,45 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v3" +) + +func Test_is_intrinsic_returns_expected(t *testing.T) { + + testCases := []struct { + nodeTag string + expectedResult bool + }{ + { + nodeTag: "!Ref", + expectedResult: true, + }, + { + nodeTag: "!Join", + expectedResult: true, + }, + { + nodeTag: "!Sub", + expectedResult: true, + }, + { + nodeTag: "!Equals", + expectedResult: true, + }, + { + nodeTag: "!Equal", + expectedResult: false, + }, + } + + for _, tt := range testCases { + n := &yaml.Node{ + Tag: tt.nodeTag, + } + assert.Equal(t, tt.expectedResult, IsIntrinsicFunc(n)) + } + +} diff --git a/pkg/scanners/cloudformation/parser/parameter.go b/pkg/scanners/cloudformation/parser/parameter.go new file mode 100644 index 000000000000..493dea756168 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/parameter.go @@ -0,0 +1,129 @@ +package parser + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/liamg/jfather" + "gopkg.in/yaml.v3" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +type Parameter struct { + inner parameterInner +} + +type parameterInner struct 
{ + Type string `yaml:"Type"` + Default interface{} `yaml:"Default"` +} + +func (p *Parameter) UnmarshalYAML(node *yaml.Node) error { + return node.Decode(&p.inner) +} + +func (p *Parameter) UnmarshalJSONWithMetadata(node jfather.Node) error { + return node.Decode(&p.inner) +} + +func (p *Parameter) Type() cftypes.CfType { + switch p.inner.Type { + case "Boolean": + return cftypes.Bool + case "String": + return cftypes.String + case "Integer": + return cftypes.Int + default: + return cftypes.String + } +} + +func (p *Parameter) Default() interface{} { + return p.inner.Default +} + +func (p *Parameter) UpdateDefault(inVal interface{}) { + passedVal := inVal.(string) + + switch p.inner.Type { + case "Boolean": + p.inner.Default, _ = strconv.ParseBool(passedVal) + case "String": + p.inner.Default = passedVal + case "Integer": + p.inner.Default, _ = strconv.Atoi(passedVal) + default: + p.inner.Default = passedVal + } +} + +type Parameters map[string]any + +func (p *Parameters) Merge(other Parameters) { + for k, v := range other { + (*p)[k] = v + } +} + +func (p *Parameters) UnmarshalJSON(data []byte) error { + (*p) = make(Parameters) + + if len(data) == 0 { + return nil + } + + switch { + case data[0] == '{' && data[len(data)-1] == '}': // object + // CodePipeline like format + var params struct { + Params map[string]any `json:"Parameters"` + } + + if err := json.Unmarshal(data, ¶ms); err != nil { + return err + } + + (*p) = params.Params + case data[0] == '[' && data[len(data)-1] == ']': // array + { + // Original format + var params []string + + if err := json.Unmarshal(data, ¶ms); err == nil { + for _, param := range params { + parts := strings.Split(param, "=") + if len(parts) != 2 { + return fmt.Errorf("invalid key-value parameter: %q", param) + } + (*p)[parts[0]] = parts[1] + } + return nil + } + + // CloudFormation like format + var cfparams []struct { + ParameterKey string `json:"ParameterKey"` + ParameterValue string `json:"ParameterValue"` + } + + d := json.NewDecoder(bytes.NewReader(data)) + d.DisallowUnknownFields() + if err := d.Decode(&cfparams); err != nil { + return err + } + + for _, param := range cfparams { + (*p)[param.ParameterKey] = param.ParameterValue + } + } + default: + return fmt.Errorf("unsupported parameters format") + } + + return nil +} diff --git a/pkg/scanners/cloudformation/parser/parameters_test.go b/pkg/scanners/cloudformation/parser/parameters_test.go new file mode 100644 index 000000000000..703f07f5fe12 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/parameters_test.go @@ -0,0 +1,89 @@ +package parser + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParameters_UnmarshalJSON(t *testing.T) { + tests := []struct { + name string + source string + expected Parameters + wantErr bool + }{ + { + name: "original format", + source: `[ + "Key1=Value1", + "Key2=Value2" + ]`, + expected: map[string]any{ + "Key1": "Value1", + "Key2": "Value2", + }, + }, + { + name: "CloudFormation like format", + source: `[ + { + "ParameterKey": "Key1", + "ParameterValue": "Value1" + }, + { + "ParameterKey": "Key2", + "ParameterValue": "Value2" + } + ]`, + expected: map[string]any{ + "Key1": "Value1", + "Key2": "Value2", + }, + }, + { + name: "CloudFormation like format, with unknown fields", + source: `[ + { + "ParameterKey": "Key1", + "ParameterValue": "Value1" + }, + { + "ParameterKey": "Key2", + "ParameterValue": "Value2", + "UsePreviousValue": true + } + ]`, + wantErr: 
true, + }, + { + name: "CodePipeline like format", + source: `{ + "Parameters": { + "Key1": "Value1", + "Key2": "Value2" + } + }`, + expected: map[string]any{ + "Key1": "Value1", + "Key2": "Value2", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var params Parameters + + err := json.Unmarshal([]byte(tt.source), ¶ms) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tt.expected, params) + }) + } +} diff --git a/pkg/scanners/cloudformation/parser/parser.go b/pkg/scanners/cloudformation/parser/parser.go new file mode 100644 index 000000000000..e45d2251036d --- /dev/null +++ b/pkg/scanners/cloudformation/parser/parser.go @@ -0,0 +1,236 @@ +package parser + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "path/filepath" + "strings" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/liamg/jfather" + "gopkg.in/yaml.v3" + + "github.com/aquasecurity/trivy/pkg/detection" +) + +var _ options.ConfigurableParser = (*Parser)(nil) + +type Parser struct { + debug debug.Logger + skipRequired bool + parameterFiles []string + parameters map[string]any + overridedParameters Parameters + configsFS fs.FS +} + +func WithParameters(params map[string]any) options.ParserOption { + return func(cp options.ConfigurableParser) { + if p, ok := cp.(*Parser); ok { + p.parameters = params + } + } +} + +func WithParameterFiles(files ...string) options.ParserOption { + return func(cp options.ConfigurableParser) { + if p, ok := cp.(*Parser); ok { + p.parameterFiles = files + } + } +} + +func WithConfigsFS(fsys fs.FS) options.ParserOption { + return func(cp options.ConfigurableParser) { + if p, ok := cp.(*Parser); ok { + p.configsFS = fsys + } + } +} + +func (p *Parser) SetDebugWriter(writer io.Writer) { + p.debug = debug.New(writer, "cloudformation", "parser") +} + +func (p *Parser) SetSkipRequiredCheck(b bool) { + p.skipRequired = b +} + +func New(options ...options.ParserOption) *Parser { + p := &Parser{} + for _, option := range options { + option(p) + } + return p +} + +func (p *Parser) ParseFS(ctx context.Context, fsys fs.FS, dir string) (FileContexts, error) { + var contexts FileContexts + if err := fs.WalkDir(fsys, filepath.ToSlash(dir), func(path string, entry fs.DirEntry, err error) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err != nil { + return err + } + if entry.IsDir() { + return nil + } + + if !p.Required(fsys, path) { + p.debug.Log("not a CloudFormation file, skipping %s", path) + return nil + } + + c, err := p.ParseFile(ctx, fsys, path) + if err != nil { + p.debug.Log("Error parsing file '%s': %s", path, err) + return nil + } + contexts = append(contexts, c) + return nil + }); err != nil { + return nil, err + } + return contexts, nil +} + +func (p *Parser) Required(fs fs.FS, path string) bool { + if p.skipRequired { + return true + } + + f, err := fs.Open(filepath.ToSlash(path)) + if err != nil { + return false + } + defer func() { _ = f.Close() }() + if data, err := io.ReadAll(f); err == nil { + return detection.IsType(path, bytes.NewReader(data), detection.FileTypeCloudFormation) + } + return false + +} + +func (p *Parser) ParseFile(ctx context.Context, fsys fs.FS, path string) (context *FileContext, err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("panic during parse: %s", e) + } + }() + + select { + case <-ctx.Done(): + 
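+		// honour cancellation before any file IO is attempted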
return nil, ctx.Err() + default: + } + + if p.configsFS == nil { + p.configsFS = fsys + } + + if err := p.parseParams(); err != nil { + return nil, fmt.Errorf("failed to parse parameters file: %w", err) + } + + sourceFmt := YamlSourceFormat + if strings.HasSuffix(strings.ToLower(path), ".json") { + sourceFmt = JsonSourceFormat + } + + f, err := fsys.Open(filepath.ToSlash(path)) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + + content, err := io.ReadAll(f) + if err != nil { + return nil, err + } + + lines := strings.Split(string(content), "\n") + + context = &FileContext{ + filepath: path, + lines: lines, + SourceFormat: sourceFmt, + } + + if strings.HasSuffix(strings.ToLower(path), ".json") { + if err := jfather.Unmarshal(content, context); err != nil { + return nil, NewErrInvalidContent(path, err) + } + } else { + if err := yaml.Unmarshal(content, context); err != nil { + return nil, NewErrInvalidContent(path, err) + } + } + + context.OverrideParameters(p.overridedParameters) + + context.lines = lines + context.SourceFormat = sourceFmt + context.filepath = path + + p.debug.Log("Context loaded from source %s", path) + + // the context must be set to conditions before resources + for _, c := range context.Conditions { + c.setContext(context) + } + + for name, r := range context.Resources { + r.ConfigureResource(name, fsys, path, context) + } + + return context, nil +} + +func (p *Parser) parseParams() error { + if p.overridedParameters != nil { // parameters have already been parsed + return nil + } + + params := make(Parameters) + + var errs []error + + for _, path := range p.parameterFiles { + if parameters, err := p.parseParametersFile(path); err != nil { + errs = append(errs, err) + } else { + params.Merge(parameters) + } + } + + if len(errs) != 0 { + return errors.Join(errs...) 
+ } + + params.Merge(p.parameters) + + p.overridedParameters = params + return nil +} + +func (p *Parser) parseParametersFile(path string) (Parameters, error) { + f, err := p.configsFS.Open(path) + if err != nil { + return nil, fmt.Errorf("parameters file %q open error: %w", path, err) + } + + var parameters Parameters + if err := json.NewDecoder(f).Decode(¶meters); err != nil { + return nil, err + } + return parameters, nil +} diff --git a/pkg/scanners/cloudformation/parser/parser_test.go b/pkg/scanners/cloudformation/parser/parser_test.go new file mode 100644 index 000000000000..5862d4757186 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/parser_test.go @@ -0,0 +1,374 @@ +package parser + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func parseFile(t *testing.T, source string, name string) (FileContexts, error) { + tmp, err := os.MkdirTemp(os.TempDir(), "defsec") + require.NoError(t, err) + defer func() { _ = os.RemoveAll(tmp) }() + require.NoError(t, os.WriteFile(filepath.Join(tmp, name), []byte(source), 0600)) + fs := os.DirFS(tmp) + return New().ParseFS(context.TODO(), fs, ".") +} + +func Test_parse_yaml(t *testing.T) { + + source := `--- +Parameters: + BucketName: + Type: String + Default: naughty + EncryptBucket: + Type: Boolean + Default: false +Resources: + S3Bucket: + Type: 'AWS::S3::Bucket' + Properties: + BucketName: naughty + BucketEncryption: + ServerSideEncryptionConfiguration: + - BucketKeyEnabled: + Ref: EncryptBucket` + + files, err := parseFile(t, source, "cf.yaml") + require.NoError(t, err) + assert.Len(t, files, 1) + file := files[0] + + assert.Len(t, file.Resources, 1) + assert.Len(t, file.Parameters, 2) + + bucket, ok := file.Resources["S3Bucket"] + require.True(t, ok, "S3Bucket resource should be available") + assert.Equal(t, "cf.yaml", bucket.Range().GetFilename()) + assert.Equal(t, 10, bucket.Range().GetStartLine()) + assert.Equal(t, 17, bucket.Range().GetEndLine()) +} + +func Test_parse_json(t *testing.T) { + source := `{ + "Parameters": { + "BucketName": { + "Type": "String", + "Default": "naughty" + }, + "BucketKeyEnabled": { + "Type": "Boolean", + "Default": false + } + }, + "Resources": { + "S3Bucket": { + "Type": "AWS::S3::Bucket", + "properties": { + "BucketName": { + "Ref": "BucketName" + }, + "BucketEncryption": { + "ServerSideEncryptionConfiguration": [ + { + "BucketKeyEnabled": { + "Ref": "BucketKeyEnabled" + } + } + ] + } + } + } + } +} +` + + files, err := parseFile(t, source, "cf.json") + require.NoError(t, err) + assert.Len(t, files, 1) + file := files[0] + + assert.Len(t, file.Resources, 1) + assert.Len(t, file.Parameters, 2) +} + +func Test_parse_yaml_with_map_ref(t *testing.T) { + + source := `--- +Parameters: + BucketName: + Type: String + Default: referencedBucket + EncryptBucket: + Type: Boolean + Default: false +Resources: + S3Bucket: + Type: 'AWS::S3::Bucket' + Properties: + BucketName: + Ref: BucketName + BucketEncryption: + ServerSideEncryptionConfiguration: + - BucketKeyEnabled: + Ref: EncryptBucket` + + files, err := parseFile(t, source, "cf.yaml") + require.NoError(t, err) + assert.Len(t, files, 1) + file := files[0] + + assert.Len(t, file.Resources, 1) + assert.Len(t, file.Parameters, 2) + + res := file.GetResourceByLogicalID("S3Bucket") + assert.NotNil(t, res) + + refProp := res.GetProperty("BucketName") + assert.False(t, refProp.IsNil()) + 
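+	// the map-form Ref should have been resolved to the parameter default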
assert.Equal(t, "referencedBucket", refProp.AsString()) +} + +func Test_parse_yaml_with_intrinsic_functions(t *testing.T) { + + source := `--- +Parameters: + BucketName: + Type: String + Default: somebucket + EncryptBucket: + Type: Boolean + Default: false +Resources: + S3Bucket: + Type: 'AWS::S3::Bucket' + Properties: + BucketName: !Ref BucketName + BucketEncryption: + ServerSideEncryptionConfiguration: + - BucketKeyEnabled: false +` + + files, err := parseFile(t, source, "cf.yaml") + require.NoError(t, err) + assert.Len(t, files, 1) + ctx := files[0] + + assert.Len(t, ctx.Resources, 1) + assert.Len(t, ctx.Parameters, 2) + + res := ctx.GetResourceByLogicalID("S3Bucket") + assert.NotNil(t, res) + + refProp := res.GetProperty("BucketName") + assert.False(t, refProp.IsNil()) + assert.Equal(t, "somebucket", refProp.AsString()) +} + +func createTestFileContext(t *testing.T, source string) *FileContext { + contexts, err := parseFile(t, source, "main.yaml") + require.NoError(t, err) + require.Len(t, contexts, 1) + return contexts[0] +} + +func Test_parse_yaml_use_condition_in_resource(t *testing.T) { + source := `--- +AWSTemplateFormatVersion: "2010-09-09" +Description: some description +Parameters: + ServiceName: + Type: String + Description: The service name + EnvName: + Type: String + Description: Optional environment name to prefix all resources with + Default: "" + +Conditions: + SuffixResources: !Not [!Equals [!Ref EnvName, ""]] + +Resources: + ErrorTimedOutMetricFilter: + Type: AWS::Logs::MetricFilter + Properties: + FilterPattern: '?ERROR ?error ?Error ?"timed out"' # If log contains one of these error words or timed out + LogGroupName: + !If [ + SuffixResources, + !Sub "/aws/lambda/${ServiceName}-${EnvName}", + !Sub "/aws/lambda/${ServiceName}", + ] + MetricTransformations: + - MetricName: !Sub "${ServiceName}-ErrorLogCount" + MetricNamespace: market-LogMetrics + MetricValue: 1 + DefaultValue: 0 +` + + files, err := parseFile(t, source, "cf.yaml") + require.NoError(t, err) + assert.Len(t, files, 1) + ctx := files[0] + + assert.Len(t, ctx.Parameters, 2) + assert.Len(t, ctx.Conditions, 1) + assert.Len(t, ctx.Resources, 1) + + res := ctx.GetResourceByLogicalID("ErrorTimedOutMetricFilter") + assert.NotNil(t, res) + + refProp := res.GetProperty("LogGroupName") + assert.False(t, refProp.IsNil()) + assert.Equal(t, "/aws/lambda/${ServiceName}", refProp.AsString()) +} + +func TestParse_WithParameters(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "main.yaml": `AWSTemplateFormatVersion: 2010-09-09 +Parameters: + KmsMasterKeyId: + Type: String +Resources: + TestQueue: + Type: 'AWS::SQS::Queue' + Properties: + QueueName: test-queue + KmsMasterKeyId: !Ref KmsMasterKeyId + `, + }) + + params := map[string]any{ + "KmsMasterKeyId": "some_id", + } + p := New(WithParameters(params)) + + files, err := p.ParseFS(context.TODO(), fs, ".") + require.NoError(t, err) + require.Len(t, files, 1) + + file := files[0] + res := file.GetResourceByLogicalID("TestQueue") + assert.NotNil(t, res) + + kmsProp := res.GetProperty("KmsMasterKeyId") + assert.False(t, kmsProp.IsNil()) + assert.Equal(t, "some_id", kmsProp.AsString()) +} + +func TestParse_WithParameterFiles(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.yaml": `AWSTemplateFormatVersion: 2010-09-09 +Parameters: + KmsMasterKeyId: + Type: String +Resources: + TestQueue: + Type: 'AWS::SQS::Queue' + Properties: + QueueName: test-queue + KmsMasterKeyId: !Ref KmsMasterKeyId +`, + "params.json": `[ + { + "ParameterKey": 
"KmsMasterKeyId", + "ParameterValue": "some_id" + } +] + `, + }) + + p := New(WithParameterFiles("params.json")) + + files, err := p.ParseFS(context.TODO(), fs, ".") + require.NoError(t, err) + require.Len(t, files, 1) + + file := files[0] + res := file.GetResourceByLogicalID("TestQueue") + assert.NotNil(t, res) + + kmsProp := res.GetProperty("KmsMasterKeyId") + assert.False(t, kmsProp.IsNil()) + assert.Equal(t, "some_id", kmsProp.AsString()) +} + +func TestParse_WithConfigFS(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "queue.yaml": `AWSTemplateFormatVersion: 2010-09-09 +Parameters: + KmsMasterKeyId: + Type: String +Resources: + TestQueue: + Type: 'AWS::SQS::Queue' + Properties: + QueueName: testqueue + KmsMasterKeyId: !Ref KmsMasterKeyId +`, + "bucket.yaml": `AWSTemplateFormatVersion: '2010-09-09' +Description: Bucket +Parameters: + BucketName: + Type: String +Resources: + S3Bucket: + Type: AWS::S3::Bucket + Properties: + BucketName: !Ref BucketName +`, + }) + + configFS := testutil.CreateFS(t, map[string]string{ + "/workdir/parameters/queue.json": `[ + { + "ParameterKey": "KmsMasterKeyId", + "ParameterValue": "some_id" + } + ] + `, + "/workdir/parameters/s3.json": `[ + { + "ParameterKey": "BucketName", + "ParameterValue": "testbucket" + } + ]`, + }) + + p := New( + WithParameterFiles("/workdir/parameters/queue.json", "/workdir/parameters/s3.json"), + WithConfigsFS(configFS), + ) + + files, err := p.ParseFS(context.TODO(), fs, ".") + require.NoError(t, err) + require.Len(t, files, 2) + + for _, file := range files { + if strings.Contains(file.filepath, "queue") { + res := file.GetResourceByLogicalID("TestQueue") + assert.NotNil(t, res) + + kmsProp := res.GetProperty("KmsMasterKeyId") + assert.False(t, kmsProp.IsNil()) + assert.Equal(t, "some_id", kmsProp.AsString()) + } else if strings.Contains(file.filepath, "s3") { + res := file.GetResourceByLogicalID("S3Bucket") + assert.NotNil(t, res) + + bucketNameProp := res.GetProperty("BucketName") + assert.False(t, bucketNameProp.IsNil()) + assert.Equal(t, "testbucket", bucketNameProp.AsString()) + } + } + +} diff --git a/pkg/scanners/cloudformation/parser/property.go b/pkg/scanners/cloudformation/parser/property.go new file mode 100644 index 000000000000..4197481e9d67 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/property.go @@ -0,0 +1,428 @@ +package parser + +import ( + "encoding/json" + "io/fs" + "strconv" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/liamg/jfather" + "gopkg.in/yaml.v3" +) + +type EqualityOptions = int + +const ( + IgnoreCase EqualityOptions = iota +) + +type Property struct { + ctx *FileContext + name string + comment string + rng defsecTypes.Range + parentRange defsecTypes.Range + Inner PropertyInner + logicalId string + unresolved bool +} + +type PropertyInner struct { + Type cftypes.CfType + Value interface{} `json:"Value" yaml:"Value"` +} + +func (p *Property) Comment() string { + return p.comment +} + +func (p *Property) setName(name string) { + p.name = name + if p.Type() == cftypes.Map { + for n, subProp := range p.AsMap() { + if subProp == nil { + continue + } + subProp.setName(n) + } + } +} + +func (p *Property) setContext(ctx *FileContext) { + p.ctx = ctx + + if p.IsMap() { + for _, subProp := range p.AsMap() { + if subProp == nil { + continue + } + subProp.setContext(ctx) + } + } + + if p.IsList() { + for _, subProp := range p.AsList() { + 
subProp.setContext(ctx) + } + } +} + +func (p *Property) setFileAndParentRange(target fs.FS, filepath string, parentRange defsecTypes.Range) { + p.rng = defsecTypes.NewRange(filepath, p.rng.GetStartLine(), p.rng.GetEndLine(), p.rng.GetSourcePrefix(), target) + p.parentRange = parentRange + + switch p.Type() { + case cftypes.Map: + for _, subProp := range p.AsMap() { + if subProp == nil { + continue + } + subProp.setFileAndParentRange(target, filepath, parentRange) + } + case cftypes.List: + for _, subProp := range p.AsList() { + if subProp == nil { + continue + } + subProp.setFileAndParentRange(target, filepath, parentRange) + } + } +} + +func (p *Property) UnmarshalYAML(node *yaml.Node) error { + p.rng = defsecTypes.NewRange("", node.Line, calculateEndLine(node), "", nil) + + p.comment = node.LineComment + return setPropertyValueFromYaml(node, &p.Inner) +} + +func (p *Property) UnmarshalJSONWithMetadata(node jfather.Node) error { + p.rng = defsecTypes.NewRange("", node.Range().Start.Line, node.Range().End.Line, "", nil) + return setPropertyValueFromJson(node, &p.Inner) +} + +func (p *Property) Type() cftypes.CfType { + return p.Inner.Type +} + +func (p *Property) Range() defsecTypes.Range { + return p.rng +} + +func (p *Property) Metadata() defsecTypes.MisconfigMetadata { + base := p + if p.isFunction() { + if resolved, ok := p.resolveValue(); ok { + base = resolved + } + } + ref := NewCFReferenceWithValue(p.parentRange, *base, p.logicalId) + return defsecTypes.NewMisconfigMetadata(p.Range(), ref.String()) +} + +func (p *Property) MetadataWithValue(resolvedValue *Property) defsecTypes.MisconfigMetadata { + ref := NewCFReferenceWithValue(p.parentRange, *resolvedValue, p.logicalId) + return defsecTypes.NewMisconfigMetadata(p.Range(), ref.String()) +} + +func (p *Property) isFunction() bool { + if p == nil { + return false + } + if p.Type() == cftypes.Map { + for n := range p.AsMap() { + return IsIntrinsic(n) + } + } + return false +} + +func (p *Property) RawValue() interface{} { + return p.Inner.Value +} + +func (p *Property) AsRawStrings() ([]string, error) { + + if len(p.ctx.lines) < p.rng.GetEndLine() { + return p.ctx.lines, nil + } + return p.ctx.lines[p.rng.GetStartLine()-1 : p.rng.GetEndLine()], nil +} + +func (p *Property) resolveValue() (*Property, bool) { + if !p.isFunction() || p.IsUnresolved() { + return p, true + } + + resolved, ok := ResolveIntrinsicFunc(p) + if ok { + return resolved, true + } + + p.unresolved = true + return p, false +} + +func (p *Property) GetStringProperty(path string, defaultValue ...string) defsecTypes.StringValue { + defVal := "" + if len(defaultValue) > 0 { + defVal = defaultValue[0] + } + + if p.IsUnresolved() { + return defsecTypes.StringUnresolvable(p.Metadata()) + } + + prop := p.GetProperty(path) + if prop.IsNotString() { + return p.StringDefault(defVal) + } + return prop.AsStringValue() +} + +func (p *Property) StringDefault(defaultValue string) defsecTypes.StringValue { + return defsecTypes.StringDefault(defaultValue, p.Metadata()) +} + +func (p *Property) GetBoolProperty(path string, defaultValue ...bool) defsecTypes.BoolValue { + defVal := false + if len(defaultValue) > 0 { + defVal = defaultValue[0] + } + + if p.IsUnresolved() { + return defsecTypes.BoolUnresolvable(p.Metadata()) + } + + prop := p.GetProperty(path) + + if prop.isFunction() { + prop, _ = prop.resolveValue() + } + + if prop.IsNotBool() { + return p.inferBool(prop, defVal) + } + return prop.AsBoolValue() +} + +func (p *Property) GetIntProperty(path string, defaultValue ...int) 
defsecTypes.IntValue { + defVal := 0 + if len(defaultValue) > 0 { + defVal = defaultValue[0] + } + + if p.IsUnresolved() { + return defsecTypes.IntUnresolvable(p.Metadata()) + } + + prop := p.GetProperty(path) + + if prop.IsNotInt() { + return p.IntDefault(defVal) + } + return prop.AsIntValue() +} + +func (p *Property) BoolDefault(defaultValue bool) defsecTypes.BoolValue { + return defsecTypes.BoolDefault(defaultValue, p.Metadata()) +} + +func (p *Property) IntDefault(defaultValue int) defsecTypes.IntValue { + return defsecTypes.IntDefault(defaultValue, p.Metadata()) +} + +func (p *Property) GetProperty(path string) *Property { + + pathParts := strings.Split(path, ".") + + first := pathParts[0] + property := p + + if p.isFunction() { + property, _ = p.resolveValue() + } + + if property.IsNotMap() { + return nil + } + + for n, p := range property.AsMap() { + if n == first { + property = p + break + } + } + + if len(pathParts) == 1 || property == nil { + return property + } + + if nestedProperty := property.GetProperty(strings.Join(pathParts[1:], ".")); nestedProperty != nil { + if nestedProperty.isFunction() { + resolved, _ := nestedProperty.resolveValue() + return resolved + } else { + return nestedProperty + } + } + + return &Property{} +} + +func (p *Property) deriveResolved(propType cftypes.CfType, propValue interface{}) *Property { + return &Property{ + ctx: p.ctx, + name: p.name, + comment: p.comment, + rng: p.rng, + parentRange: p.parentRange, + logicalId: p.logicalId, + Inner: PropertyInner{ + Type: propType, + Value: propValue, + }, + } +} + +func (p *Property) ParentRange() defsecTypes.Range { + return p.parentRange +} + +func (p *Property) inferBool(prop *Property, defaultValue bool) defsecTypes.BoolValue { + if prop.IsString() { + if prop.EqualTo("true", IgnoreCase) { + return defsecTypes.Bool(true, prop.Metadata()) + } + if prop.EqualTo("yes", IgnoreCase) { + return defsecTypes.Bool(true, prop.Metadata()) + } + if prop.EqualTo("1", IgnoreCase) { + return defsecTypes.Bool(true, prop.Metadata()) + } + if prop.EqualTo("false", IgnoreCase) { + return defsecTypes.Bool(false, prop.Metadata()) + } + if prop.EqualTo("no", IgnoreCase) { + return defsecTypes.Bool(false, prop.Metadata()) + } + if prop.EqualTo("0", IgnoreCase) { + return defsecTypes.Bool(false, prop.Metadata()) + } + } + + if prop.IsInt() { + if prop.EqualTo(0) { + return defsecTypes.Bool(false, prop.Metadata()) + } + if prop.EqualTo(1) { + return defsecTypes.Bool(true, prop.Metadata()) + } + } + + return p.BoolDefault(defaultValue) +} + +func (p *Property) String() string { + r := "" + switch p.Type() { + case cftypes.String: + r = p.AsString() + case cftypes.Int: + r = strconv.Itoa(p.AsInt()) + } + return r +} + +func (p *Property) SetLogicalResource(id string) { + p.logicalId = id + + if p.isFunction() { + return + } + + if p.IsMap() { + for _, subProp := range p.AsMap() { + if subProp == nil { + continue + } + subProp.SetLogicalResource(id) + } + } + + if p.IsList() { + for _, subProp := range p.AsList() { + subProp.SetLogicalResource(id) + } + } + +} + +func (p *Property) GetJsonBytes(squashList ...bool) []byte { + if p.IsNil() { + return []byte{} + } + lines, err := p.AsRawStrings() + if err != nil { + return nil + } + if p.ctx.SourceFormat == JsonSourceFormat { + return []byte(strings.Join(lines, " ")) + } + + if len(squashList) > 0 { + lines[0] = strings.Replace(lines[0], "-", " ", 1) + } + + lines = removeLeftMargin(lines) + + yamlContent := strings.Join(lines, "\n") + var body interface{} + if err := 
yaml.Unmarshal([]byte(yamlContent), &body); err != nil { + return nil + } + jsonBody := convert(body) + policyJson, err := json.Marshal(jsonBody) + if err != nil { + return nil + } + return policyJson +} + +func (p *Property) GetJsonBytesAsString(squashList ...bool) string { + return string(p.GetJsonBytes(squashList...)) +} + +func removeLeftMargin(lines []string) []string { + if len(lines) == 0 { + return lines + } + prefixSpace := len(lines[0]) - len(strings.TrimLeft(lines[0], " ")) + + for i, line := range lines { + if len(line) >= prefixSpace { + lines[i] = line[prefixSpace:] + } + } + return lines +} + +func convert(input interface{}) interface{} { + switch x := input.(type) { + case map[interface{}]interface{}: + outpMap := map[string]interface{}{} + for k, v := range x { + outpMap[k.(string)] = convert(v) + } + return outpMap + case []interface{}: + for i, v := range x { + x[i] = convert(v) + } + } + return input +} diff --git a/pkg/scanners/cloudformation/parser/property_conversion.go b/pkg/scanners/cloudformation/parser/property_conversion.go new file mode 100644 index 000000000000..45ff7f3dc927 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/property_conversion.go @@ -0,0 +1,129 @@ +package parser + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func (p *Property) IsConvertableTo(conversionType cftypes.CfType) bool { + switch conversionType { + case cftypes.Int: + return p.isConvertableToInt() + case cftypes.Bool: + return p.isConvertableToBool() + case cftypes.String: + return p.isConvertableToString() + } + return false +} + +func (p *Property) isConvertableToString() bool { + switch p.Type() { + case cftypes.Map: + return false + case cftypes.List: + for _, p := range p.AsList() { + if !p.IsString() { + return false + } + } + } + return true +} + +func (p *Property) isConvertableToBool() bool { + switch p.Type() { + case cftypes.String: + return p.EqualTo("true", IgnoreCase) || p.EqualTo("false", IgnoreCase) || + p.EqualTo("1", IgnoreCase) || p.EqualTo("0", IgnoreCase) + + case cftypes.Int: + return p.EqualTo(1) || p.EqualTo(0) + } + return false +} + +func (p *Property) isConvertableToInt() bool { + switch p.Type() { + case cftypes.String: + if _, err := strconv.Atoi(p.AsString()); err == nil { + return true + } + case cftypes.Bool: + return true + } + return false +} + +func (p *Property) ConvertTo(conversionType cftypes.CfType) *Property { + + if !p.IsConvertableTo(conversionType) { + _, _ = fmt.Fprintf(os.Stderr, "property of type %s cannot be converted to %s\n", p.Type(), conversionType) + return p + } + switch conversionType { + case cftypes.Int: + return p.convertToInt() + case cftypes.Bool: + return p.convertToBool() + case cftypes.String: + return p.convertToString() + } + return p +} + +func (p *Property) convertToString() *Property { + switch p.Type() { + case cftypes.Int: + return p.deriveResolved(cftypes.String, strconv.Itoa(p.AsInt())) + case cftypes.Bool: + return p.deriveResolved(cftypes.String, fmt.Sprintf("%v", p.AsBool())) + case cftypes.List: + var parts []string + for _, property := range p.AsList() { + parts = append(parts, property.AsString()) + } + return p.deriveResolved(cftypes.String, fmt.Sprintf("[%s]", strings.Join(parts, ", "))) + } + return p +} + +func (p *Property) convertToBool() *Property { + switch p.Type() { + case cftypes.String: + if p.EqualTo("true", IgnoreCase) || p.EqualTo("1") { + return p.deriveResolved(cftypes.Bool, true) + } + if 
p.EqualTo("false", IgnoreCase) || p.EqualTo("0") { + return p.deriveResolved(cftypes.Bool, false) + } + case cftypes.Int: + if p.EqualTo(1) { + return p.deriveResolved(cftypes.Bool, true) + } + if p.EqualTo(0) { + return p.deriveResolved(cftypes.Bool, false) + } + } + return p +} + +func (p *Property) convertToInt() *Property { + // + switch p.Type() { + case cftypes.String: + if val, err := strconv.Atoi(p.AsString()); err == nil { + return p.deriveResolved(cftypes.Int, val) + } + case cftypes.Bool: + if p.IsTrue() { + return p.deriveResolved(cftypes.Int, 1) + } + return p.deriveResolved(cftypes.Int, 0) + } + return p +} diff --git a/pkg/scanners/cloudformation/parser/property_helpers.go b/pkg/scanners/cloudformation/parser/property_helpers.go new file mode 100644 index 000000000000..075f7c4cea80 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/property_helpers.go @@ -0,0 +1,267 @@ +package parser + +import ( + "strconv" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" +) + +func (p *Property) IsNil() bool { + return p == nil || p.Inner.Value == nil +} + +func (p *Property) IsNotNil() bool { + return !p.IsUnresolved() && !p.IsNil() +} + +func (p *Property) Is(t cftypes.CfType) bool { + if p.IsNil() || p.IsUnresolved() { + return false + } + if p.isFunction() { + if prop, success := p.resolveValue(); success && prop != p { + return prop.Is(t) + } + } + return p.Inner.Type == t +} + +func (p *Property) IsString() bool { + return p.Is(cftypes.String) +} + +func (p *Property) IsNotString() bool { + return !p.IsUnresolved() && !p.IsString() +} + +func (p *Property) IsInt() bool { + return p.Is(cftypes.Int) +} + +func (p *Property) IsNotInt() bool { + return !p.IsUnresolved() && !p.IsInt() +} + +func (p *Property) IsMap() bool { + if p.IsNil() || p.IsUnresolved() { + return false + } + return p.Inner.Type == cftypes.Map +} + +func (p *Property) IsNotMap() bool { + return !p.IsUnresolved() && !p.IsMap() +} + +func (p *Property) IsList() bool { + return p.Is(cftypes.List) +} + +func (p *Property) IsNotList() bool { + return !p.IsUnresolved() && !p.IsList() +} + +func (p *Property) IsBool() bool { + return p.Is(cftypes.Bool) +} + +func (p *Property) IsUnresolved() bool { + return p != nil && p.unresolved +} + +func (p *Property) IsNotBool() bool { + return !p.IsUnresolved() && !p.IsBool() +} + +func (p *Property) AsString() string { + if p.isFunction() { + if prop, success := p.resolveValue(); success && prop != p { + return prop.AsString() + } + return "" + } + if p.IsNil() { + return "" + } + if !p.IsString() { + return "" + } + + return p.Inner.Value.(string) +} + +func (p *Property) AsStringValue() defsecTypes.StringValue { + if p.unresolved { + return defsecTypes.StringUnresolvable(p.Metadata()) + } + return defsecTypes.StringExplicit(p.AsString(), p.Metadata()) +} + +func (p *Property) AsInt() int { + if p.isFunction() { + if prop, success := p.resolveValue(); success && prop != p { + return prop.AsInt() + } + return 0 + } + if p.IsNotInt() { + if p.isConvertableToInt() { + return p.convertToInt().AsInt() + } + return 0 + } + + return p.Inner.Value.(int) +} + +func (p *Property) AsIntValue() defsecTypes.IntValue { + if p.unresolved { + return defsecTypes.IntUnresolvable(p.Metadata()) + } + return defsecTypes.IntExplicit(p.AsInt(), p.Metadata()) +} + +func (p *Property) AsBool() bool { + if p.isFunction() { + if prop, success := p.resolveValue(); success && prop != p { + return 
prop.AsBool() + } + return false + } + if !p.IsBool() { + return false + } + return p.Inner.Value.(bool) +} + +func (p *Property) AsBoolValue() defsecTypes.BoolValue { + if p.unresolved { + return defsecTypes.BoolUnresolvable(p.Metadata()) + } + return defsecTypes.Bool(p.AsBool(), p.Metadata()) +} + +func (p *Property) AsMap() map[string]*Property { + val, ok := p.Inner.Value.(map[string]*Property) + if !ok { + return nil + } + return val +} + +func (p *Property) AsList() []*Property { + if p.isFunction() { + if prop, success := p.resolveValue(); success && prop != p { + return prop.AsList() + } + return []*Property{} + } + + if list, ok := p.Inner.Value.([]*Property); ok { + return list + } + return nil +} + +func (p *Property) Len() int { + return len(p.AsList()) +} + +func (p *Property) EqualTo(checkValue interface{}, equalityOptions ...EqualityOptions) bool { + var ignoreCase bool + for _, option := range equalityOptions { + if option == IgnoreCase { + ignoreCase = true + } + } + + switch checkerVal := checkValue.(type) { + case string: + if p.IsNil() { + return false + } + + if p.Inner.Type == cftypes.String || p.IsString() { + if ignoreCase { + return strings.EqualFold(p.AsString(), checkerVal) + } + return p.AsString() == checkerVal + } else if p.Inner.Type == cftypes.Int || p.IsInt() { + if val, err := strconv.Atoi(checkerVal); err == nil { + return p.AsInt() == val + } + } + return false + case bool: + if p.Inner.Type == cftypes.Bool || p.IsBool() { + return p.AsBool() == checkerVal + } + case int: + if p.Inner.Type == cftypes.Int || p.IsInt() { + return p.AsInt() == checkerVal + } + } + + return false + +} + +func (p *Property) IsTrue() bool { + if p.IsNil() || !p.IsBool() { + return false + } + + return p.AsBool() +} + +func (p *Property) IsEmpty() bool { + + if p.IsNil() { + return true + } + if p.IsUnresolved() { + return false + } + + switch p.Inner.Type { + case cftypes.String: + return p.AsString() == "" + case cftypes.List, cftypes.Map: + return len(p.AsList()) == 0 + default: + return false + } +} + +func (p *Property) Contains(checkVal interface{}) bool { + if p == nil || p.IsNil() { + return false + } + + switch p.Type() { + case cftypes.List: + for _, p := range p.AsList() { + if p.EqualTo(checkVal) { + return true + } + } + case cftypes.Map: + if _, ok := checkVal.(string); !ok { + return false + } + for key := range p.AsMap() { + if key == checkVal.(string) { + return true + } + } + case cftypes.String: + if _, ok := checkVal.(string); !ok { + return false + } + return strings.Contains(p.AsString(), checkVal.(string)) + } + return false +} diff --git a/pkg/scanners/cloudformation/parser/property_helpers_test.go b/pkg/scanners/cloudformation/parser/property_helpers_test.go new file mode 100644 index 000000000000..9cabb664776a --- /dev/null +++ b/pkg/scanners/cloudformation/parser/property_helpers_test.go @@ -0,0 +1,195 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/stretchr/testify/assert" +) + +func newProp(inner PropertyInner) *Property { + return &Property{ + name: "test_prop", + ctx: &FileContext{}, + rng: types.NewRange("testfile", 1, 1, "", nil), + Inner: inner, + } +} + +func Test_EqualTo(t *testing.T) { + tests := []struct { + name string + property *Property + checkValue interface{} + opts []EqualityOptions + isEqual bool + }{ + { + name: "prop is nil", + property: nil, + checkValue: "some value", + isEqual: 
false, + }, + { + name: "compare strings", + property: newProp(PropertyInner{ + Type: cftypes.String, + Value: "is str", + }), + checkValue: "is str", + isEqual: true, + }, + { + name: "compare strings ignoring case", + property: newProp(PropertyInner{ + Type: cftypes.String, + Value: "is str", + }), + opts: []EqualityOptions{IgnoreCase}, + checkValue: "Is StR", + isEqual: true, + }, + { + name: "strings are not equal", + property: newProp(PropertyInner{ + Type: cftypes.String, + Value: "some value", + }), + checkValue: "some other value", + isEqual: false, + }, + { + name: "compare prop with an int represented by a string", + property: newProp(PropertyInner{ + Type: cftypes.Int, + Value: 147, + }), + checkValue: "147", + isEqual: true, + }, + { + name: "compare ints", + property: newProp(PropertyInner{ + Type: cftypes.Int, + Value: 701, + }), + checkValue: 701, + isEqual: true, + }, + { + name: "compare bools", + property: newProp(PropertyInner{ + Type: cftypes.Bool, + Value: true, + }), + checkValue: true, + isEqual: true, + }, + { + name: "prop is string fn", + property: newProp(PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::If": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.Bool, + Value: false, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "bad", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "some value", + }, + }, + }, + }, + }, + }, + }), + checkValue: "some value", + isEqual: true, + }, + { + name: "prop is int fn", + property: newProp(PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::If": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.Bool, + Value: true, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.Int, + Value: 121, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.Int, + Value: -1, + }, + }, + }, + }, + }, + }, + }), + checkValue: 121, + isEqual: true, + }, + { + name: "prop is bool fn", + property: newProp(PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::Equals": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "foo", + }, + }, + }, + }, + }, + }, + }), + checkValue: true, + isEqual: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.isEqual, tt.property.EqualTo(tt.checkValue, tt.opts...)) + }) + } +} diff --git a/pkg/scanners/cloudformation/parser/pseudo_parameters.go b/pkg/scanners/cloudformation/parser/pseudo_parameters.go new file mode 100644 index 000000000000..3027095c13b7 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/pseudo_parameters.go @@ -0,0 +1,46 @@ +package parser + +import "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + +type pseudoParameter struct { + t cftypes.CfType + val interface{} + raw interface{} +} + +var pseudoParameters = map[string]pseudoParameter{ + "AWS::AccountId": {t: cftypes.String, val: "123456789012"}, + "AWS::NotificationARNs": { + t: cftypes.List, + val: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "notification::arn::1", + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "notification::arn::2", + }, + }, + }, + raw: []string{"notification::arn::1", "notification::arn::2"}, + },
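+ // These fixed placeholders stand in for values that only exist at deploy time, so templates that reference pseudo parameters still resolve while scanning offline. Note that "AWS::NoValue" below is approximated as an empty string; a real stack would remove the property entirely.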
+ "AWS::NoValue": {t: cftypes.String, val: ""}, + "AWS::Partition": {t: cftypes.String, val: "aws"}, + "AWS::Region": {t: cftypes.String, val: "eu-west-1"}, + "AWS::StackId": {t: cftypes.String, val: "arn:aws:cloudformation:eu-west-1:stack/ID"}, + "AWS::StackName": {t: cftypes.String, val: "cfsec-test-stack"}, + "AWS::URLSuffix": {t: cftypes.String, val: "amazonaws.com"}, +} + +func (p pseudoParameter) getRawValue() interface{} { + switch p.t { + case cftypes.List: + return p.raw + default: + return p.val + } +} diff --git a/pkg/scanners/cloudformation/parser/pseudo_parameters_test.go b/pkg/scanners/cloudformation/parser/pseudo_parameters_test.go new file mode 100644 index 000000000000..281bf9083a14 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/pseudo_parameters_test.go @@ -0,0 +1,36 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_Raw(t *testing.T) { + tests := []struct { + name string + key string + expected interface{} + }{ + { + name: "parameter with a string type value", + key: "AWS::AccountId", + expected: "123456789012", + }, + { + name: "a parameter with a list type value", + key: "AWS::NotificationARNs", + expected: []string{"notification::arn::1", "notification::arn::2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if val, ok := pseudoParameters[tt.key]; ok { + assert.Equal(t, tt.expected, val.getRawValue()) + } else { + t.Fatal("unexpected parameter key") + } + }) + } +} diff --git a/pkg/scanners/cloudformation/parser/reference.go b/pkg/scanners/cloudformation/parser/reference.go new file mode 100644 index 000000000000..a20da2baf697 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/reference.go @@ -0,0 +1,58 @@ +package parser + +import ( + "fmt" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" +) + +type CFReference struct { + logicalId string + resourceRange defsecTypes.Range + resolvedValue Property +} + +func NewCFReference(id string, resourceRange defsecTypes.Range) CFReference { + return CFReference{ + logicalId: id, + resourceRange: resourceRange, + } +} + +func NewCFReferenceWithValue(resourceRange defsecTypes.Range, resolvedValue Property, logicalId string) CFReference { + return CFReference{ + resourceRange: resourceRange, + resolvedValue: resolvedValue, + logicalId: logicalId, + } +} + +func (cf CFReference) String() string { + return cf.resourceRange.String() +} + +func (cf CFReference) LogicalID() string { + return cf.logicalId +} + +func (cf CFReference) ResourceRange() defsecTypes.Range { + return cf.resourceRange +} + +func (cf CFReference) PropertyRange() defsecTypes.Range { + if cf.resolvedValue.IsNotNil() { + return cf.resolvedValue.Range() + } + return defsecTypes.Range{} +} + +func (cf CFReference) DisplayValue() string { + if cf.resolvedValue.IsNotNil() { + return fmt.Sprintf("%v", cf.resolvedValue.RawValue()) + } + return "" +} + +func (cf *CFReference) Comment() string { + return cf.resolvedValue.Comment() +} diff --git a/pkg/scanners/cloudformation/parser/resource.go b/pkg/scanners/cloudformation/parser/resource.go new file mode 100644 index 000000000000..a1bd596f0c1a --- /dev/null +++ b/pkg/scanners/cloudformation/parser/resource.go @@ -0,0 +1,211 @@ +package parser + +import ( + "io/fs" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/liamg/jfather" + "gopkg.in/yaml.v3" +) + +type Resource struct { + ctx *FileContext + rng defsecTypes.Range + id string + comment string + Inner 
ResourceInner +} + +type ResourceInner struct { + Type string `json:"Type" yaml:"Type"` + Properties map[string]*Property `json:"Properties" yaml:"Properties"` +} + +func (r *Resource) ConfigureResource(id string, target fs.FS, filepath string, ctx *FileContext) { + r.setId(id) + r.setFile(target, filepath) + r.setContext(ctx) +} + +func (r *Resource) setId(id string) { + r.id = id + + for n, p := range r.properties() { + p.setName(n) + } +} + +func (r *Resource) setFile(target fs.FS, filepath string) { + r.rng = defsecTypes.NewRange(filepath, r.rng.GetStartLine(), r.rng.GetEndLine(), r.rng.GetSourcePrefix(), target) + + for _, p := range r.Inner.Properties { + p.setFileAndParentRange(target, filepath, r.rng) + } +} + +func (r *Resource) setContext(ctx *FileContext) { + r.ctx = ctx + + for _, p := range r.Inner.Properties { + p.SetLogicalResource(r.id) + p.setContext(ctx) + } +} + +func (r *Resource) UnmarshalYAML(value *yaml.Node) error { + r.rng = defsecTypes.NewRange("", value.Line-1, calculateEndLine(value), "", nil) + r.comment = value.LineComment + return value.Decode(&r.Inner) +} + +func (r *Resource) UnmarshalJSONWithMetadata(node jfather.Node) error { + r.rng = defsecTypes.NewRange("", node.Range().Start.Line, node.Range().End.Line, "", nil) + return node.Decode(&r.Inner) +} + +func (r *Resource) ID() string { + return r.id +} + +func (r *Resource) Type() string { + return r.Inner.Type +} + +func (r *Resource) Range() defsecTypes.Range { + return r.rng +} + +func (r *Resource) SourceFormat() SourceFormat { + return r.ctx.SourceFormat +} + +func (r *Resource) Metadata() defsecTypes.MisconfigMetadata { + return defsecTypes.NewMisconfigMetadata(r.Range(), NewCFReference(r.id, r.rng).String()) +} + +func (r *Resource) properties() map[string]*Property { + return r.Inner.Properties +} + +func (r *Resource) IsNil() bool { + return r.id == "" +} + +func (r *Resource) GetProperty(path string) *Property { + + pathParts := strings.Split(path, ".") + + first := pathParts[0] + property := &Property{} + + for n, p := range r.properties() { + if n == first { + property = p + break + } + } + + if len(pathParts) == 1 || property.IsNil() { + if property.isFunction() { + resolved, _ := property.resolveValue() + return resolved + } + return property + } + + if nestedProperty := property.GetProperty(strings.Join(pathParts[1:], ".")); nestedProperty != nil { + return nestedProperty + } + + return &Property{} +} + +func (r *Resource) GetStringProperty(path string, defaultValue ...string) defsecTypes.StringValue { + defVal := "" + if len(defaultValue) > 0 { + defVal = defaultValue[0] + } + + prop := r.GetProperty(path) + + if prop.IsNotString() { + return r.StringDefault(defVal) + } + return prop.AsStringValue() +} + +func (r *Resource) GetBoolProperty(path string, defaultValue ...bool) defsecTypes.BoolValue { + defVal := false + if len(defaultValue) > 0 { + defVal = defaultValue[0] + } + + prop := r.GetProperty(path) + + if prop.IsNotBool() { + return r.inferBool(prop, defVal) + } + return prop.AsBoolValue() +} + +func (r *Resource) GetIntProperty(path string, defaultValue ...int) defsecTypes.IntValue { + defVal := 0 + if len(defaultValue) > 0 { + defVal = defaultValue[0] + } + + prop := r.GetProperty(path) + + if prop.IsNotInt() { + return r.IntDefault(defVal) + } + return prop.AsIntValue() +} + +func (r *Resource) StringDefault(defaultValue string) defsecTypes.StringValue { + return defsecTypes.StringDefault(defaultValue, r.Metadata()) +} + +func (r *Resource) BoolDefault(defaultValue bool) 
defsecTypes.BoolValue { + return defsecTypes.BoolDefault(defaultValue, r.Metadata()) +} + +func (r *Resource) IntDefault(defaultValue int) defsecTypes.IntValue { + return defsecTypes.IntDefault(defaultValue, r.Metadata()) +} + +func (r *Resource) inferBool(prop *Property, defaultValue bool) defsecTypes.BoolValue { + if prop.IsString() { + if prop.EqualTo("true", IgnoreCase) { + return defsecTypes.Bool(true, prop.Metadata()) + } + if prop.EqualTo("yes", IgnoreCase) { + return defsecTypes.Bool(true, prop.Metadata()) + } + if prop.EqualTo("1", IgnoreCase) { + return defsecTypes.Bool(true, prop.Metadata()) + } + if prop.EqualTo("false", IgnoreCase) { + return defsecTypes.Bool(false, prop.Metadata()) + } + if prop.EqualTo("no", IgnoreCase) { + return defsecTypes.Bool(false, prop.Metadata()) + } + if prop.EqualTo("0", IgnoreCase) { + return defsecTypes.Bool(false, prop.Metadata()) + } + } + + if prop.IsInt() { + if prop.EqualTo(0) { + return defsecTypes.Bool(false, prop.Metadata()) + } + if prop.EqualTo(1) { + return defsecTypes.Bool(true, prop.Metadata()) + } + } + + return r.BoolDefault(defaultValue) +} diff --git a/pkg/scanners/cloudformation/parser/resource_test.go b/pkg/scanners/cloudformation/parser/resource_test.go new file mode 100644 index 000000000000..eff28ae63931 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/resource_test.go @@ -0,0 +1,75 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + "github.com/stretchr/testify/require" +) + +func Test_GetProperty_PropIsFunction(t *testing.T) { + resource := Resource{ + Inner: ResourceInner{ + Type: "AWS::S3::Bucket", + Properties: map[string]*Property{ + "BucketName": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "mybucket", + }, + }, + "VersioningConfiguration": { + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Fn::If": { + Inner: PropertyInner{ + Type: cftypes.List, + Value: []*Property{ + { + Inner: PropertyInner{ + Type: cftypes.Bool, + Value: false, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Status": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "Enabled", + }, + }, + }, + }, + }, + { + Inner: PropertyInner{ + Type: cftypes.Map, + Value: map[string]*Property{ + "Status": { + Inner: PropertyInner{ + Type: cftypes.String, + Value: "Suspended", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + prop := resource.GetProperty("VersioningConfiguration.Status") + require.NotNil(t, prop) + require.True(t, prop.IsString()) + require.Equal(t, "Suspended", prop.AsString()) +} diff --git a/pkg/scanners/cloudformation/parser/util.go b/pkg/scanners/cloudformation/parser/util.go new file mode 100644 index 000000000000..a00a8ec8dd78 --- /dev/null +++ b/pkg/scanners/cloudformation/parser/util.go @@ -0,0 +1,139 @@ +package parser + +import ( + "strconv" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/cftypes" + + "github.com/liamg/jfather" + "gopkg.in/yaml.v3" +) + +func setPropertyValueFromJson(node jfather.Node, propertyData *PropertyInner) error { + + switch node.Kind() { + + case jfather.KindNumber: + propertyData.Type = cftypes.Float64 + return node.Decode(&propertyData.Value) + case jfather.KindBoolean: + propertyData.Type = cftypes.Bool + return node.Decode(&propertyData.Value) + case jfather.KindString: + propertyData.Type = cftypes.String + return node.Decode(&propertyData.Value) + case 
jfather.KindObject: + var childData map[string]*Property + if err := node.Decode(&childData); err != nil { + return err + } + propertyData.Type = cftypes.Map + propertyData.Value = childData + return nil + case jfather.KindArray: + var childData []*Property + if err := node.Decode(&childData); err != nil { + return err + } + propertyData.Type = cftypes.List + propertyData.Value = childData + return nil + default: + propertyData.Type = cftypes.String + return node.Decode(&propertyData.Value) + } + +} + +func setPropertyValueFromYaml(node *yaml.Node, propertyData *PropertyInner) error { + if IsIntrinsicFunc(node) { + var newContent []*yaml.Node + + newContent = append(newContent, &yaml.Node{ + Tag: "!!str", + Value: getIntrinsicTag(node.Tag), + Kind: yaml.ScalarNode, + }) + + newContent = createNode(node, newContent) + + node.Tag = "!!map" + node.Kind = yaml.MappingNode + node.Content = newContent + } + + if node.Content == nil { + + switch node.Tag { + + case "!!int": + propertyData.Type = cftypes.Int + propertyData.Value, _ = strconv.Atoi(node.Value) + case "!!bool": + propertyData.Type = cftypes.Bool + propertyData.Value, _ = strconv.ParseBool(node.Value) + case "!!str", "!!string": + propertyData.Type = cftypes.String + propertyData.Value = node.Value + } + return nil + } + + switch node.Tag { + case "!!map": + var childData map[string]*Property + if err := node.Decode(&childData); err != nil { + return err + } + propertyData.Type = cftypes.Map + propertyData.Value = childData + return nil + case "!!seq": + var childData []*Property + if err := node.Decode(&childData); err != nil { + return err + } + propertyData.Type = cftypes.List + propertyData.Value = childData + return nil + } + + return nil +} + +func createNode(node *yaml.Node, newContent []*yaml.Node) []*yaml.Node { + if node.Content == nil { + newContent = append(newContent, &yaml.Node{ + Tag: "!!str", + Value: node.Value, + Kind: yaml.ScalarNode, + }) + } else { + + newNode := &yaml.Node{ + Content: node.Content, + Kind: node.Kind, + } + + switch node.Kind { + case yaml.SequenceNode: + newNode.Tag = "!!seq" + case yaml.MappingNode: + newNode.Tag = "!!map" + case yaml.ScalarNode: + default: + newNode.Tag = node.Tag + } + newContent = append(newContent, newNode) + } + return newContent +} + +func calculateEndLine(node *yaml.Node) int { + if node.Content == nil { + return node.Line + } + + return calculateEndLine(node.Content[len(node.Content)-1]) + +} diff --git a/pkg/scanners/cloudformation/scanner.go b/pkg/scanners/cloudformation/scanner.go new file mode 100644 index 000000000000..b7324c9fbfa1 --- /dev/null +++ b/pkg/scanners/cloudformation/scanner.go @@ -0,0 +1,263 @@ +package cloudformation + +import ( + "context" + "fmt" + "io" + "io/fs" + "sort" + "sync" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/trules" + "github.com/aquasecurity/trivy/pkg/types" + + adapter "github.com/aquasecurity/trivy/internal/adapters/cloudformation" + "github.com/aquasecurity/trivy/pkg/scanners" + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation/parser" +) + +func WithParameters(params map[string]any) options.ScannerOption { + return func(cs options.ConfigurableScanner) { + if s, ok := cs.(*Scanner); ok { + 
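// Parameter overrides are handed straight to the parser rather than stored on the scanner; the type assertion above means any other ConfigurableScanner implementation is left untouched. +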
s.addParserOptions(parser.WithParameters(params)) + } + } +} + +func WithParameterFiles(files ...string) options.ScannerOption { + return func(cs options.ConfigurableScanner) { + if s, ok := cs.(*Scanner); ok { + s.addParserOptions(parser.WithParameterFiles(files...)) + } + } +} + +func WithConfigsFS(fsys fs.FS) options.ScannerOption { + return func(cs options.ConfigurableScanner) { + if s, ok := cs.(*Scanner); ok { + s.addParserOptions(parser.WithConfigsFS(fsys)) + } + } +} + +var _ scanners.FSScanner = (*Scanner)(nil) +var _ options.ConfigurableScanner = (*Scanner)(nil) + +type Scanner struct { + debug debug.Logger + policyDirs []string + policyReaders []io.Reader + parser *parser.Parser + regoScanner *rego.Scanner + skipRequired bool + regoOnly bool + loadEmbeddedPolicies bool + loadEmbeddedLibraries bool + options []options.ScannerOption + parserOptions []options.ParserOption + frameworks []framework.Framework + spec string + sync.Mutex +} + +func (s *Scanner) addParserOptions(opt options.ParserOption) { + s.parserOptions = append(s.parserOptions, opt) +} + +func (s *Scanner) SetFrameworks(frameworks []framework.Framework) { + s.frameworks = frameworks +} + +func (s *Scanner) SetSpec(spec string) { + s.spec = spec +} + +func (s *Scanner) SetUseEmbeddedPolicies(b bool) { + s.loadEmbeddedPolicies = b +} + +func (s *Scanner) SetUseEmbeddedLibraries(b bool) { + s.loadEmbeddedLibraries = b +} + +func (s *Scanner) SetRegoOnly(regoOnly bool) { + s.regoOnly = regoOnly +} + +func (s *Scanner) Name() string { + return "CloudFormation" +} + +func (s *Scanner) SetPolicyReaders(readers []io.Reader) { + s.policyReaders = readers +} + +func (s *Scanner) SetSkipRequiredCheck(skip bool) { + s.skipRequired = skip +} + +func (s *Scanner) SetDebugWriter(writer io.Writer) { + s.debug = debug.New(writer, "cloudformation", "scanner") +} + +func (s *Scanner) SetPolicyDirs(dirs ...string) { + s.policyDirs = dirs +} + +func (s *Scanner) SetPolicyFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} + +func (s *Scanner) SetDataFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} +func (s *Scanner) SetRegoErrorLimit(_ int) {} + +func (s *Scanner) SetTraceWriter(_ io.Writer) {} +func (s *Scanner) SetPerResultTracingEnabled(_ bool) {} +func (s *Scanner) SetDataDirs(_ ...string) {} +func (s *Scanner) SetPolicyNamespaces(_ ...string) {} + +// New creates a new Scanner +func New(opts ...options.ScannerOption) *Scanner { + s := &Scanner{ + options: opts, + } + for _, opt := range opts { + opt(s) + } + s.addParserOptions(options.ParserWithSkipRequiredCheck(s.skipRequired)) + s.parser = parser.New(s.parserOptions...) + return s +} + +func (s *Scanner) initRegoScanner(srcFS fs.FS) (*rego.Scanner, error) { + s.Lock() + defer s.Unlock() + if s.regoScanner != nil { + return s.regoScanner, nil + } + regoScanner := rego.NewScanner(types.SourceCloud, s.options...) 
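+ // LoadPolicies below pulls in embedded checks/libraries (when enabled) along with any user-supplied policy dirs and readers; that work is comparatively expensive, so the scanner is cached on s.regoScanner under the mutex and reused by ScanFS and ScanFile.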
+ regoScanner.SetParentDebugLogger(s.debug) + if err := regoScanner.LoadPolicies(s.loadEmbeddedLibraries, s.loadEmbeddedPolicies, srcFS, s.policyDirs, s.policyReaders); err != nil { + return nil, err + } + s.regoScanner = regoScanner + return regoScanner, nil +} + +func (s *Scanner) ScanFS(ctx context.Context, fs fs.FS, dir string) (results scan.Results, err error) { + + contexts, err := s.parser.ParseFS(ctx, fs, dir) + if err != nil { + return nil, err + } + + if len(contexts) == 0 { + return nil, nil + } + + regoScanner, err := s.initRegoScanner(fs) + if err != nil { + return nil, err + } + + for _, cfCtx := range contexts { + if cfCtx == nil { + continue + } + fileResults, err := s.scanFileContext(ctx, regoScanner, cfCtx, fs) + if err != nil { + return nil, err + } + results = append(results, fileResults...) + } + sort.Slice(results, func(i, j int) bool { + return results[i].Rule().AVDID < results[j].Rule().AVDID + }) + return results, nil +} + +func (s *Scanner) ScanFile(ctx context.Context, fs fs.FS, path string) (scan.Results, error) { + + cfCtx, err := s.parser.ParseFile(ctx, fs, path) + if err != nil { + return nil, err + } + + regoScanner, err := s.initRegoScanner(fs) + if err != nil { + return nil, err + } + + results, err := s.scanFileContext(ctx, regoScanner, cfCtx, fs) + if err != nil { + return nil, err + } + results.SetSourceAndFilesystem("", fs, false) + + sort.Slice(results, func(i, j int) bool { + return results[i].Rule().AVDID < results[j].Rule().AVDID + }) + return results, nil +} + +func (s *Scanner) scanFileContext(ctx context.Context, regoScanner *rego.Scanner, cfCtx *parser.FileContext, fs fs.FS) (results scan.Results, err error) { + state := adapter.Adapt(*cfCtx) + if state == nil { + return nil, nil + } + if !s.regoOnly { + for _, rule := range trules.GetRegistered(s.frameworks...) 
{ + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + if rule.GetRule().RegoPackage != "" { + continue + } + evalResult := rule.Evaluate(state) + if len(evalResult) > 0 { + s.debug.Log("Found %d results for %s", len(evalResult), rule.GetRule().AVDID) + for _, scanResult := range evalResult { + + ref := scanResult.Metadata().Reference() + + if ref == "" && scanResult.Metadata().Parent() != nil { + ref = scanResult.Metadata().Parent().Reference() + } + + description := getDescription(scanResult, ref) + scanResult.OverrideDescription(description) + results = append(results, scanResult) + } + } + } + } + regoResults, err := regoScanner.ScanInput(ctx, rego.Input{ + Path: cfCtx.Metadata().Range().GetFilename(), + FS: fs, + Contents: state.ToRego(), + }) + if err != nil { + return nil, fmt.Errorf("rego scan error: %w", err) + } + return append(results, regoResults...), nil +} + +func getDescription(scanResult scan.Result, ref string) string { + switch scanResult.Status() { + case scan.StatusPassed: + return fmt.Sprintf("Resource '%s' passed check: %s", ref, scanResult.Rule().Summary) + case scan.StatusIgnored: + return fmt.Sprintf("Resource '%s' had check ignored: %s", ref, scanResult.Rule().Summary) + default: + return scanResult.Description() + } +} diff --git a/pkg/scanners/cloudformation/scanner_test.go b/pkg/scanners/cloudformation/scanner_test.go new file mode 100644 index 000000000000..e88250a22db4 --- /dev/null +++ b/pkg/scanners/cloudformation/scanner_test.go @@ -0,0 +1,103 @@ +package cloudformation + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_BasicScan(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "/code/main.yaml": `--- +Resources: + S3Bucket: + Type: 'AWS::S3::Bucket' + Properties: + BucketName: public-bucket + +`, + "/trules/rule.rego": `package builtin.dockerfile.DS006 + +__rego_metadata__ := { + "id": "DS006", + "avd_id": "AVD-DS-0006", + "title": "COPY '--from' referring to the current image", + "short_code": "no-self-referencing-copy-from", + "version": "v1.0.0", + "severity": "CRITICAL", + "type": "Dockerfile Security Check", + "description": "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself.", + "recommended_actions": "Change the '--from' so that it will not refer to itself", + "url": "https://docs.docker.com/develop/develop-images/multistage-build/", +} + +__rego_input__ := { + "combine": false, + "selector": [{"type": "defsec", "subtypes": [{"service": "s3", "provider": "aws"}]}], +} + +deny[res] { + res := { + "msg": "oh no", + "filepath": "code/main.yaml", + "startline": 6, + "endline": 6, + } +} + +`, + }) + + scanner := New(options.ScannerWithPolicyDirs("trules"), options.ScannerWithRegoOnly(true)) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + require.NoError(t, err) + + require.Len(t, results.GetFailed(), 1) + + assert.Equal(t, scan.Rule{ + AVDID: "AVD-DS-0006", + Aliases: []string{"DS006"}, + ShortCode: "no-self-referencing-copy-from", + Summary: "COPY '--from' referring to the current image", + Explanation: "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself.", + Impact: "", + 
Resolution: "Change the '--from' so that it will not refer to itself", + Provider: "cloud", + Service: "general", + Links: []string{"https://docs.docker.com/develop/develop-images/multistage-build/"}, + Severity: "CRITICAL", + Terraform: &scan.EngineMetadata{}, + CloudFormation: &scan.EngineMetadata{}, + CustomChecks: scan.CustomChecks{ + Terraform: (*scan.TerraformCustomCheck)(nil), + }, + RegoPackage: "data.builtin.dockerfile.DS006", + Frameworks: map[framework.Framework][]string{}, + }, results.GetFailed()[0].Rule()) + + failure := results.GetFailed()[0] + actualCode, err := failure.GetCode() + require.NoError(t, err) + for i := range actualCode.Lines { + actualCode.Lines[i].Highlighted = "" + } + assert.Equal(t, []scan.Line{ + { + Number: 6, + Content: " BucketName: public-bucket", + IsCause: true, + FirstCause: true, + LastCause: true, + Annotation: "", + }, + }, actualCode.Lines) +} diff --git a/pkg/scanners/cloudformation/test/cf_scanning_test.go b/pkg/scanners/cloudformation/test/cf_scanning_test.go new file mode 100644 index 000000000000..00c8a5a04bcf --- /dev/null +++ b/pkg/scanners/cloudformation/test/cf_scanning_test.go @@ -0,0 +1,48 @@ +package test + +import ( + "bytes" + "context" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/cloudformation" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +func Test_basic_cloudformation_scanning(t *testing.T) { + cfScanner := cloudformation.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + + results, err := cfScanner.ScanFS(context.TODO(), os.DirFS("./examples/bucket"), ".") + require.NoError(t, err) + + assert.Greater(t, len(results.GetFailed()), 0) +} + +func Test_cloudformation_scanning_has_expected_errors(t *testing.T) { + cfScanner := cloudformation.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + + results, err := cfScanner.ScanFS(context.TODO(), os.DirFS("./examples/bucket"), ".") + require.NoError(t, err) + + assert.Greater(t, len(results.GetFailed()), 0) +} + +func Test_cloudformation_scanning_with_debug(t *testing.T) { + + debugWriter := bytes.NewBufferString("") + + scannerOptions := []options.ScannerOption{ + options.ScannerWithDebug(debugWriter), + } + cfScanner := cloudformation.New(scannerOptions...) 
+ + _, err := cfScanner.ScanFS(context.TODO(), os.DirFS("./examples/bucket"), ".") + require.NoError(t, err) + + // check debug is as expected + assert.Greater(t, len(debugWriter.String()), 0) +} diff --git a/pkg/scanners/cloudformation/test/examples/bucket/bucket.yaml b/pkg/scanners/cloudformation/test/examples/bucket/bucket.yaml new file mode 100644 index 000000000000..21f1c25042b0 --- /dev/null +++ b/pkg/scanners/cloudformation/test/examples/bucket/bucket.yaml @@ -0,0 +1,24 @@ +--- +AWSTemplateFormatVersion: "2010-09-09" +Description: An example Stack for a bucket +Parameters: + BucketName: + Type: String + Default: naughty-bucket + EncryptBucket: + Type: Boolean + Default: false +Resources: + S3Bucket: + Type: 'AWS::S3::Bucket' + Properties: + BucketName: + Ref: BucketName + PublicAccessBlockConfiguration: + BlockPublicAcls: false + BlockPublicPolicy: false + IgnorePublicAcls: true + RestrictPublicBuckets: false + BucketEncryption: + ServerSideEncryptionConfiguration: + - BucketKeyEnabled: !Ref EncryptBucket diff --git a/pkg/scanners/cloudformation/test/examples/ignores/bucket_with_ignores.yaml b/pkg/scanners/cloudformation/test/examples/ignores/bucket_with_ignores.yaml new file mode 100644 index 000000000000..ec5e8a8d7661 --- /dev/null +++ b/pkg/scanners/cloudformation/test/examples/ignores/bucket_with_ignores.yaml @@ -0,0 +1,24 @@ +--- +AWSTemplateFormatVersion: "2010-09-09" +Description: An example Stack for a bucket +Parameters: + BucketName: + Type: String + Default: naughty-bucket + EncryptBucket: + Type: Boolean + Default: false +Resources: + S3Bucket: + Type: 'AWS::S3::Bucket' + Properties: + BucketName: + Ref: BucketName + PublicAccessBlockConfiguration: + BlockPublicAcls: false + BlockPublicPolicy: false # cfsec:ignore:AVD-AWS-0087 + IgnorePublicAcls: true + RestrictPublicBuckets: false + BucketEncryption: + ServerSideEncryptionConfiguration: + - BucketKeyEnabled: !Ref EncryptBucket diff --git a/pkg/scanners/cloudformation/test/examples/roles/roles.yml b/pkg/scanners/cloudformation/test/examples/roles/roles.yml new file mode 100644 index 000000000000..5b927457762b --- /dev/null +++ b/pkg/scanners/cloudformation/test/examples/roles/roles.yml @@ -0,0 +1,51 @@ +Resources: + LambdaAPIRole: + Type: "AWS::IAM::Role" + Properties: + RoleName: "${self:service}-${self:provider.stage}-LambdaAPI" + Policies: + - PolicyName: "${self:service}-${self:provider.stage}-lambda" + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "logs:CreateLogStream" + - "logs:CreateLogGroup" + - "logs:PutLogEvents" + Resource: !Sub "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/${self:service}-${self:provider.stage}*:*" + - !If + - EnableCrossAccountSnsPublish + - PolicyName: "${self:service}-${self:provider.stage}-asngen-sns-publish" + PolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Action: + - "SNS:Publish" + Resource: + - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-PurchaseOrder.fifo" + - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Vendor.fifo" + - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Customer.fifo" + - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Manufacturer.fifo" + - !Sub 
"arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-ManufacturerItem.fifo" + - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-Item.fifo" + - !Sub "arn:aws:sns:${self:provider.region}:${self:provider.itopia_account_id}:${self:provider.stage}-*-VendorItem.fifo" + - !Ref "AWS::NoValue" + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Service: + - "lambda.amazonaws.com" + Action: + - "sts:AssumeRole" + + + + +Conditions: + EnableCrossAccountSnsPublish: !Equals + - ${env:ALLOW_SNS_PUBLISH, true} + - true diff --git a/pkg/scanners/dockerfile/parser/parser.go b/pkg/scanners/dockerfile/parser/parser.go new file mode 100644 index 000000000000..962d944466ce --- /dev/null +++ b/pkg/scanners/dockerfile/parser/parser.go @@ -0,0 +1,151 @@ +package parser + +import ( + "context" + "fmt" + "io" + "io/fs" + "path/filepath" + "strings" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/providers/dockerfile" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + + "github.com/aquasecurity/trivy/pkg/detection" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +var _ options.ConfigurableParser = (*Parser)(nil) + +type Parser struct { + debug debug.Logger + skipRequired bool +} + +func (p *Parser) SetDebugWriter(writer io.Writer) { + p.debug = debug.New(writer, "dockerfile", "parser") +} + +func (p *Parser) SetSkipRequiredCheck(b bool) { + p.skipRequired = b +} + +// New creates a new Dockerfile parser +func New(options ...options.ParserOption) *Parser { + p := &Parser{} + for _, option := range options { + option(p) + } + return p +} + +func (p *Parser) ParseFS(ctx context.Context, target fs.FS, path string) (map[string]*dockerfile.Dockerfile, error) { + + files := make(map[string]*dockerfile.Dockerfile) + if err := fs.WalkDir(target, filepath.ToSlash(path), func(path string, entry fs.DirEntry, err error) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err != nil { + return err + } + if entry.IsDir() { + return nil + } + if !p.Required(path) { + return nil + } + df, err := p.ParseFile(ctx, target, path) + if err != nil { + // TODO add debug for parse errors + return nil + } + files[path] = df + return nil + }); err != nil { + return nil, err + } + return files, nil +} + +// ParseFile parses Dockerfile content from the provided filesystem path. 
+func (p *Parser) ParseFile(_ context.Context, fs fs.FS, path string) (*dockerfile.Dockerfile, error) { + f, err := fs.Open(filepath.ToSlash(path)) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + return p.parse(path, f) +} + +func (p *Parser) Required(path string) bool { + if p.skipRequired { + return true + } + return detection.IsType(path, nil, detection.FileTypeDockerfile) +} + +func (p *Parser) parse(path string, r io.Reader) (*dockerfile.Dockerfile, error) { + parsed, err := parser.Parse(r) + if err != nil { + return nil, fmt.Errorf("dockerfile parse error: %w", err) + } + + var parsedFile dockerfile.Dockerfile + var stage dockerfile.Stage + var stageIndex int + fromValue := "args" + for _, child := range parsed.AST.Children { + child.Value = strings.ToLower(child.Value) + + instr, err := instructions.ParseInstruction(child) + if err != nil { + return nil, fmt.Errorf("process dockerfile instructions: %w", err) + } + + if _, ok := instr.(*instructions.Stage); ok { + if len(stage.Commands) > 0 { + parsedFile.Stages = append(parsedFile.Stages, stage) + } + if fromValue != "args" { + stageIndex++ + } + fromValue = strings.TrimSpace(strings.TrimPrefix(child.Original, "FROM ")) + stage = dockerfile.Stage{ + Name: fromValue, + } + } + + cmd := dockerfile.Command{ + Cmd: child.Value, + Original: child.Original, + Flags: child.Flags, + Stage: stageIndex, + Path: path, + StartLine: child.StartLine, + EndLine: child.EndLine, + } + + if child.Next != nil && len(child.Next.Children) > 0 { + cmd.SubCmd = child.Next.Children[0].Value + child = child.Next.Children[0] + } + + cmd.JSON = child.Attributes["json"] + for n := child.Next; n != nil; n = n.Next { + cmd.Value = append(cmd.Value, n.Value) + } + + stage.Commands = append(stage.Commands, cmd) + + } + if len(stage.Commands) > 0 { + parsedFile.Stages = append(parsedFile.Stages, stage) + } + + return &parsedFile, nil +} diff --git a/pkg/scanners/dockerfile/parser/parser_test.go b/pkg/scanners/dockerfile/parser/parser_test.go new file mode 100644 index 000000000000..04a45ea4695d --- /dev/null +++ b/pkg/scanners/dockerfile/parser/parser_test.go @@ -0,0 +1,56 @@ +package parser + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Parser(t *testing.T) { + input := `FROM ubuntu:18.04 +COPY . /app +RUN make /app +CMD python /app/app.py +` + + df, err := New().parse("Dockerfile", strings.NewReader(input)) + require.NoError(t, err) + + assert.Equal(t, 1, len(df.Stages)) + + require.Len(t, df.Stages, 1) + + assert.Equal(t, "ubuntu:18.04", df.Stages[0].Name) + commands := df.Stages[0].Commands + assert.Equal(t, 4, len(commands)) + + // FROM ubuntu:18.04 + assert.Equal(t, "from", commands[0].Cmd) + assert.Equal(t, "ubuntu:18.04", commands[0].Value[0]) + assert.Equal(t, "Dockerfile", commands[0].Path) + assert.Equal(t, 1, commands[0].StartLine) + assert.Equal(t, 1, commands[0].EndLine) + + // COPY . /app + assert.Equal(t, "copy", commands[1].Cmd) + assert.Equal(t, ". 
/app", strings.Join(commands[1].Value, " ")) + assert.Equal(t, "Dockerfile", commands[1].Path) + assert.Equal(t, 2, commands[1].StartLine) + assert.Equal(t, 2, commands[1].EndLine) + + // RUN make /app + assert.Equal(t, "run", commands[2].Cmd) + assert.Equal(t, "make /app", commands[2].Value[0]) + assert.Equal(t, "Dockerfile", commands[2].Path) + assert.Equal(t, 3, commands[2].StartLine) + assert.Equal(t, 3, commands[2].EndLine) + + // CMD python /app/app.py + assert.Equal(t, "cmd", commands[3].Cmd) + assert.Equal(t, "python /app/app.py", commands[3].Value[0]) + assert.Equal(t, "Dockerfile", commands[3].Path) + assert.Equal(t, 4, commands[3].StartLine) + assert.Equal(t, 4, commands[3].EndLine) +} diff --git a/pkg/scanners/dockerfile/scanner.go b/pkg/scanners/dockerfile/scanner.go new file mode 100644 index 000000000000..ee351be8a903 --- /dev/null +++ b/pkg/scanners/dockerfile/scanner.go @@ -0,0 +1,182 @@ +package dockerfile + +import ( + "context" + "io" + "io/fs" + "sync" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scanners" + "github.com/aquasecurity/trivy/pkg/scanners/dockerfile/parser" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +var _ scanners.FSScanner = (*Scanner)(nil) +var _ options.ConfigurableScanner = (*Scanner)(nil) + +type Scanner struct { + debug debug.Logger + policyDirs []string + policyReaders []io.Reader + parser *parser.Parser + regoScanner *rego.Scanner + skipRequired bool + options []options.ScannerOption + frameworks []framework.Framework + spec string + sync.Mutex + loadEmbeddedLibraries bool + loadEmbeddedPolicies bool +} + +func (s *Scanner) SetSpec(spec string) { + s.spec = spec +} + +func (s *Scanner) SetRegoOnly(bool) { +} + +func (s *Scanner) SetFrameworks(frameworks []framework.Framework) { + s.frameworks = frameworks +} + +func (s *Scanner) SetUseEmbeddedPolicies(b bool) { + s.loadEmbeddedPolicies = b +} + +func (s *Scanner) SetUseEmbeddedLibraries(b bool) { + s.loadEmbeddedLibraries = b +} + +func (s *Scanner) Name() string { + return "Dockerfile" +} + +func (s *Scanner) SetPolicyReaders(readers []io.Reader) { + s.policyReaders = readers +} + +func (s *Scanner) SetSkipRequiredCheck(skip bool) { + s.skipRequired = skip +} + +func (s *Scanner) SetDebugWriter(writer io.Writer) { + s.debug = debug.New(writer, "dockerfile", "scanner") +} + +func (s *Scanner) SetTraceWriter(_ io.Writer) { + // handled by rego later - nothing to do for now... +} + +func (s *Scanner) SetPerResultTracingEnabled(_ bool) { + // handled by rego later - nothing to do for now... +} + +func (s *Scanner) SetPolicyDirs(dirs ...string) { + s.policyDirs = dirs +} + +func (s *Scanner) SetDataDirs(_ ...string) { + // handled by rego later - nothing to do for now... +} + +func (s *Scanner) SetPolicyNamespaces(_ ...string) { + // handled by rego later - nothing to do for now... 
+} + +func (s *Scanner) SetPolicyFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} + +func (s *Scanner) SetDataFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} + +func (s *Scanner) SetRegoErrorLimit(_ int) { + // handled by rego when option is passed on +} + +func NewScanner(opts ...options.ScannerOption) *Scanner { + s := &Scanner{ + options: opts, + } + for _, opt := range opts { + opt(s) + } + s.parser = parser.New(options.ParserWithSkipRequiredCheck(s.skipRequired)) + return s +} + +func (s *Scanner) ScanFS(ctx context.Context, fs fs.FS, path string) (scan.Results, error) { + + files, err := s.parser.ParseFS(ctx, fs, path) + if err != nil { + return nil, err + } + + if len(files) == 0 { + return nil, nil + } + + var inputs []rego.Input + for path, dfile := range files { + inputs = append(inputs, rego.Input{ + Path: path, + FS: fs, + Contents: dfile.ToRego(), + }) + } + + results, err := s.scanRego(ctx, fs, inputs...) + if err != nil { + return nil, err + } + return results, nil +} + +func (s *Scanner) ScanFile(ctx context.Context, fs fs.FS, path string) (scan.Results, error) { + dockerfile, err := s.parser.ParseFile(ctx, fs, path) + if err != nil { + return nil, err + } + s.debug.Log("Scanning %s...", path) + return s.scanRego(ctx, fs, rego.Input{ + Path: path, + Contents: dockerfile.ToRego(), + }) +} + +func (s *Scanner) initRegoScanner(srcFS fs.FS) (*rego.Scanner, error) { + s.Lock() + defer s.Unlock() + if s.regoScanner != nil { + return s.regoScanner, nil + } + + regoScanner := rego.NewScanner(types.SourceDockerfile, s.options...) + regoScanner.SetParentDebugLogger(s.debug) + if err := regoScanner.LoadPolicies(s.loadEmbeddedLibraries, s.loadEmbeddedPolicies, srcFS, s.policyDirs, s.policyReaders); err != nil { + return nil, err + } + s.regoScanner = regoScanner + return regoScanner, nil +} + +func (s *Scanner) scanRego(ctx context.Context, srcFS fs.FS, inputs ...rego.Input) (scan.Results, error) { + regoScanner, err := s.initRegoScanner(srcFS) + if err != nil { + return nil, err + } + results, err := regoScanner.ScanInput(ctx, inputs...) + if err != nil { + return nil, err + } + results.SetSourceAndFilesystem("", srcFS, false) + return results, nil +} diff --git a/pkg/scanners/dockerfile/scanner_test.go b/pkg/scanners/dockerfile/scanner_test.go new file mode 100644 index 000000000000..1157ba81bc52 --- /dev/null +++ b/pkg/scanners/dockerfile/scanner_test.go @@ -0,0 +1,638 @@ +package dockerfile + +import ( + "bytes" + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/rego/schemas" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/test/testutil" +) + +const DS006PolicyWithDockerfileSchema = `# METADATA +# title: "COPY '--from' referring to the current image" +# description: "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself." 
+# scope: package +# schemas: +# - input: schema["dockerfile"] +# related_resources: +# - https://docs.docker.com/develop/develop-images/multistage-build/ +# custom: +# id: DS006 +# avd_id: AVD-DS-0006 +# severity: CRITICAL +# short_code: no-self-referencing-copy-from +# recommended_action: "Change the '--from' so that it will not refer to itself" +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS006 + +import data.lib.docker + +get_alias_from_copy[output] { + copies := docker.stage_copies[stage] + + copy := copies[_] + flag := copy.Flags[_] + contains(flag, "--from=") + parts := split(flag, "=") + + is_alias_current_from_alias(stage.Name, parts[1]) + args := parts[1] + output := { + "args": args, + "cmd": copy, + } +} + +is_alias_current_from_alias(current_name, current_alias) = allow { + current_name_lower := lower(current_name) + current_alias_lower := lower(current_alias) + + #expecting stage name as "myimage:tag as dep" + [_, alias] := regex.split(` + "`\\s+as\\s+`" + `, current_name_lower) + + alias == current_alias + + allow = true +} + +deny[res] { + output := get_alias_from_copy[_] + msg := sprintf("'COPY --from' should not mention current alias '%s' since it is impossible to copy from itself", [output.args]) + res := result.new(msg, output.cmd) +} +` + +const DS006PolicyWithMyFancyDockerfileSchema = `# METADATA +# title: "COPY '--from' referring to the current image" +# description: "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself." +# scope: package +# schemas: +# - input: schema["myfancydockerfile"] +# related_resources: +# - https://docs.docker.com/develop/develop-images/multistage-build/ +# custom: +# id: DS006 +# avd_id: AVD-DS-0006 +# severity: CRITICAL +# short_code: no-self-referencing-copy-from +# recommended_action: "Change the '--from' so that it will not refer to itself" +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS006 + +import data.lib.docker + +get_alias_from_copy[output] { +copies := docker.stage_copies[stage] + +copy := copies[_] +flag := copy.Flags[_] +contains(flag, "--from=") +parts := split(flag, "=") + +is_alias_current_from_alias(stage.Name, parts[1]) +args := parts[1] +output := { +"args": args, +"cmd": copy, +} +} + +is_alias_current_from_alias(current_name, current_alias) = allow { +current_name_lower := lower(current_name) +current_alias_lower := lower(current_alias) + +#expecting stage name as "myimage:tag as dep" +[_, alias] := regex.split(` + "`\\s+as\\s+`" + `, current_name_lower) + +alias == current_alias + +allow = true +} + +deny[res] { +output := get_alias_from_copy[_] +msg := sprintf("'COPY --from' should not mention current alias '%s' since it is impossible to copy from itself", [output.args]) +res := result.new(msg, output.cmd) +} +` + +const DS006PolicyWithOldSchemaSelector = `# METADATA +# title: "COPY '--from' referring to the current image" +# description: "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself." 
+# scope: package +# schemas: +# - input: schema["input"] +# related_resources: +# - https://docs.docker.com/develop/develop-images/multistage-build/ +# custom: +# id: DS006 +# avd_id: AVD-DS-0006 +# severity: CRITICAL +# short_code: no-self-referencing-copy-from +# recommended_action: "Change the '--from' so that it will not refer to itself" +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS006 + +import data.lib.docker + +get_alias_from_copy[output] { + copies := docker.stage_copies[stage] + + copy := copies[_] + flag := copy.Flags[_] + contains(flag, "--from=") + parts := split(flag, "=") + + is_alias_current_from_alias(stage.Name, parts[1]) + args := parts[1] + output := { + "args": args, + "cmd": copy, + } +} + +is_alias_current_from_alias(current_name, current_alias) = allow { + current_name_lower := lower(current_name) + current_alias_lower := lower(current_alias) + + #expecting stage name as "myimage:tag as dep" + [_, alias] := regex.split(` + "`\\s+as\\s+`" + `, current_name_lower) + + alias == current_alias + + allow = true +} + +deny[res] { + output := get_alias_from_copy[_] + msg := sprintf("'COPY --from' should not mention current alias '%s' since it is impossible to copy from itself", [output.args]) + res := result.new(msg, output.cmd) +} +` +const DS006LegacyWithOldStyleMetadata = `package builtin.dockerfile.DS006 + +__rego_metadata__ := { + "id": "DS006", + "avd_id": "AVD-DS-0006", + "title": "COPY '--from' referring to the current image", + "short_code": "no-self-referencing-copy-from", + "version": "v1.0.0", + "severity": "CRITICAL", + "type": "Dockerfile Security Check", + "description": "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself.", + "recommended_actions": "Change the '--from' so that it will not refer to itself", + "url": "https://docs.docker.com/develop/develop-images/multistage-build/", +} + +__rego_input__ := { + "combine": false, + "selector": [{"type": "dockerfile"}], +} + +deny[res] { + res := { + "msg": "oh no", + "filepath": "code/Dockerfile", + "startline": 1, + "endline": 1, + } +}` + +func Test_BasicScanLegacyRegoMetadata(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "/code/Dockerfile": `FROM ubuntu +USER root +`, + "/trules/rule.rego": DS006LegacyWithOldStyleMetadata, + }) + + scanner := NewScanner(options.ScannerWithPolicyDirs("trules")) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + require.NoError(t, err) + + require.Len(t, results.GetFailed(), 1) + + failure := results.GetFailed()[0] + metadata := failure.Metadata() + assert.Equal(t, 1, metadata.Range().GetStartLine()) + assert.Equal(t, 1, metadata.Range().GetEndLine()) + assert.Equal(t, "code/Dockerfile", metadata.Range().GetFilename()) + + assert.Equal( + t, + scan.Rule{ + AVDID: "AVD-DS-0006", + Aliases: []string{"DS006"}, + ShortCode: "no-self-referencing-copy-from", + Summary: "COPY '--from' referring to the current image", + Explanation: "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself.", + Impact: "", + Resolution: "Change the '--from' so that it will not refer to itself", + Provider: "dockerfile", + Service: "general", + Links: []string{"https://docs.docker.com/develop/develop-images/multistage-build/"}, + Severity: "CRITICAL", + Terraform: &scan.EngineMetadata{}, + CloudFormation: &scan.EngineMetadata{}, + CustomChecks: scan.CustomChecks{ + Terraform: (*scan.TerraformCustomCheck)(nil)}, + RegoPackage: 
"data.builtin.dockerfile.DS006", + Frameworks: map[framework.Framework][]string{}, + }, + results.GetFailed()[0].Rule(), + ) + + actualCode, err := results.GetFailed()[0].GetCode() + require.NoError(t, err) + for i := range actualCode.Lines { + actualCode.Lines[i].Highlighted = "" + } + assert.Equal(t, []scan.Line{ + { + Number: 1, + Content: "FROM ubuntu", + IsCause: true, + FirstCause: true, + LastCause: true, + Annotation: "", + }, + }, actualCode.Lines) +} + +func Test_BasicScanNewRegoMetadata(t *testing.T) { + var testCases = []struct { + name string + inputRegoPolicy string + expectedError string + expectedInputTraceLogs string + expectedOutputTraceLogs string + }{ + { + name: "old schema selector schema.input", + inputRegoPolicy: DS006PolicyWithOldSchemaSelector, + expectedInputTraceLogs: `REGO INPUT: +{ + "path": "code/Dockerfile", + "contents": { + "Stages": [ + { + "Commands": [ + { + "Cmd": "from", + "EndLine": 1, + "Flags": [], + "JSON": false, + "Original": "FROM golang:1.7.3 as dep", + "Path": "code/Dockerfile", + "Stage": 0, + "StartLine": 1, + "SubCmd": "", + "Value": [ + "golang:1.7.3", + "as", + "dep" + ] + }, + { + "Cmd": "copy", + "EndLine": 2, + "Flags": [ + "--from=dep" + ], + "JSON": false, + "Original": "COPY --from=dep /binary /", + "Path": "code/Dockerfile", + "Stage": 0, + "StartLine": 2, + "SubCmd": "", + "Value": [ + "/binary", + "/" + ] + } + ], + "Name": "golang:1.7.3 as dep" + } + ] + } +} +END REGO INPUT +`, + expectedOutputTraceLogs: `REGO RESULTSET: +[ + { + "expressions": [ + { + "value": [ + { + "endline": 2, + "explicit": false, + "filepath": "code/Dockerfile", + "fskey": "", + "managed": true, + "msg": "'COPY --from' should not mention current alias 'dep' since it is impossible to copy from itself", + "parent": null, + "resource": "", + "sourceprefix": "", + "startline": 2 + } + ], + "text": "data.builtin.dockerfile.DS006.deny", + "location": { + "row": 1, + "col": 1 + } + } + ] + } +] +END REGO RESULTSET + +`, + }, + { + name: "new schema selector schema.dockerfile", + inputRegoPolicy: DS006PolicyWithDockerfileSchema, + expectedInputTraceLogs: `REGO INPUT: +{ + "path": "code/Dockerfile", + "contents": { + "Stages": [ + { + "Commands": [ + { + "Cmd": "from", + "EndLine": 1, + "Flags": [], + "JSON": false, + "Original": "FROM golang:1.7.3 as dep", + "Path": "code/Dockerfile", + "Stage": 0, + "StartLine": 1, + "SubCmd": "", + "Value": [ + "golang:1.7.3", + "as", + "dep" + ] + }, + { + "Cmd": "copy", + "EndLine": 2, + "Flags": [ + "--from=dep" + ], + "JSON": false, + "Original": "COPY --from=dep /binary /", + "Path": "code/Dockerfile", + "Stage": 0, + "StartLine": 2, + "SubCmd": "", + "Value": [ + "/binary", + "/" + ] + } + ], + "Name": "golang:1.7.3 as dep" + } + ] + } +} +END REGO INPUT +`, + expectedOutputTraceLogs: `REGO RESULTSET: +[ + { + "expressions": [ + { + "value": [ + { + "endline": 2, + "explicit": false, + "filepath": "code/Dockerfile", + "fskey": "", + "managed": true, + "msg": "'COPY --from' should not mention current alias 'dep' since it is impossible to copy from itself", + "parent": null, + "resource": "", + "sourceprefix": "", + "startline": 2 + } + ], + "text": "data.builtin.dockerfile.DS006.deny", + "location": { + "row": 1, + "col": 1 + } + } + ] + } +] +END REGO RESULTSET + +`, + }, + { + name: "new schema selector with custom schema.myfancydockerfile", + inputRegoPolicy: DS006PolicyWithMyFancyDockerfileSchema, + expectedInputTraceLogs: `REGO INPUT: +{ + "path": "code/Dockerfile", + "contents": { + "Stages": [ + { + "Commands": [ 
+ { + "Cmd": "from", + "EndLine": 1, + "Flags": [], + "JSON": false, + "Original": "FROM golang:1.7.3 as dep", + "Path": "code/Dockerfile", + "Stage": 0, + "StartLine": 1, + "SubCmd": "", + "Value": [ + "golang:1.7.3", + "as", + "dep" + ] + }, + { + "Cmd": "copy", + "EndLine": 2, + "Flags": [ + "--from=dep" + ], + "JSON": false, + "Original": "COPY --from=dep /binary /", + "Path": "code/Dockerfile", + "Stage": 0, + "StartLine": 2, + "SubCmd": "", + "Value": [ + "/binary", + "/" + ] + } + ], + "Name": "golang:1.7.3 as dep" + } + ] + } +} +END REGO INPUT +`, + expectedOutputTraceLogs: `REGO RESULTSET: +[ + { + "expressions": [ + { + "value": [ + { + "endline": 2, + "explicit": false, + "filepath": "code/Dockerfile", + "fskey": "", + "managed": true, + "msg": "'COPY --from' should not mention current alias 'dep' since it is impossible to copy from itself", + "parent": null, + "resource": "", + "sourceprefix": "", + "startline": 2 + } + ], + "text": "data.builtin.dockerfile.DS006.deny", + "location": { + "row": 1, + "col": 1 + } + } + ] + } +] +END REGO RESULTSET + +`, + }, + { + name: "new schema selector but invalid", + inputRegoPolicy: `# METADATA +# title: "COPY '--from' referring to the current image" +# description: "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself." +# scope: package +# schemas: +# - input: schema["spooky-schema"] +# custom: +# input: +# selector: +# - type: dockerfile +package builtin.dockerfile.DS006 +deny[res]{ +res := true +}`, + expectedError: `1 error occurred: trules/rule.rego:12: rego_type_error: undefined schema: schema["spooky-schema"]`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + regoMap := make(map[string]string) + libs, err := rego.LoadEmbeddedLibraries() + require.NoError(t, err) + for name, library := range libs { + regoMap["/trules/"+name] = library.String() + } + regoMap["/code/Dockerfile"] = `FROM golang:1.7.3 as dep +COPY --from=dep /binary /` + regoMap["/trules/rule.rego"] = tc.inputRegoPolicy + regoMap["/trules/schemas/myfancydockerfile.json"] = string(schemas.Dockerfile) // just use the same for testing + fs := testutil.CreateFS(t, regoMap) + + var traceBuf bytes.Buffer + var debugBuf bytes.Buffer + + scanner := NewScanner( + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithTrace(&traceBuf), + options.ScannerWithDebug(&debugBuf), + options.ScannerWithRegoErrorLimits(0), + ) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + if tc.expectedError != "" && err != nil { + require.Equal(t, tc.expectedError, err.Error(), tc.name) + } else { + require.NoError(t, err) + require.Len(t, results.GetFailed(), 1) + + failure := results.GetFailed()[0] + metadata := failure.Metadata() + assert.Equal(t, 2, metadata.Range().GetStartLine()) + assert.Equal(t, 2, metadata.Range().GetEndLine()) + assert.Equal(t, "code/Dockerfile", metadata.Range().GetFilename()) + + assert.Equal( + t, + scan.Rule{ + AVDID: "AVD-DS-0006", + Aliases: []string{"DS006"}, + ShortCode: "no-self-referencing-copy-from", + Summary: "COPY '--from' referring to the current image", + Explanation: "COPY '--from' should not mention the current FROM alias, since it is impossible to copy from itself.", + Impact: "", + Resolution: "Change the '--from' so that it will not refer to itself", + Provider: "dockerfile", + Service: "general", + Links: []string{"https://docs.docker.com/develop/develop-images/multistage-build/"}, + Severity: "CRITICAL", + Terraform: &scan.EngineMetadata{}, + 
CloudFormation: &scan.EngineMetadata{}, + CustomChecks: scan.CustomChecks{ + Terraform: (*scan.TerraformCustomCheck)(nil)}, + RegoPackage: "data.builtin.dockerfile.DS006", + Frameworks: map[framework.Framework][]string{}, + }, + results.GetFailed()[0].Rule(), + ) + + actualCode, err := results.GetFailed()[0].GetCode() + require.NoError(t, err) + for i := range actualCode.Lines { + actualCode.Lines[i].Highlighted = "" + } + assert.Equal(t, []scan.Line{ + { + Number: 2, + Content: "COPY --from=dep /binary /", + IsCause: true, + FirstCause: true, + LastCause: true, + Annotation: "", + }, + }, actualCode.Lines) + + // assert logs + assert.Contains(t, traceBuf.String(), tc.expectedInputTraceLogs, traceBuf.String()) + assert.Contains(t, traceBuf.String(), tc.expectedOutputTraceLogs, traceBuf.String()) + } + }) + } + +} diff --git a/pkg/scanners/helm/options.go b/pkg/scanners/helm/options.go new file mode 100644 index 000000000000..f80d42773026 --- /dev/null +++ b/pkg/scanners/helm/options.go @@ -0,0 +1,51 @@ +package helm + +import ( + "github.com/aquasecurity/trivy/pkg/scanners/helm/parser" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +type ConfigurableHelmScanner interface { + options.ConfigurableScanner + AddParserOptions(options ...options.ParserOption) +} + +func ScannerWithValuesFile(paths ...string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if helmScanner, ok := s.(ConfigurableHelmScanner); ok { + helmScanner.AddParserOptions(parser.OptionWithValuesFile(paths...)) + } + } +} + +func ScannerWithValues(values ...string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if helmScanner, ok := s.(ConfigurableHelmScanner); ok { + helmScanner.AddParserOptions(parser.OptionWithValues(values...)) + } + } +} + +func ScannerWithFileValues(values ...string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if helmScanner, ok := s.(ConfigurableHelmScanner); ok { + helmScanner.AddParserOptions(parser.OptionWithFileValues(values...)) + } + } +} + +func ScannerWithStringValues(values ...string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if helmScanner, ok := s.(ConfigurableHelmScanner); ok { + helmScanner.AddParserOptions(parser.OptionWithStringValues(values...)) + } + } +} + +func ScannerWithAPIVersions(values ...string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if helmScanner, ok := s.(ConfigurableHelmScanner); ok { + helmScanner.AddParserOptions(parser.OptionWithAPIVersions(values...)) + } + } +} diff --git a/pkg/scanners/helm/parser/option.go b/pkg/scanners/helm/parser/option.go new file mode 100644 index 000000000000..e7791e2449e5 --- /dev/null +++ b/pkg/scanners/helm/parser/option.go @@ -0,0 +1,52 @@ +package parser + +import "github.com/aquasecurity/trivy/pkg/scanners/options" + +type ConfigurableHelmParser interface { + options.ConfigurableParser + SetValuesFile(...string) + SetValues(...string) + SetFileValues(...string) + SetStringValues(...string) + SetAPIVersions(...string) +} + +func OptionWithValuesFile(paths ...string) options.ParserOption { + return func(p options.ConfigurableParser) { + if helmParser, ok := p.(ConfigurableHelmParser); ok { + helmParser.SetValuesFile(paths...) + } + } +} + +func OptionWithValues(values ...string) options.ParserOption { + return func(p options.ConfigurableParser) { + if helmParser, ok := p.(ConfigurableHelmParser); ok { + helmParser.SetValues(values...) 
+		}
+	}
+}
+
+func OptionWithFileValues(values ...string) options.ParserOption {
+	return func(p options.ConfigurableParser) {
+		if helmParser, ok := p.(ConfigurableHelmParser); ok {
+			helmParser.SetFileValues(values...)
+		}
+	}
+}
+
+func OptionWithStringValues(values ...string) options.ParserOption {
+	return func(p options.ConfigurableParser) {
+		if helmParser, ok := p.(ConfigurableHelmParser); ok {
+			helmParser.SetStringValues(values...)
+		}
+	}
+}
+
+func OptionWithAPIVersions(values ...string) options.ParserOption {
+	return func(p options.ConfigurableParser) {
+		if helmParser, ok := p.(ConfigurableHelmParser); ok {
+			helmParser.SetAPIVersions(values...)
+		}
+	}
+}
diff --git a/pkg/scanners/helm/parser/parser.go b/pkg/scanners/helm/parser/parser.go
new file mode 100644
index 000000000000..a7526d77c494
--- /dev/null
+++ b/pkg/scanners/helm/parser/parser.go
@@ -0,0 +1,322 @@
+package parser
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strings"
+
+	"gopkg.in/yaml.v3"
+
+	"github.com/aquasecurity/trivy/pkg/debug"
+	"github.com/google/uuid"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/release"
+	"helm.sh/helm/v3/pkg/releaseutil"
+
+	"github.com/aquasecurity/trivy/pkg/detection"
+	"github.com/aquasecurity/trivy/pkg/scanners/options"
+)
+
+var manifestNameRegex = regexp.MustCompile("# Source: [^/]+/(.+)")
+
+type Parser struct {
+	helmClient   *action.Install
+	rootPath     string
+	ChartSource  string
+	filepaths    []string
+	debug        debug.Logger
+	skipRequired bool
+	workingFS    fs.FS
+	valuesFiles  []string
+	values       []string
+	fileValues   []string
+	stringValues []string
+	apiVersions  []string
+}
+
+type ChartFile struct {
+	TemplateFilePath string
+	ManifestContent  string
+}
+
+func (p *Parser) SetDebugWriter(writer io.Writer) {
+	p.debug = debug.New(writer, "helm", "parser")
+}
+
+func (p *Parser) SetSkipRequiredCheck(b bool) {
+	p.skipRequired = b
+}
+
+func (p *Parser) SetValuesFile(s ...string) {
+	p.valuesFiles = s
+}
+
+func (p *Parser) SetValues(values ...string) {
+	p.values = values
+}
+
+func (p *Parser) SetFileValues(values ...string) {
+	p.fileValues = values
+}
+
+func (p *Parser) SetStringValues(values ...string) {
+	p.stringValues = values
+}
+
+func (p *Parser) SetAPIVersions(values ...string) {
+	p.apiVersions = values
+}
+
+func New(path string, options ...options.ParserOption) *Parser {
+
+	client := action.NewInstall(&action.Configuration{})
+	client.DryRun = true     // don't do anything
+	client.Replace = true    // skip name check
+	client.ClientOnly = true // don't try to talk to a cluster
+
+	p := &Parser{
+		helmClient:  client,
+		ChartSource: path,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	if p.apiVersions != nil {
+		p.helmClient.APIVersions = p.apiVersions
+	}
+
+	return p
+}
+
+func (p *Parser) ParseFS(ctx context.Context, target fs.FS, path string) error {
+	p.workingFS = target
+
+	if err := fs.WalkDir(p.workingFS, filepath.ToSlash(path), func(path string, entry fs.DirEntry, err error) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+		if err != nil {
+			return err
+		}
+		if entry.IsDir() {
+			return nil
+		}
+
+		if !p.required(path, p.workingFS) {
+			return nil
+		}
+
+		if detection.IsArchive(path) {
+			tarFS, err := p.addTarToFS(path)
+			if errors.Is(err, errSkipFS) {
+				// an unpacked Chart already exists
+				return nil
+			} else if err != nil {
+				return fmt.Errorf("failed to 
add tar %q to FS: %w", path, err) + } + + targetPath := filepath.Dir(path) + if targetPath == "" { + targetPath = "." + } + + if err := p.ParseFS(ctx, tarFS, targetPath); err != nil { + return fmt.Errorf("parse tar FS error: %w", err) + } + return nil + } else { + return p.addPaths(path) + } + }); err != nil { + return fmt.Errorf("walk dir error: %w", err) + } + + return nil +} + +func (p *Parser) addPaths(paths ...string) error { + for _, path := range paths { + if _, err := fs.Stat(p.workingFS, path); err != nil { + return err + } + + if strings.HasSuffix(path, "Chart.yaml") && p.rootPath == "" { + if err := p.extractChartName(path); err != nil { + return err + } + p.rootPath = filepath.Dir(path) + } + p.filepaths = append(p.filepaths, path) + } + return nil +} + +func (p *Parser) extractChartName(chartPath string) error { + + chart, err := p.workingFS.Open(chartPath) + if err != nil { + return err + } + defer func() { _ = chart.Close() }() + + var chartContent map[string]interface{} + if err := yaml.NewDecoder(chart).Decode(&chartContent); err != nil { + // the chart likely has the name templated and so cannot be parsed as yaml - use a temporary name + if dir := filepath.Dir(chartPath); dir != "" && dir != "." { + p.helmClient.ReleaseName = dir + } else { + p.helmClient.ReleaseName = uuid.NewString() + } + return nil + } + + if name, ok := chartContent["name"]; !ok { + return fmt.Errorf("could not extract the chart name from %s", chartPath) + } else { + p.helmClient.ReleaseName = fmt.Sprintf("%v", name) + } + return nil +} + +func (p *Parser) RenderedChartFiles() ([]ChartFile, error) { + + tempDir, err := os.MkdirTemp(os.TempDir(), "defsec") + if err != nil { + return nil, err + } + + if err := p.writeBuildFiles(tempDir); err != nil { + return nil, err + } + + workingChart, err := loadChart(tempDir) + if err != nil { + return nil, err + } + + workingRelease, err := p.getRelease(workingChart) + if err != nil { + return nil, err + } + + var manifests bytes.Buffer + _, _ = fmt.Fprintln(&manifests, strings.TrimSpace(workingRelease.Manifest)) + + splitManifests := releaseutil.SplitManifests(manifests.String()) + manifestsKeys := make([]string, 0, len(splitManifests)) + for k := range splitManifests { + manifestsKeys = append(manifestsKeys, k) + } + return p.getRenderedManifests(manifestsKeys, splitManifests), nil +} + +func (p *Parser) getRelease(chart *chart.Chart) (*release.Release, error) { + opts := &ValueOptions{ + ValueFiles: p.valuesFiles, + Values: p.values, + FileValues: p.fileValues, + StringValues: p.stringValues, + } + + vals, err := opts.MergeValues() + if err != nil { + return nil, err + } + r, err := p.helmClient.RunWithContext(context.Background(), chart, vals) + if err != nil { + return nil, err + } + + if r == nil { + return nil, fmt.Errorf("there is nothing in the release") + } + return r, nil +} + +func loadChart(tempFs string) (*chart.Chart, error) { + loadedChart, err := loader.Load(tempFs) + if err != nil { + return nil, err + } + + if req := loadedChart.Metadata.Dependencies; req != nil { + if err := action.CheckDependencies(loadedChart, req); err != nil { + return nil, err + } + } + + return loadedChart, nil +} + +func (*Parser) getRenderedManifests(manifestsKeys []string, splitManifests map[string]string) []ChartFile { + sort.Sort(releaseutil.BySplitManifestsOrder(manifestsKeys)) + var manifestsToRender []ChartFile + for _, manifestKey := range manifestsKeys { + manifest := splitManifests[manifestKey] + submatch := manifestNameRegex.FindStringSubmatch(manifest) + if 
len(submatch) == 0 { + continue + } + manifestsToRender = append(manifestsToRender, ChartFile{ + TemplateFilePath: getManifestPath(manifest), + ManifestContent: manifest, + }) + } + return manifestsToRender +} + +func getManifestPath(manifest string) string { + lines := strings.Split(manifest, "\n") + if len(lines) == 0 { + return "unknown.yaml" + } + manifestFilePathParts := strings.SplitN(strings.TrimPrefix(lines[0], "# Source: "), "/", 2) + if len(manifestFilePathParts) > 1 { + return manifestFilePathParts[1] + } + return manifestFilePathParts[0] +} + +func (p *Parser) writeBuildFiles(tempFs string) error { + for _, path := range p.filepaths { + content, err := fs.ReadFile(p.workingFS, path) + if err != nil { + return err + } + workingPath := strings.TrimPrefix(path, p.rootPath) + workingPath = filepath.Join(tempFs, workingPath) + if err := os.MkdirAll(filepath.Dir(workingPath), os.ModePerm); err != nil { + return err + } + if err := os.WriteFile(workingPath, content, os.ModePerm); err != nil { + return err + } + } + return nil +} + +func (p *Parser) required(path string, workingFS fs.FS) bool { + if p.skipRequired { + return true + } + content, err := fs.ReadFile(workingFS, path) + if err != nil { + return false + } + + return detection.IsType(path, bytes.NewReader(content), detection.FileTypeHelm) +} diff --git a/pkg/scanners/helm/parser/parser_tar.go b/pkg/scanners/helm/parser/parser_tar.go new file mode 100644 index 000000000000..0c77408ea7d6 --- /dev/null +++ b/pkg/scanners/helm/parser/parser_tar.go @@ -0,0 +1,110 @@ +package parser + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + + "github.com/aquasecurity/trivy/pkg/detection" + "github.com/liamg/memoryfs" +) + +var errSkipFS = errors.New("skip parse FS") + +func (p *Parser) addTarToFS(path string) (fs.FS, error) { + tarFS := memoryfs.CloneFS(p.workingFS) + + file, err := tarFS.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open tar: %w", err) + } + defer file.Close() + + var tr *tar.Reader + + if detection.IsZip(path) { + zipped, err := gzip.NewReader(file) + if err != nil { + return nil, fmt.Errorf("failed to create gzip reader: %w", err) + } + defer zipped.Close() + tr = tar.NewReader(zipped) + } else { + tr = tar.NewReader(file) + } + + checkExistedChart := true + + for { + header, err := tr.Next() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, fmt.Errorf("failed to get next entry: %w", err) + } + + if checkExistedChart { + // Do not add archive files to FS if the chart already exists + // This can happen when the source chart is located next to an archived chart (with the `helm package` command) + // The first level folder in the archive is equal to the Chart name + if _, err := tarFS.Stat(filepath.Dir(path) + "/" + filepath.Dir(header.Name)); err == nil { + return nil, errSkipFS + } + checkExistedChart = false + } + + // get the individual path and extract to the current directory + entryPath := header.Name + + switch header.Typeflag { + case tar.TypeDir: + if err := tarFS.MkdirAll(entryPath, os.FileMode(header.Mode)); err != nil && !errors.Is(err, fs.ErrExist) { + return nil, err + } + case tar.TypeReg: + writePath := filepath.Dir(path) + "/" + entryPath + p.debug.Log("Unpacking tar entry %s", writePath) + + _ = tarFS.MkdirAll(filepath.Dir(writePath), fs.ModePerm) + + buf, err := copyChunked(tr, 1024) + if err != nil { + return nil, err + } + + p.debug.Log("writing file contents to %s", 
writePath)
+			if err := tarFS.WriteFile(writePath, buf.Bytes(), fs.ModePerm); err != nil {
+				return nil, fmt.Errorf("write file error: %w", err)
+			}
+		default:
+			return nil, fmt.Errorf("header type %q is not supported", header.Typeflag)
+		}
+	}
+
+	if err := tarFS.Remove(path); err != nil {
+		return nil, fmt.Errorf("failed to remove tar from FS: %w", err)
+	}
+
+	return tarFS, nil
+}
+
+func copyChunked(src io.Reader, chunkSize int64) (*bytes.Buffer, error) {
+	buf := new(bytes.Buffer)
+	for {
+		if _, err := io.CopyN(buf, src, chunkSize); err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			return nil, fmt.Errorf("failed to copy: %w", err)
+		}
+	}
+
+	return buf, nil
+}
diff --git a/pkg/scanners/helm/parser/parser_test.go b/pkg/scanners/helm/parser/parser_test.go
new file mode 100644
index 000000000000..c146b8f9e18f
--- /dev/null
+++ b/pkg/scanners/helm/parser/parser_test.go
@@ -0,0 +1,24 @@
+package parser
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestParseFS(t *testing.T) {
+	t.Run("source chart is located next to the same archived chart", func(t *testing.T) {
+		p := New(".")
+		require.NoError(t, p.ParseFS(context.TODO(), os.DirFS(filepath.Join("testdata", "chart-and-archived-chart")), "."))
+
+		expectedFiles := []string{
+			"my-chart/Chart.yaml",
+			"my-chart/templates/pod.yaml",
+		}
+		assert.Equal(t, expectedFiles, p.filepaths)
+	})
+}
diff --git a/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart-0.1.0.tgz b/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart-0.1.0.tgz
new file mode 100644
index 000000000000..e36b2b474f3e
Binary files /dev/null and b/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart-0.1.0.tgz differ
diff --git a/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart/Chart.yaml b/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart/Chart.yaml
new file mode 100644
index 000000000000..767f748a8d59
--- /dev/null
+++ b/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: my-chart
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
diff --git a/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart/templates/pod.yaml b/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart/templates/pod.yaml
new file mode 100644
index 000000000000..3649247c1bb1
--- /dev/null
+++ b/pkg/scanners/helm/parser/testdata/chart-and-archived-chart/my-chart/templates/pod.yaml
@@ -0,0 +1,21 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14.2
+        ports:
+        - containerPort: 80
\ No newline at end of file
diff --git a/pkg/scanners/helm/parser/vals.go b/pkg/scanners/helm/parser/vals.go
new file mode 100644
index 000000000000..300dad819730
--- /dev/null
+++ b/pkg/scanners/helm/parser/vals.go
@@ -0,0 +1,114 @@
+package parser
+
+import (
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strings"
+
+	"gopkg.in/yaml.v3"
+	"helm.sh/helm/v3/pkg/getter"
+	"helm.sh/helm/v3/pkg/strvals"
+)
+
+type ValueOptions struct {
+	ValueFiles   []string
+	StringValues []string
+	Values       []string
+	FileValues   []string
+}
+
+// MergeValues merges values from files specified via -f/--values and directly
+// via --set, --set-string, or --set-file, marshaling them to YAML
+func (opts *ValueOptions) MergeValues() (map[string]interface{}, error) {
+	base := map[string]interface{}{}
+
+	// User specified values files via -f/--values
+	for _, filePath := range opts.ValueFiles {
+		currentMap := map[string]interface{}{}
+
+		bytes, err := readFile(filePath)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := yaml.Unmarshal(bytes, &currentMap); err != nil {
+			return nil, fmt.Errorf("failed to parse %s: %w", filePath, err)
+		}
+		// Merge with the previous map
+		base = mergeMaps(base, currentMap)
+	}
+
+	// User specified a value via --set
+	for _, value := range opts.Values {
+		if err := strvals.ParseInto(value, base); err != nil {
+			return nil, fmt.Errorf("failed parsing --set data, %w", err)
+		}
+	}
+
+	// User specified a value via --set-string
+	for _, value := range opts.StringValues {
+		if err := strvals.ParseIntoString(value, base); err != nil {
+			return nil, fmt.Errorf("failed parsing --set-string data %w", err)
+		}
+	}
+
+	// User specified a value via --set-file
+	for _, value := range opts.FileValues {
+		reader := func(rs []rune) (interface{}, error) {
+			bytes, err := readFile(string(rs))
+			if err != nil {
+				return nil, err
+			}
+			return string(bytes), err
+		}
+		if err := strvals.ParseIntoFile(value, base, reader); err != nil {
+			return nil, fmt.Errorf("failed parsing --set-file data: %w", err)
+		}
+	}
+
+	return base, nil
+}
+
+func mergeMaps(a, b map[string]interface{}) map[string]interface{} {
+	out := make(map[string]interface{}, len(a))
+	for k, v := range a {
+		out[k] = v
+	}
+	for k, v := range b {
+		if v, ok := v.(map[string]interface{}); ok {
+			if bv, ok := out[k]; ok {
+				if bv, ok := bv.(map[string]interface{}); ok {
+					out[k] = mergeMaps(bv, v)
+					continue
+				}
+			}
+		}
+		out[k] = v
+	}
+	return out
+}
+
+// readFile loads a file from stdin, the local directory, or a remote file with a URL.
+func readFile(filePath string) ([]byte, error) {
+	if strings.TrimSpace(filePath) == "-" {
+		return io.ReadAll(os.Stdin)
+	}
+	u, _ := url.Parse(filePath)
+
+	// FIXME: maybe handle other protocols such as ftp.
+ if u.Scheme == "http" || u.Scheme == "https" { + g, err := getter.NewHTTPGetter() + if err != nil { + return nil, err + } + data, err := g.Get(filePath, getter.WithURL(filePath)) + if err != nil { + return nil, err + } + return data.Bytes(), err + } else { + return os.ReadFile(filePath) + } +} diff --git a/pkg/scanners/helm/scanner.go b/pkg/scanners/helm/scanner.go new file mode 100644 index 000000000000..1cbb7b8cdaf2 --- /dev/null +++ b/pkg/scanners/helm/scanner.go @@ -0,0 +1,221 @@ +package helm + +import ( + "context" + "fmt" + "io" + "io/fs" + "path/filepath" + "strings" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/liamg/memoryfs" + + "github.com/aquasecurity/trivy/pkg/detection" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scanners" + "github.com/aquasecurity/trivy/pkg/scanners/helm/parser" + kparser "github.com/aquasecurity/trivy/pkg/scanners/kubernetes/parser" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +var _ scanners.FSScanner = (*Scanner)(nil) +var _ options.ConfigurableScanner = (*Scanner)(nil) + +type Scanner struct { + policyDirs []string + dataDirs []string + debug debug.Logger + options []options.ScannerOption + parserOptions []options.ParserOption + policyReaders []io.Reader + loadEmbeddedLibraries bool + loadEmbeddedPolicies bool + policyFS fs.FS + skipRequired bool + frameworks []framework.Framework + spec string +} + +func (s *Scanner) SetSpec(spec string) { + s.spec = spec +} + +func (s *Scanner) SetRegoOnly(bool) { +} + +func (s *Scanner) SetFrameworks(frameworks []framework.Framework) { + s.frameworks = frameworks +} + +// New creates a new Scanner +func New(options ...options.ScannerOption) *Scanner { + s := &Scanner{ + options: options, + } + + for _, option := range options { + option(s) + } + return s +} + +func (s *Scanner) AddParserOptions(options ...options.ParserOption) { + s.parserOptions = append(s.parserOptions, options...) +} + +func (s *Scanner) SetUseEmbeddedPolicies(b bool) { + s.loadEmbeddedPolicies = b +} + +func (s *Scanner) SetUseEmbeddedLibraries(b bool) { + s.loadEmbeddedLibraries = b +} + +func (s *Scanner) Name() string { + return "Helm" +} + +func (s *Scanner) SetPolicyReaders(readers []io.Reader) { + s.policyReaders = readers +} + +func (s *Scanner) SetSkipRequiredCheck(skip bool) { + s.skipRequired = skip +} + +func (s *Scanner) SetDebugWriter(writer io.Writer) { + s.debug = debug.New(writer, "helm", "scanner") +} + +func (s *Scanner) SetTraceWriter(_ io.Writer) { + // handled by rego later - nothing to do for now... +} + +func (s *Scanner) SetPerResultTracingEnabled(_ bool) { + // handled by rego later - nothing to do for now... +} + +func (s *Scanner) SetPolicyDirs(dirs ...string) { + s.policyDirs = dirs +} + +func (s *Scanner) SetDataDirs(dirs ...string) { + s.dataDirs = dirs +} + +func (s *Scanner) SetPolicyNamespaces(namespaces ...string) { + // handled by rego later - nothing to do for now... 
+}
+
+func (s *Scanner) SetPolicyFilesystem(policyFS fs.FS) {
+	s.policyFS = policyFS
+}
+
+func (s *Scanner) SetDataFilesystem(_ fs.FS) {}
+func (s *Scanner) SetRegoErrorLimit(_ int)   {}
+
+func (s *Scanner) ScanFS(ctx context.Context, target fs.FS, path string) (scan.Results, error) {

+	var results []scan.Result
+	if err := fs.WalkDir(target, path, func(path string, d fs.DirEntry, err error) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if d.IsDir() {
+			return nil
+		}
+
+		if detection.IsArchive(path) {
+			if scanResults, err := s.getScanResults(ctx, path, target); err != nil {
+				return err
+			} else {
+				results = append(results, scanResults...)
+			}
+		}
+
+		if strings.HasSuffix(path, "Chart.yaml") {
+			if scanResults, err := s.getScanResults(ctx, filepath.Dir(path), target); err != nil {
+				return err
+			} else {
+				results = append(results, scanResults...)
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return results, nil
+
+}
+
+func (s *Scanner) getScanResults(ctx context.Context, path string, target fs.FS) (results []scan.Result, err error) {
+	helmParser := parser.New(path, s.parserOptions...)
+
+	if err := helmParser.ParseFS(ctx, target, path); err != nil {
+		return nil, err
+	}
+
+	chartFiles, err := helmParser.RenderedChartFiles()
+	if err != nil { // not valid helm, maybe some other yaml etc., abort
+		s.debug.Log("Failed to render Chart files: %s", err)
+		return nil, nil
+	}
+
+	regoScanner := rego.NewScanner(types.SourceKubernetes, s.options...)
+	policyFS := target
+	if s.policyFS != nil {
+		policyFS = s.policyFS
+	}
+	if err := regoScanner.LoadPolicies(s.loadEmbeddedLibraries, s.loadEmbeddedPolicies, policyFS, s.policyDirs, s.policyReaders); err != nil {
+		return nil, fmt.Errorf("policies load: %w", err)
+	}
+	for _, file := range chartFiles {
+		file := file
+		s.debug.Log("Processing rendered chart file: %s", file.TemplateFilePath)
+
+		manifests, err := kparser.New().Parse(strings.NewReader(file.ManifestContent), file.TemplateFilePath)
+		if err != nil {
+			return nil, fmt.Errorf("unmarshal yaml: %w", err)
+		}
+		for _, manifest := range manifests {
+			fileResults, err := regoScanner.ScanInput(ctx, rego.Input{
+				Path:     file.TemplateFilePath,
+				Contents: manifest,
+				FS:       target,
+			})
+			if err != nil {
+				return nil, fmt.Errorf("scanning error: %w", err)
+			}
+
+			if len(fileResults) > 0 {
+				renderedFS := memoryfs.New()
+				if err := renderedFS.MkdirAll(filepath.Dir(file.TemplateFilePath), fs.ModePerm); err != nil {
+					return nil, err
+				}
+				if err := renderedFS.WriteLazyFile(file.TemplateFilePath, func() (io.Reader, error) {
+					return strings.NewReader(file.ManifestContent), nil
+				}, fs.ModePerm); err != nil {
+					return nil, err
+				}
+				fileResults.SetSourceAndFilesystem(helmParser.ChartSource, renderedFS, detection.IsArchive(helmParser.ChartSource))
+			}
+
+			results = append(results, fileResults...)
+		}
+
+	}
+	return results, nil
+}
diff --git a/pkg/scanners/helm/test/mysql/.helmignore b/pkg/scanners/helm/test/mysql/.helmignore
new file mode 100644
index 000000000000..f0c131944441
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/pkg/scanners/helm/test/mysql/Chart.lock b/pkg/scanners/helm/test/mysql/Chart.lock new file mode 100644 index 000000000000..2a6356005c25 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.11.1 +digest: sha256:a000bcd4d4cdd813c67d633b5523b4a4cd478fb95f1cae665d9b0ba5c45b40e2 +generated: "2022-02-16T22:19:57.971058445Z" diff --git a/pkg/scanners/helm/test/mysql/Chart.yaml b/pkg/scanners/helm/test/mysql/Chart.yaml new file mode 100644 index 000000000000..7d5f5c6ce834 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 8.0.28 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: MySQL is a fast, reliable, scalable, and easy to use open source relational + database system. Designed to handle mission-critical, heavy-load production applications. +home: https://github.com/bitnami/charts/tree/master/bitnami/mysql +icon: https://bitnami.com/assets/stacks/mysql/img/mysql-stack-220x234.png +keywords: +- mysql +- database +- sql +- cluster +- high availability +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: mysql +sources: +- https://github.com/bitnami/bitnami-docker-mysql +- https://mysql.com +version: 8.8.26 diff --git a/pkg/scanners/helm/test/mysql/README.md b/pkg/scanners/helm/test/mysql/README.md new file mode 100644 index 000000000000..b03fa495893f --- /dev/null +++ b/pkg/scanners/helm/test/mysql/README.md @@ -0,0 +1,491 @@ + + +# MySQL packaged by Bitnami + +MySQL is a fast, reliable, scalable, and easy to use open source relational database system. Designed to handle mission-critical, heavy-load production applications. + +[Overview of MySQL](http://www.mysql.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mysql +``` + +## Introduction + +This chart bootstraps a [MySQL](https://github.com/bitnami/bitnami-docker-mysql) replication cluster deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/mysql +``` + +These commands deploy MySQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
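+
+Individual parameters can also be overridden at installation time with the `--set` flag. The command below is an illustrative sketch using `primary.persistence.size`, one of the persistence parameters documented in the tables that follow:
+
+```bash
+$ helm install my-release \
+  --set primary.persistence.size=16Gi \
+  bitnami/mysql
+```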
+ +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Cluster domain | `cluster.local` | +| `commonAnnotations` | Common annotations to add to all MySQL resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `commonLabels` | Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `extraDeploy` | Array with extra yaml to deploy with the chart. Evaluated as a template | `[]` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + + +### MySQL common parameters + +| Name | Description | Value | +| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `image.registry` | MySQL image registry | `docker.io` | +| `image.repository` | MySQL image repository | `bitnami/mysql` | +| `image.tag` | MySQL image tag (immutable tags are recommended) | `8.0.28-debian-10-r0` | +| `image.pullPolicy` | MySQL image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `architecture` | MySQL architecture (`standalone` or `replication`) | `standalone` | +| `auth.rootPassword` | Password for the `root` user. Ignored if existing secret is provided | `""` | +| `auth.database` | Name for a custom database to create | `my_database` | +| `auth.username` | Name for a custom user to create | `""` | +| `auth.password` | Password for the new user. Ignored if existing secret is provided | `""` | +| `auth.replicationUser` | MySQL replication user | `replicator` | +| `auth.replicationPassword` | MySQL replication user password. Ignored if existing secret is provided | `""` | +| `auth.existingSecret` | Use existing secret for password details. 
The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` | `""` | +| `auth.forcePassword` | Force users to specify required passwords | `false` | +| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` | +| `auth.customPasswordFiles` | Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` | `{}` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` | + + +### MySQL Primary parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | ------------------- | +| `primary.command` | Override default container command on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.args` | Override default container args on MySQL Primary container(s) (useful when using custom images) | `[]` | +| `primary.hostAliases` | Deployment pod host aliases | `[]` | +| `primary.configuration` | Configure MySQL Primary with a custom my.cnf file | `""` | +| `primary.existingConfigmap` | Name of existing ConfigMap with MySQL Primary configuration. | `""` | +| `primary.updateStrategy` | Update strategy type for the MySQL primary statefulset | `RollingUpdate` | +| `primary.rollingUpdatePartition` | Partition update strategy for MySQL Primary statefulset | `""` | +| `primary.podAnnotations` | Additional pod annotations for MySQL primary pods | `{}` | +| `primary.podAffinityPreset` | MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.podAntiAffinityPreset` | MySQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `primary.nodeAffinityPreset.type` | MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `primary.nodeAffinityPreset.key` | MySQL primary node label key to match Ignored if `primary.affinity` is set. | `""` | +| `primary.nodeAffinityPreset.values` | MySQL primary node label values to match. Ignored if `primary.affinity` is set. 
| `[]` | +| `primary.affinity` | Affinity for MySQL primary pods assignment | `{}` | +| `primary.nodeSelector` | Node labels for MySQL primary pods assignment | `{}` | +| `primary.tolerations` | Tolerations for MySQL primary pods assignment | `[]` | +| `primary.podSecurityContext.enabled` | Enable security context for MySQL primary pods | `true` | +| `primary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `primary.containerSecurityContext.enabled` | MySQL primary container securityContext | `true` | +| `primary.containerSecurityContext.runAsUser` | User ID for the MySQL primary container | `1001` | +| `primary.resources.limits` | The resources limits for MySQL primary containers | `{}` | +| `primary.resources.requests` | The requested resources for MySQL primary containers | `{}` | +| `primary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `primary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `primary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `primary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `primary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `primary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `primary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `primary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `primary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `primary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `primary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `primary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `primary.startupProbe.enabled` | Enable startupProbe | `true` | +| `primary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `primary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `primary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `primary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `10` | +| `primary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `primary.customLivenessProbe` | Override default liveness probe for MySQL primary containers | `{}` | +| `primary.customReadinessProbe` | Override default readiness probe for MySQL primary containers | `{}` | +| `primary.customStartupProbe` | Override default startup probe for MySQL primary containers | `{}` | +| `primary.extraFlags` | MySQL primary additional command line flags | `""` | +| `primary.extraEnvVars` | Extra environment variables to be set on MySQL primary containers | `[]` | +| `primary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL primary containers | `""` | +| `primary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL primary containers | `""` | +| `primary.persistence.enabled` | Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. 
If false, use emptyDir | `true` | +| `primary.persistence.existingClaim` | Name of an existing `PersistentVolumeClaim` for MySQL primary replicas | `""` | +| `primary.persistence.storageClass` | MySQL primary persistent volume storage Class | `""` | +| `primary.persistence.annotations` | MySQL primary persistent volume claim annotations | `{}` | +| `primary.persistence.accessModes` | MySQL primary persistent volume access Modes | `["ReadWriteOnce"]` | +| `primary.persistence.size` | MySQL primary persistent volume size | `8Gi` | +| `primary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `primary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL Primary pod(s) | `[]` | +| `primary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) | `[]` | +| `primary.initContainers` | Add additional init containers for the MySQL Primary pod(s) | `[]` | +| `primary.sidecars` | Add additional sidecar containers for the MySQL Primary pod(s) | `[]` | +| `primary.service.type` | MySQL Primary K8s service type | `ClusterIP` | +| `primary.service.port` | MySQL Primary K8s service port | `3306` | +| `primary.service.nodePort` | MySQL Primary K8s service node port | `""` | +| `primary.service.clusterIP` | MySQL Primary K8s service clusterIP IP | `""` | +| `primary.service.loadBalancerIP` | MySQL Primary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `primary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `primary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL Primary service is LoadBalancer | `[]` | +| `primary.service.annotations` | Provide any additional annotations which may be required | `{}` | +| `primary.pdb.enabled` | Enable/disable a Pod Disruption Budget creation for MySQL primary pods | `false` | +| `primary.pdb.minAvailable` | Minimum number/percentage of MySQL primary pods that should remain scheduled | `1` | +| `primary.pdb.maxUnavailable` | Maximum number/percentage of MySQL primary pods that may be made unavailable | `""` | +| `primary.podLabels` | MySQL Primary pod label. If labels are same as commonLabels , this will take precedence | `{}` | + + +### MySQL Secondary parameters + +| Name | Description | Value | +| ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `secondary.replicaCount` | Number of MySQL secondary replicas | `1` | +| `secondary.hostAliases` | Deployment pod host aliases | `[]` | +| `secondary.command` | Override default container command on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.args` | Override default container args on MySQL Secondary container(s) (useful when using custom images) | `[]` | +| `secondary.configuration` | Configure MySQL Secondary with a custom my.cnf file | `""` | +| `secondary.existingConfigmap` | Name of existing ConfigMap with MySQL Secondary configuration. | `""` | +| `secondary.updateStrategy` | Update strategy type for the MySQL secondary statefulset | `RollingUpdate` | +| `secondary.rollingUpdatePartition` | Partition update strategy for MySQL Secondary statefulset | `""` | +| `secondary.podAnnotations` | Additional pod annotations for MySQL secondary pods | `{}` | +| `secondary.podAffinityPreset` | MySQL secondary pod affinity preset. 
Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.podAntiAffinityPreset` | MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `secondary.nodeAffinityPreset.type` | MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `secondary.nodeAffinityPreset.key` | MySQL secondary node label key to match Ignored if `secondary.affinity` is set. | `""` | +| `secondary.nodeAffinityPreset.values` | MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. | `[]` | +| `secondary.affinity` | Affinity for MySQL secondary pods assignment | `{}` | +| `secondary.nodeSelector` | Node labels for MySQL secondary pods assignment | `{}` | +| `secondary.tolerations` | Tolerations for MySQL secondary pods assignment | `[]` | +| `secondary.podSecurityContext.enabled` | Enable security context for MySQL secondary pods | `true` | +| `secondary.podSecurityContext.fsGroup` | Group ID for the mounted volumes' filesystem | `1001` | +| `secondary.containerSecurityContext.enabled` | MySQL secondary container securityContext | `true` | +| `secondary.containerSecurityContext.runAsUser` | User ID for the MySQL secondary container | `1001` | +| `secondary.resources.limits` | The resources limits for MySQL secondary containers | `{}` | +| `secondary.resources.requests` | The requested resources for MySQL secondary containers | `{}` | +| `secondary.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `secondary.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `secondary.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `secondary.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `secondary.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `secondary.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `secondary.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `secondary.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `secondary.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `secondary.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `secondary.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `secondary.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `secondary.startupProbe.enabled` | Enable startupProbe | `true` | +| `secondary.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `15` | +| `secondary.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `secondary.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `secondary.startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `secondary.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `secondary.customLivenessProbe` | Override default liveness probe for MySQL secondary containers | `{}` | +| `secondary.customReadinessProbe` | Override default readiness probe for MySQL secondary containers | `{}` | +| `secondary.customStartupProbe` | Override default startup probe for MySQL secondary containers | `{}` | +| `secondary.extraFlags` | MySQL secondary additional command line flags | `""` | +| 
`secondary.extraEnvVars` | An array to add extra environment variables on MySQL secondary containers | `[]` | +| `secondary.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for MySQL secondary containers | `""` | +| `secondary.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for MySQL secondary containers | `""` | +| `secondary.persistence.enabled` | Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` | `true` | +| `secondary.persistence.storageClass` | MySQL secondary persistent volume storage Class | `""` | +| `secondary.persistence.annotations` | MySQL secondary persistent volume claim annotations | `{}` | +| `secondary.persistence.accessModes` | MySQL secondary persistent volume access Modes | `["ReadWriteOnce"]` | +| `secondary.persistence.size` | MySQL secondary persistent volume size | `8Gi` | +| `secondary.persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `secondary.extraVolumes` | Optionally specify extra list of additional volumes to the MySQL secondary pod(s) | `[]` | +| `secondary.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) | `[]` | +| `secondary.initContainers` | Add additional init containers for the MySQL secondary pod(s) | `[]` | +| `secondary.sidecars` | Add additional sidecar containers for the MySQL secondary pod(s) | `[]` | +| `secondary.service.type` | MySQL secondary Kubernetes service type | `ClusterIP` | +| `secondary.service.port` | MySQL secondary Kubernetes service port | `3306` | +| `secondary.service.nodePort` | MySQL secondary Kubernetes service node port | `""` | +| `secondary.service.clusterIP` | MySQL secondary Kubernetes service clusterIP IP | `""` | +| `secondary.service.loadBalancerIP` | MySQL secondary loadBalancerIP if service type is `LoadBalancer` | `""` | +| `secondary.service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `secondary.service.loadBalancerSourceRanges` | Addresses that are allowed when MySQL secondary service is LoadBalancer | `[]` | +| `secondary.service.annotations` | Provide any additional annotations which may be required | `{}` | +| `secondary.pdb.enabled` | Enable/disable a Pod Disruption Budget creation for MySQL secondary pods | `false` | +| `secondary.pdb.minAvailable` | Minimum number/percentage of MySQL secondary pods that should remain scheduled | `1` | +| `secondary.pdb.maxUnavailable` | Maximum number/percentage of MySQL secondary pods that may be made unavailable | `""` | +| `secondary.podLabels` | Additional pod labels for MySQL secondary pods | `{}` | + + +### RBAC parameters + +| Name | Description | Value | +| ---------------------------- | ------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable the creation of a ServiceAccount for MySQL pods | `true` | +| `serviceAccount.name` | Name of the created ServiceAccount | `""` | +| `serviceAccount.annotations` | Annotations for MySQL Service Account | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + + +### Network Policy + +| Name | Description | Value | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------- | ------- | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | The Policy model to apply. 
| `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `10-debian-10-r312` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources` | Init container volume-permissions resources | `{}` | + + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image repository | `bitnami/mysqld-exporter` | +| `metrics.image.tag` | Exporter image tag (immutable tags are recommended) | `0.13.0-debian-10-r216` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.service.type` | Kubernetes service type for MySQL Prometheus Exporter | `ClusterIP` | +| `metrics.service.port` | MySQL Prometheus Exporter service port | `9104` | +| `metrics.service.annotations` | Prometheus exporter service annotations | `{}` | +| `metrics.extraArgs.primary` | Extra args to be passed to mysqld_exporter on Primary pods | `[]` | +| `metrics.extraArgs.secondary` | Extra args to be passed to mysqld_exporter on Secondary pods | `[]` | +| `metrics.resources.limits` | The resources limits for MySQL prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for MySQL prometheus exporter containers | `{}` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `30` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for 
readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify the honorLabels parameter for the scrape endpoint | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` | + + +The above parameters map to the env variables defined in [bitnami/mysql](https://github.com/bitnami/bitnami-docker-mysql). For more information please refer to the [bitnami/mysql](https://github.com/bitnami/bitnami-docker-mysql) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set auth.rootPassword=secretpassword,auth.database=app_database \ + bitnami/mysql +``` + +The above command sets the MySQL `root` account password to `secretpassword`. Additionally, it creates a database named `app_database`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install my-release -f values.yaml bitnami/mysql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist. + +### Use a different MySQL version + +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/change-image-version/). + +### Customize a new MySQL instance + +The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image allows you to use your custom scripts to initialize a fresh instance. 
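+ +For example, a minimal `values.yaml` could supply an initialization script inline. This is only a sketch; the script name and its contents are illustrative: + +```yaml +initdbScripts: + my_init_script.sh: | + #!/bin/sh + echo "Running custom initialization" +```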
+ +As the sketch above shows, custom scripts are specified using the `initdbScripts` parameter. Alternatively, an external ConfigMap may be created with all the initialization scripts and passed to the chart via the `initdbScriptsConfigMap` parameter. Note that this will override the `initdbScripts` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +These scripts are treated differently depending on their extension. While `.sh` scripts are executed on all the nodes, `.sql` and `.sql.gz` scripts are only executed on the primary nodes. This is because `.sh` scripts support conditional tests to identify the type of node they are running on, while such tests are not supported in `.sql` or `.sql.gz` files. + +Refer to the [chart documentation for more information and a usage example](http://docs.bitnami.com/kubernetes/infrastructure/mysql/configuration/customize-new-instance/). + +### Sidecars and Init Containers + +If you need additional containers to run within the same pod as MySQL, you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +Similarly, you can add extra init containers using the `initContainers` parameter. + +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +## Persistence + +The [Bitnami MySQL](https://github.com/bitnami/bitnami-docker-mysql) image stores the MySQL data and configurations at the `/bitnami/mysql` path of the container. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning by default. An existing PersistentVolumeClaim can also be defined for this purpose. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). + +## Network Policy + +To enable network policy for MySQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 3306. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to MySQL. This label will be displayed in the output of a successful install. + +## Pod affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
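+ +For instance, the following sketch spreads MySQL secondary pods across nodes through the documented `secondary.affinity` parameter; the label values are illustrative and would need to match the pod labels of your release: + +```yaml +secondary: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: mysql + topologyKey: kubernetes.io/hostname +```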
+ +As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.rootPassword` parameter when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Administrator credentials' section. Please note down the password and run the command below to upgrade your chart: + +```bash +$ helm upgrade my-release bitnami/mysql --set auth.rootPassword=[ROOT_PASSWORD] +``` + +| Note: you need to substitute the placeholder _[ROOT_PASSWORD]_ with the value obtained in the installation notes. + +### To 8.0.0 + +- Several parameters were renamed or removed in favor of new ones in this major version: + - The terms *master* and *slave* have been replaced by the terms *primary* and *secondary*. Therefore, parameters prefixed with `master` or `slave` are now prefixed with `primary` or `secondary`, respectively. + - Credentials parameters are reorganized under the `auth` parameter. + - The `replication.enabled` parameter is deprecated in favor of the `architecture` parameter, which accepts two values: `standalone` and `replication`. +- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels). +- This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/master/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade. + +Consequences: + +- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MySQL chart, and migrate the data from your previous release. You have two alternatives to do so: + - Create a backup of the database, and restore it on the new release using tools such as [mysqldump](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html). + - Reuse the PVC used to hold the master data on your previous release. To do so, use the `primary.persistence.existingClaim` parameter. The following example assumes that the release name is `mysql`: + +```bash +$ helm install mysql bitnami/mysql --set auth.rootPassword=[ROOT_PASSWORD] --set primary.persistence.existingClaim=[EXISTING_PVC] +``` + +| Note: you need to substitute the placeholder _[EXISTING_PVC]_ with the name of the PVC used on your previous release, and _[ROOT_PASSWORD]_ with the root password used in your previous release. + +### To 7.0.0 + +[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. 
+ +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/mysql/administration/upgrade-helm3/). + +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. Use the workaround below to upgrade from versions prior to 3.0.0. The following example assumes that the release name is mysql: + +```console +$ kubectl delete statefulset mysql-master --cascade=false +$ kubectl delete statefulset mysql-slave --cascade=false +``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/pkg/scanners/helm/test/mysql/charts/common/.helmignore b/pkg/scanners/helm/test/mysql/charts/common/.helmignore new file mode 100644 index 000000000000..50af03172541 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/pkg/scanners/helm/test/mysql/charts/common/Chart.yaml b/pkg/scanners/helm/test/mysql/charts/common/Chart.yaml new file mode 100644 index 000000000000..87226649a57c --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.11.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.11.1 diff --git a/pkg/scanners/helm/test/mysql/charts/common/README.md b/pkg/scanners/helm/test/mysql/charts/common/README.md new file mode 100644 index 000000000000..da84c426d0db --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/README.md @@ -0,0 +1,345 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager. 
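+ +For instance, once the dependency above is declared, a consuming chart's templates can call helpers such as `common.names.fullname` and `common.labels.standard` directly. A minimal sketch (the Service shown is illustrative, not part of the library): + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +spec: + ports: + - port: 3306 +```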
+ +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. 
| `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. 
See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on where the mariadb chart and the helper are used. | +| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on where the postgresql chart and the helper are used. | +| `common.validations.values.redis.passwords` | This helper will ensure the required passwords for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on where the redis chart and the helper are used. | +| `common.validations.values.cassandra.passwords` | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on where the cassandra chart and the helper are used. | +| `common.validations.values.mongodb.passwords` | This helper will ensure the required passwords for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` The subchart field is optional and can be true or false, depending on where the mongodb chart and the helper are used. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information in logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether to enable persistence. 
+ example: true + +storageClass: + type: string + description: Persistent Volume Storage Class. If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size of the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +#   password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty, we will see some alerts: + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). 
[Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts. + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_affinities.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000000..189ea403d558 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_capabilities.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000000..b94212bbe77c --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,128 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. 
+*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error. +**To be removed when the catalog's minimum Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_errors.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_errors.tpl new file mode 100644 index 000000000000..a79cc2e322e0 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Throw an error when upgrading using empty password values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_images.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_images.tpl new file mode 100644 index 000000000000..42ffbc7227eb --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_ingress.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000000..8caf73a61082 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_labels.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_labels.tpl new file mode 100644 index 000000000000..252066c7e2b3 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_names.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_names.tpl new file mode 100644 index 000000000000..cf0323171f39 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_names.tpl @@ -0,0 +1,52 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_secrets.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000000..a1afc1195996 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_secrets.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. 
+
+{{/*
+Generate secret password or retrieve one if already created.
+
+Usage:
+{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value.
+  - length - int - Optional - Length of the generated random password.
+  - strong - Boolean - Optional - Whether to add symbols to the generated random password.
+  - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
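+
+{{/*
+Behavioural note (editorial, based on standard Helm semantics): because
+"common.secrets.passwords.manage" consults "lookup" first, re-rendering against
+a live cluster reuses the already-generated password instead of rotating it;
+under "helm template" or "--dry-run" lookup returns an empty result, so a fresh
+random value is produced on every render.
+*/}}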
+
+{{/*
+Returns whether a previous generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
+*/}}
+{{- define "common.secrets.exists" -}}
+{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }}
+{{- if $secret }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_storage.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_storage.tpl
new file mode 100644
index 000000000000..60e2a844f6eb
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_storage.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the proper Storage Class
+{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }}
+*/}}
+{{- define "common.storage.class" -}}
+
+{{- $storageClass := .persistence.storageClass -}}
+{{- if .global -}}
+    {{- if .global.storageClass -}}
+        {{- $storageClass = .global.storageClass -}}
+    {{- end -}}
+{{- end -}}
+
+{{- if $storageClass -}}
+  {{- if (eq "-" $storageClass) -}}
+      {{- printf "storageClassName: \"\"" -}}
+  {{- else }}
+      {{- printf "storageClassName: %s" $storageClass -}}
+  {{- end -}}
+{{- end -}}
+
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_tplvalues.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_tplvalues.tpl
new file mode 100644
index 000000000000..2db166851bb5
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_tplvalues.tpl
@@ -0,0 +1,13 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Renders a value that contains a template.
+Usage:
+{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "common.tplvalues.render" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_utils.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_utils.tpl
new file mode 100644
index 000000000000..ea083a249f80
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_utils.tpl
@@ -0,0 +1,62 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Print instructions to get a secret value.
+Usage:
+{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }}
+*/}}
+{{- define "common.utils.secret.getvalue" -}}
+{{- $varname := include "common.utils.fieldToEnvVar" . -}}
+export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode)
+{{- end -}}
+
+{{/*
+Build env var name given a field
+Usage:
+{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+*/}}
+{{- define "common.utils.fieldToEnvVar" -}}
+  {{- $fieldNameSplit := splitList "-" .field -}}
+  {{- $upperCaseFieldNameSplit := list -}}
+
+  {{- range $fieldNameSplit -}}
+    {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}}
+  {{- end -}}
+
+  {{ join "_" $upperCaseFieldNameSplit }}
+{{- end -}}
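+
+{{/*
+For example (hypothetical field name): "my-password" is split on "-" and each
+part upper-cased, so {{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }}
+renders "MY_PASSWORD".
+*/}}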
+
+{{/*
+Gets a value from .Values given its key path
+Usage:
+{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }}
+*/}}
+{{- define "common.utils.getValueFromKey" -}}
+{{- $splitKey := splitList "." .key -}}
+{{- $value := "" -}}
+{{- $latestObj := $.context.Values -}}
+{{- range $splitKey -}}
+  {{- if not $latestObj -}}
+    {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}}
+  {{- end -}}
+  {{- $value = ( index $latestObj . ) -}}
+  {{- $latestObj = $value -}}
+{{- end -}}
+{{- printf "%v" (default "" $value) -}}
+{{- end -}}
+
+{{/*
+Returns first .Values key with a defined value or first of the list if all non-defined
+Usage:
+{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }}
+*/}}
+{{- define "common.utils.getKeyFromList" -}}
+{{- $key := first .keys -}}
+{{- $reverseKeys := reverse .keys }}
+{{- range $reverseKeys }}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }}
+  {{- if $value -}}
+    {{- $key = . }}
+  {{- end -}}
+{{- end -}}
+{{- printf "%s" $key -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/_warnings.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/_warnings.tpl
new file mode 100644
index 000000000000..ae10fa41ee7d
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/_warnings.tpl
@@ -0,0 +1,14 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Warning about using rolling tag.
+Usage:
+{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }}
+*/}}
+{{- define "common.warnings.rollingTag" -}}
+
+{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+
+{{- end -}}
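+
+{{/*
+Illustrative values (hypothetical) that would trigger the warning above:
+image:
+  repository: bitnami/mysql
+  tag: latest
+Immutable tags such as "8.0.28-debian-10-r23" (ending in "-rNN") or digests
+containing "sha256:" pass the regex check and stay silent.
+*/}}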
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_cassandra.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_cassandra.tpl
new file mode 100644
index 000000000000..ded1ae3bcad7
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_cassandra.tpl
@@ -0,0 +1,72 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Cassandra required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret"
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.cassandra.passwords" -}}
+  {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}}
+  {{- $enabled := include "common.cassandra.values.enabled" . -}}
+  {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}}
+  {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.cassandra.values.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.cassandra.dbUser.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.dbUser.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled cassandra.
+
+Usage:
+{{ include "common.cassandra.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.cassandra.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.cassandra.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key dbUser
+
+Usage:
+{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false
+*/}}
+{{- define "common.cassandra.values.key.dbUser" -}}
+  {{- if .subchart -}}
+    cassandra.dbUser
+  {{- else -}}
+    dbUser
+  {{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_mariadb.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_mariadb.tpl
new file mode 100644
index 000000000000..b6906ff77b72
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_mariadb.tpl
@@ -0,0 +1,103 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MariaDB required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret"
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mariadb.passwords" -}}
+  {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mariadb.values.enabled" . -}}
+  {{- $architecture := include "common.mariadb.values.architecture" . -}}
+  {{- $authPrefix := include "common.mariadb.values.key.auth" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- if not (empty $valueUsername) -}}
+      {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replication") -}}
+      {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mariadb.
+
+Usage:
+{{ include "common.mariadb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mariadb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mariadb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mariadb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false
+*/}}
+{{- define "common.mariadb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mariadb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_mongodb.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_mongodb.tpl
new file mode 100644
index 000000000000..a071ea4d3127
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_mongodb.tpl
@@ -0,0 +1,108 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate MongoDB® required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret"
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.mongodb.passwords" -}}
+  {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}}
+  {{- $enabled := include "common.mongodb.values.enabled" . -}}
+  {{- $authPrefix := include "common.mongodb.values.key.auth" . -}}
+  {{- $architecture := include "common.mongodb.values.architecture" . -}}
+  {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}}
+  {{- $valueKeyUsername := printf "%s.username" $authPrefix -}}
+  {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}}
+  {{- $valueKeyPassword := printf "%s.password" $authPrefix -}}
+  {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}}
+  {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}}
+
+  {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+    {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }}
+    {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }}
+    {{- if and $valueUsername $valueDatabase -}}
+      {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+    {{- end -}}
+
+    {{- if (eq $architecture "replicaset") -}}
+      {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_postgresql.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 000000000000..164ec0d01252
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,129 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}}
+    {{- if (eq $enabledReplication "true") -}}
+      {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to decide whether to evaluate global values.
+
+Usage:
+{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }}
+Params:
+  - key - String - Required. Field to be evaluated within global, e.g: "existingSecret"
+*/}}
+{{- define "common.postgresql.values.use.global" -}}
+  {{- if .context.Values.global -}}
+    {{- if .context.Values.global.postgresql -}}
+      {{- index .context.Values.global.postgresql .key | quote -}}
+    {{- end -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for existingSecret.
+
+Usage:
+{{ include "common.postgresql.values.existingSecret" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.existingSecret" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}}
+
+  {{- if .subchart -}}
+    {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}}
+  {{- else -}}
+    {{- default (.context.Values.existingSecret | quote) $globalValue -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled postgresql.
+
+Usage:
+{{ include "common.postgresql.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.postgresql.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key postgressPassword.
+
+Usage:
+{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.postgressPassword" -}}
+  {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}}
+
+  {{- if not $globalValue -}}
+    {{- if .subchart -}}
+      postgresql.postgresqlPassword
+    {{- else -}}
+      postgresqlPassword
+    {{- end -}}
+  {{- else -}}
+    global.postgresql.postgresqlPassword
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled.replication.
+
+Usage:
+{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.enabled.replication" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.postgresql.replication.enabled -}}
+  {{- else -}}
+    {{- printf "%v" .context.Values.replication.enabled -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key replication.password.
+
+Usage:
+{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.postgresql.values.key.replicationPassword" -}}
+  {{- if .subchart -}}
+    postgresql.replication.password
+  {{- else -}}
+    replication.password
+  {{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_redis.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_redis.tpl
new file mode 100644
index 000000000000..5d72959b9eee
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_redis.tpl
@@ -0,0 +1,76 @@
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate Redis™ required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret"
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
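+
+{{/*
+Illustrative sketch (hypothetical layout, derived from the ternaries above):
+for a standardized chart (version >= 14) used as a subchart, the keys resolve to
+"redis.auth.existingSecret", "redis.auth.password" and "redis.auth.enabled";
+for an older layout they fall back to "redis.existingSecret", "redis.password"
+and "redis.usePassword".
+*/}}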
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_validations.tpl b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 000000000000..9a814cf40dcb
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
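+
+{{/*
+Illustrative failure output (hypothetical values): if "auth.password" is unset
+while its Secret field is "mysql-password", the helper below emits roughly
+  'auth.password' must not be empty, please add '--set auth.password=$MYSQL_PASSWORD' to the command.
+followed by the recovery instructions printed by "common.utils.secret.getvalue".
+*/}}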
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/charts/common/values.yaml b/pkg/scanners/helm/test/mysql/charts/common/values.yaml
new file mode 100644
index 000000000000..f2df68e5e6af
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/pkg/scanners/helm/test/mysql/ci/values-production-with-rbac.yaml b/pkg/scanners/helm/test/mysql/ci/values-production-with-rbac.yaml
new file mode 100644
index 000000000000..d3370c931113
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/ci/values-production-with-rbac.yaml
@@ -0,0 +1,30 @@
+# Test values file for generating all of the yaml and checking that
+# the rendering is correct
+
+architecture: replication
+auth:
+  usePasswordFiles: true
+
+primary:
+  extraEnvVars:
+    - name: TEST
+      value: "3"
+  podDisruptionBudget:
+    create: true
+
+secondary:
+  replicaCount: 2
+  extraEnvVars:
+    - name: TEST
+      value: "2"
+  podDisruptionBudget:
+    create: true
+
+serviceAccount:
+  create: true
+  name: mysql-service-account
+rbac:
+  create: true
+
+metrics:
+  enabled: true
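+
+# A minimal sketch (hypothetical release name; paths relative to the chart
+# directory are assumptions) of how this file is exercised when rendering:
+#   helm template my-release . -f ci/values-production-with-rbac.yaml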
diff --git a/pkg/scanners/helm/test/mysql/templates/NOTES.txt b/pkg/scanners/helm/test/mysql/templates/NOTES.txt
new file mode 100644
index 000000000000..1b8b6d5ea7d2
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/NOTES.txt
@@ -0,0 +1,102 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <name-of-the-pod> -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+  /opt/bitnami/scripts/mysql/entrypoint.sh /opt/bitnami/scripts/mysql/run.sh
+
+{{- else }}
+
+Tip:
+
+  Watch the deployment status using the command: kubectl get pods -w --namespace {{ .Release.Namespace }}
+
+Services:
+
+  echo Primary: {{ include "mysql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.primary.service.port }}
+{{- if eq .Values.architecture "replication" }}
+  echo Secondary: {{ include "mysql.secondary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.secondary.service.port }}
+{{- end }}
+
+Execute the following to get the administrator credentials:
+
+  echo Username: root
+  MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mysql.secretName" . }} -o jsonpath="{.data.mysql-root-password}" | base64 --decode)
+
+To connect to your database:
+
+  1. Run a pod that you can use as a client:
+
+      kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --image {{ template "mysql.image" . }} --namespace {{ .Release.Namespace }} --command -- bash
+
+  2. To connect to primary service (read/write):
+
+      mysql -h {{ include "mysql.primary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -uroot -p"$MYSQL_ROOT_PASSWORD"
+
+{{- if eq .Values.architecture "replication" }}
+
+  3. To connect to secondary service (read-only):
+
+      mysql -h {{ include "mysql.secondary.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} -uroot -p"$MYSQL_ROOT_PASSWORD"
+{{- end }}
+
+{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}
+Note: Since NetworkPolicy is enabled, only pods with label "{{ template "common.names.fullname" . }}-client=true" will be able to connect to MySQL.
+{{- end }}
+
+{{- if .Values.metrics.enabled }}
+
+To access the MySQL Prometheus metrics from outside the cluster execute the following commands:
+
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-metrics" (include "common.names.fullname" .) }} {{ .Values.metrics.service.port }}:{{ .Values.metrics.service.port }} &
+  curl http://127.0.0.1:{{ .Values.metrics.service.port }}/metrics
+
+{{- end }}
+
+To upgrade this helm chart:
+
+  1. Obtain the password as described in the 'Administrator credentials' section and set the 'auth.rootPassword' parameter as shown below:
+
+      ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} -o jsonpath="{.data.mysql-root-password}" | base64 --decode)
+      helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} bitnami/mysql --set auth.rootPassword=$ROOT_PASSWORD
+
+{{ include "mysql.validateValues" . }}
+{{ include "mysql.checkRollingTags" . }}
+{{- if and (not .Values.auth.existingSecret) (not .Values.auth.customPasswordFiles) -}}
+  {{- $secretName := include "mysql.secretName" . -}}
+  {{- $requiredPasswords := list -}}
+
+  {{- $requiredRootPassword := dict "valueKey" "auth.rootPassword" "secret" $secretName "field" "mysql-root-password" -}}
+  {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}}
+
+  {{- if not (empty .Values.auth.username) -}}
+    {{- $requiredPassword := dict "valueKey" "auth.password" "secret" $secretName "field" "mysql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}}
+  {{- end -}}
+
+  {{- if (eq .Values.architecture "replication") -}}
+    {{- $requiredReplicationPassword := dict "valueKey" "auth.replicationPassword" "secret" $secretName "field" "mysql-replication-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}}
+  {{- end -}}
+
+  {{- $mysqlPasswordValidationErrors := include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" $) -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $mysqlPasswordValidationErrors "context" $) -}}
+{{- end }}
+{{- end }}
"primary" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- include "common.names.fullname" . -}} +{{- end -}} +{{- end -}} + +{{- define "mysql.secondary.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "secondary" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper MySQL image name +*/}} +{{- define "mysql.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "mysql.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mysql.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mysql.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{ template "mysql.initdbScriptsCM" . }} +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "mysql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} + {{- printf "%s" .Values.initdbScriptsConfigMap -}} +{{- else -}} + {{- printf "%s-init-scripts" (include "mysql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* + Returns the proper service account name depending if an explicit service account name is set + in the values file. If the name is not set it will default to either mysql.fullname if serviceAccount.create + is true or default otherwise. +*/}} +{{- define "mysql.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.primary.configmapName" -}} +{{- if .Values.primary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.primary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.primary.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MySQL Secondary +*/}} +{{- define "mysql.primary.createConfigmap" -}} +{{- if and .Values.primary.configuration (not .Values.primary.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MySQL Primary configuration +*/}} +{{- define "mysql.secondary.configmapName" -}} +{{- if .Values.secondary.existingConfigmap -}} + {{- printf "%s" (tpl .Values.secondary.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mysql.secondary.fullname" .) 
+    {{- printf "%s" (include "mysql.secondary.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a configmap object should be created for MySQL Secondary
+*/}}
+{{- define "mysql.secondary.createConfigmap" -}}
+{{- if and (eq .Values.architecture "replication") .Values.secondary.configuration (not .Values.secondary.existingConfigmap) }}
+    {{- true -}}
+{{- else -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the secret with MySQL credentials
+*/}}
+{{- define "mysql.secretName" -}}
+    {{- if .Values.auth.existingSecret -}}
+        {{- printf "%s" .Values.auth.existingSecret -}}
+    {{- else -}}
+        {{- printf "%s" (include "common.names.fullname" .) -}}
+    {{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret object should be created for MySQL
+*/}}
+{{- define "mysql.createSecret" -}}
+{{- if and (not .Values.auth.existingSecret) (not .Values.auth.customPasswordFiles) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns the available value for certain key in an existing secret (if it exists),
+otherwise it generates a random value.
+*/}}
+{{- define "getValueFromSecret" }}
+    {{- $len := (default 16 .Length) | int -}}
+    {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}}
+    {{- if $obj }}
+        {{- index $obj .Key | b64dec -}}
+    {{- else -}}
+        {{- randAlphaNum $len -}}
+    {{- end -}}
+{{- end }}
+
+{{- define "mysql.root.password" -}}
+    {{- if not (empty .Values.auth.rootPassword) }}
+        {{- .Values.auth.rootPassword }}
+    {{- else if (not .Values.auth.forcePassword) }}
+        {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "mysql-root-password") }}
+    {{- else }}
+        {{- required "A MySQL Root Password is required!" .Values.auth.rootPassword }}
+    {{- end }}
+{{- end -}}
+
+{{- define "mysql.password" -}}
+    {{- if and (not (empty .Values.auth.username)) (not (empty .Values.auth.password)) }}
+        {{- .Values.auth.password }}
+    {{- else if (not .Values.auth.forcePassword) }}
+        {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "mysql-password") }}
+    {{- else }}
+        {{- required "A MySQL Database Password is required!" .Values.auth.password }}
+    {{- end }}
+{{- end -}}
+
+{{- define "mysql.replication.password" -}}
+    {{- if not (empty .Values.auth.replicationPassword) }}
+        {{- .Values.auth.replicationPassword }}
+    {{- else if (not .Values.auth.forcePassword) }}
+        {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "common.names.fullname" .) "Length" 10 "Key" "mysql-replication-password") }}
+    {{- else }}
+        {{- required "A MySQL Replication Password is required!" .Values.auth.replicationPassword }}
+    {{- end }}
+{{- end -}}
+
+{{/* Check if there are rolling tags in the images */}}
+{{- define "mysql.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.image }}
+{{- include "common.warnings.rollingTag" .Values.metrics.image }}
+{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
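+Each validator is expected to return "" on success, which `without $messages ""`
+then drops. A hypothetical validator (none ships with this chart) would plug in as:
+  {{- $messages = append $messages (include "mysql.validateValues.something" .) -}}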
+*/}}
+{{- define "mysql.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/templates/extra-list.yaml b/pkg/scanners/helm/test/mysql/templates/extra-list.yaml
new file mode 100644
index 000000000000..9ac65f9e16f4
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" . "context" $) }}
+{{- end }}
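+{{- /*
+Illustrative values (hypothetical): each extraDeploy entry is rendered through
+"common.tplvalues.render", so both plain manifests and templated strings work:
+extraDeploy:
+  - apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      name: extra-cm
+*/}}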
diff --git a/pkg/scanners/helm/test/mysql/templates/metrics-svc.yaml b/pkg/scanners/helm/test/mysql/templates/metrics-svc.yaml
new file mode 100644
index 000000000000..fb0d9d761dc6
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/metrics-svc.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.metrics.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-metrics" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    app.kubernetes.io/component: metrics
+  {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.metrics.service.annotations }}
+    {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.metrics.service.type }}
+  ports:
+    - port: {{ .Values.metrics.service.port }}
+      targetPort: metrics
+      protocol: TCP
+      name: metrics
+  selector: {{- include "common.labels.matchLabels" $ | nindent 4 }}
+{{- end }}
diff --git a/pkg/scanners/helm/test/mysql/templates/networkpolicy.yaml b/pkg/scanners/helm/test/mysql/templates/networkpolicy.yaml
new file mode 100644
index 000000000000..a0d1d01d4079
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/networkpolicy.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.networkPolicy.enabled }}
+kind: NetworkPolicy
+apiVersion: {{ template "common.capabilities.networkPolicy.apiVersion" . }}
+metadata:
+  name: {{ template "common.names.fullname" . }}
+  labels:
+    {{- include "common.labels.standard" . | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+  namespace: {{ .Release.Namespace }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "common.labels.matchLabels" . | nindent 6 }}
+  ingress:
+    # Allow inbound connections
+    - ports:
+        - port: {{ .Values.primary.service.port }}
+      {{- if not .Values.networkPolicy.allowExternal }}
+      from:
+        - podSelector:
+            matchLabels:
+              {{ template "common.names.fullname" . }}-client: "true"
+          {{- if .Values.networkPolicy.explicitNamespacesSelector }}
+          namespaceSelector:
+{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }}
+          {{- end }}
+        - podSelector:
+            matchLabels:
+              {{- include "common.labels.matchLabels" . | nindent 14 }}
+      {{- end }}
+  {{- if .Values.metrics.enabled }}
+    # Allow prometheus scrapes
+    - ports:
+        - port: 9104
+  {{- end }}
+{{- end }}
diff --git a/pkg/scanners/helm/test/mysql/templates/primary/configmap.yaml b/pkg/scanners/helm/test/mysql/templates/primary/configmap.yaml
new file mode 100644
index 000000000000..540b7b9072e9
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/primary/configmap.yaml
@@ -0,0 +1,18 @@
+{{- if (include "mysql.primary.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "mysql.primary.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: primary
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  my.cnf: |-
+    {{ .Values.primary.configuration | nindent 4 }}
+{{- end -}}
diff --git a/pkg/scanners/helm/test/mysql/templates/primary/initialization-configmap.yaml b/pkg/scanners/helm/test/mysql/templates/primary/initialization-configmap.yaml
new file mode 100644
index 000000000000..83cbaea74883
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/primary/initialization-configmap.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-init-scripts" (include "mysql.primary.fullname" .) }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: primary
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }}
+{{ end }}
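+{{- /*
+Illustrative values (hypothetical) that this template renders into the ConfigMap:
+initdbScripts:
+  create_db.sql: |
+    CREATE DATABASE IF NOT EXISTS example;
+*/}}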
diff --git a/pkg/scanners/helm/test/mysql/templates/primary/pdb.yaml b/pkg/scanners/helm/test/mysql/templates/primary/pdb.yaml
new file mode 100644
index 000000000000..106ad5207e5a
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/primary/pdb.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.primary.pdb.enabled }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "mysql.primary.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: primary
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.primary.pdb.minAvailable }}
+  minAvailable: {{ .Values.primary.pdb.minAvailable }}
+  {{- end }}
+  {{- if .Values.primary.pdb.maxUnavailable }}
+  maxUnavailable: {{ .Values.primary.pdb.maxUnavailable }}
+  {{- end }}
+  selector:
+    matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: primary
+{{- end }}
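+{{- /*
+Illustrative values (hypothetical) that enable this PodDisruptionBudget:
+primary:
+  pdb:
+    enabled: true
+    minAvailable: 1
+*/}}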
diff --git a/pkg/scanners/helm/test/mysql/templates/primary/statefulset.yaml b/pkg/scanners/helm/test/mysql/templates/primary/statefulset.yaml
new file mode 100644
index 000000000000..6f9c99ea66d9
--- /dev/null
+++ b/pkg/scanners/helm/test/mysql/templates/primary/statefulset.yaml
@@ -0,0 +1,368 @@
+apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }}
+kind: StatefulSet
+metadata:
+  name: {{ include "mysql.primary.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: primary
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.primary.podLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  replicas: 1
+  selector:
+    matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }}
+      app.kubernetes.io/component: primary
+  serviceName: {{ include "mysql.primary.fullname" . }}
+  updateStrategy:
+    type: {{ .Values.primary.updateStrategy }}
+    {{- if (eq "Recreate" .Values.primary.updateStrategy) }}
+    rollingUpdate: null
+    {{- else if .Values.primary.rollingUpdatePartition }}
+    rollingUpdate:
+      partition: {{ .Values.primary.rollingUpdatePartition }}
+    {{- end }}
+  template:
+    metadata:
+      annotations:
+        {{- if (include "mysql.primary.createConfigmap" .) }}
+        checksum/configuration: {{ include (print $.Template.BasePath "/primary/configmap.yaml") . | sha256sum }}
+        {{- end }}
+        {{- if .Values.primary.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.primary.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: primary
+        {{- if .Values.commonLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+        {{- if .Values.primary.podLabels }}
+        {{- include "common.tplvalues.render" ( dict "value" .Values.primary.podLabels "context" $ ) | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- include "mysql.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.primary.hostAliases }}
+      hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.primary.hostAliases "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.schedulerName }}
+      schedulerName: {{ .Values.schedulerName | quote }}
+      {{- end }}
+      serviceAccountName: {{ template "mysql.serviceAccountName" . }}
+      {{- if .Values.primary.affinity }}
+      affinity: {{- include "common.tplvalues.render" (dict "value" .Values.primary.affinity "context" $) | nindent 8 }}
+      {{- else }}
+      affinity:
+        podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.primary.podAntiAffinityPreset "component" "primary" "context" $) | nindent 10 }}
+        nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.primary.nodeAffinityPreset.type "key" .Values.primary.nodeAffinityPreset.key "values" .Values.primary.nodeAffinityPreset.values) | nindent 10 }}
+      {{- end }}
+      {{- if .Values.primary.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.primary.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.primary.tolerations "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.priorityClassName }}
+      priorityClassName: {{ .Values.priorityClassName | quote }}
+      {{- end }}
+      {{- if .Values.primary.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.primary.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      {{- if or .Values.primary.initContainers (and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled) }}
+      initContainers:
+        {{- if .Values.primary.initContainers }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.primary.initContainers "context" $) | nindent 8 }}
+        {{- end }}
+        {{- if and .Values.primary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.primary.persistence.enabled }}
+        - name: volume-permissions
+          image: {{ include "mysql.volumePermissions.image" . }}
+          imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - /bin/bash
+            - -ec
+            - |
+              chown -R {{ .Values.primary.containerSecurityContext.runAsUser }}:{{ .Values.primary.podSecurityContext.fsGroup }} /bitnami/mysql
+          securityContext:
+            runAsUser: 0
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/mysql
+        {{- end }}
+      {{- end }}
+      containers:
+        - name: mysql
+          image: {{ include "mysql.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.primary.containerSecurityContext.enabled }}
+          securityContext: {{- omit .Values.primary.containerSecurityContext "enabled" | toYaml | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }}
+          {{- else if .Values.primary.command }}
+          command: {{- include "common.tplvalues.render" (dict "value" .Values.primary.command "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.diagnosticMode.enabled }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }}
+          {{- else if .Values.primary.args }}
+          args: {{- include "common.tplvalues.render" (dict "value" .Values.primary.args "context" $) | nindent 12 }}
+          {{- end }}
+          env:
+            - name: BITNAMI_DEBUG
+              value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: MYSQL_ROOT_PASSWORD_FILE
+              value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }}
+            {{- else }}
+            - name: MYSQL_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "mysql.secretName" . }}
+                  key: mysql-root-password
+            {{- end }}
+            {{- if not (empty .Values.auth.username) }}
+            - name: MYSQL_USER
+              value: {{ .Values.auth.username | quote }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: MYSQL_PASSWORD_FILE
+              value: {{ default "/opt/bitnami/mysql/secrets/mysql-password" .Values.auth.customPasswordFiles.user }}
+            {{- else }}
+            - name: MYSQL_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "mysql.secretName" . }}
+                  key: mysql-password
+            {{- end }}
+            {{- end }}
+            - name: MYSQL_DATABASE
+              value: {{ .Values.auth.database | quote }}
+            {{- if eq .Values.architecture "replication" }}
+            - name: MYSQL_REPLICATION_MODE
+              value: "master"
+            - name: MYSQL_REPLICATION_USER
+              value: {{ .Values.auth.replicationUser | quote }}
+            {{- if .Values.auth.usePasswordFiles }}
+            - name: MYSQL_REPLICATION_PASSWORD_FILE
+              value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }}
+            {{- else }}
+            - name: MYSQL_REPLICATION_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ template "mysql.secretName" . }}
}} + key: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.primary.extraFlags }}" + {{- end }} + {{- if .Values.primary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.primary.extraEnvVarsCM .Values.primary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.primary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.primary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.primary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.primary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.primary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.primary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.primary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.primary.startupProbe.enabled }} + startupProbe: {{- omit .Values.primary.startupProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.primary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.primary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.primary.resources }} + resources: {{ toYaml .Values.primary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if .Values.primary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.primary }} {{ . }} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.primary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.primary.configuration .Values.primary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.primary.configmapName" . }} + {{- end }} + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ include "mysql.initdbScriptsCM" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ include "mysql.secretName" . }} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-password + path: mysql-password + {{- if eq .Values.architecture "replication" }} + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- end }} + {{- if .Values.primary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.primary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.primary.persistence.enabled .Values.primary.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl .Values.primary.persistence.existingClaim . }} + {{- else if not .Values.primary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if and .Values.primary.persistence.enabled (not .Values.primary.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . 
| nindent 10 }} + app.kubernetes.io/component: primary + {{- if .Values.primary.persistence.annotations }} + annotations: + {{- toYaml .Values.primary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.primary.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.primary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.primary.persistence "global" .Values.global) }} + {{- if .Values.primary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.primary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/primary/svc-headless.yaml b/pkg/scanners/helm/test/mysql/templates/primary/svc-headless.yaml new file mode 100644 index 000000000000..49e6e5798783 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/primary/svc-headless.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.primary.service.port }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/pkg/scanners/helm/test/mysql/templates/primary/svc.yaml b/pkg/scanners/helm/test/mysql/templates/primary/svc.yaml new file mode 100644 index 000000000000..b46e6faa8149 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/primary/svc.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.primary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: primary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.primary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.primary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.primary.service.type }} + {{- if and (eq .Values.primary.service.type "ClusterIP") .Values.primary.service.clusterIP }} + clusterIP: {{ .Values.primary.service.clusterIP }} + {{- end }} + {{- if and .Values.primary.service.loadBalancerIP (eq .Values.primary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.primary.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.primary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.primary.service.type "LoadBalancer") .Values.primary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.primary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.primary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.primary.service.type "NodePort") (eq .Values.primary.service.type "LoadBalancer")) .Values.primary.service.nodePort) }} + nodePort: {{ .Values.primary.service.nodePort }} + {{- else if eq .Values.primary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: primary diff --git a/pkg/scanners/helm/test/mysql/templates/role.yaml b/pkg/scanners/helm/test/mysql/templates/role.yaml new file mode 100644 index 000000000000..4cbdd5c9ff20 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/rolebinding.yaml b/pkg/scanners/helm/test/mysql/templates/rolebinding.yaml new file mode 100644 index 000000000000..90ede32f5fc7 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "mysql.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "common.names.fullname" . -}} +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/secondary/configmap.yaml b/pkg/scanners/helm/test/mysql/templates/secondary/configmap.yaml new file mode 100644 index 000000000000..682e3e19ba96 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/secondary/configmap.yaml @@ -0,0 +1,18 @@ +{{- if (include "mysql.secondary.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + my.cnf: |- + {{ .Values.secondary.configuration | nindent 4 }} +{{- end -}} diff --git a/pkg/scanners/helm/test/mysql/templates/secondary/pdb.yaml b/pkg/scanners/helm/test/mysql/templates/secondary/pdb.yaml new file mode 100644 index 000000000000..49c7e167c0a2 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/secondary/pdb.yaml @@ -0,0 +1,25 @@ +{{- if and (eq .Values.architecture "replication") .Values.secondary.pdb.enabled }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.secondary.pdb.minAvailable }} + minAvailable: {{ .Values.secondary.pdb.minAvailable }} + {{- end }} + {{- if .Values.secondary.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.secondary.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/secondary/statefulset.yaml b/pkg/scanners/helm/test/mysql/templates/secondary/statefulset.yaml new file mode 100644 index 000000000000..ef196ebf6df0 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/secondary/statefulset.yaml @@ -0,0 +1,338 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.podLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.secondary.replicaCount }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: secondary + serviceName: {{ include "mysql.secondary.fullname" . }} + updateStrategy: + type: {{ .Values.secondary.updateStrategy }} + {{- if (eq "Recreate" .Values.secondary.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.secondary.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.secondary.rollingUpdatePartition }} + {{- end }} + template: + metadata: + annotations: + {{- if (include "mysql.secondary.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/secondary/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.secondary.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.podLabels "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "mysql.imagePullSecrets" . | nindent 6 }} + {{- if .Values.secondary.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ include "mysql.serviceAccountName" . 
}} + {{- if .Values.secondary.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.secondary.podAntiAffinityPreset "component" "secondary" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.secondary.nodeAffinityPreset.type "key" .Values.secondary.nodeAffinityPreset.key "values" .Values.secondary.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.secondary.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.secondary.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.secondary.podSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.secondary.initContainers (and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled) }} + initContainers: + {{- if .Values.secondary.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.secondary.podSecurityContext.enabled .Values.volumePermissions.enabled .Values.secondary.persistence.enabled }} + - name: volume-permissions + image: {{ include "mysql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.secondary.containerSecurityContext.runAsUser }}:{{ .Values.secondary.podSecurityContext.fsGroup }} /bitnami/mysql + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- end }} + {{- end }} + containers: + - name: mysql + image: {{ include "mysql.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.secondary.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.secondary.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.secondary.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.secondary.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MYSQL_REPLICATION_MODE + value: "slave" + - name: MYSQL_MASTER_HOST + value: {{ include "mysql.primary.fullname" . }} + - name: MYSQL_MASTER_PORT_NUMBER + value: {{ .Values.primary.service.port | quote }} + - name: MYSQL_MASTER_ROOT_USER + value: "root" + - name: MYSQL_REPLICATION_USER + value: {{ .Values.auth.replicationUser | quote }} + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_MASTER_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + - name: MYSQL_REPLICATION_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysql/secrets/mysql-replication-password" .Values.auth.customPasswordFiles.replicator }} + {{- else }} + - name: MYSQL_MASTER_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + - name: MYSQL_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . 
}} + key: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraFlags }} + - name: MYSQL_EXTRA_FLAGS + value: "{{ .Values.secondary.extraFlags }}" + {{- end }} + {{- if .Values.secondary.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.secondary.extraEnvVarsCM .Values.secondary.extraEnvVarsSecret }} + envFrom: + {{- if .Values.secondary.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.secondary.extraEnvVarsCM }} + {{- end }} + {{- if .Values.secondary.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.secondary.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: mysql + containerPort: 3306 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.secondary.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.secondary.livenessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.secondary.readinessProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.secondary.startupProbe.enabled }} + startupProbe: {{- omit .Values.secondary.startupProbe "enabled" | toYaml | nindent 12 }} + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_MASTER_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_MASTER_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_MASTER_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + {{- else if .Values.secondary.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.customStartupProbe "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.secondary.resources }} + resources: {{ toYaml .Values.secondary.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + mountPath: /opt/bitnami/mysql/secrets/ + {{- end }} + {{- if .Values.secondary.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "mysql.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + {{- if .Values.auth.usePasswordFiles }} + - name: MYSQL_ROOT_PASSWORD_FILE + value: {{ default "/opt/bitnami/mysqld-exporter/secrets/mysql-root-password" .Values.auth.customPasswordFiles.root }} + {{- else }} + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "mysql.secretName" . }} + key: mysql-root-password + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + DATA_SOURCE_NAME="root:${password_aux}@(localhost:3306)/" /bin/mysqld_exporter {{- range .Values.metrics.extraArgs.secondary }} {{ . }} {{- end }} + {{- end }} + ports: + - name: metrics + containerPort: 9104 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.metrics.livenessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.metrics.readinessProbe "enabled" | toYaml | nindent 12 }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + volumeMounts: + - name: mysql-credentials + mountPath: /opt/bitnami/mysqld-exporter/secrets/ + {{- end }} + {{- end }} + {{- if .Values.secondary.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.secondary.configuration .Values.secondary.existingConfigmap }} + - name: config + configMap: + name: {{ include "mysql.secondary.configmapName" . }} + {{- end }} + {{- if and .Values.auth.usePasswordFiles (not .Values.auth.customPasswordFiles) }} + - name: mysql-credentials + secret: + secretName: {{ template "mysql.secretName" . }} + items: + - key: mysql-root-password + path: mysql-root-password + - key: mysql-replication-password + path: mysql-replication-password + {{- end }} + {{- if .Values.secondary.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.secondary.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.secondary.persistence.enabled }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: {{ include "common.labels.matchLabels" . | nindent 10 }} + app.kubernetes.io/component: secondary + {{- if .Values.secondary.persistence.annotations }} + annotations: + {{- toYaml .Values.secondary.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.secondary.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.secondary.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.secondary.persistence "global" .Values.global) }} + {{- if .Values.secondary.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.secondary.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/secondary/svc-headless.yaml b/pkg/scanners/helm/test/mysql/templates/secondary/svc-headless.yaml new file mode 100644 index 000000000000..703d8e747b75 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/secondary/svc-headless.yaml @@ -0,0 +1,26 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: {{ .Values.secondary.service.port }} + targetPort: mysql + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/secondary/svc.yaml b/pkg/scanners/helm/test/mysql/templates/secondary/svc.yaml new file mode 100644 index 000000000000..74a4c6ef5fb8 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/secondary/svc.yaml @@ -0,0 +1,43 @@ +{{- if eq .Values.architecture "replication" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mysql.secondary.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: secondary + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.secondary.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secondary.service.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.secondary.service.type }} + {{- if and (eq .Values.secondary.service.type "ClusterIP") .Values.secondary.service.clusterIP }} + clusterIP: {{ .Values.secondary.service.clusterIP }} + {{- end }} + {{- if and .Values.secondary.service.loadBalancerIP (eq .Values.secondary.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.secondary.service.loadBalancerIP }} + externalTrafficPolicy: {{ .Values.secondary.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.secondary.service.type "LoadBalancer") .Values.secondary.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.secondary.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: mysql + port: {{ .Values.secondary.service.port }} + protocol: TCP + targetPort: mysql + {{- if (and (or (eq .Values.secondary.service.type "NodePort") (eq .Values.secondary.service.type "LoadBalancer")) .Values.secondary.service.nodePort) }} + nodePort: {{ .Values.secondary.service.nodePort }} + {{- else if eq .Values.secondary.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{ include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: secondary +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/secrets.yaml b/pkg/scanners/helm/test/mysql/templates/secrets.yaml new file mode 100644 index 000000000000..9412fc35a5bc --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/secrets.yaml @@ -0,0 +1,21 @@ +{{- if eq (include "mysql.createSecret" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + mysql-root-password: {{ include "mysql.root.password" . | b64enc | quote }} + mysql-password: {{ include "mysql.password" . | b64enc | quote }} + {{- if eq .Values.architecture "replication" }} + mysql-replication-password: {{ include "mysql.replication.password" . | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/serviceaccount.yaml b/pkg/scanners/helm/test/mysql/templates/serviceaccount.yaml new file mode 100644 index 000000000000..59eb10409d91 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mysql.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- if (not .Values.auth.customPasswordFiles) }} +secrets: + - name: {{ template "mysql.secretName" . }} +{{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/templates/servicemonitor.yaml b/pkg/scanners/helm/test/mysql/templates/servicemonitor.yaml new file mode 100644 index 000000000000..f082dd5409d6 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/templates/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/pkg/scanners/helm/test/mysql/values.schema.json b/pkg/scanners/helm/test/mysql/values.schema.json new file mode 100644 index 000000000000..8021a4603600 --- /dev/null +++ b/pkg/scanners/helm/test/mysql/values.schema.json @@ -0,0 +1,178 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "MySQL architecture", + "form": true, + "description": "Allowed values: `standalone` or `replication`", + "enum": ["standalone", "replication"] + }, + "auth": { + "type": "object", + "title": "Authentication configuration", + "form": true, + "required": ["database", "username", "password"], + "properties": { + "rootPassword": { + "type": "string", + "title": "MySQL root password", + "description": "Defaults to a random 10-character alphanumeric string if not set" + }, + "database": { + "type": "string", + "title": "MySQL custom database name" + }, + "username": { + "type": "string", + "title": "MySQL custom username" + }, + "password": { + "type": "string", + "title": "MySQL custom password" + }, + "replicationUser": { + "type": "string", + "title": "MySQL replication username" + }, + "replicationPassword": { + "type": "string", + "title": "MySQL replication password" + } + } + }, + "primary": { + "type": "object", + "title": "Primary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL primary Pod security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + "title": "MySQL primary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "primary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + "enabled": { + "type": "boolean", + "default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "primary/persistence/enabled" + } + } + } + } + } + }, + "secondary": { + "type": "object", + "title": "Secondary database configuration", + "form": true, + "properties": { + "podSecurityContext": { + "type": "object", + "title": "MySQL secondary Pod security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "fsGroup": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/podSecurityContext/enabled" + } + } + } + }, + "containerSecurityContext": { + "type": "object", + "title": "MySQL secondary container security context", + "properties": { + "enabled": { + "type": "boolean", + "default": false + }, + "runAsUser": { + "type": "integer", + "default": 1001, + "hidden": { + "value": false, + "path": "secondary/containerSecurityContext/enabled" + } + } + } + }, + "persistence": { + "type": "object", + "title": "Enable persistence using Persistent Volume Claims", + "properties": { + 
"enabled": { + "type": "boolean", + "default": true, + "title": "If true, use a Persistent Volume Claim, If false, use emptyDir" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "secondary/persistence/enabled" + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/pkg/scanners/helm/test/mysql/values.yaml b/pkg/scanners/helm/test/mysql/values.yaml new file mode 100644 index 000000000000..3900e865955c --- /dev/null +++ b/pkg/scanners/helm/test/mysql/values.yaml @@ -0,0 +1,1020 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets [array] Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Cluster domain +## +clusterDomain: cluster.local +## @param commonAnnotations [object] Common annotations to add to all MySQL resources (sub-charts are not considered). Evaluated as a template +## +commonAnnotations: {} +## @param commonLabels [object] Common labels to add to all MySQL resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} +## @param extraDeploy [array] Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] +## @param schedulerName Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MySQL common parameters + +## Bitnami MySQL image +## ref: https://hub.docker.com/r/bitnami/mysql/tags/ +## @param image.registry MySQL image registry +## @param image.repository MySQL image repository +## @param image.tag MySQL image tag (immutable tags are recommended) +## @param image.pullPolicy MySQL image pull policy +## @param image.pullSecrets [array] Specify docker-registry secret names as an array +## @param image.debug Specify if debug logs should be enabled +## +image: + registry: docker.io + repository: bitnami/mysql + tag: 8.0.28-debian-10-r23 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false +## @param architecture MySQL architecture (`standalone` or `replication`) +## +architecture: standalone +## MySQL Authentication parameters +## +auth: + ## @param auth.rootPassword Password for the `root` user. Ignored if existing secret is provided + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-the-root-password-on-first-run + ## + rootPassword: "" + ## @param auth.database Name for a custom database to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-on-first-run + ## + database: my_database + ## @param auth.username Name for a custom user to create + ## ref: https://github.com/bitnami/bitnami-docker-mysql/blob/master/README.md#creating-a-database-user-on-first-run + ## + username: "" + ## @param auth.password Password for the new user. Ignored if existing secret is provided + ## + password: "" + ## @param auth.replicationUser MySQL replication user + ## ref: https://github.com/bitnami/bitnami-docker-mysql#setting-up-a-replication-cluster + ## + replicationUser: replicator + ## @param auth.replicationPassword MySQL replication user password. Ignored if existing secret is provided + ## + replicationPassword: "" + ## @param auth.existingSecret Use existing secret for password details. The secret has to contain the keys `mysql-root-password`, `mysql-replication-password` and `mysql-password` + ## NOTE: When it's set the auth.rootPassword, auth.password, auth.replicationPassword are ignored. 
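+ ## A hedged sketch of a compatible pre-created secret (the name and values are
+ ## placeholders, not part of this chart); it must carry exactly the keys listed above:
+ ##   apiVersion: v1
+ ##   kind: Secret
+ ##   metadata:
+ ##     name: my-mysql-credentials
+ ##   stringData:
+ ##     mysql-root-password: "change-me"
+ ##     mysql-password: "change-me"
+ ##     mysql-replication-password: "change-me"
+ ## Then install with e.g. `--set auth.existingSecret=my-mysql-credentials`.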
+ ## + existingSecret: "" + ## @param auth.forcePassword Force users to specify required passwords + ## + forcePassword: false + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + ## @param auth.customPasswordFiles [object] Use custom password files when `auth.usePasswordFiles` is set to `true`. Define path for keys `root` and `user`, also define `replicator` if `architecture` is set to `replication` + ## Example: + ## customPasswordFiles: + ## root: /vault/secrets/mysql-root + ## user: /vault/secrets/mysql-user + ## replicator: /vault/secrets/mysql-replicator + ## + customPasswordFiles: {} +## @param initdbScripts [object] Dictionary of initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +## +initdbScripts: {} +## @param initdbScriptsConfigMap ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) +## +initdbScriptsConfigMap: "" + +## @section MySQL Primary parameters + +primary: + ## @param primary.command [array] Override default container command on MySQL Primary container(s) (useful when using custom images) + ## + command: [] + ## @param primary.args [array] Override default container args on MySQL Primary container(s) (useful when using custom images) + ## + args: [] + ## @param primary.hostAliases [array] Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.configuration [string] Configure MySQL Primary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/lib/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/lib/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param primary.existingConfigmap Name of existing ConfigMap with MySQL Primary configuration. + ## NOTE: When it's set the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param primary.updateStrategy Update strategy type for the MySQL primary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + ## @param primary.rollingUpdatePartition Partition update strategy for MySQL Primary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param primary.podAnnotations [object] Additional pod annotations for MySQL primary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param primary.podAffinityPreset MySQL primary pod affinity preset. Ignored if `primary.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset MySQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## MySQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type MySQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param primary.nodeAffinityPreset.key MySQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param primary.nodeAffinityPreset.values [array] MySQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param primary.affinity [object] Affinity for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param primary.nodeSelector [object] Node labels for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param primary.tolerations [array] Tolerations for MySQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## MySQL primary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param primary.podSecurityContext.enabled Enable security context for MySQL primary pods + ## @param primary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL primary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param primary.containerSecurityContext.enabled MySQL primary container securityContext + ## @param primary.containerSecurityContext.runAsUser User ID for the MySQL primary container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## MySQL primary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
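+ ## A filled-in sketch of the pattern described above (figures are the same
+ ## illustrative values used in the examples below, not recommendations):
+ ## resources:
+ ##   limits:
+ ##     cpu: 250m
+ ##     memory: 256Mi
+ ##   requests:
+ ##     cpu: 250m
+ ##     memory: 256Mi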
+ ## @param primary.resources.limits [object] The resources limits for MySQL primary containers + ## @param primary.resources.requests [object] The requested resources for MySQL primary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.readinessProbe.enabled Enable readinessProbe + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startupProbe probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param primary.startupProbe.enabled Enable startupProbe + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 10 + successThreshold: 1 + ## @param primary.customLivenessProbe [object] Override default liveness probe for MySQL primary containers + ## + customLivenessProbe: {} + ## @param primary.customReadinessProbe [object] Override default readiness probe for MySQL primary containers + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe [object] Override default startup probe for MySQL primary containers + ## + customStartupProbe: {} + ## @param primary.extraFlags MySQL primary additional command line flags + ## Can be used to specify command line flags, for example: + ## E.g. 
+ ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param primary.extraEnvVars [array] Extra environment variables to be set on MySQL primary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL primary containers + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL primary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param primary.persistence.enabled Enable persistence on MySQL primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir + ## + enabled: true + ## @param primary.persistence.existingClaim Name of an existing `PersistentVolumeClaim` for MySQL primary replicas + ## NOTE: When it's set, the rest of the persistence parameters are ignored + ## + existingClaim: "" + ## @param primary.persistence.storageClass MySQL primary persistent volume storage class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param primary.persistence.annotations [object] MySQL primary persistent volume claim annotations + ## + annotations: {} + ## @param primary.persistence.accessModes MySQL primary persistent volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size MySQL primary persistent volume size + ## + size: 8Gi + ## @param primary.persistence.selector [object] Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.extraVolumes [array] Optionally specify extra list of additional volumes to the MySQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for the MySQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.initContainers [array] Add additional init containers for the MySQL Primary pod(s) + ## + initContainers: [] + ## @param primary.sidecars [array] Add additional sidecar containers for the MySQL Primary pod(s) + ## + sidecars: [] + ## MySQL Primary Service parameters + ## + service: + ## @param primary.service.type MySQL Primary K8s service type + ## + type: ClusterIP + ## @param primary.service.port MySQL Primary K8s service port + ## + port: 3306 + ## @param primary.service.nodePort MySQL Primary K8s service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param primary.service.clusterIP MySQL Primary K8s service clusterIP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.loadBalancerIP MySQL Primary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref:
https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges [array] Addresses that are allowed when MySQL Primary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.annotations [object] Provide any additional annotations which may be required + ## + annotations: {} + ## MySQL primary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param primary.pdb.enabled Enable/disable a Pod Disruption Budget creation for MySQL primary pods + ## + enabled: false + ## @param primary.pdb.minAvailable Minimum number/percentage of MySQL primary pods that should remain scheduled + ## + minAvailable: 1 + ## @param primary.pdb.maxUnavailable Maximum number/percentage of MySQL primary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param primary.podLabels [object] MySQL Primary pod labels. If these are the same as commonLabels, they take precedence + ## + podLabels: {} + +## @section MySQL Secondary parameters + +secondary: + ## @param secondary.replicaCount Number of MySQL secondary replicas + ## + replicaCount: 1 + ## @param secondary.hostAliases [array] Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param secondary.command [array] Override default container command on MySQL Secondary container(s) (useful when using custom images) + ## + command: [] + ## @param secondary.args [array] Override default container args on MySQL Secondary container(s) (useful when using custom images) + ## + args: [] + ## @param secondary.configuration [string] Configure MySQL Secondary with a custom my.cnf file + ## ref: https://mysql.com/kb/en/mysql/configuring-mysql-with-mycnf/#example-of-configuration-file + ## + configuration: |- + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + ## @param secondary.existingConfigmap Name of existing ConfigMap with MySQL Secondary configuration.
+ ## NOTE: When it's set, the 'configuration' parameter is ignored + ## + existingConfigmap: "" + ## @param secondary.updateStrategy Update strategy type for the MySQL secondary statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: RollingUpdate + ## @param secondary.rollingUpdatePartition Partition update strategy for MySQL Secondary statefulset + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + ## + rollingUpdatePartition: "" + ## @param secondary.podAnnotations [object] Additional pod annotations for MySQL secondary pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param secondary.podAffinityPreset MySQL secondary pod affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param secondary.podAntiAffinityPreset MySQL secondary pod anti-affinity preset. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## MySQL Secondary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param secondary.nodeAffinityPreset.type MySQL secondary node affinity preset type. Ignored if `secondary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param secondary.nodeAffinityPreset.key MySQL secondary node label key to match. Ignored if `secondary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param secondary.nodeAffinityPreset.values [array] MySQL secondary node label values to match. Ignored if `secondary.affinity` is set. + ## E.g.
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param secondary.affinity [object] Affinity for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param secondary.nodeSelector [object] Node labels for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param secondary.tolerations [array] Tolerations for MySQL secondary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## MySQL secondary Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param secondary.podSecurityContext.enabled Enable security context for MySQL secondary pods + ## @param secondary.podSecurityContext.fsGroup Group ID for the mounted volumes' filesystem + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## MySQL secondary container security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param secondary.containerSecurityContext.enabled MySQL secondary container securityContext + ## @param secondary.containerSecurityContext.runAsUser User ID for the MySQL secondary container + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + ## MySQL secondary container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param secondary.resources.limits [object] The resources limits for MySQL secondary containers + ## @param secondary.resources.requests [object] The requested resources for MySQL secondary containers + ## + resources: + ## Example: + ## limits: + ## cpu: 250m + ## memory: 256Mi + limits: {} + ## Example: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## Configure extra options for liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.livenessProbe.enabled Enable livenessProbe + ## @param secondary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param secondary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param secondary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param secondary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param secondary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.readinessProbe.enabled Enable readinessProbe + ## @param secondary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param secondary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param secondary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param secondary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param secondary.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + ## Configure extra options for startup probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param secondary.startupProbe.enabled Enable startupProbe + ## @param secondary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param secondary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param secondary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param secondary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param secondary.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param secondary.customLivenessProbe [object] Override default liveness probe for MySQL secondary containers + ## + customLivenessProbe: {} + ## @param secondary.customReadinessProbe [object] Override default readiness probe for MySQL secondary containers + ## + customReadinessProbe: {} + ## @param secondary.customStartupProbe [object] Override default startup probe for MySQL secondary containers + ## + customStartupProbe: {} + ## @param secondary.extraFlags MySQL secondary additional command line flags + ## Can be used to specify command line flags, e.g.
+ ## extraFlags: "--max-connect-errors=1000 --max_connections=155" + ## + extraFlags: "" + ## @param secondary.extraEnvVars [array] Extra environment variables to be set on MySQL secondary containers + ## E.g. + ## extraEnvVars: + ## - name: TZ + ## value: "Europe/Paris" + ## + extraEnvVars: [] + ## @param secondary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for MySQL secondary containers + ## + extraEnvVarsCM: "" + ## @param secondary.extraEnvVarsSecret Name of existing Secret containing extra env vars for MySQL secondary containers + ## + extraEnvVarsSecret: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param secondary.persistence.enabled Enable persistence on MySQL secondary replicas using a `PersistentVolumeClaim` + ## + enabled: true + ## @param secondary.persistence.storageClass MySQL secondary persistent volume storage class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param secondary.persistence.annotations [object] MySQL secondary persistent volume claim annotations + ## + annotations: {} + ## @param secondary.persistence.accessModes MySQL secondary persistent volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param secondary.persistence.size MySQL secondary persistent volume size + ## + size: 8Gi + ## @param secondary.persistence.selector [object] Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param secondary.extraVolumes [array] Optionally specify extra list of additional volumes to the MySQL secondary pod(s) + ## + extraVolumes: [] + ## @param secondary.extraVolumeMounts [array] Optionally specify extra list of additional volumeMounts for the MySQL secondary container(s) + ## + extraVolumeMounts: [] + ## @param secondary.initContainers [array] Add additional init containers for the MySQL secondary pod(s) + ## + initContainers: [] + ## @param secondary.sidecars [array] Add additional sidecar containers for the MySQL secondary pod(s) + ## + sidecars: [] + ## MySQL Secondary Service parameters + ## + service: + ## @param secondary.service.type MySQL secondary Kubernetes service type + ## + type: ClusterIP + ## @param secondary.service.port MySQL secondary Kubernetes service port + ## + port: 3306 + ## @param secondary.service.nodePort MySQL secondary Kubernetes service node port + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## @param secondary.service.clusterIP MySQL secondary Kubernetes service clusterIP + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param secondary.service.loadBalancerIP MySQL secondary loadBalancerIP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param secondary.service.externalTrafficPolicy Enable client source IP preservation + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param
secondary.service.loadBalancerSourceRanges [array] Addresses that are allowed when MySQL secondary service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## E.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param secondary.service.annotations [object] Provide any additional annotations which may be required + ## + annotations: {} + ## MySQL secondary Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param secondary.pdb.enabled Enable/disable a Pod Disruption Budget creation for MySQL secondary pods + ## + enabled: false + ## @param secondary.pdb.minAvailable Minimum number/percentage of MySQL secondary pods that should remain scheduled + ## + minAvailable: 1 + ## @param secondary.pdb.maxUnavailable Maximum number/percentage of MySQL secondary pods that may be made unavailable + ## + maxUnavailable: "" + ## @param secondary.podLabels [object] Additional pod labels for MySQL secondary pods + ## + podLabels: {} + +## @section RBAC parameters + +## MySQL pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable the creation of a ServiceAccount for MySQL pods + ## + create: true + ## @param serviceAccount.name Name of the created ServiceAccount + ## If not set and create is true, a name is generated using the mysql.fullname template + ## + name: "" + ## @param serviceAccount.annotations [object] Annotations for MySQL Service Account + ## + annotations: {} +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## + create: false + +## @section Network Policy + +## MySQL Network Policy configuration +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal The Policy model to apply. + ## When set to false, only pods with the correct + ## client label will have network access to the port MySQL is listening + ## on. When true, MySQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector [object] A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed to MySQL + ## If explicitNamespacesSelector is missing or set to {}, only client Pods in the NetworkPolicy's namespace + ## that carry the required client label can reach the DB. + ## To make the DB accessible to clients in other namespaces, use this LabelSelector to select those + ## namespaces; note that the NetworkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## @section Volume Permissions parameters + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
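+## E.g. some storage provisioners mount newly created volumes owned by root; enabling the init container below (illustrative usage, not a default) lets the MySQL containers, which run as user/fsGroup 1001 above, write to the data directory: +## volumePermissions: +##   enabled: true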
+## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets [array] Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 10-debian-10-r349 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param volumePermissions.resources [object] Init container volume-permissions resources + ## + resources: {} + +## @section Metrics parameters + +## Mysqld Prometheus exporter parameters +## +metrics: + ## @param metrics.enabled Start a sidecar Prometheus exporter + ## + enabled: false + ## @param metrics.image.registry Exporter image registry + ## @param metrics.image.repository Exporter image repository + ## @param metrics.image.tag Exporter image tag (immutable tags are recommended) + ## @param metrics.image.pullPolicy Exporter image pull policy + ## @param metrics.image.pullSecrets [array] Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/mysqld-exporter + tag: 0.13.0-debian-10-r256 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## MySQL Prometheus exporter service parameters + ## @param metrics.service.type Kubernetes service type for MySQL Prometheus Exporter + ## @param metrics.service.port MySQL Prometheus Exporter service port + ## @param metrics.service.annotations [object] Prometheus exporter service annotations + ## + service: + type: ClusterIP + port: 9104 + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + ## @param metrics.extraArgs.primary [array] Extra args to be passed to mysqld_exporter on Primary pods + ## @param metrics.extraArgs.secondary [array] Extra args to be passed to mysqld_exporter on Secondary pods + ## ref: https://github.com/prometheus/mysqld_exporter/ + ## E.g.
+ ## - --collect.auto_increment.columns + ## - --collect.binlog_size + ## - --collect.engine_innodb_status + ## - --collect.engine_tokudb_status + ## - --collect.global_status + ## - --collect.global_variables + ## - --collect.info_schema.clientstats + ## - --collect.info_schema.innodb_metrics + ## - --collect.info_schema.innodb_tablespaces + ## - --collect.info_schema.innodb_cmp + ## - --collect.info_schema.innodb_cmpmem + ## - --collect.info_schema.processlist + ## - --collect.info_schema.processlist.min_time + ## - --collect.info_schema.query_response_time + ## - --collect.info_schema.tables + ## - --collect.info_schema.tables.databases + ## - --collect.info_schema.tablestats + ## - --collect.info_schema.userstats + ## - --collect.perf_schema.eventsstatements + ## - --collect.perf_schema.eventsstatements.digest_text_limit + ## - --collect.perf_schema.eventsstatements.limit + ## - --collect.perf_schema.eventsstatements.timelimit + ## - --collect.perf_schema.eventswaits + ## - --collect.perf_schema.file_events + ## - --collect.perf_schema.file_instances + ## - --collect.perf_schema.indexiowaits + ## - --collect.perf_schema.tableiowaits + ## - --collect.perf_schema.tablelocks + ## - --collect.perf_schema.replication_group_member_stats + ## - --collect.slave_status + ## - --collect.slave_hosts + ## - --collect.heartbeat + ## - --collect.heartbeat.database + ## - --collect.heartbeat.table + ## + extraArgs: + primary: [] + secondary: [] + ## Mysqld Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param metrics.resources.limits [object] The resources limits for MySQL prometheus exporter containers + ## @param metrics.resources.requests [object] The requested resources for MySQL prometheus exporter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 256Mi + limits: {} + ## Example: + ## requests: + ## cpu: 100m + ## memory: 256Mi + requests: {} + ## Mysqld Prometheus exporter liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 120 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Mysqld Prometheus exporter readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings [array] Specify metric relabellings to add to the scrape endpoint + ## + relabellings: [] + ## @param metrics.serviceMonitor.honorLabels Specify the honorLabels parameter to add to the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels [object] Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + additionalLabels: {} diff --git a/pkg/scanners/helm/test/option_test.go b/pkg/scanners/helm/test/option_test.go new file mode 100644 index 000000000000..1afa75658490 --- /dev/null +++ b/pkg/scanners/helm/test/option_test.go @@ -0,0 +1,167 @@ +package test + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + +
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/helm/parser" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +func Test_helm_parser_with_options_with_values_file(t *testing.T) { + + tests := []struct { + testName string + chartName string + valuesFile string + }{ + { + testName: "Parsing directory 'testchart'", + chartName: "testchart", + valuesFile: "values/values.yaml", + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + chartName := test.chartName + + t.Logf("Running test: %s", test.testName) + + var opts []options.ParserOption + + if test.valuesFile != "" { + opts = append(opts, parser.OptionWithValuesFile(test.valuesFile)) + } + + helmParser := parser.New(chartName, opts...) + err := helmParser.ParseFS(context.TODO(), os.DirFS(filepath.Join("testdata", chartName)), ".") + require.NoError(t, err) + manifests, err := helmParser.RenderedChartFiles() + require.NoError(t, err) + + assert.Len(t, manifests, 3) + + for _, manifest := range manifests { + expectedPath := filepath.Join("testdata", "expected", "options", chartName, manifest.TemplateFilePath) + + expectedContent, err := os.ReadFile(expectedPath) + require.NoError(t, err) + + cleanExpected := strings.ReplaceAll(string(expectedContent), "\r\n", "\n") + cleanActual := strings.ReplaceAll(manifest.ManifestContent, "\r\n", "\n") + + assert.Equal(t, cleanExpected, cleanActual) + } + }) + } +} + +func Test_helm_parser_with_options_with_set_value(t *testing.T) { + + tests := []struct { + testName string + chartName string + valuesFile string + values string + }{ + { + testName: "Parsing directory 'testchart'", + chartName: "testchart", + values: "securityContext.runAsUser=0", + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + chartName := test.chartName + + t.Logf("Running test: %s", test.testName) + + var opts []options.ParserOption + + if test.valuesFile != "" { + opts = append(opts, parser.OptionWithValuesFile(test.valuesFile)) + } + + if test.values != "" { + opts = append(opts, parser.OptionWithValues(test.values)) + } + + helmParser := parser.New(chartName, opts...) 
+ err := helmParser.ParseFS(context.TODO(), os.DirFS(filepath.Join("testdata", chartName)), ".") + require.NoError(t, err) + manifests, err := helmParser.RenderedChartFiles() + require.NoError(t, err) + + assert.Len(t, manifests, 3) + + for _, manifest := range manifests { + expectedPath := filepath.Join("testdata", "expected", "options", chartName, manifest.TemplateFilePath) + + expectedContent, err := os.ReadFile(expectedPath) + require.NoError(t, err) + + cleanExpected := strings.ReplaceAll(string(expectedContent), "\r\n", "\n") + cleanActual := strings.ReplaceAll(manifest.ManifestContent, "\r\n", "\n") + + assert.Equal(t, cleanExpected, cleanActual) + } + }) + } +} + +func Test_helm_parser_with_options_with_api_versions(t *testing.T) { + + tests := []struct { + testName string + chartName string + apiVersions []string + }{ + { + testName: "Parsing directory 'with-api-version'", + chartName: "with-api-version", + apiVersions: []string{"policy/v1/PodDisruptionBudget"}, + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + chartName := test.chartName + + t.Logf("Running test: %s", test.testName) + + var opts []options.ParserOption + + if len(test.apiVersions) > 0 { + opts = append(opts, parser.OptionWithAPIVersions(test.apiVersions...)) + } + + helmParser := parser.New(chartName, opts...) + err := helmParser.ParseFS(context.TODO(), os.DirFS(filepath.Join("testdata", chartName)), ".") + require.NoError(t, err) + manifests, err := helmParser.RenderedChartFiles() + require.NoError(t, err) + + assert.Len(t, manifests, 1) + + for _, manifest := range manifests { + expectedPath := filepath.Join("testdata", "expected", "options", chartName, manifest.TemplateFilePath) + + expectedContent, err := os.ReadFile(expectedPath) + require.NoError(t, err) + + cleanExpected := strings.TrimSpace(strings.ReplaceAll(string(expectedContent), "\r\n", "\n")) + cleanActual := strings.TrimSpace(strings.ReplaceAll(manifest.ManifestContent, "\r\n", "\n")) + + assert.Equal(t, cleanExpected, cleanActual) + } + }) + } +} diff --git a/pkg/scanners/helm/test/parser_test.go b/pkg/scanners/helm/test/parser_test.go new file mode 100644 index 000000000000..24130d00a104 --- /dev/null +++ b/pkg/scanners/helm/test/parser_test.go @@ -0,0 +1,199 @@ +package test + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/aquasecurity/trivy/pkg/detection" + "github.com/aquasecurity/trivy/pkg/scanners/helm/parser" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_helm_parser(t *testing.T) { + + tests := []struct { + testName string + chartName string + }{ + { + testName: "Parsing directory 'testchart'", + chartName: "testchart", + }, + { + testName: "Parsing directory with tarred dependency", + chartName: "with-tarred-dep", + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + chartName := test.chartName + + t.Logf("Running test: %s", test.testName) + + helmParser := parser.New(chartName) + err := helmParser.ParseFS(context.TODO(), os.DirFS(filepath.Join("testdata", chartName)), ".") + require.NoError(t, err) + manifests, err := helmParser.RenderedChartFiles() + require.NoError(t, err) + + assert.Len(t, manifests, 3) + + for _, manifest := range manifests { + expectedPath := filepath.Join("testdata", "expected", chartName, manifest.TemplateFilePath) + + expectedContent, err := os.ReadFile(expectedPath) + require.NoError(t, err) + + got := 
strings.ReplaceAll(manifest.ManifestContent, "\r\n", "\n") + assert.Equal(t, strings.ReplaceAll(string(expectedContent), "\r\n", "\n"), got) + } + }) + } +} + +func Test_helm_parser_where_name_non_string(t *testing.T) { + + tests := []struct { + testName string + chartName string + }{ + { + testName: "Scanning chart with integer for name", + chartName: "numberName", + }, + } + + for _, test := range tests { + chartName := test.chartName + + t.Logf("Running test: %s", test.testName) + + helmParser := parser.New(chartName) + err := helmParser.ParseFS(context.TODO(), os.DirFS(filepath.Join("testdata", chartName)), ".") + require.NoError(t, err) + } +} + +func Test_tar_is_chart(t *testing.T) { + + tests := []struct { + testName string + archiveFile string + isHelmChart bool + }{ + { + testName: "standard tarball", + archiveFile: "mysql-8.8.26.tar", + isHelmChart: true, + }, + { + testName: "gzip tarball with tar.gz extension", + archiveFile: "mysql-8.8.26.tar.gz", + isHelmChart: true, + }, + { + testName: "broken gzip tarball with tar.gz extension", + archiveFile: "aws-cluster-autoscaler-bad.tar.gz", + isHelmChart: true, + }, + { + testName: "gzip tarball with tgz extension", + archiveFile: "mysql-8.8.26.tgz", + isHelmChart: true, + }, + { + testName: "gzip tarball that has nothing of interest in it", + archiveFile: "nope.tgz", + isHelmChart: false, + }, + } + + for _, test := range tests { + + t.Logf("Running test: %s", test.testName) + testPath := filepath.Join("testdata", test.archiveFile) + file, err := os.Open(testPath) + require.NoError(t, err) + + assert.Equal(t, test.isHelmChart, detection.IsHelmChartArchive(test.archiveFile, file)) + + // close explicitly at the end of each iteration; a defer inside the loop would + // double-close the file and keep every handle open until the test returns + _ = file.Close() + } +} + +func Test_helm_tarball_parser(t *testing.T) { + + tests := []struct { + testName string + chartName string + archiveFile string + }{ + { + testName: "standard tarball", + chartName: "mysql", + archiveFile: "mysql-8.8.26.tar", + }, + { + testName: "gzip tarball with tar.gz extension", + chartName: "mysql", + archiveFile: "mysql-8.8.26.tar.gz", + }, + { + testName: "gzip tarball with tgz extension", + chartName: "mysql", + archiveFile: "mysql-8.8.26.tgz", + }, + } + + for _, test := range tests { + + t.Logf("Running test: %s", test.testName) + + testPath := filepath.Join("testdata", test.archiveFile) + + testTemp := t.TempDir() + testFileName := filepath.Join(testTemp, test.archiveFile) + require.NoError(t, copyArchive(testPath, testFileName)) + + testFs := os.DirFS(testTemp) + + helmParser := parser.New(test.archiveFile) + err := helmParser.ParseFS(context.TODO(), testFs, ".") + require.NoError(t, err) + + manifests, err := helmParser.RenderedChartFiles() + require.NoError(t, err) + + assert.Len(t, manifests, 6) + + oneOf := []string{ + "configmap.yaml", + "statefulset.yaml", + "svc-headless.yaml", + "svc.yaml", + "secrets.yaml", + "serviceaccount.yaml", + } + + for _, manifest := range manifests { + filename := filepath.Base(manifest.TemplateFilePath) + assert.Contains(t, oneOf, filename) + + if strings.HasSuffix(manifest.TemplateFilePath, "secrets.yaml") { + continue + } + expectedPath := filepath.Join("testdata", "expected", test.chartName, manifest.TemplateFilePath) + + expectedContent, err := os.ReadFile(expectedPath) + require.NoError(t, err) + + assert.Equal(t, strings.ReplaceAll(string(expectedContent), "\r\n", "\n"), strings.ReplaceAll(manifest.ManifestContent, "\r\n", "\n")) + } + } +} diff --git a/pkg/scanners/helm/test/scanner_test.go b/pkg/scanners/helm/test/scanner_test.go new
file mode 100644 index 000000000000..eee8775adf5a --- /dev/null +++ b/pkg/scanners/helm/test/scanner_test.go @@ -0,0 +1,306 @@ +package test + +import ( + "context" + "io" + "os" + "path/filepath" + "sort" + "strings" + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/helm" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_helm_scanner_with_archive(t *testing.T) { + + tests := []struct { + testName string + chartName string + path string + archiveName string + }{ + { + testName: "Parsing tarball 'mysql-8.8.26.tar'", + chartName: "mysql", + path: filepath.Join("testdata", "mysql-8.8.26.tar"), + archiveName: "mysql-8.8.26.tar", + }, + } + + for _, test := range tests { + t.Logf("Running test: %s", test.testName) + + helmScanner := helm.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + + testTemp := t.TempDir() + testFileName := filepath.Join(testTemp, test.archiveName) + require.NoError(t, copyArchive(test.path, testFileName)) + + testFs := os.DirFS(testTemp) + results, err := helmScanner.ScanFS(context.TODO(), testFs, ".") + require.NoError(t, err) + require.NotNil(t, results) + + failed := results.GetFailed() + assert.Equal(t, 13, len(failed)) + + visited := make(map[string]bool) + var errorCodes []string + for _, result := range failed { + id := result.Flatten().RuleID + if _, exists := visited[id]; !exists { + visited[id] = true + errorCodes = append(errorCodes, id) + } + } + assert.Len(t, errorCodes, 13) + + sort.Strings(errorCodes) + + assert.Equal(t, []string{ + "AVD-KSV-0001", "AVD-KSV-0003", + "AVD-KSV-0011", "AVD-KSV-0012", "AVD-KSV-0014", + "AVD-KSV-0015", "AVD-KSV-0016", "AVD-KSV-0018", + "AVD-KSV-0020", "AVD-KSV-0021", "AVD-KSV-0030", + "AVD-KSV-0104", "AVD-KSV-0106", + }, errorCodes) + } +} + +func Test_helm_scanner_with_missing_name_can_recover(t *testing.T) { + + tests := []struct { + testName string + chartName string + path string + archiveName string + }{ + { + testName: "Parsing tarball 'aws-cluster-autoscaler-bad.tar.gz'", + chartName: "aws-cluster-autoscaler", + path: filepath.Join("testdata", "aws-cluster-autoscaler-bad.tar.gz"), + archiveName: "aws-cluster-autoscaler-bad.tar.gz", + }, + } + + for _, test := range tests { + t.Logf("Running test: %s", test.testName) + + helmScanner := helm.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + + testTemp := t.TempDir() + testFileName := filepath.Join(testTemp, test.archiveName) + require.NoError(t, copyArchive(test.path, testFileName)) + + testFs := os.DirFS(testTemp) + _, err := helmScanner.ScanFS(context.TODO(), testFs, ".") + require.NoError(t, err) + } +} + +func Test_helm_scanner_with_dir(t *testing.T) { + + tests := []struct { + testName string + chartName string + }{ + { + testName: "Parsing directory 'testchart'", + chartName: "testchart", + }, + } + + for _, test := range tests { + + t.Logf("Running test: %s", test.testName) + + helmScanner := helm.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + + testFs := os.DirFS(filepath.Join("testdata", test.chartName)) + results, err := helmScanner.ScanFS(context.TODO(), testFs, ".") + require.NoError(t, err) + require.NotNil(t, results) + + failed := results.GetFailed() + assert.Equal(t, 14, len(failed)) + + visited := make(map[string]bool) + var errorCodes []string + for _, result := range failed { + id :=
result.Flatten().RuleID + if _, exists := visited[id]; !exists { + visited[id] = true + errorCodes = append(errorCodes, id) + } + } + + sort.Strings(errorCodes) + + assert.Equal(t, []string{ + "AVD-KSV-0001", "AVD-KSV-0003", + "AVD-KSV-0011", "AVD-KSV-0012", "AVD-KSV-0014", + "AVD-KSV-0015", "AVD-KSV-0016", "AVD-KSV-0018", + "AVD-KSV-0020", "AVD-KSV-0021", "AVD-KSV-0030", + "AVD-KSV-0104", "AVD-KSV-0106", + "AVD-KSV-0117", + }, errorCodes) + } +} + +func Test_helm_scanner_with_custom_policies(t *testing.T) { + regoRule := ` +package user.kubernetes.ID001 + + +__rego_metadata__ := { + "id": "ID001", + "avd_id": "AVD-USR-ID001", + "title": "Services not allowed", + "severity": "LOW", + "description": "Services are not allowed because of some reasons.", +} + +__rego_input__ := { + "selector": [ + {"type": "kubernetes"}, + ], +} + +deny[res] { + input.kind == "Service" + msg := sprintf("Found service '%s' but services are not allowed", [input.metadata.name]) + res := result.new(msg, input) +} +` + tests := []struct { + testName string + chartName string + path string + archiveName string + }{ + { + testName: "Parsing tarball 'mysql-8.8.26.tar'", + chartName: "mysql", + path: filepath.Join("testdata", "mysql-8.8.26.tar"), + archiveName: "mysql-8.8.26.tar", + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + t.Logf("Running test: %s", test.testName) + + helmScanner := helm.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true), + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithPolicyNamespaces("user")) + + testTemp := t.TempDir() + testFileName := filepath.Join(testTemp, test.archiveName) + require.NoError(t, copyArchive(test.path, testFileName)) + + policyDirName := filepath.Join(testTemp, "trules") + require.NoError(t, os.Mkdir(policyDirName, 0o700)) + require.NoError(t, os.WriteFile(filepath.Join(policyDirName, "rule.rego"), []byte(regoRule), 0o600)) + + testFs := os.DirFS(testTemp) + + results, err := helmScanner.ScanFS(context.TODO(), testFs, ".") + require.NoError(t, err) + require.NotNil(t, results) + + failed := results.GetFailed() + assert.Equal(t, 15, len(failed)) + + visited := make(map[string]bool) + var errorCodes []string + for _, result := range failed { + id := result.Flatten().RuleID + if _, exists := visited[id]; !exists { + visited[id] = true + errorCodes = append(errorCodes, id) + } + } + assert.Len(t, errorCodes, 14) + + sort.Strings(errorCodes) + + assert.Equal(t, []string{ + "AVD-KSV-0001", "AVD-KSV-0003", + "AVD-KSV-0011", "AVD-KSV-0012", "AVD-KSV-0014", + "AVD-KSV-0015", "AVD-KSV-0016", "AVD-KSV-0018", + "AVD-KSV-0020", "AVD-KSV-0021", "AVD-KSV-0030", + "AVD-KSV-0104", "AVD-KSV-0106", "AVD-USR-ID001", + }, errorCodes) + }) + } +} + +func copyArchive(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer func() { _ = in.Close() }() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer func() { _ = out.Close() }() + + if _, err := io.Copy(out, in); err != nil { + return err + } + return nil +} + +func Test_helm_chart_with_templated_name(t *testing.T) { + helmScanner := helm.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + testFs := os.DirFS(filepath.Join("testdata", "templated-name")) + _, err := helmScanner.ScanFS(context.TODO(), testFs, ".") + require.NoError(t, err) +} + +func TestCodeShouldNotBeMissing(t *testing.T) { + policy := `# METADATA +# title: "Test rego" +# description: "Test 
rego" +# scope: package +# schemas: +# - input: schema["kubernetes"] +# custom: +# id: ID001 +# avd_id: AVD-USR-ID001 +# severity: LOW +# input: +# selector: +# - type: kubernetes +package user.kubernetes.ID001 + +deny[res] { + input.spec.replicas == 3 + res := result.new("Replicas are not allowed", input) +} +` + helmScanner := helm.New( + options.ScannerWithEmbeddedPolicies(false), + options.ScannerWithEmbeddedLibraries(false), + options.ScannerWithPolicyNamespaces("user"), + options.ScannerWithPolicyReader(strings.NewReader(policy)), + ) + + results, err := helmScanner.ScanFS(context.TODO(), os.DirFS("testdata/simmilar-templates"), ".") + require.NoError(t, err) + + failedResults := results.GetFailed() + require.Len(t, failedResults, 1) + + failed := failedResults[0] + code, err := failed.GetCode() + require.NoError(t, err) + assert.NotNil(t, code) +} diff --git a/pkg/scanners/helm/test/testdata/aws-cluster-autoscaler-bad.tar.gz b/pkg/scanners/helm/test/testdata/aws-cluster-autoscaler-bad.tar.gz new file mode 100644 index 000000000000..a66f228c9851 Binary files /dev/null and b/pkg/scanners/helm/test/testdata/aws-cluster-autoscaler-bad.tar.gz differ diff --git a/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/configmap.yaml b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/configmap.yaml new file mode 100644 index 000000000000..9ee00d2c2c0c --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/configmap.yaml @@ -0,0 +1,42 @@ +# Source: mysql/templates/primary/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: mysql + namespace: + labels: + app.kubernetes.io/name: mysql + helm.sh/chart: mysql-8.8.26 + app.kubernetes.io/instance: mysql + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: primary +data: + my.cnf: |- + + [mysqld] + default_authentication_plugin=mysql_native_password + skip-name-resolve + explicit_defaults_for_timestamp + basedir=/opt/bitnami/mysql + plugin_dir=/opt/bitnami/mysql/lib/plugin + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + datadir=/bitnami/mysql/data + tmpdir=/opt/bitnami/mysql/tmp + max_allowed_packet=16M + bind-address=0.0.0.0 + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid + log-error=/opt/bitnami/mysql/logs/mysqld.log + character-set-server=UTF8 + collation-server=utf8_general_ci + + [client] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + default-character-set=UTF8 + plugin_dir=/opt/bitnami/mysql/lib/plugin + + [manager] + port=3306 + socket=/opt/bitnami/mysql/tmp/mysql.sock + pid-file=/opt/bitnami/mysql/tmp/mysqld.pid \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/statefulset.yaml b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/statefulset.yaml new file mode 100644 index 000000000000..a7f5f59d831b --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/statefulset.yaml @@ -0,0 +1,147 @@ +# Source: mysql/templates/primary/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mysql + namespace: + labels: + app.kubernetes.io/name: mysql + helm.sh/chart: mysql-8.8.26 + app.kubernetes.io/instance: mysql + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: primary +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mysql + app.kubernetes.io/instance: mysql + app.kubernetes.io/component: primary + serviceName: mysql + updateStrategy: + type: RollingUpdate + template: + metadata: + 
annotations: + checksum/configuration: 6adfba795651cd736dfa943a87e0853ce417b9fb842b57535e3b1b4e762a33fd + labels: + app.kubernetes.io/name: mysql + helm.sh/chart: mysql-8.8.26 + app.kubernetes.io/instance: mysql + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: primary + spec: + + serviceAccountName: mysql + affinity: + podAffinity: + + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: mysql + app.kubernetes.io/instance: mysql + app.kubernetes.io/component: primary + namespaces: + - "" + topologyKey: kubernetes.io/hostname + weight: 1 + nodeAffinity: + + securityContext: + fsGroup: 1001 + containers: + - name: mysql + image: docker.io/bitnami/mysql:8.0.28-debian-10-r23 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MYSQL_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: mysql + key: mysql-root-password + - name: MYSQL_DATABASE + value: "my_database" + ports: + - name: mysql + containerPort: 3306 + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + startupProbe: + failureThreshold: 10 + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + exec: + command: + - /bin/bash + - -ec + - | + password_aux="${MYSQL_ROOT_PASSWORD:-}" + if [[ -f "${MYSQL_ROOT_PASSWORD_FILE:-}" ]]; then + password_aux=$(cat "$MYSQL_ROOT_PASSWORD_FILE") + fi + mysqladmin status -uroot -p"${password_aux}" + resources: + limits: {} + requests: {} + volumeMounts: + - name: data + mountPath: /bitnami/mysql + - name: config + mountPath: /opt/bitnami/mysql/conf/my.cnf + subPath: my.cnf + volumes: + - name: config + configMap: + name: mysql + volumeClaimTemplates: + - metadata: + name: data + labels: + app.kubernetes.io/name: mysql + app.kubernetes.io/instance: mysql + app.kubernetes.io/component: primary + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/svc-headless.yaml b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/svc-headless.yaml new file mode 100644 index 000000000000..9fe0f11c87ae --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/svc-headless.yaml @@ -0,0 +1,25 @@ +# Source: mysql/templates/primary/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: mysql-headless + namespace: + labels: + app.kubernetes.io/name: mysql + helm.sh/chart: mysql-8.8.26 + app.kubernetes.io/instance: mysql + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: primary + annotations: +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: mysql + port: 3306 + targetPort: mysql + 
selector: + app.kubernetes.io/name: mysql + app.kubernetes.io/instance: mysql + app.kubernetes.io/component: primary \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/svc.yaml b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/svc.yaml new file mode 100644 index 000000000000..2bbdab8fe468 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/mysql/templates/primary/svc.yaml @@ -0,0 +1,25 @@ +# Source: mysql/templates/primary/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: mysql + namespace: + labels: + app.kubernetes.io/name: mysql + helm.sh/chart: mysql-8.8.26 + app.kubernetes.io/instance: mysql + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: primary + annotations: +spec: + type: ClusterIP + ports: + - name: mysql + port: 3306 + protocol: TCP + targetPort: mysql + nodePort: null + selector: + app.kubernetes.io/name: mysql + app.kubernetes.io/instance: mysql + app.kubernetes.io/component: primary \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/mysql/templates/secrets.yaml b/pkg/scanners/helm/test/testdata/expected/mysql/templates/secrets.yaml new file mode 100644 index 000000000000..ffa6909e2f04 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/mysql/templates/secrets.yaml @@ -0,0 +1,15 @@ +# Source: mysql/templates/secrets.yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysql + namespace: + labels: + app.kubernetes.io/name: mysql + helm.sh/chart: mysql-8.8.26 + app.kubernetes.io/instance: mysql + app.kubernetes.io/managed-by: Helm +type: Opaque +data: + mysql-root-password: "aGZYYW1vN3V5NA==" + mysql-password: "eHR6YU9MR1VhbA==" \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/mysql/templates/serviceaccount.yaml b/pkg/scanners/helm/test/testdata/expected/mysql/templates/serviceaccount.yaml new file mode 100644 index 000000000000..760b8bf731a5 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/mysql/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +# Source: mysql/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mysql + namespace: + labels: + app.kubernetes.io/name: mysql + helm.sh/chart: mysql-8.8.26 + app.kubernetes.io/instance: mysql + app.kubernetes.io/managed-by: Helm + annotations: +secrets: + - name: mysql \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/deployment.yaml b/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/deployment.yaml new file mode 100644 index 000000000000..c41133c72716 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/deployment.yaml @@ -0,0 +1,46 @@ +# Source: testchart/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: testchart + labels: + helm.sh/chart: testchart-0.1.0 + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + template: + metadata: + labels: + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + spec: + serviceAccountName: testchart + securityContext: + {} + containers: + - name: testchart + securityContext: + runAsUser: 0 + image: "nginx:1.16.0" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 80 
+ protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {} \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/service.yaml b/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/service.yaml new file mode 100644 index 000000000000..6c6699f3d5dd --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/service.yaml @@ -0,0 +1,21 @@ +# Source: testchart/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: testchart + labels: + helm.sh/chart: testchart-0.1.0 + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/serviceaccount.yaml b/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/serviceaccount.yaml new file mode 100644 index 000000000000..6fe44a89bb3b --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/options/testchart/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +# Source: testchart/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: testchart + labels: + helm.sh/chart: testchart-0.1.0 + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/options/with-api-version/templates/pdb.yaml b/pkg/scanners/helm/test/testdata/expected/options/with-api-version/templates/pdb.yaml new file mode 100644 index 000000000000..7c7ef5fd74d7 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/options/with-api-version/templates/pdb.yaml @@ -0,0 +1,17 @@ +# Source: with-api-version/templates/pdb.yaml +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: with-api-version + labels: + helm.sh/chart: with-api-version-0.1.0 + app.kubernetes.io/name: with-api-version + app.kubernetes.io/instance: with-api-version + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: with-api-version + app.kubernetes.io/instance: with-api-version + maxUnavailable: 0 diff --git a/pkg/scanners/helm/test/testdata/expected/testchart/templates/deployment.yaml b/pkg/scanners/helm/test/testdata/expected/testchart/templates/deployment.yaml new file mode 100644 index 000000000000..8ace433f0c03 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/testchart/templates/deployment.yaml @@ -0,0 +1,46 @@ +# Source: testchart/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: testchart + labels: + helm.sh/chart: testchart-0.1.0 + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + template: + metadata: + labels: + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + spec: + serviceAccountName: testchart + securityContext: + {} + containers: + - 
name: testchart + securityContext: + {} + image: "nginx:1.16.0" + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {} \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/testchart/templates/service.yaml b/pkg/scanners/helm/test/testdata/expected/testchart/templates/service.yaml new file mode 100644 index 000000000000..6c6699f3d5dd --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/testchart/templates/service.yaml @@ -0,0 +1,21 @@ +# Source: testchart/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: testchart + labels: + helm.sh/chart: testchart-0.1.0 + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + ports: + - port: 80 + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/testchart/templates/serviceaccount.yaml b/pkg/scanners/helm/test/testdata/expected/testchart/templates/serviceaccount.yaml new file mode 100644 index 000000000000..6fe44a89bb3b --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/testchart/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +# Source: testchart/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: testchart + labels: + helm.sh/chart: testchart-0.1.0 + app.kubernetes.io/name: testchart + app.kubernetes.io/instance: testchart + app.kubernetes.io/version: "1.16.0" + app.kubernetes.io/managed-by: Helm \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/deployment.yaml b/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/deployment.yaml new file mode 100644 index 000000000000..ed57d12a6e2b --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/deployment.yaml @@ -0,0 +1,78 @@ +# Source: with-tarred-dep/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: with-tarred-dep + labels: + app.kubernetes.io/name: with-tarred-dep + helm.sh/chart: with-tarred-dep-0.1.1 + app.kubernetes.io/instance: with-tarred-dep + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: with-tarred-dep + app.kubernetes.io/instance: with-tarred-dep + template: + metadata: + labels: + app.kubernetes.io/name: with-tarred-dep + helm.sh/chart: with-tarred-dep-0.1.1 + app.kubernetes.io/instance: with-tarred-dep + app.kubernetes.io/managed-by: Helm + spec: + containers: + - name: metadata-service + env: + - name: METADATASERVICE_UPSTREAM_API_URL + value: '' + - name: METADATASERVICE_OIDC_AUDIENCE + value: "" + - name: METADATASERVICE_OIDC_ISSUER + value: "" + - name: METADATASERVICE_OIDC_JWKSURI + value: "" + - name: METADATASERVICE_OIDC_CLAIMS_ROLES + value: "" + - name: METADATASERVICE_OIDC_CLAIMS_USERNAME + value: "" + - name: METADATASERVICE_DB_URI + valueFrom: + secretKeyRef: + name: with-tarred-dep-dbconn + key: uri + image: "ghcr.io/metal-toolbox/hollow-metadataservice:v0.0.1" + imagePullPolicy: Always + volumeMounts: + - name: dbcerts + mountPath: "/dbcerts" + readOnly: true + ports: + - name: http + containerPort: 8000 + protocol: TCP + livenessProbe: + 
httpGet: + path: /healthz/liveness + port: http + initialDelaySeconds: 5 + timeoutSeconds: 2 + readinessProbe: + httpGet: + path: /healthz/readiness + port: http + initialDelaySeconds: 5 + timeoutSeconds: 2 + resources: + limits: + cpu: 4 + memory: 4Gi + requests: + cpu: 4 + memory: 4Gi + volumes: + - name: dbcerts + secret: + secretName: with-tarred-dep-crdb-ca + defaultMode: 0400 \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/ingress.yaml b/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/ingress.yaml new file mode 100644 index 000000000000..b48564477997 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/ingress.yaml @@ -0,0 +1,26 @@ +# Source: with-tarred-dep/templates/ingress.yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: with-tarred-dep + labels: + app.kubernetes.io/name: with-tarred-dep + helm.sh/chart: with-tarred-dep-0.1.1 + app.kubernetes.io/instance: with-tarred-dep + app.kubernetes.io/managed-by: Helm +spec: + rules: + - host: metadata-service.mydomain + http: + paths: + - path: /($|metadata|userdata|2009-04-04) + pathType: Prefix + backend: + service: + name: with-tarred-dep + port: + name: http +# tls: [] +# hosts: +# - hollow-metadataservice.mydomain +# secretName: hollow-metadataservice-example-tls \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/service.yaml b/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/service.yaml new file mode 100644 index 000000000000..7d86aeb5b02b --- /dev/null +++ b/pkg/scanners/helm/test/testdata/expected/with-tarred-dep/templates/service.yaml @@ -0,0 +1,24 @@ +# Source: with-tarred-dep/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: with-tarred-dep + labels: + app.kubernetes.io/name: with-tarred-dep + helm.sh/chart: with-tarred-dep-0.1.1 + app.kubernetes.io/instance: with-tarred-dep + app.kubernetes.io/managed-by: Helm +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8000 + - name: https + port: 443 + protocol: TCP + targetPort: 8000 + selector: + app.kubernetes.io/name: with-tarred-dep + app.kubernetes.io/instance: with-tarred-dep + type: ClusterIP \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/mysql-8.8.26.tar b/pkg/scanners/helm/test/testdata/mysql-8.8.26.tar new file mode 100644 index 000000000000..53cb6802de42 Binary files /dev/null and b/pkg/scanners/helm/test/testdata/mysql-8.8.26.tar differ diff --git a/pkg/scanners/helm/test/testdata/mysql-8.8.26.tar.gz b/pkg/scanners/helm/test/testdata/mysql-8.8.26.tar.gz new file mode 100644 index 000000000000..ff8bd1ab402e Binary files /dev/null and b/pkg/scanners/helm/test/testdata/mysql-8.8.26.tar.gz differ diff --git a/pkg/scanners/helm/test/testdata/mysql-8.8.26.tgz b/pkg/scanners/helm/test/testdata/mysql-8.8.26.tgz new file mode 100644 index 000000000000..ff8bd1ab402e Binary files /dev/null and b/pkg/scanners/helm/test/testdata/mysql-8.8.26.tgz differ diff --git a/pkg/scanners/helm/test/testdata/nope.tgz b/pkg/scanners/helm/test/testdata/nope.tgz new file mode 100644 index 000000000000..a47332d93877 Binary files /dev/null and b/pkg/scanners/helm/test/testdata/nope.tgz differ diff --git a/pkg/scanners/helm/test/testdata/numberName/Chart.yaml b/pkg/scanners/helm/test/testdata/numberName/Chart.yaml new file mode 100644 index 000000000000..e840fbabf456 --- /dev/null +++ 
b/pkg/scanners/helm/test/testdata/numberName/Chart.yaml @@ -0,0 +1,3 @@ +apiVersion: v2 +name: 1001 +version: 1.0.0 \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/simmilar-templates/Chart.yaml b/pkg/scanners/helm/test/testdata/simmilar-templates/Chart.yaml new file mode 100644 index 000000000000..e5855a786639 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/simmilar-templates/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: test-license-manager +version: 1.10.0 +type: application +appVersion: 1.10.0 +deprecated: false diff --git a/pkg/scanners/helm/test/testdata/simmilar-templates/templates/deployment.yaml b/pkg/scanners/helm/test/testdata/simmilar-templates/templates/deployment.yaml new file mode 100644 index 000000000000..eb2b2a343d51 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/simmilar-templates/templates/deployment.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +spec: + replicas: 3 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: myapp + image: test:latest + resources: + limits: + memory: "128Mi" + cpu: "500m" + ports: + - containerPort: 80 diff --git a/pkg/scanners/helm/test/testdata/simmilar-templates/templates/manifest.yaml b/pkg/scanners/helm/test/testdata/simmilar-templates/templates/manifest.yaml new file mode 100644 index 000000000000..49d9df010f82 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/simmilar-templates/templates/manifest.yaml @@ -0,0 +1,2 @@ +apiVersion: apps/v1 +kind: Deployment diff --git a/pkg/scanners/helm/test/testdata/templated-name/Chart.yaml b/pkg/scanners/helm/test/testdata/templated-name/Chart.yaml new file mode 100644 index 000000000000..e675643d99c9 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/templated-name/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: {{COMPONENT_NAME}} +version: {{COMPONENT_VERSION}} +description: A Helm chart for Kubernetes +keywords: + - kublr + - audit diff --git a/pkg/scanners/helm/test/testdata/testchart/.helmignore b/pkg/scanners/helm/test/testdata/testchart/.helmignore new file mode 100644 index 000000000000..0e8a0eb36f4c --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/pkg/scanners/helm/test/testdata/testchart/Chart.yaml b/pkg/scanners/helm/test/testdata/testchart/Chart.yaml new file mode 100644 index 000000000000..0ffb7d074a72 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: testchart +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. 
This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/NOTES.txt b/pkg/scanners/helm/test/testdata/testchart/templates/NOTES.txt new file mode 100644 index 000000000000..45e51670a862 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "testchart.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "testchart.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "testchart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "testchart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/_helpers.tpl b/pkg/scanners/helm/test/testdata/testchart/templates/_helpers.tpl new file mode 100644 index 000000000000..4b0db05bf5f5 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "testchart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "testchart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "testchart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "testchart.labels" -}} +helm.sh/chart: {{ include "testchart.chart" . }} +{{ include "testchart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "testchart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "testchart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "testchart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "testchart.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/deployment.yaml b/pkg/scanners/helm/test/testdata/testchart/templates/deployment.yaml new file mode 100644 index 000000000000..cde22bc4fcc5 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/deployment.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "testchart.fullname" . }} + labels: + {{- include "testchart.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "testchart.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "testchart.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "testchart.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/hpa.yaml b/pkg/scanners/helm/test/testdata/testchart/templates/hpa.yaml new file mode 100644 index 000000000000..51734471d41d --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "testchart.fullname" . }} + labels: + {{- include "testchart.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "testchart.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/ingress.yaml b/pkg/scanners/helm/test/testdata/testchart/templates/ingress.yaml new file mode 100644 index 000000000000..9732d2a24a14 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "testchart.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "testchart.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/service.yaml b/pkg/scanners/helm/test/testdata/testchart/templates/service.yaml new file mode 100644 index 000000000000..86baf148215d --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "testchart.fullname" . }} + labels: + {{- include "testchart.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "testchart.selectorLabels" . | nindent 4 }} diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/serviceaccount.yaml b/pkg/scanners/helm/test/testdata/testchart/templates/serviceaccount.yaml new file mode 100644 index 000000000000..f728deb2a6bb --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "testchart.serviceAccountName" . }} + labels: + {{- include "testchart.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/testchart/templates/tests/test-connection.yaml b/pkg/scanners/helm/test/testdata/testchart/templates/tests/test-connection.yaml new file mode 100644 index 000000000000..a391ef1c462f --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "testchart.fullname" . }}-test-connection" + labels: + {{- include "testchart.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "testchart.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/pkg/scanners/helm/test/testdata/testchart/values.yaml b/pkg/scanners/helm/test/testdata/testchart/values.yaml new file mode 100644 index 000000000000..4acdf3c931bd --- /dev/null +++ b/pkg/scanners/helm/test/testdata/testchart/values.yaml @@ -0,0 +1,86 @@ +# Default values for testchart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: + {} + # fsGroup: 2000 + +securityContext: + {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: + {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: + {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/pkg/scanners/helm/test/testdata/with-api-version/.helmignore b/pkg/scanners/helm/test/testdata/with-api-version/.helmignore new file mode 100644 index 000000000000..0e8a0eb36f4c --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-api-version/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/pkg/scanners/helm/test/testdata/with-api-version/Chart.yaml b/pkg/scanners/helm/test/testdata/with-api-version/Chart.yaml new file mode 100644 index 000000000000..22dab35d32f4 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-api-version/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: with-api-version +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/pkg/scanners/helm/test/testdata/with-api-version/templates/_helpers.tpl b/pkg/scanners/helm/test/testdata/with-api-version/templates/_helpers.tpl new file mode 100644 index 000000000000..cab726131dc5 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-api-version/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "with-api-version.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "with-api-version.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "with-api-version.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "with-api-version.labels" -}} +helm.sh/chart: {{ include "with-api-version.chart" . }} +{{ include "with-api-version.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "with-api-version.selectorLabels" -}} +app.kubernetes.io/name: {{ include "with-api-version.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "with-api-version.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "with-api-version.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/with-api-version/templates/pdb.yaml b/pkg/scanners/helm/test/testdata/with-api-version/templates/pdb.yaml new file mode 100644 index 000000000000..a0a54cbc232b --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-api-version/templates/pdb.yaml @@ -0,0 +1,11 @@ +apiVersion: {{ $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" | ternary "policy/v1" "policy/v1beta1" }} +kind: PodDisruptionBudget +metadata: + name: {{ include "with-api-version.fullname" . }} + labels: + {{- include "with-api-version.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "with-api-version.selectorLabels" . | nindent 6 }} + maxUnavailable: 0 diff --git a/pkg/scanners/helm/test/testdata/with-api-version/values.yaml b/pkg/scanners/helm/test/testdata/with-api-version/values.yaml new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/.helmignore b/pkg/scanners/helm/test/testdata/with-tarred-dep/.helmignore new file mode 100644 index 000000000000..50af03172541 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/Chart.yaml b/pkg/scanners/helm/test/testdata/with-tarred-dep/Chart.yaml new file mode 100644 index 000000000000..bd163a944cae --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: with-tarred-dep +description: Test With Tarred Dependencies +type: application +version: 0.1.1 +appVersion: "1.0" +sources: + - https://github.com/test/with-tarred-dep +dependencies: + - name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.16.1 diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/LICENSE b/pkg/scanners/helm/test/testdata/with-tarred-dep/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
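The fixture charts above feed the scanners added in this PR through Go's fs.FS abstraction, as the ScanFS and ParseFS signatures later in this diff show. A minimal, stdlib-only sketch of mounting the with-tarred-dep fixture as a filesystem; the fixture path is taken from this diff, while the program itself is purely illustrative:

package main

import (
	"fmt"
	"io/fs"
	"os"
)

func main() {
	// Mount the fixture chart directory as an fs.FS, the same shape the
	// scanners' ScanFS entry points consume.
	fixture := os.DirFS("pkg/scanners/helm/test/testdata/with-tarred-dep")

	// Walk it to list what a scanner would see: Chart.yaml, values.yaml,
	// templates/*, and the tarred dependency under charts/.
	err := fs.WalkDir(fixture, ".", func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if !d.IsDir() {
			fmt.Println(path)
		}
		return nil
	})
	if err != nil {
		fmt.Println("walk error:", err)
	}
}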
diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/charts/common-1.16.1.tgz b/pkg/scanners/helm/test/testdata/with-tarred-dep/charts/common-1.16.1.tgz new file mode 100644 index 000000000000..6a2df2e15b93 Binary files /dev/null and b/pkg/scanners/helm/test/testdata/with-tarred-dep/charts/common-1.16.1.tgz differ diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/renovate.json b/pkg/scanners/helm/test/testdata/with-tarred-dep/renovate.json new file mode 100644 index 000000000000..a78e667b7736 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/renovate.json @@ -0,0 +1,5 @@ +{ + "extends": [ + "config:base" + ] + } \ No newline at end of file diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/.gitkeep b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/.gitkeep new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/deployment.yaml b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/deployment.yaml new file mode 100644 index 000000000000..003d08eb745d --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/deployment.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + spec: + containers: + - name: metadata-service + env: + - name: METADATASERVICE_UPSTREAM_API_URL + value: '{{ .Values.upstreamAPI }}' + - name: METADATASERVICE_OIDC_AUDIENCE + value: "{{ .Values.oidc.audience }}" + - name: METADATASERVICE_OIDC_ISSUER + value: "{{ .Values.oidc.issuer }}" + - name: METADATASERVICE_OIDC_JWKSURI + value: "{{ .Values.oidc.jwksuri }}" + - name: METADATASERVICE_OIDC_CLAIMS_ROLES + value: "{{ .Values.oidc.rolesClaim }}" + - name: METADATASERVICE_OIDC_CLAIMS_USERNAME + value: "{{ .Values.oidc.userClaim }}" + - name: METADATASERVICE_DB_URI + valueFrom: + secretKeyRef: + name: {{ template "common.names.fullname" . }}-dbconn + key: uri + image: "{{ .Values.metadataservice.image.repository }}:{{ .Values.metadataservice.image.tag }}" + imagePullPolicy: Always + volumeMounts: + - name: dbcerts + mountPath: "/dbcerts" + readOnly: true + ports: + - name: http + containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz/liveness + port: http + initialDelaySeconds: 5 + timeoutSeconds: 2 + readinessProbe: + httpGet: + path: /healthz/readiness + port: http + initialDelaySeconds: 5 + timeoutSeconds: 2 + resources: +{{ toYaml .Values.resources | indent 12 }} + volumes: + - name: dbcerts + secret: + secretName: {{ template "common.names.fullname" . }}-crdb-ca + defaultMode: 0400 diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/ingress.yaml b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/ingress.yaml new file mode 100644 index 000000000000..45cd321ca9a9 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/ingress.yaml @@ -0,0 +1,36 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ template "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- range .Values.ingress.hostnames }} + - host: {{ . }} + http: + paths: + - path: / + {{- if $.Values.ingress.publicPaths -}} + ( + {{- range $index,$path := $.Values.ingress.publicPaths }} + {{- if $index }}|{{ end }} + {{- $path }} + {{- end -}} + ) + {{- end }} + pathType: Prefix + backend: + service: + name: {{ template "common.names.fullname" $ }} + port: + name: http + {{- end }} +# tls: [] +# hosts: +# - hollow-metadataservice.mydomain +# secretName: hollow-metadataservice-example-tls +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/secrets-crdb-ca.yaml b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/secrets-crdb-ca.yaml new file mode 100644 index 000000000000..18c39c058dcd --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/secrets-crdb-ca.yaml @@ -0,0 +1,17 @@ +{{- if .Values.crdbCA }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-crdb-ca + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + ca.crt: {{ .Values.crdbCA | b64enc | quote }} +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/secrets-dbconn.yaml b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/secrets-dbconn.yaml new file mode 100644 index 000000000000..06c93061d08c --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/secrets-dbconn.yaml @@ -0,0 +1,17 @@ +{{- if .Values.dbconnURI }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-dbconn + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + uri: {{ .Values.dbconnURI | b64enc | quote }} +{{- end }} diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/service.yaml b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/service.yaml new file mode 100644 index 000000000000..fdb8b82d76f8 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/templates/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8000 + - name: https + port: 443 + protocol: TCP + targetPort: 8000 + selector:{{ include "common.labels.matchLabels" . 
| nindent 4 }} + type: ClusterIP diff --git a/pkg/scanners/helm/test/testdata/with-tarred-dep/values.yaml b/pkg/scanners/helm/test/testdata/with-tarred-dep/values.yaml new file mode 100644 index 000000000000..7a86583f54e3 --- /dev/null +++ b/pkg/scanners/helm/test/testdata/with-tarred-dep/values.yaml @@ -0,0 +1,30 @@ +metadataservice: + image: + repository: ghcr.io/metal-toolbox/hollow-metadataservice + tag: "v0.0.1" + +ingress: + enabled: true + hostnames: + - metadata-service.mydomain + publicPaths: + - $ + - metadata + - userdata + - '2009-04-04' + +oidc: + audience: "" + issuer: "" + jwksuri: "" + rolesClaim: "" + userClaim: "" + +replicaCount: 1 +resources: + limits: + cpu: 4 + memory: 4Gi + requests: + cpu: 4 + memory: 4Gi diff --git a/pkg/scanners/helm/test/values/values.yaml b/pkg/scanners/helm/test/values/values.yaml new file mode 100644 index 000000000000..6f637160ffa9 --- /dev/null +++ b/pkg/scanners/helm/test/values/values.yaml @@ -0,0 +1,3 @@ +--- +securityContext: + runAsUser: 0 \ No newline at end of file diff --git a/pkg/scanners/json/parser/parser.go b/pkg/scanners/json/parser/parser.go new file mode 100644 index 000000000000..3f0925d4c731 --- /dev/null +++ b/pkg/scanners/json/parser/parser.go @@ -0,0 +1,89 @@ +package parser + +import ( + "context" + "encoding/json" + "io" + "io/fs" + "path/filepath" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/detection" + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +var _ options.ConfigurableParser = (*Parser)(nil) + +type Parser struct { + debug debug.Logger + skipRequired bool +} + +func (p *Parser) SetDebugWriter(writer io.Writer) { + p.debug = debug.New(writer, "json", "parser") +} + +func (p *Parser) SetSkipRequiredCheck(b bool) { + p.skipRequired = b +} + +// New creates a new parser +func New(opts ...options.ParserOption) *Parser { + p := &Parser{} + for _, opt := range opts { + opt(p) + } + return p +} + +func (p *Parser) ParseFS(ctx context.Context, target fs.FS, path string) (map[string]interface{}, error) { + + files := make(map[string]interface{}) + if err := fs.WalkDir(target, filepath.ToSlash(path), func(path string, entry fs.DirEntry, err error) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err != nil { + return err + } + if entry.IsDir() { + return nil + } + if !p.Required(path) { + return nil + } + parsed, err := p.ParseFile(ctx, target, path) + if err != nil { + p.debug.Log("Parse error in '%s': %s", path, err) + return nil + } + files[path] = parsed + return nil + }); err != nil { + return nil, err + } + return files, nil +} + +// ParseFile parses JSON content from the provided filesystem path. 
+func (p *Parser) ParseFile(_ context.Context, fs fs.FS, path string) (interface{}, error) { + f, err := fs.Open(filepath.ToSlash(path)) + if err != nil { + return nil, err + } + defer func() { _ = f.Close() }() + var target interface{} + if err := json.NewDecoder(f).Decode(&target); err != nil { + return nil, err + } + return target, nil +} + +func (p *Parser) Required(path string) bool { + if p.skipRequired { + return true + } + return detection.IsType(path, nil, detection.FileTypeJSON) +} diff --git a/pkg/scanners/json/parser/parser_test.go b/pkg/scanners/json/parser/parser_test.go new file mode 100644 index 000000000000..2af3936d6124 --- /dev/null +++ b/pkg/scanners/json/parser/parser_test.go @@ -0,0 +1,51 @@ +package parser + +import ( + "context" + "testing" + + "github.com/liamg/memoryfs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_Parser(t *testing.T) { + input := `{ "x": { "y": 123, "z": ["a", "b", "c"]}}` + + memfs := memoryfs.New() + err := memfs.WriteFile("something.json", []byte(input), 0644) + require.NoError(t, err) + + data, err := New().ParseFile(context.TODO(), memfs, "something.json") + require.NoError(t, err) + + msi, ok := data.(map[string]interface{}) + require.True(t, ok) + + xObj, ok := msi["x"] + require.True(t, ok) + + xMsi, ok := xObj.(map[string]interface{}) + require.True(t, ok) + + yRaw, ok := xMsi["y"] + require.True(t, ok) + + y, ok := yRaw.(float64) + require.True(t, ok) + + assert.Equal(t, 123.0, y) + + zRaw, ok := xMsi["z"] + require.True(t, ok) + + z, ok := zRaw.([]interface{}) + require.True(t, ok) + + require.Len(t, z, 3) + + assert.Equal(t, "a", z[0]) + assert.Equal(t, "b", z[1]) + assert.Equal(t, "c", z[2]) + +} diff --git a/pkg/scanners/json/scanner.go b/pkg/scanners/json/scanner.go new file mode 100644 index 000000000000..994e434cfe19 --- /dev/null +++ b/pkg/scanners/json/scanner.go @@ -0,0 +1,170 @@ +package json + +import ( + "context" + "io" + "io/fs" + "sync" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners" + "github.com/aquasecurity/trivy/pkg/scanners/json/parser" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/types" +) + +var _ scanners.FSScanner = (*Scanner)(nil) +var _ options.ConfigurableScanner = (*Scanner)(nil) + +type Scanner struct { + debug debug.Logger + policyDirs []string + policyReaders []io.Reader + parser *parser.Parser + regoScanner *rego.Scanner + skipRequired bool + options []options.ScannerOption + sync.Mutex + frameworks []framework.Framework + spec string + loadEmbeddedPolicies bool + loadEmbeddedLibraries bool +} + +func (s *Scanner) SetRegoOnly(bool) { +} + +func (s *Scanner) SetFrameworks(frameworks []framework.Framework) { + s.frameworks = frameworks +} + +func (s *Scanner) SetSpec(spec string) { + s.spec = spec +} + +func (s *Scanner) SetUseEmbeddedPolicies(b bool) { + s.loadEmbeddedPolicies = b +} + +func (s *Scanner) SetUseEmbeddedLibraries(b bool) { + s.loadEmbeddedLibraries = b +} + +func (s *Scanner) SetPolicyReaders(readers []io.Reader) { + s.policyReaders = readers +} + +func (s *Scanner) SetDebugWriter(writer io.Writer) { + s.debug = debug.New(writer, "json", "scanner") +} + +func (s *Scanner) SetTraceWriter(_ io.Writer) { +} + +func (s *Scanner) 
SetPerResultTracingEnabled(_ bool) { +} + +func (s *Scanner) SetPolicyDirs(dirs ...string) { + s.policyDirs = dirs +} + +func (s *Scanner) SetDataDirs(_ ...string) {} +func (s *Scanner) SetPolicyNamespaces(_ ...string) {} + +func (s *Scanner) SetSkipRequiredCheck(skip bool) { + s.skipRequired = skip +} + +func (s *Scanner) SetPolicyFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} + +func (s *Scanner) SetDataFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} +func (s *Scanner) SetRegoErrorLimit(_ int) {} + +func NewScanner(opts ...options.ScannerOption) *Scanner { + s := &Scanner{ + options: opts, + } + for _, opt := range opts { + opt(s) + } + s.parser = parser.New(options.ParserWithSkipRequiredCheck(s.skipRequired)) + return s +} + +func (s *Scanner) Name() string { + return "JSON" +} + +func (s *Scanner) ScanFS(ctx context.Context, fs fs.FS, path string) (scan.Results, error) { + + files, err := s.parser.ParseFS(ctx, fs, path) + if err != nil { + return nil, err + } + + if len(files) == 0 { + return nil, nil + } + + var inputs []rego.Input + for path, file := range files { + inputs = append(inputs, rego.Input{ + Path: path, + FS: fs, + Contents: file, + }) + } + + results, err := s.scanRego(ctx, fs, inputs...) + if err != nil { + return nil, err + } + return results, nil +} + +func (s *Scanner) ScanFile(ctx context.Context, fs fs.FS, path string) (scan.Results, error) { + parsed, err := s.parser.ParseFile(ctx, fs, path) + if err != nil { + return nil, err + } + s.debug.Log("Scanning %s...", path) + return s.scanRego(ctx, fs, rego.Input{ + Path: path, + Contents: parsed, + }) +} + +func (s *Scanner) initRegoScanner(srcFS fs.FS) (*rego.Scanner, error) { + s.Lock() + defer s.Unlock() + if s.regoScanner != nil { + return s.regoScanner, nil + } + regoScanner := rego.NewScanner(types.SourceJSON, s.options...) + regoScanner.SetParentDebugLogger(s.debug) + if err := regoScanner.LoadPolicies(s.loadEmbeddedLibraries, s.loadEmbeddedPolicies, srcFS, s.policyDirs, s.policyReaders); err != nil { + return nil, err + } + s.regoScanner = regoScanner + return regoScanner, nil +} + +func (s *Scanner) scanRego(ctx context.Context, srcFS fs.FS, inputs ...rego.Input) (scan.Results, error) { + regoScanner, err := s.initRegoScanner(srcFS) + if err != nil { + return nil, err + } + results, err := regoScanner.ScanInput(ctx, inputs...) 
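+ // ScanInput runs every input through the loaded policies; the + // SetSourceAndFilesystem call below ties the results back to the scanned + // filesystem so findings resolve to real file paths.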
+ if err != nil { + return nil, err + } + results.SetSourceAndFilesystem("", srcFS, false) + return results, nil +} diff --git a/pkg/scanners/json/scanner_test.go b/pkg/scanners/json/scanner_test.go new file mode 100644 index 000000000000..e48c23e75028 --- /dev/null +++ b/pkg/scanners/json/scanner_test.go @@ -0,0 +1,77 @@ +package json + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_BasicScan(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "/code/data.json": `{ "x": { "y": 123, "z": ["a", "b", "c"]}}`, + "/trules/rule.rego": `package builtin.json.lol + +__rego_metadata__ := { + "id": "ABC123", + "avd_id": "AVD-AB-0123", + "title": "title", + "short_code": "short", + "severity": "CRITICAL", + "type": "JSON Check", + "description": "description", + "recommended_actions": "actions", + "url": "https://example.com", +} + +__rego_input__ := { + "combine": false, + "selector": [{"type": "json"}], +} + +deny[res] { + input.x.y == 123 + res := { + "msg": "oh no", + "startline": 1, + "endline": 2, + } +} + +`, + }) + + scanner := NewScanner(options.ScannerWithPolicyDirs("trules")) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + require.NoError(t, err) + + require.Len(t, results.GetFailed(), 1) + + assert.Equal(t, scan.Rule{ + AVDID: "AVD-AB-0123", + Aliases: []string{"ABC123"}, + ShortCode: "short", + Summary: "title", + Explanation: "description", + Impact: "", + Resolution: "actions", + Provider: "json", + Service: "general", + Links: []string{"https://example.com"}, + Severity: "CRITICAL", + Terraform: &scan.EngineMetadata{}, + CloudFormation: &scan.EngineMetadata{}, + CustomChecks: scan.CustomChecks{ + Terraform: (*scan.TerraformCustomCheck)(nil), + }, + RegoPackage: "data.builtin.json.lol", + Frameworks: map[framework.Framework][]string{}, + }, results.GetFailed()[0].Rule()) +} diff --git a/pkg/scanners/kubernetes/parser/manifest.go b/pkg/scanners/kubernetes/parser/manifest.go new file mode 100644 index 000000000000..3f809a6f9145 --- /dev/null +++ b/pkg/scanners/kubernetes/parser/manifest.go @@ -0,0 +1,33 @@ +package parser + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +type Manifest struct { + Path string + Content *ManifestNode +} + +func (m *Manifest) UnmarshalYAML(value *yaml.Node) error { + + switch value.Tag { + case "!!map": + node := new(ManifestNode) + node.Path = m.Path + if err := value.Decode(node); err != nil { + return err + } + m.Content = node + default: + return fmt.Errorf("failed to handle tag: %s", value.Tag) + } + + return nil +} + +func (m *Manifest) ToRego() interface{} { + return m.Content.ToRego() +} diff --git a/pkg/scanners/kubernetes/parser/manifest_node.go b/pkg/scanners/kubernetes/parser/manifest_node.go new file mode 100644 index 000000000000..1f82ca1e3680 --- /dev/null +++ b/pkg/scanners/kubernetes/parser/manifest_node.go @@ -0,0 +1,140 @@ +package parser + +import ( + "fmt" + "strconv" + + "gopkg.in/yaml.v3" +) + +type TagType string + +const ( + TagBool TagType = "!!bool" + TagInt TagType = "!!int" + TagFloat TagType = "!!float" + TagStr TagType = "!!str" + TagString TagType = "!!string" + TagSlice TagType = "!!seq" + TagMap TagType = "!!map" +) + +type ManifestNode struct { + StartLine int + EndLine 
int
+	Offset    int
+	Value     interface{}
+	Type      TagType
+	Path      string
+}
+
+func (r *ManifestNode) ToRego() interface{} {
+	if r == nil {
+		return nil
+	}
+	switch r.Type {
+	case TagBool, TagInt, TagFloat, TagString, TagStr: // scalar values (floats included) pass through unchanged
+		return r.Value
+	case TagSlice:
+		var output []interface{}
+		for _, node := range r.Value.([]ManifestNode) {
+			output = append(output, node.ToRego())
+		}
+		return output
+	case TagMap:
+		output := make(map[string]interface{})
+		output["__defsec_metadata"] = map[string]interface{}{
+			"startline": r.StartLine,
+			"endline":   r.EndLine,
+			"filepath":  r.Path,
+			"offset":    r.Offset,
+		}
+		for key, node := range r.Value.(map[string]ManifestNode) {
+			output[key] = node.ToRego()
+		}
+		return output
+	}
+	return nil
+}
+
+func (r *ManifestNode) UnmarshalYAML(node *yaml.Node) error {
+
+	r.StartLine = node.Line
+	r.EndLine = node.Line
+	r.Type = TagType(node.Tag)
+
+	switch TagType(node.Tag) {
+	case TagString, TagStr:
+		r.Value = node.Value
+	case TagInt:
+		val, err := strconv.Atoi(node.Value)
+		if err != nil {
+			return err
+		}
+		r.Value = val
+	case TagFloat:
+		val, err := strconv.ParseFloat(node.Value, 64)
+		if err != nil {
+			return err
+		}
+		r.Value = val
+	case TagBool:
+		val, err := strconv.ParseBool(node.Value)
+		if err != nil {
+			return err
+		}
+		r.Value = val
+	case TagMap:
+		return r.handleMapTag(node)
+	case TagSlice:
+		return r.handleSliceTag(node)
+	default:
+		return fmt.Errorf("unsupported node tag: %s", node.Tag)
+	}
+	return nil
+}
+
+func (r *ManifestNode) handleSliceTag(node *yaml.Node) error {
+	var nodes []ManifestNode
+	max := node.Line
+	for _, contentNode := range node.Content {
+		newNode := new(ManifestNode)
+		newNode.Path = r.Path
+		if err := contentNode.Decode(newNode); err != nil {
+			return err
+		}
+		if newNode.EndLine > max {
+			max = newNode.EndLine
+		}
+		nodes = append(nodes, *newNode)
+	}
+	r.EndLine = max
+	r.Value = nodes
+	return nil
+}
+
+func (r *ManifestNode) handleMapTag(node *yaml.Node) error {
+	output := make(map[string]ManifestNode)
+	var key string
+	max := node.Line
+	for i, contentNode := range node.Content {
+		if i%2 == 0 { // even indices are keys, odd indices are the corresponding values
+			key = contentNode.Value
+		} else {
+			newNode := new(ManifestNode)
+			newNode.Path = r.Path
+			if err := contentNode.Decode(newNode); err != nil {
+				return err
+			}
+			output[key] = *newNode
+			if newNode.EndLine > max {
+				max = newNode.EndLine
+			}
+		}
+	}
+	r.EndLine = max
+	r.Value = output
+	return nil
+}
diff --git a/pkg/scanners/kubernetes/parser/parser.go b/pkg/scanners/kubernetes/parser/parser.go
new file mode 100644
index 000000000000..5868806cdbdb
--- /dev/null
+++ b/pkg/scanners/kubernetes/parser/parser.go
@@ -0,0 +1,137 @@
+package parser
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/fs"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"gopkg.in/yaml.v3"
+
+	"github.com/aquasecurity/trivy/pkg/debug"
+	"github.com/aquasecurity/trivy/pkg/detection"
+	"github.com/aquasecurity/trivy/pkg/scanners/options"
+)
+
+var _ options.ConfigurableParser = (*Parser)(nil)
+
+type Parser struct {
+	debug        debug.Logger
+	skipRequired bool
+}
+
+func (p *Parser) SetDebugWriter(writer io.Writer) {
+	p.debug = debug.New(writer, "kubernetes", "parser")
+}
+
+func (p *Parser) SetSkipRequiredCheck(b bool) {
+	p.skipRequired = b
+}
+
+// New creates a new K8s parser
+func New(options ...options.ParserOption) *Parser {
+	p := &Parser{}
+	for _, option := range options {
+		option(p)
+	}
+	return p
+}
+
+func (p *Parser) ParseFS(ctx context.Context, target fs.FS, path string) (map[string][]interface{}, error) {
+	files := make(map[string][]interface{})
+	if err := fs.WalkDir(target, filepath.ToSlash(path), func(path string, entry fs.DirEntry, err error) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+		if err != nil {
+			return err
+		}
+		if entry.IsDir() {
+			return nil
+		}
+		if !p.required(target, path) {
+			return nil
+		}
+		parsed, err := p.ParseFile(ctx, target, path)
+		if err != nil {
+			p.debug.Log("Parse error in '%s': %s", path, err)
+			return nil
+		}
+		files[path] = parsed
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return files, nil
+}
+
+// ParseFile parses a Kubernetes manifest from the provided filesystem path.
+func (p *Parser) ParseFile(_ context.Context, fs fs.FS, path string) ([]interface{}, error) {
+	f, err := fs.Open(filepath.ToSlash(path))
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = f.Close() }()
+	return p.Parse(f, path)
+}
+
+func (p *Parser) required(fs fs.FS, path string) bool {
+	if p.skipRequired {
+		return true
+	}
+	f, err := fs.Open(filepath.ToSlash(path))
+	if err != nil {
+		return false
+	}
+	defer func() { _ = f.Close() }()
+	if data, err := io.ReadAll(f); err == nil {
+		return detection.IsType(path, bytes.NewReader(data), detection.FileTypeKubernetes)
+	}
+	return false
+}
+
+func (p *Parser) Parse(r io.Reader, path string) ([]interface{}, error) {
+
+	contents, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	trimmed := strings.TrimSpace(string(contents))
+	if len(trimmed) == 0 {
+		// guard against empty and whitespace-only documents, which would
+		// otherwise panic on the index below
+		return nil, nil
+	}
+
+	if trimmed[0] == '{' {
+		var target interface{}
+		if err := json.Unmarshal(contents, &target); err != nil {
+			return nil, err
+		}
+		return []interface{}{target}, nil
+	}
+
+	var results []interface{}
+
+	re := regexp.MustCompile(`(?m:^---\r?\n)`)
+	pos := 0
+	for _, partial := range re.Split(string(contents), -1) {
+		var result Manifest
+		result.Path = path
+		if err := yaml.Unmarshal([]byte(partial), &result); err != nil {
+			return nil, fmt.Errorf("unmarshal yaml: %w", err)
+		}
+		if result.Content != nil {
+			result.Content.Offset = pos
+			results = append(results, result.ToRego())
+		}
+		pos += len(strings.Split(partial, "\n"))
+	}
+
+	return results, nil
+}
diff --git a/pkg/scanners/kubernetes/scanner.go b/pkg/scanners/kubernetes/scanner.go
new file mode 100644
index 000000000000..5edd5fb81eeb
--- /dev/null
+++ b/pkg/scanners/kubernetes/scanner.go
@@ -0,0 +1,176 @@
+package kubernetes
+
+import (
+	"context"
+	"io"
+	"io/fs"
+	"path/filepath"
+	"sort"
+	"sync"
+
+	"github.com/aquasecurity/trivy/pkg/debug"
+	"github.com/aquasecurity/trivy/pkg/framework"
+	"github.com/aquasecurity/trivy/pkg/rego"
+	"github.com/aquasecurity/trivy/pkg/scan"
+	"github.com/aquasecurity/trivy/pkg/scanners"
+	"github.com/aquasecurity/trivy/pkg/scanners/kubernetes/parser"
+	"github.com/aquasecurity/trivy/pkg/scanners/options"
+	"github.com/aquasecurity/trivy/pkg/types"
+	"github.com/liamg/memoryfs"
+)
+
+var _ scanners.FSScanner = (*Scanner)(nil)
+var _ options.ConfigurableScanner = (*Scanner)(nil)
+
+type Scanner struct {
+	debug                 debug.Logger
+	options               []options.ScannerOption
+	policyDirs            []string
+	policyReaders         []io.Reader
+	regoScanner           *rego.Scanner
+	parser                *parser.Parser
+	skipRequired          bool
+	sync.Mutex
+	loadEmbeddedPolicies  bool
+	frameworks            []framework.Framework
+	spec                  string
+	loadEmbeddedLibraries bool
+}
+
+func (s *Scanner) SetSpec(spec string) {
+	s.spec = spec
+}
+
+func (s *Scanner) SetRegoOnly(bool) {}
+
+func (s *Scanner) SetFrameworks(frameworks []framework.Framework) {
+	s.frameworks = frameworks
+}
+
+func (s *Scanner) SetUseEmbeddedPolicies(b bool) {
+	s.loadEmbeddedPolicies = b
+}
+
+func (s *Scanner) SetUseEmbeddedLibraries(b bool) {
+	s.loadEmbeddedLibraries = b
+}
+
+func (s *Scanner) SetPolicyReaders(readers []io.Reader) {
+	s.policyReaders = readers
+}
+
+func (s *Scanner) SetSkipRequiredCheck(skip bool) {
+	s.skipRequired = skip
+}
+
+func (s *Scanner) SetDebugWriter(writer io.Writer) {
+	s.debug = debug.New(writer, "kubernetes", "scanner")
+}
+
+func (s *Scanner) SetTraceWriter(_ io.Writer) {
+}
+
+func (s *Scanner) SetPerResultTracingEnabled(_ bool) {
+}
+
+func (s *Scanner) SetPolicyDirs(dirs ...string) {
+	s.policyDirs = dirs
+}
+
+func (s *Scanner) SetDataDirs(...string)           {}
+func (s *Scanner) SetPolicyNamespaces(_ ...string) {}
+
+func (s *Scanner) SetPolicyFilesystem(_ fs.FS) {
+	// handled by rego when option is passed on
+}
+
+func (s *Scanner) SetDataFilesystem(_ fs.FS) {
+	// handled by rego when option is passed on
+}
+
+func (s *Scanner) SetRegoErrorLimit(_ int) {}
+
+func NewScanner(opts ...options.ScannerOption) *Scanner {
+	s := &Scanner{
+		options: opts,
+	}
+	for _, opt := range opts {
+		opt(s)
+	}
+	s.parser = parser.New(options.ParserWithSkipRequiredCheck(s.skipRequired))
+	return s
+}
+
+func (s *Scanner) Name() string {
+	return "Kubernetes"
+}
+
+func (s *Scanner) initRegoScanner(srcFS fs.FS) (*rego.Scanner, error) {
+	s.Lock()
+	defer s.Unlock()
+	if s.regoScanner != nil {
+		return s.regoScanner, nil
+	}
+	regoScanner := rego.NewScanner(types.SourceKubernetes, s.options...)
+	regoScanner.SetParentDebugLogger(s.debug)
+	if err := regoScanner.LoadPolicies(s.loadEmbeddedLibraries, s.loadEmbeddedPolicies, srcFS, s.policyDirs, s.policyReaders); err != nil {
+		return nil, err
+	}
+	s.regoScanner = regoScanner
+	return regoScanner, nil
+}
+
+func (s *Scanner) ScanReader(ctx context.Context, filename string, reader io.Reader) (scan.Results, error) {
+	memfs := memoryfs.New()
+	// create any parent directories before writing the file itself
+	if dir := filepath.Dir(filename); dir != "." {
+		if err := memfs.MkdirAll(dir, 0o700); err != nil {
+			return nil, err
+		}
+	}
+	data, err := io.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+	if err := memfs.WriteFile(filename, data, 0o644); err != nil {
+		return nil, err
+	}
+	return s.ScanFS(ctx, memfs, ".")
+}
+
+func (s *Scanner) ScanFS(ctx context.Context, target fs.FS, dir string) (scan.Results, error) {
+
+	k8sFilesets, err := s.parser.ParseFS(ctx, target, dir)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(k8sFilesets) == 0 {
+		return nil, nil
+	}
+
+	var inputs []rego.Input
+	for path, k8sFiles := range k8sFilesets {
+		for _, content := range k8sFiles {
+			inputs = append(inputs, rego.Input{
+				Path:     path,
+				FS:       target,
+				Contents: content,
+			})
+		}
+	}
+
+	regoScanner, err := s.initRegoScanner(target)
+	if err != nil {
+		return nil, err
+	}
+
+	s.debug.Log("Scanning %d files...", len(inputs))
+	results, err := regoScanner.ScanInput(ctx, inputs...)
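+	// each document parsed from a manifest becomes its own input above, so a
+	// single multi-document YAML file can yield several independent results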
+ if err != nil { + return nil, err + } + results.SetSourceAndFilesystem("", target, false) + + sort.Slice(results, func(i, j int) bool { + return results[i].Rule().AVDID < results[j].Rule().AVDID + }) + return results, nil +} diff --git a/pkg/scanners/kubernetes/scanner_test.go b/pkg/scanners/kubernetes/scanner_test.go new file mode 100644 index 000000000000..8aaf1ddfbeeb --- /dev/null +++ b/pkg/scanners/kubernetes/scanner_test.go @@ -0,0 +1,733 @@ +package kubernetes + +import ( + "context" + "os" + "strings" + "testing" + + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_BasicScan(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "/code/example.yaml": ` +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello +`, + "/trules/lib.k8s.rego": ` + package lib.kubernetes + + default is_gatekeeper = false + + is_gatekeeper { + has_field(input, "review") + has_field(input.review, "object") + } + + object = input { + not is_gatekeeper + } + + object = input.review.object { + is_gatekeeper + } + + format(msg) = gatekeeper_format { + is_gatekeeper + gatekeeper_format = {"msg": msg} + } + + format(msg) = msg { + not is_gatekeeper + } + + name = object.metadata.name + + default namespace = "default" + + namespace = object.metadata.namespace + + #annotations = object.metadata.annotations + + kind = object.kind + + is_pod { + kind = "Pod" + } + + is_cronjob { + kind = "CronJob" + } + + default is_controller = false + + is_controller { + kind = "Deployment" + } + + is_controller { + kind = "StatefulSet" + } + + is_controller { + kind = "DaemonSet" + } + + is_controller { + kind = "ReplicaSet" + } + + is_controller { + kind = "ReplicationController" + } + + is_controller { + kind = "Job" + } + + split_image(image) = [image, "latest"] { + not contains(image, ":") + } + + split_image(image) = [image_name, tag] { + [image_name, tag] = split(image, ":") + } + + pod_containers(pod) = all_containers { + keys = {"containers", "initContainers"} + all_containers = [c | keys[k]; c = pod.spec[k][_]] + } + + containers[container] { + pods[pod] + all_containers = pod_containers(pod) + container = all_containers[_] + } + + containers[container] { + all_containers = pod_containers(object) + container = all_containers[_] + } + + pods[pod] { + is_pod + pod = object + } + + pods[pod] { + is_controller + pod = object.spec.template + } + + pods[pod] { + is_cronjob + pod = object.spec.jobTemplate.spec.template + } + + volumes[volume] { + pods[pod] + volume = pod.spec.volumes[_] + } + + dropped_capability(container, cap) { + container.securityContext.capabilities.drop[_] == cap + } + + added_capability(container, cap) { + container.securityContext.capabilities.add[_] == cap + } + + has_field(obj, field) { + obj[field] + } + + no_read_only_filesystem(c) { + not has_field(c, "securityContext") + } + + no_read_only_filesystem(c) { + has_field(c, "securityContext") + not has_field(c.securityContext, "readOnlyRootFilesystem") + } + + privilege_escalation_allowed(c) { + not has_field(c, "securityContext") + } + + privilege_escalation_allowed(c) { + has_field(c, "securityContext") + has_field(c.securityContext, 
"allowPrivilegeEscalation") + } + + annotations[annotation] { + pods[pod] + annotation = pod.metadata.annotations + } + + host_ipcs[host_ipc] { + pods[pod] + host_ipc = pod.spec.hostIPC + } + + host_networks[host_network] { + pods[pod] + host_network = pod.spec.hostNetwork + } + + host_pids[host_pid] { + pods[pod] + host_pid = pod.spec.hostPID + } + + host_aliases[host_alias] { + pods[pod] + host_alias = pod.spec + } + `, + "/trules/lib.util.rego": ` + package lib.utils + + has_key(x, k) { + _ = x[k] + }`, + "/trules/rule.rego": ` +package builtin.kubernetes.KSV011 + +import data.lib.kubernetes +import data.lib.utils + +default failLimitsCPU = false + +__rego_metadata__ := { + "id": "KSV011", + "avd_id": "AVD-KSV-0011", + "title": "CPU not limited", + "short_code": "limit-cpu", + "version": "v1.0.0", + "severity": "LOW", + "type": "Kubernetes Security Check", + "description": "Enforcing CPU limits prevents DoS via resource exhaustion.", + "recommended_actions": "Set a limit value under 'containers[].resources.limits.cpu'.", + "url": "https://cloud.google.com/blog/products/containers-kubernetes/kubernetes-best-practices-resource-requests-and-limits", +} + +__rego_input__ := { + "combine": false, + "selector": [{"type": "kubernetes"}], +} + +# getLimitsCPUContainers returns all containers which have set resources.limits.cpu +getLimitsCPUContainers[container] { + allContainers := kubernetes.containers[_] + utils.has_key(allContainers.resources.limits, "cpu") + container := allContainers.name +} + +# getNoLimitsCPUContainers returns all containers which have not set +# resources.limits.cpu +getNoLimitsCPUContainers[container] { + container := kubernetes.containers[_].name + not getLimitsCPUContainers[container] +} + +# failLimitsCPU is true if containers[].resources.limits.cpu is not set +# for ANY container +failLimitsCPU { + count(getNoLimitsCPUContainers) > 0 +} + +deny[res] { + failLimitsCPU + + msg := kubernetes.format(sprintf("Container '%s' of %s '%s' should set 'resources.limits.cpu'", [getNoLimitsCPUContainers[_], kubernetes.kind, kubernetes.name])) + + res := { + "msg": msg, + "id": __rego_metadata__.id, + "title": __rego_metadata__.title, + "severity": __rego_metadata__.severity, + "type": __rego_metadata__.type, + "startline": 6, + "endline": 10, + } +} +`, + }) + + scanner := NewScanner(options.ScannerWithPolicyDirs("trules")) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + require.NoError(t, err) + + require.Len(t, results.GetFailed(), 1) + + assert.Equal(t, scan.Rule{ + AVDID: "AVD-KSV-0011", + Aliases: []string{"KSV011"}, + ShortCode: "limit-cpu", + Summary: "CPU not limited", + Explanation: "Enforcing CPU limits prevents DoS via resource exhaustion.", + Impact: "", + Resolution: "Set a limit value under 'containers[].resources.limits.cpu'.", + Provider: "kubernetes", + Service: "general", + Links: []string{"https://cloud.google.com/blog/products/containers-kubernetes/kubernetes-best-practices-resource-requests-and-limits"}, + Severity: "LOW", + Terraform: &scan.EngineMetadata{}, + CloudFormation: &scan.EngineMetadata{}, + CustomChecks: scan.CustomChecks{Terraform: (*scan.TerraformCustomCheck)(nil)}, + RegoPackage: "data.builtin.kubernetes.KSV011", + Frameworks: map[framework.Framework][]string{}, + }, results.GetFailed()[0].Rule()) + + failure := results.GetFailed()[0] + actualCode, err := failure.GetCode() + require.NoError(t, err) + for i := range actualCode.Lines { + actualCode.Lines[i].Highlighted = "" + } + assert.Equal(t, []scan.Line{ + { + Number: 6, + 
Content: "spec: ", + IsCause: true, + FirstCause: true, + Annotation: "", + }, + { + Number: 7, + Content: " containers: ", + IsCause: true, + Annotation: "", + }, + { + Number: 8, + Content: " - command: [\"sh\", \"-c\", \"echo 'Hello' && sleep 1h\"]", + IsCause: true, + Annotation: "", + }, + { + Number: 9, + Content: " image: busybox", + IsCause: true, + Annotation: "", + }, + { + Number: 10, + Content: " name: hello", + IsCause: true, + LastCause: true, + Annotation: "", + }, + }, actualCode.Lines) +} + +func Test_FileScan(t *testing.T) { + + results, err := NewScanner(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true), options.ScannerWithEmbeddedLibraries(true)).ScanReader(context.TODO(), "k8s.yaml", strings.NewReader(` +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello +`)) + require.NoError(t, err) + + assert.Greater(t, len(results.GetFailed()), 0) +} + +func Test_FileScan_WithSeparator(t *testing.T) { + + results, err := NewScanner(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)).ScanReader(context.TODO(), "k8s.yaml", strings.NewReader(` +--- +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello +`)) + require.NoError(t, err) + + assert.Greater(t, len(results.GetFailed()), 0) +} + +func Test_FileScan_MultiManifests(t *testing.T) { + file := ` +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello1-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello1' && sleep 1h"] + image: busybox + name: hello1 +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello2-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello2' && sleep 1h"] + image: busybox + name: hello2 +` + + results, err := NewScanner( + options.ScannerWithEmbeddedPolicies(true), + options.ScannerWithEmbeddedLibraries(true), + options.ScannerWithEmbeddedLibraries(true)).ScanReader(context.TODO(), "k8s.yaml", strings.NewReader(file)) + require.NoError(t, err) + + assert.Greater(t, len(results.GetFailed()), 1) + fileLines := strings.Split(file, "\n") + for _, failure := range results.GetFailed() { + actualCode, err := failure.GetCode() + require.NoError(t, err) + assert.Greater(t, len(actualCode.Lines), 0) + for _, line := range actualCode.Lines { + assert.Greater(t, len(fileLines), line.Number) + assert.Equal(t, line.Content, fileLines[line.Number-1]) + } + } +} + +func Test_FileScanWithPolicyReader(t *testing.T) { + + results, err := NewScanner(options.ScannerWithPolicyReader(strings.NewReader(`package defsec + +deny[msg] { + msg = "fail" +} +`))).ScanReader(context.TODO(), "k8s.yaml", strings.NewReader(` +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello +`)) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) +} + +func Test_FileScanJSON(t *testing.T) { + + results, err := NewScanner(options.ScannerWithPolicyReader(strings.NewReader(`package defsec + +deny[msg] { + input.kind == "Pod" + msg = "fail" +} +`))).ScanReader(context.TODO(), "k8s.json", strings.NewReader(` +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "mongo", + "labels": { + "name": "mongo", + "role": "mongo" + } + }, + "spec": { + "volumes": [ + { + "name": 
"mongo-disk", + "gcePersistentDisk": { + "pdName": "mongo-disk", + "fsType": "ext4" + } + } + ], + "containers": [ + { + "name": "mongo", + "image": "mongo:latest", + "ports": [ + { + "name": "mongo", + "containerPort": 27017 + } + ], + "volumeMounts": [ + { + "name": "mongo-disk", + "mountPath": "/data/db" + } + ] + } + ] + } +} +`)) + require.NoError(t, err) + + assert.Equal(t, 1, len(results.GetFailed())) +} + +func Test_FileScanWithMetadata(t *testing.T) { + + results, err := NewScanner( + options.ScannerWithDebug(os.Stdout), + options.ScannerWithTrace(os.Stdout), + options.ScannerWithPolicyReader(strings.NewReader(`package defsec + +deny[msg] { + input.kind == "Pod" + msg := { + "msg": "fail", + "startline": 2, + "endline": 2, + "filepath": "chartname/template/serviceAccount.yaml" + } +} +`))).ScanReader( + context.TODO(), + "k8s.yaml", + strings.NewReader(` +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello +`)) + require.NoError(t, err) + + assert.Greater(t, len(results.GetFailed()), 0) + + firstResult := results.GetFailed()[0] + assert.Equal(t, 2, firstResult.Metadata().Range().GetStartLine()) + assert.Equal(t, 2, firstResult.Metadata().Range().GetEndLine()) + assert.Equal(t, "chartname/template/serviceAccount.yaml", firstResult.Metadata().Range().GetFilename()) +} + +func Test_FileScanExampleWithResultFunction(t *testing.T) { + + results, err := NewScanner( + options.ScannerWithDebug(os.Stdout), + options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true), + options.ScannerWithPolicyReader(strings.NewReader(`package defsec + +import data.lib.kubernetes + +default checkCapsDropAll = false + +__rego_metadata__ := { +"id": "KSV003", +"avd_id": "AVD-KSV-0003", +"title": "Default capabilities not dropped", +"short_code": "drop-default-capabilities", +"version": "v1.0.0", +"severity": "LOW", +"type": "Kubernetes Security Check", +"description": "The container should drop all default capabilities and add only those that are needed for its execution.", +"recommended_actions": "Add 'ALL' to containers[].securityContext.capabilities.drop.", +"url": "https://kubesec.io/basics/containers-securitycontext-capabilities-drop-index-all/", +} + +__rego_input__ := { +"combine": false, +"selector": [{"type": "kubernetes"}], +} + +# Get all containers which include 'ALL' in security.capabilities.drop +getCapsDropAllContainers[container] { +allContainers := kubernetes.containers[_] +lower(allContainers.securityContext.capabilities.drop[_]) == "all" +container := allContainers.name +} + +# Get all containers which don't include 'ALL' in security.capabilities.drop +getCapsNoDropAllContainers[container] { +container := kubernetes.containers[_] +not getCapsDropAllContainers[container.name] +} + +deny[res] { +output := getCapsNoDropAllContainers[_] + +msg := kubernetes.format(sprintf("Container '%s' of %s '%s' should add 'ALL' to 'securityContext.capabilities.drop'", [output.name, kubernetes.kind, kubernetes.name])) + +res := result.new(msg, output) +} + +`))).ScanReader( + context.TODO(), + "k8s.yaml", + strings.NewReader(` +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + capabilities: + drop: + - nothing +`)) + require.NoError(t, err) + + require.Greater(t, len(results.GetFailed()), 0) + + firstResult := results.GetFailed()[0] 
+ assert.Equal(t, 8, firstResult.Metadata().Range().GetStartLine()) + assert.Equal(t, 14, firstResult.Metadata().Range().GetEndLine()) + assert.Equal(t, "k8s.yaml", firstResult.Metadata().Range().GetFilename()) +} + +func Test_checkPolicyIsApplicable(t *testing.T) { + srcFS := testutil.CreateFS(t, map[string]string{ + "policies/pod_policy.rego": `# METADATA +# title: "Process can elevate its own privileges" +# description: "A program inside the container can elevate its own privileges and run as root, which might give the program control over the container and node." +# scope: package +# schemas: +# - input: schema["kubernetes"] +# related_resources: +# - https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted +# custom: +# id: KSV001 +# avd_id: AVD-KSV-0999 +# severity: MEDIUM +# short_code: no-self-privesc +# recommended_action: "Set 'set containers[].securityContext.allowPrivilegeEscalation' to 'false'." +# input: +# selector: +# - type: kubernetes +# subtypes: +# - kind: Pod +package builtin.kubernetes.KSV999 + +import data.lib.kubernetes +import data.lib.utils + +default checkAllowPrivilegeEscalation = false + +# getNoPrivilegeEscalationContainers returns the names of all containers which have +# securityContext.allowPrivilegeEscalation set to false. +getNoPrivilegeEscalationContainers[container] { + allContainers := kubernetes.containers[_] + allContainers.securityContext.allowPrivilegeEscalation == false + container := allContainers.name +} + +# getPrivilegeEscalationContainers returns the names of all containers which have +# securityContext.allowPrivilegeEscalation set to true or not set. +getPrivilegeEscalationContainers[container] { + containerName := kubernetes.containers[_].name + not getNoPrivilegeEscalationContainers[containerName] + container := kubernetes.containers[_] +} + +deny[res] { + output := getPrivilegeEscalationContainers[_] + msg := kubernetes.format(sprintf("Container '%s' of %s '%s' should set 'securityContext.allowPrivilegeEscalation' to false", [output.name, kubernetes.kind, kubernetes.name])) + res := result.new(msg, output) +} + +`, + "policies/namespace_policy.rego": `# METADATA +# title: "The default namespace should not be used" +# description: "ensure that default namespace should not be used" +# scope: package +# schemas: +# - input: schema["kubernetes"] +# related_resources: +# - https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ +# custom: +# id: KSV110 +# avd_id: AVD-KSV-0888 +# severity: LOW +# short_code: default-namespace-should-not-be-used +# recommended_action: "Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace." 
+# input: +# selector: +# - type: kubernetes +# subtypes: +# - kind: Namespace +package builtin.kubernetes.KSV888 + +import data.lib.kubernetes + +default defaultNamespaceInUse = false + +defaultNamespaceInUse { + kubernetes.namespace == "default" +} + +deny[res] { + defaultNamespaceInUse + msg := sprintf("%s '%s' should not be set with 'default' namespace", [kubernetes.kind, kubernetes.name]) + res := result.new(msg, input.metadata.namespace) +} + +`, + "test/KSV001/pod.yaml": `apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + capabilities: + drop: + - all +`, + }) + + scanner := NewScanner( + // options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true), + options.ScannerWithEmbeddedLibraries(true), + options.ScannerWithPolicyDirs("policies/"), + options.ScannerWithPolicyFilesystem(srcFS), + ) + results, err := scanner.ScanFS(context.TODO(), srcFS, "test/KSV001") + require.NoError(t, err) + + require.NoError(t, err) + require.Len(t, results.GetFailed(), 1) + + failure := results.GetFailed()[0].Rule() + assert.Equal(t, "Process can elevate its own privileges", failure.Summary) +} diff --git a/pkg/scanners/options/parser.go b/pkg/scanners/options/parser.go new file mode 100644 index 000000000000..65ec41bb825d --- /dev/null +++ b/pkg/scanners/options/parser.go @@ -0,0 +1,23 @@ +package options + +import "io" + +type ConfigurableParser interface { + SetDebugWriter(io.Writer) + SetSkipRequiredCheck(bool) +} + +type ParserOption func(s ConfigurableParser) + +func ParserWithSkipRequiredCheck(skip bool) ParserOption { + return func(s ConfigurableParser) { + s.SetSkipRequiredCheck(skip) + } +} + +// ParserWithDebug specifies an io.Writer for debug logs - if not set, they are discarded +func ParserWithDebug(w io.Writer) ParserOption { + return func(s ConfigurableParser) { + s.SetDebugWriter(w) + } +} diff --git a/pkg/scanners/options/scanner.go b/pkg/scanners/options/scanner.go new file mode 100644 index 000000000000..325e5951a0c7 --- /dev/null +++ b/pkg/scanners/options/scanner.go @@ -0,0 +1,128 @@ +package options + +import ( + "io" + "io/fs" + + "github.com/aquasecurity/trivy/pkg/framework" +) + +type ConfigurableScanner interface { + SetDebugWriter(io.Writer) + SetTraceWriter(io.Writer) + SetPerResultTracingEnabled(bool) + SetPolicyDirs(...string) + SetDataDirs(...string) + SetPolicyNamespaces(...string) + SetSkipRequiredCheck(bool) + SetPolicyReaders([]io.Reader) + SetPolicyFilesystem(fs.FS) + SetDataFilesystem(fs.FS) + SetUseEmbeddedPolicies(bool) + SetFrameworks(frameworks []framework.Framework) + SetSpec(spec string) + SetRegoOnly(regoOnly bool) + SetRegoErrorLimit(limit int) + SetUseEmbeddedLibraries(bool) +} + +type ScannerOption func(s ConfigurableScanner) + +func ScannerWithFrameworks(frameworks ...framework.Framework) ScannerOption { + return func(s ConfigurableScanner) { + s.SetFrameworks(frameworks) + } +} + +func ScannerWithSpec(spec string) ScannerOption { + return func(s ConfigurableScanner) { + s.SetSpec(spec) + } +} + +func ScannerWithPolicyReader(readers ...io.Reader) ScannerOption { + return func(s ConfigurableScanner) { + s.SetPolicyReaders(readers) + } +} + +// ScannerWithDebug specifies an io.Writer for debug logs - if not set, they are discarded +func ScannerWithDebug(w io.Writer) ScannerOption { + return func(s ConfigurableScanner) { + s.SetDebugWriter(w) + } +} + +func 
ScannerWithEmbeddedPolicies(embedded bool) ScannerOption { + return func(s ConfigurableScanner) { + s.SetUseEmbeddedPolicies(embedded) + } +} + +func ScannerWithEmbeddedLibraries(enabled bool) ScannerOption { + return func(s ConfigurableScanner) { + s.SetUseEmbeddedLibraries(enabled) + } +} + +// ScannerWithTrace specifies an io.Writer for trace logs (mainly rego tracing) - if not set, they are discarded +func ScannerWithTrace(w io.Writer) ScannerOption { + return func(s ConfigurableScanner) { + s.SetTraceWriter(w) + } +} + +func ScannerWithPerResultTracing(enabled bool) ScannerOption { + return func(s ConfigurableScanner) { + s.SetPerResultTracingEnabled(enabled) + } +} + +func ScannerWithPolicyDirs(paths ...string) ScannerOption { + return func(s ConfigurableScanner) { + s.SetPolicyDirs(paths...) + } +} + +func ScannerWithDataDirs(paths ...string) ScannerOption { + return func(s ConfigurableScanner) { + s.SetDataDirs(paths...) + } +} + +// ScannerWithPolicyNamespaces - namespaces which indicate rego policies containing enforced trules +func ScannerWithPolicyNamespaces(namespaces ...string) ScannerOption { + return func(s ConfigurableScanner) { + s.SetPolicyNamespaces(namespaces...) + } +} + +func ScannerWithSkipRequiredCheck(skip bool) ScannerOption { + return func(s ConfigurableScanner) { + s.SetSkipRequiredCheck(skip) + } +} + +func ScannerWithPolicyFilesystem(f fs.FS) ScannerOption { + return func(s ConfigurableScanner) { + s.SetPolicyFilesystem(f) + } +} + +func ScannerWithDataFilesystem(f fs.FS) ScannerOption { + return func(s ConfigurableScanner) { + s.SetDataFilesystem(f) + } +} + +func ScannerWithRegoOnly(regoOnly bool) ScannerOption { + return func(s ConfigurableScanner) { + s.SetRegoOnly(regoOnly) + } +} + +func ScannerWithRegoErrorLimits(limit int) ScannerOption { + return func(s ConfigurableScanner) { + s.SetRegoErrorLimit(limit) + } +} diff --git a/pkg/scanners/scanner.go b/pkg/scanners/scanner.go new file mode 100644 index 000000000000..aa7b3611340e --- /dev/null +++ b/pkg/scanners/scanner.go @@ -0,0 +1,21 @@ +package scanners + +import ( + "context" + "io/fs" + "os" + + "github.com/aquasecurity/trivy/pkg/scan" +) + +type WriteFileFS interface { + WriteFile(name string, data []byte, perm os.FileMode) error +} + +type FSScanner interface { + // Name provides the human-readable name of the scanner e.g. "CloudFormation" + Name() string + // ScanFS scans the given filesystem for issues, starting at the provided directory. + // Use '.' to scan an entire filesystem. 
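+	// A minimal caller sketch (the wiring below is hypothetical):
+	//
+	//	var s FSScanner = kubernetes.NewScanner()
+	//	results, err := s.ScanFS(context.TODO(), projectFS, ".")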
+ ScanFS(ctx context.Context, fs fs.FS, dir string) (scan.Results, error) +} diff --git a/pkg/scanners/terraform/executor/executor.go b/pkg/scanners/terraform/executor/executor.go new file mode 100644 index 000000000000..16bf3a462420 --- /dev/null +++ b/pkg/scanners/terraform/executor/executor.go @@ -0,0 +1,268 @@ +package executor + +import ( + "runtime" + "sort" + "strings" + "time" + + adapter "github.com/aquasecurity/trivy/internal/adapters/terraform" + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/state" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/trules" +) + +// Executor scans HCL blocks by running all registered trules against them +type Executor struct { + enableIgnores bool + excludedRuleIDs []string + excludeIgnoresIDs []string + includedRuleIDs []string + ignoreCheckErrors bool + workspaceName string + useSingleThread bool + debug debug.Logger + resultsFilters []func(scan.Results) scan.Results + alternativeIDProviderFunc func(string) []string + severityOverrides map[string]string + regoScanner *rego.Scanner + regoOnly bool + stateFuncs []func(*state.State) + frameworks []framework.Framework +} + +type Metrics struct { + Timings struct { + Adaptation time.Duration + RunningChecks time.Duration + } + Counts struct { + Ignored int + Failed int + Passed int + Critical int + High int + Medium int + Low int + } +} + +// New creates a new Executor +func New(options ...Option) *Executor { + s := &Executor{ + ignoreCheckErrors: true, + enableIgnores: true, + regoOnly: false, + } + for _, option := range options { + option(s) + } + return s +} + +// Find element in list +func checkInList(id string, altIDs []string, list []string) bool { + for _, codeIgnored := range list { + if codeIgnored == id { + return true + } + for _, alt := range altIDs { + if alt == codeIgnored { + return true + } + } + } + return false +} + +func (e *Executor) Execute(modules terraform.Modules) (scan.Results, Metrics, error) { + + var metrics Metrics + + e.debug.Log("Adapting modules...") + adaptationTime := time.Now() + infra := adapter.Adapt(modules) + metrics.Timings.Adaptation = time.Since(adaptationTime) + e.debug.Log("Adapted %d module(s) into defsec state data.", len(modules)) + + threads := runtime.NumCPU() + if threads > 1 { + threads-- + } + if e.useSingleThread { + threads = 1 + } + e.debug.Log("Using max routines of %d", threads) + + e.debug.Log("Applying state modifier functions...") + for _, f := range e.stateFuncs { + f(infra) + } + + checksTime := time.Now() + registeredRules := trules.GetRegistered(e.frameworks...) + e.debug.Log("Initialised %d rule(s).", len(registeredRules)) + + pool := NewPool(threads, registeredRules, modules, infra, e.ignoreCheckErrors, e.regoScanner, e.regoOnly) + e.debug.Log("Created pool with %d worker(s) to apply trules.", threads) + results, err := pool.Run() + if err != nil { + return nil, metrics, err + } + metrics.Timings.RunningChecks = time.Since(checksTime) + e.debug.Log("Finished applying trules.") + + if e.enableIgnores { + e.debug.Log("Applying ignores...") + var ignores terraform.Ignores + for _, module := range modules { + ignores = append(ignores, module.Ignores()...) 
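+			// every module contributes the inline ignore comments found in its files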
+		}
+
+		ignores = e.removeExcludedIgnores(ignores)
+
+		for i, result := range results {
+			allIDs := []string{
+				result.Rule().LongID(),
+				result.Rule().AVDID,
+				strings.ToLower(result.Rule().AVDID),
+				result.Rule().ShortCode,
+			}
+			allIDs = append(allIDs, result.Rule().Aliases...)
+
+			if e.alternativeIDProviderFunc != nil {
+				allIDs = append(allIDs, e.alternativeIDProviderFunc(result.Rule().LongID())...)
+			}
+			if ignores.Covering(
+				modules,
+				result.Metadata(),
+				e.workspaceName,
+				allIDs...,
+			) != nil {
+				e.debug.Log("Ignored '%s' at '%s'.", result.Rule().LongID(), result.Range())
+				results[i].OverrideStatus(scan.StatusIgnored)
+			}
+		}
+	} else {
+		e.debug.Log("Ignores are disabled.")
+	}
+
+	results = e.updateSeverity(results)
+	results = e.filterResults(results)
+	metrics.Counts.Ignored = len(results.GetIgnored())
+	metrics.Counts.Passed = len(results.GetPassed())
+	metrics.Counts.Failed = len(results.GetFailed())
+
+	for _, res := range results.GetFailed() {
+		switch res.Severity() {
+		case severity.Critical:
+			metrics.Counts.Critical++
+		case severity.High:
+			metrics.Counts.High++
+		case severity.Medium:
+			metrics.Counts.Medium++
+		case severity.Low:
+			metrics.Counts.Low++
+		}
+	}
+
+	e.sortResults(results)
+	return results, metrics, nil
+}
+
+func (e *Executor) removeExcludedIgnores(ignores terraform.Ignores) terraform.Ignores {
+	var filteredIgnores terraform.Ignores
+	for _, ignore := range ignores {
+		if !contains(e.excludeIgnoresIDs, ignore.RuleID) {
+			filteredIgnores = append(filteredIgnores, ignore)
+		}
+	}
+	return filteredIgnores
+}
+
+func contains(arr []string, s string) bool {
+	for _, elem := range arr {
+		if elem == s {
+			return true
+		}
+	}
+	return false
+}
+
+func (e *Executor) updateSeverity(results []scan.Result) scan.Results {
+	if len(e.severityOverrides) == 0 {
+		return results
+	}
+
+	var overriddenResults scan.Results
+	for _, res := range results {
+		for code, sev := range e.severityOverrides {
+
+			var altMatch bool
+			if e.alternativeIDProviderFunc != nil {
+				alts := e.alternativeIDProviderFunc(res.Rule().LongID())
+				for _, alt := range alts {
+					if alt == code {
+						altMatch = true
+						break
+					}
+				}
+			}
+
+			if altMatch || res.Rule().LongID() == code {
+				overrides := scan.Results([]scan.Result{res})
+				override := res.Rule()
+				override.Severity = severity.Severity(sev)
+				overrides.SetRule(override)
+				res = overrides[0]
+			}
+		}
+		overriddenResults = append(overriddenResults, res)
+	}
+
+	return overriddenResults
+}
+
+func (e *Executor) filterResults(results scan.Results) scan.Results {
+	includedOnly := len(e.includedRuleIDs) > 0
+	for i, result := range results {
+		id := result.Rule().LongID()
+		var altIDs []string
+		if e.alternativeIDProviderFunc != nil {
+			altIDs = e.alternativeIDProviderFunc(id)
+		}
+		if (includedOnly && !checkInList(id, altIDs, e.includedRuleIDs)) || checkInList(id, altIDs, e.excludedRuleIDs) {
+			e.debug.Log("Excluding '%s' at '%s'.", result.Rule().LongID(), result.Range())
+			results[i].OverrideStatus(scan.StatusIgnored)
+		}
+	}
+
+	if len(e.resultsFilters) > 0 && len(results) > 0 {
+		before := len(results.GetIgnored())
+		e.debug.Log("Applying %d results filters to %d results...", len(e.resultsFilters), len(results))
+		for _, filter := range e.resultsFilters {
+			results = filter(results)
+		}
+		e.debug.Log("Filtered out %d results.", len(results.GetIgnored())-before)
+	}
+
+	return results
+}
+
+func (e *Executor) sortResults(results []scan.Result) {
+	sort.Slice(results, func(i, j int) bool {
+		switch {
+		case results[i].Rule().LongID() < 
results[j].Rule().LongID(): + return true + case results[i].Rule().LongID() > results[j].Rule().LongID(): + return false + default: + return results[i].Range().String() > results[j].Range().String() + } + }) +} diff --git a/pkg/scanners/terraform/executor/executor_test.go b/pkg/scanners/terraform/executor/executor_test.go new file mode 100644 index 000000000000..d59a8564c64a --- /dev/null +++ b/pkg/scanners/terraform/executor/executor_test.go @@ -0,0 +1,125 @@ +package executor + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/trules" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +var panicRule = scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc", + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredTypes: []string{"resource"}, + RequiredLabels: []string{"problem"}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + if resourceBlock.GetAttribute("panic").IsTrue() { + panic("This is fine") + } + return + }, + }, + }, +} + +func Test_PanicInCheckNotAllowed(t *testing.T) { + + reg := trules.Register(panicRule) + defer trules.Deregister(reg) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +resource "problem" "this" { + panic = true +} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := New().Execute(modules) + assert.Equal(t, len(results.GetFailed()), 0) +} + +func Test_PanicInCheckAllowed(t *testing.T) { + + reg := trules.Register(panicRule) + defer trules.Deregister(reg) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +resource "problem" "this" { + panic = true +} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + _, _, err = New(OptionStopOnErrors(false)).Execute(modules) + assert.Error(t, err) +} + +func Test_PanicNotInCheckNotIncludePassed(t *testing.T) { + + reg := trules.Register(panicRule) + defer trules.Deregister(reg) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +resource "problem" "this" { + panic = true +} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := New().Execute(modules) + assert.Equal(t, len(results.GetFailed()), 0) +} + +func Test_PanicNotInCheckNotIncludePassedStopOnError(t *testing.T) { + + reg := trules.Register(panicRule) + defer trules.Deregister(reg) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +resource "problem" "this" { + panic = true +} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + 
require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + + _, _, err = New(OptionStopOnErrors(false)).Execute(modules) + assert.Error(t, err) +} diff --git a/pkg/scanners/terraform/executor/option.go b/pkg/scanners/terraform/executor/option.go new file mode 100644 index 000000000000..878fd2ba1681 --- /dev/null +++ b/pkg/scanners/terraform/executor/option.go @@ -0,0 +1,103 @@ +package executor + +import ( + "io" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/state" +) + +type Option func(s *Executor) + +func OptionWithFrameworks(frameworks ...framework.Framework) Option { + return func(s *Executor) { + s.frameworks = frameworks + } +} + +func OptionWithAlternativeIDProvider(f func(string) []string) Option { + return func(s *Executor) { + s.alternativeIDProviderFunc = f + } +} + +func OptionWithResultsFilter(f func(scan.Results) scan.Results) Option { + return func(s *Executor) { + s.resultsFilters = append(s.resultsFilters, f) + } +} + +func OptionWithSeverityOverrides(overrides map[string]string) Option { + return func(s *Executor) { + s.severityOverrides = overrides + } +} + +func OptionWithDebugWriter(w io.Writer) Option { + return func(s *Executor) { + s.debug = debug.New(w, "terraform", "executor") + } +} + +func OptionNoIgnores() Option { + return func(s *Executor) { + s.enableIgnores = false + } +} + +func OptionExcludeRules(ruleIDs []string) Option { + return func(s *Executor) { + s.excludedRuleIDs = ruleIDs + } +} + +func OptionExcludeIgnores(ruleIDs []string) Option { + return func(s *Executor) { + s.excludeIgnoresIDs = ruleIDs + } +} + +func OptionIncludeRules(ruleIDs []string) Option { + return func(s *Executor) { + s.includedRuleIDs = ruleIDs + } +} + +func OptionStopOnErrors(stop bool) Option { + return func(s *Executor) { + s.ignoreCheckErrors = !stop + } +} + +func OptionWithWorkspaceName(workspaceName string) Option { + return func(s *Executor) { + s.workspaceName = workspaceName + } +} + +func OptionWithSingleThread(single bool) Option { + return func(s *Executor) { + s.useSingleThread = single + } +} + +func OptionWithRegoScanner(s *rego.Scanner) Option { + return func(e *Executor) { + e.regoScanner = s + } +} + +func OptionWithStateFunc(f ...func(*state.State)) Option { + return func(e *Executor) { + e.stateFuncs = f + } +} + +func OptionWithRegoOnly(regoOnly bool) Option { + return func(e *Executor) { + e.regoOnly = regoOnly + } +} diff --git a/pkg/scanners/terraform/executor/pool.go b/pkg/scanners/terraform/executor/pool.go new file mode 100644 index 000000000000..f4660feb519e --- /dev/null +++ b/pkg/scanners/terraform/executor/pool.go @@ -0,0 +1,299 @@ +package executor + +import ( + "context" + "fmt" + "os" + "path/filepath" + runtimeDebug "runtime/debug" + "strings" + "sync" + + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/state" + "github.com/aquasecurity/trivy/pkg/terraform" + types "github.com/aquasecurity/trivy/pkg/types/rules" +) + +type Pool struct { + size int + modules terraform.Modules + state *state.State + rules []types.RegisteredRule + ignoreErrors bool + rs *rego.Scanner + regoOnly bool +} + +func NewPool(size int, rules []types.RegisteredRule, modules terraform.Modules, state 
*state.State, ignoreErrors bool, regoScanner *rego.Scanner, regoOnly bool) *Pool { + return &Pool{ + size: size, + rules: rules, + state: state, + modules: modules, + ignoreErrors: ignoreErrors, + rs: regoScanner, + regoOnly: regoOnly, + } +} + +// Run runs the job in the pool - this will only return an error if a job panics +func (p *Pool) Run() (scan.Results, error) { + + outgoing := make(chan Job, p.size*2) + + var workers []*Worker + for i := 0; i < p.size; i++ { + worker := NewWorker(outgoing) + go worker.Start() + workers = append(workers, worker) + } + + if p.rs != nil { + var basePath string + if len(p.modules) > 0 { + basePath = p.modules[0].RootPath() + } + outgoing <- ®oJob{ + state: p.state, + scanner: p.rs, + basePath: basePath, + } + } + + if !p.regoOnly { + for _, r := range p.rules { + if r.GetRule().CustomChecks.Terraform != nil && r.GetRule().CustomChecks.Terraform.Check != nil { + // run local hcl rule + for _, module := range p.modules { + mod := *module + outgoing <- &hclModuleRuleJob{ + module: &mod, + rule: r, + ignoreErrors: p.ignoreErrors, + } + } + } else { + // run defsec rule + outgoing <- &infraRuleJob{ + state: p.state, + rule: r, + ignoreErrors: p.ignoreErrors, + } + } + } + } + + close(outgoing) + + var results scan.Results + for _, worker := range workers { + results = append(results, worker.Wait()...) + if err := worker.Error(); err != nil { + return nil, err + } + } + + return results, nil +} + +type Job interface { + Run() (scan.Results, error) +} + +type infraRuleJob struct { + state *state.State + rule types.RegisteredRule + + ignoreErrors bool +} + +type hclModuleRuleJob struct { + module *terraform.Module + rule types.RegisteredRule + ignoreErrors bool +} + +type regoJob struct { + state *state.State + scanner *rego.Scanner + basePath string +} + +func (h *infraRuleJob) Run() (_ scan.Results, err error) { + if h.ignoreErrors { + defer func() { + if panicErr := recover(); panicErr != nil { + err = fmt.Errorf("%s\n%s", panicErr, string(runtimeDebug.Stack())) + } + }() + } + return h.rule.Evaluate(h.state), err +} + +func (h *hclModuleRuleJob) Run() (results scan.Results, err error) { + if h.ignoreErrors { + defer func() { + if panicErr := recover(); panicErr != nil { + err = fmt.Errorf("%s\n%s", panicErr, string(runtimeDebug.Stack())) + } + }() + } + customCheck := h.rule.GetRule().CustomChecks.Terraform + for _, block := range h.module.GetBlocks() { + if !isCustomCheckRequiredForBlock(customCheck, block) { + continue + } + results = append(results, customCheck.Check(block, h.module)...) 
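+		// results from each matching block accumulate here; SetRule below
+		// stamps them all with this rule's metadata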
+	}
+	results.SetRule(h.rule.GetRule())
+	return
+}
+
+func (h *regoJob) Run() (results scan.Results, err error) {
+	regoResults, err := h.scanner.ScanInput(context.TODO(), rego.Input{
+		Contents: h.state.ToRego(),
+		Path:     h.basePath,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("rego scan error: %w", err)
+	}
+	return regoResults, nil
+}
+
+// nolint
+func isCustomCheckRequiredForBlock(custom *scan.TerraformCustomCheck, b *terraform.Block) bool {
+
+	var found bool
+	for _, requiredType := range custom.RequiredTypes {
+		if b.Type() == requiredType {
+			found = true
+			break
+		}
+	}
+	if !found && len(custom.RequiredTypes) > 0 {
+		return false
+	}
+
+	found = false
+	for _, requiredLabel := range custom.RequiredLabels {
+		if requiredLabel == "*" || (len(b.Labels()) > 0 && wildcardMatch(requiredLabel, b.TypeLabel())) {
+			found = true
+			break
+		}
+	}
+	if !found && len(custom.RequiredLabels) > 0 {
+		return false
+	}
+
+	found = false
+	if len(custom.RequiredSources) > 0 && b.Type() == terraform.TypeModule.Name() {
+		if sourceAttr := b.GetAttribute("source"); sourceAttr.IsNotNil() {
+			values := sourceAttr.AsStringValues().AsStrings()
+			if len(values) == 0 {
+				return false
+			}
+			sourcePath := values[0]
+
+			// resolve module source path to path relative to cwd
+			if strings.HasPrefix(sourcePath, ".") {
+				sourcePath = cleanPathRelativeToWorkingDir(filepath.Dir(b.GetMetadata().Range().GetFilename()), sourcePath)
+			}
+
+			for _, requiredSource := range custom.RequiredSources {
+				if requiredSource == "*" || wildcardMatch(requiredSource, sourcePath) {
+					found = true
+					break
+				}
+			}
+		}
+		return found
+	}
+
+	return true
+}
+
+func cleanPathRelativeToWorkingDir(dir, path string) string {
+	absPath := filepath.Clean(filepath.Join(dir, path))
+	wDir, err := os.Getwd()
+	if err != nil {
+		return absPath
+	}
+	relPath, err := filepath.Rel(wDir, absPath)
+	if err != nil {
+		return absPath
+	}
+	return relPath
+}
+
+func wildcardMatch(pattern string, subject string) bool {
+	if pattern == "" {
+		return false
+	}
+	parts := strings.Split(pattern, "*")
+	var lastIndex int
+	for i, part := range parts {
+		if part == "" {
+			continue
+		}
+		if i == 0 && !strings.HasPrefix(subject, part) {
+			return false
+		}
+		if i == len(parts)-1 && !strings.HasSuffix(subject, part) {
+			return false
+		}
+		// search from the end of the previous match so that repeated
+		// segments cannot match the same region of the subject twice
+		newIndex := strings.Index(subject[lastIndex:], part)
+		if newIndex < 0 {
+			return false
+		}
+		lastIndex += newIndex + len(part)
+	}
+	return true
+}
+
+type Worker struct {
+	incoming <-chan Job
+	mu       sync.Mutex
+	results  scan.Results
+	panic    interface{}
+}
+
+func NewWorker(incoming <-chan Job) *Worker {
+	w := &Worker{
+		incoming: incoming,
+	}
+	w.mu.Lock()
+	return w
+}
+
+func (w *Worker) Start() {
+	defer w.mu.Unlock()
+	w.results = nil
+	for job := range w.incoming {
+		func() {
+			results, err := job.Run()
+			if err != nil {
+				w.panic = err
+			}
+			w.results = append(w.results, results...)
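+			// any job error (including a recovered rule panic) is kept and
+			// surfaced to the pool via Error() once the worker is drained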
+		}()
+	}
+}
+
+func (w *Worker) Wait() scan.Results {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.results
+}
+
+func (w *Worker) Error() error {
+	if w.panic == nil {
+		return nil
+	}
+	return fmt.Errorf("job failed: %s", w.panic)
+}
diff --git a/pkg/scanners/terraform/executor/statistics.go b/pkg/scanners/terraform/executor/statistics.go
new file mode 100644
index 000000000000..96db25c342dc
--- /dev/null
+++ b/pkg/scanners/terraform/executor/statistics.go
@@ -0,0 +1,91 @@
+package executor
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/aquasecurity/trivy/pkg/scan"
+	"github.com/olekukonko/tablewriter"
+)
+
+type StatisticsItem struct {
+	RuleID          string   `json:"rule_id"`
+	RuleDescription string   `json:"rule_description"`
+	Links           []string `json:"links"`
+	Count           int      `json:"count"`
+}
+
+type Statistics []StatisticsItem
+
+type StatisticsResult struct {
+	Result Statistics `json:"results"`
+}
+
+func SortStatistics(statistics Statistics) Statistics {
+	sort.Slice(statistics, func(i, j int) bool {
+		return statistics[i].Count > statistics[j].Count
+	})
+	return statistics
+}
+
+func (statistics Statistics) PrintStatisticsTable(format string, w io.Writer) error {
+	// "lovely" is the default output format
+	if format != "lovely" && format != "markdown" && format != "json" {
+		return fmt.Errorf("--run-statistics supports only the lovely, markdown and json formats")
+	}
+
+	sorted := SortStatistics(statistics)
+
+	if format == "json" {
+		result := StatisticsResult{Result: sorted}
+		val, err := json.MarshalIndent(result, "", " ")
+		if err != nil {
+			return err
+		}
+
+		_, _ = fmt.Fprintln(w, string(val))
+
+		return nil
+	}
+
+	table := tablewriter.NewWriter(w)
+	table.SetHeader([]string{"Rule ID", "Description", "Link", "Count"})
+	table.SetRowLine(true)
+
+	if format == "markdown" {
+		table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
+		table.SetCenterSeparator("|")
+	}
+
+	for _, item := range sorted {
+		table.Append([]string{item.RuleID,
+			item.RuleDescription,
+			strings.Join(item.Links, "\n"),
+			strconv.Itoa(item.Count)})
+	}
+
+	table.Render()
+
+	return nil
+}
+
+func AddStatisticsCount(statistics Statistics, result scan.Result) Statistics {
+	for i, statistic := range statistics {
+		if statistic.RuleID == result.Rule().LongID() {
+			statistics[i].Count++
+			return statistics
+		}
+	}
+	statistics = append(statistics, StatisticsItem{
+		RuleID:          result.Rule().LongID(),
+		RuleDescription: result.Rule().Summary,
+		Links:           result.Rule().Links,
+		Count:           1,
+	})
+
+	return statistics
+}
diff --git a/pkg/scanners/terraform/options.go b/pkg/scanners/terraform/options.go
new file mode 100644
index 000000000000..a14417388dfd
--- /dev/null
+++ b/pkg/scanners/terraform/options.go
@@ -0,0 +1,211 @@
+package terraform
+
+import (
+	"io/fs"
+	"strings"
+
+	"github.com/aquasecurity/trivy/pkg/scan"
+	"github.com/aquasecurity/trivy/pkg/scanners/options"
+	"github.com/aquasecurity/trivy/pkg/severity"
+	"github.com/aquasecurity/trivy/pkg/state"
+
+	"github.com/aquasecurity/trivy/pkg/scanners/terraform/executor"
+	"github.com/aquasecurity/trivy/pkg/scanners/terraform/parser"
+)
+
+type ConfigurableTerraformScanner interface {
+	options.ConfigurableScanner
+	SetForceAllDirs(bool)
+	AddExecutorOptions(options ...executor.Option)
+	AddParserOptions(options ...options.ParserOption)
+}
+
+func ScannerWithTFVarsPaths(paths ...string) options.ScannerOption {
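+	// the generic option only takes effect when the receiving scanner
+	// implements ConfigurableTerraformScanner; for any other scanner it is a
+	// no-op (e.g. ScannerWithTFVarsPaths("prod.tfvars") - hypothetical path)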
+ return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddParserOptions(parser.OptionWithTFVarsPaths(paths...)) + } + } +} + +func ScannerWithAlternativeIDProvider(f func(string) []string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionWithAlternativeIDProvider(f)) + } + } +} + +func ScannerWithSeverityOverrides(overrides map[string]string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionWithSeverityOverrides(overrides)) + } + } +} + +func ScannerWithNoIgnores() options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionNoIgnores()) + } + } +} + +func ScannerWithExcludedRules(ruleIDs []string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionExcludeRules(ruleIDs)) + } + } +} + +func ScannerWithExcludeIgnores(ruleIDs []string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionExcludeIgnores(ruleIDs)) + } + } +} + +func ScannerWithIncludedRules(ruleIDs []string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionIncludeRules(ruleIDs)) + } + } +} + +func ScannerWithStopOnRuleErrors(stop bool) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionStopOnErrors(stop)) + } + } +} + +func ScannerWithWorkspaceName(name string) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddParserOptions(parser.OptionWithWorkspaceName(name)) + tf.AddExecutorOptions(executor.OptionWithWorkspaceName(name)) + } + } +} + +func ScannerWithSingleThread(single bool) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionWithSingleThread(single)) + } + } +} + +func ScannerWithAllDirectories(all bool) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.SetForceAllDirs(all) + } + } +} + +func ScannerWithStopOnHCLError(stop bool) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddParserOptions(parser.OptionStopOnHCLError(stop)) + } + } +} + +func ScannerWithSkipDownloaded(skip bool) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if !skip { + return + } + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionWithResultsFilter(func(results scan.Results) scan.Results { + for i, result := range results { + prefix := result.Range().GetSourcePrefix() + switch { + case prefix == "": + case strings.HasPrefix(prefix, "."): + default: + results[i].OverrideStatus(scan.StatusIgnored) + } + } + return results + })) + } + } +} + +func ScannerWithResultsFilter(f func(scan.Results) scan.Results) 
options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionWithResultsFilter(f)) + } + } +} + +func ScannerWithMinimumSeverity(minimum severity.Severity) options.ScannerOption { + min := severityAsOrdinal(minimum) + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionWithResultsFilter(func(results scan.Results) scan.Results { + for i, result := range results { + if severityAsOrdinal(result.Severity()) < min { + results[i].OverrideStatus(scan.StatusIgnored) + } + } + return results + })) + } + } +} + +func severityAsOrdinal(sev severity.Severity) int { + switch sev { + case severity.Critical: + return 4 + case severity.High: + return 3 + case severity.Medium: + return 2 + case severity.Low: + return 1 + default: + return 0 + } +} + +func ScannerWithStateFunc(f ...func(*state.State)) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddExecutorOptions(executor.OptionWithStateFunc(f...)) + } + } +} + +func ScannerWithDownloadsAllowed(allowed bool) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddParserOptions(parser.OptionWithDownloads(allowed)) + } + } +} + +func ScannerWithSkipCachedModules(b bool) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddParserOptions(parser.OptionWithDownloads(b)) + } + } +} + +func ScannerWithConfigsFileSystem(fsys fs.FS) options.ScannerOption { + return func(s options.ConfigurableScanner) { + if tf, ok := s.(ConfigurableTerraformScanner); ok { + tf.AddParserOptions(parser.OptionWithConfigsFS(fsys)) + } + } +} diff --git a/pkg/scanners/terraform/parser/evaluator.go b/pkg/scanners/terraform/parser/evaluator.go new file mode 100644 index 000000000000..cf99286742b9 --- /dev/null +++ b/pkg/scanners/terraform/parser/evaluator.go @@ -0,0 +1,511 @@ +package parser + +import ( + "context" + "errors" + "fmt" + "io/fs" + "reflect" + "time" + + "golang.org/x/exp/slices" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/terraform" + tfcontext "github.com/aquasecurity/trivy/pkg/terraform/context" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/typeexpr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +const ( + maxContextIterations = 32 +) + +type evaluator struct { + filesystem fs.FS + ctx *tfcontext.Context + blocks terraform.Blocks + inputVars map[string]cty.Value + moduleMetadata *modulesMetadata + projectRootPath string // root of the current scan + modulePath string + moduleName string + ignores terraform.Ignores + parentParser *Parser + debug debug.Logger + allowDownloads bool + skipCachedModules bool +} + +func newEvaluator( + target fs.FS, + parentParser *Parser, + projectRootPath string, + modulePath string, + workingDir string, + moduleName string, + blocks terraform.Blocks, + inputVars map[string]cty.Value, + moduleMetadata *modulesMetadata, + workspace string, + ignores []terraform.Ignore, + logger debug.Logger, + allowDownloads bool, + skipCachedModules bool, +) *evaluator { + + // create a context to store variables and make functions 
available
+ ctx := tfcontext.NewContext(&hcl.EvalContext{
+ Functions: Functions(target, modulePath),
+ }, nil)
+
+ // these variables are made available by terraform to each module
+ ctx.SetByDot(cty.StringVal(workspace), "terraform.workspace")
+ ctx.SetByDot(cty.StringVal(projectRootPath), "path.root")
+ ctx.SetByDot(cty.StringVal(modulePath), "path.module")
+ ctx.SetByDot(cty.StringVal(workingDir), "path.cwd")
+
+ // each block gets its own scope to define variables in
+ for _, b := range blocks {
+ b.OverrideContext(ctx.NewChild())
+ }
+ return &evaluator{
+ filesystem: target,
+ parentParser: parentParser,
+ modulePath: modulePath,
+ moduleName: moduleName,
+ projectRootPath: projectRootPath,
+ ctx: ctx,
+ blocks: blocks,
+ inputVars: inputVars,
+ moduleMetadata: moduleMetadata,
+ ignores: ignores,
+ debug: logger,
+ allowDownloads: allowDownloads,
+ skipCachedModules: skipCachedModules,
+ }
+}
+
+func (e *evaluator) evaluateStep() {
+
+ e.ctx.Set(e.getValuesByBlockType("variable"), "var")
+ e.ctx.Set(e.getValuesByBlockType("locals"), "local")
+ e.ctx.Set(e.getValuesByBlockType("provider"), "provider")
+
+ resources := e.getValuesByBlockType("resource")
+ for key, resource := range resources.AsValueMap() {
+ e.ctx.Set(resource, key)
+ }
+
+ e.ctx.Set(e.getValuesByBlockType("data"), "data")
+ e.ctx.Set(e.getValuesByBlockType("output"), "output")
+}
+
+// exportOutputs is used to export module outputs to the parent module
+func (e *evaluator) exportOutputs() cty.Value {
+ data := make(map[string]cty.Value)
+ for _, block := range e.blocks.OfType("output") {
+ attr := block.GetAttribute("value")
+ if attr.IsNil() {
+ continue
+ }
+ data[block.Label()] = attr.Value()
+ e.debug.Log("Added module output %s=%s.", block.Label(), attr.Value().GoString())
+ }
+ return cty.ObjectVal(data)
+}
+
+func (e *evaluator) EvaluateAll(ctx context.Context) (terraform.Modules, map[string]fs.FS, time.Duration) {
+
+ fsKey := types.CreateFSKey(e.filesystem)
+ e.debug.Log("Filesystem key is '%s'", fsKey)
+
+ fsMap := make(map[string]fs.FS)
+ fsMap[fsKey] = e.filesystem
+
+ var parseDuration time.Duration
+
+ var lastContext hcl.EvalContext
+ start := time.Now()
+ e.debug.Log("Starting module evaluation...")
+ for i := 0; i < maxContextIterations; i++ {
+
+ e.evaluateStep()
+
+ // if ctx matches the last evaluation, we can bail, nothing left to resolve
+ if i > 0 && reflect.DeepEqual(lastContext.Variables, e.ctx.Inner().Variables) {
+ break
+ }
+
+ if len(e.ctx.Inner().Variables) != len(lastContext.Variables) {
+ lastContext.Variables = make(map[string]cty.Value, len(e.ctx.Inner().Variables))
+ }
+ for k, v := range e.ctx.Inner().Variables {
+ lastContext.Variables[k] = v
+ }
+ }
+
+ // expand out resources and modules via count (not a typo, we do this twice so every order is processed)
+ e.blocks = e.expandBlocks(e.blocks)
+ e.blocks = e.expandBlocks(e.blocks)
+
+ parseDuration += time.Since(start)
+
+ e.debug.Log("Starting submodule evaluation...")
+ var modules terraform.Modules
+ for _, definition := range e.loadModules(ctx) {
+ submodules, outputs, err := definition.Parser.EvaluateAll(ctx)
+ if err != nil {
+ e.debug.Log("Failed to evaluate submodule '%s': %s.", definition.Name, err)
+ continue
+ }
+ // export module outputs
+ e.ctx.Set(outputs, "module", definition.Name)
+ modules = append(modules, submodules...)
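+ // A submodule's outputs are now addressable from this module, e.g. a
+ // module named "vpc" exposes module.vpc.<output_name> to expressions here.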
+ for key, val := range definition.Parser.GetFilesystemMap() { + fsMap[key] = val + } + } + e.debug.Log("Finished processing %d submodule(s).", len(modules)) + + e.debug.Log("Starting post-submodule evaluation...") + for i := 0; i < maxContextIterations; i++ { + + e.evaluateStep() + + // if ctx matches the last evaluation, we can bail, nothing left to resolve + if i > 0 && reflect.DeepEqual(lastContext.Variables, e.ctx.Inner().Variables) { + break + } + + if len(e.ctx.Inner().Variables) != len(lastContext.Variables) { + lastContext.Variables = make(map[string]cty.Value, len(e.ctx.Inner().Variables)) + } + for k, v := range e.ctx.Inner().Variables { + lastContext.Variables[k] = v + } + } + + e.debug.Log("Module evaluation complete.") + parseDuration += time.Since(start) + rootModule := terraform.NewModule(e.projectRootPath, e.modulePath, e.blocks, e.ignores, e.isModuleLocal()) + for _, m := range modules { + m.SetParent(rootModule) + } + return append(terraform.Modules{rootModule}, modules...), fsMap, parseDuration +} + +func (e *evaluator) isModuleLocal() bool { + // the module source is empty only for local modules + return e.parentParser.moduleSource == "" +} + +func (e *evaluator) expandBlocks(blocks terraform.Blocks) terraform.Blocks { + return e.expandDynamicBlocks(e.expandBlockForEaches(e.expandBlockCounts(blocks))...) +} + +func (e *evaluator) expandDynamicBlocks(blocks ...*terraform.Block) terraform.Blocks { + for _, b := range blocks { + e.expandDynamicBlock(b) + } + return blocks +} + +func (e *evaluator) expandDynamicBlock(b *terraform.Block) { + for _, sub := range b.AllBlocks() { + e.expandDynamicBlock(sub) + } + for _, sub := range b.AllBlocks().OfType("dynamic") { + blockName := sub.TypeLabel() + expanded := e.expandBlockForEaches(terraform.Blocks{sub}) + for _, ex := range expanded { + if content := ex.GetBlock("content"); content.IsNotNil() { + _ = e.expandDynamicBlocks(content) + b.InjectBlock(content, blockName) + } + } + } +} + +func validateForEachArg(arg cty.Value) error { + if arg.IsNull() { + return errors.New("arg is null") + } + + ty := arg.Type() + + if !arg.IsKnown() || ty.Equals(cty.DynamicPseudoType) || arg.LengthInt() == 0 { + return nil + } + + if !(ty.IsSetType() || ty.IsObjectType() || ty.IsMapType()) { + return fmt.Errorf("%s type is not supported: arg is not set or map", ty.FriendlyName()) + } + + if ty.IsSetType() { + if !ty.ElementType().Equals(cty.String) { + return errors.New("arg is not set of strings") + } + + it := arg.ElementIterator() + for it.Next() { + key, _ := it.Element() + if key.IsNull() { + return errors.New("arg is set of strings, but contains null") + } + + if !key.IsKnown() { + return errors.New("arg is set of strings, but contains unknown value") + } + } + } + + return nil +} + +func isBlockSupportsForEachMetaArgument(block *terraform.Block) bool { + return slices.Contains([]string{"module", "resource", "data", "dynamic"}, block.Type()) +} + +func (e *evaluator) expandBlockForEaches(blocks terraform.Blocks) terraform.Blocks { + var forEachFiltered terraform.Blocks + + for _, block := range blocks { + + forEachAttr := block.GetAttribute("for_each") + + if forEachAttr.IsNil() || block.IsCountExpanded() || !isBlockSupportsForEachMetaArgument(block) { + forEachFiltered = append(forEachFiltered, block) + continue + } + + forEachVal := forEachAttr.Value() + + if err := validateForEachArg(forEachVal); err != nil { + e.debug.Log(`"for_each" argument is invalid: %s`, err.Error()) + continue + } + + clones := make(map[string]cty.Value) + _ = 
forEachAttr.Each(func(key cty.Value, val cty.Value) { + + if !key.Type().Equals(cty.String) { + e.debug.Log( + `Invalid "for-each" argument: map key (or set value) is not a string, but %s`, + key.Type().FriendlyName(), + ) + return + } + + clone := block.Clone(key) + + ctx := clone.Context() + + e.copyVariables(block, clone) + + ctx.SetByDot(key, "each.key") + ctx.SetByDot(val, "each.value") + + ctx.Set(key, block.TypeLabel(), "key") + ctx.Set(val, block.TypeLabel(), "value") + + forEachFiltered = append(forEachFiltered, clone) + + values := clone.Values() + clones[key.AsString()] = values + e.ctx.SetByDot(values, clone.GetMetadata().Reference()) + }) + + metadata := block.GetMetadata() + if len(clones) == 0 { + e.ctx.SetByDot(cty.EmptyTupleVal, metadata.Reference()) + } else { + // The for-each meta-argument creates multiple instances of the resource that are stored in the map. + // So we must replace the old resource with a map with the attributes of the resource. + e.ctx.Replace(cty.ObjectVal(clones), metadata.Reference()) + } + e.debug.Log("Expanded block '%s' into %d clones via 'for_each' attribute.", block.LocalName(), len(clones)) + } + + return forEachFiltered +} + +func isBlockSupportsCountMetaArgument(block *terraform.Block) bool { + return slices.Contains([]string{"module", "resource", "data"}, block.Type()) +} + +func (e *evaluator) expandBlockCounts(blocks terraform.Blocks) terraform.Blocks { + var countFiltered terraform.Blocks + for _, block := range blocks { + countAttr := block.GetAttribute("count") + if countAttr.IsNil() || block.IsCountExpanded() || !isBlockSupportsCountMetaArgument(block) { + countFiltered = append(countFiltered, block) + continue + } + count := 1 + countAttrVal := countAttr.Value() + if !countAttrVal.IsNull() && countAttrVal.IsKnown() && countAttrVal.Type() == cty.Number { + count = int(countAttr.AsNumber()) + } + + var clones []cty.Value + for i := 0; i < count; i++ { + clone := block.Clone(cty.NumberIntVal(int64(i))) + clones = append(clones, clone.Values()) + countFiltered = append(countFiltered, clone) + metadata := clone.GetMetadata() + e.ctx.SetByDot(clone.Values(), metadata.Reference()) + } + metadata := block.GetMetadata() + if len(clones) == 0 { + e.ctx.SetByDot(cty.EmptyTupleVal, metadata.Reference()) + } else { + e.ctx.SetByDot(cty.TupleVal(clones), metadata.Reference()) + } + e.debug.Log("Expanded block '%s' into %d clones via 'count' attribute.", block.LocalName(), len(clones)) + } + + return countFiltered +} + +func (e *evaluator) copyVariables(from, to *terraform.Block) { + + var fromBase string + var fromRel string + var toRel string + + switch from.Type() { + case "resource": + fromBase = from.TypeLabel() + fromRel = from.NameLabel() + toRel = to.NameLabel() + case "module": + fromBase = from.Type() + fromRel = from.TypeLabel() + toRel = to.TypeLabel() + default: + return + } + + srcValue := e.ctx.Root().Get(fromBase, fromRel) + if srcValue == cty.NilVal { + return + } + e.ctx.Root().Set(srcValue, fromBase, toRel) +} + +func (e *evaluator) evaluateVariable(b *terraform.Block) (cty.Value, error) { + if b.Label() == "" { + return cty.NilVal, errors.New("empty label - cannot resolve") + } + + attributes := b.Attributes() + if attributes == nil { + return cty.NilVal, errors.New("cannot resolve variable with no attributes") + } + + var valType cty.Type + var defaults *typeexpr.Defaults + if typeAttr, exists := attributes["type"]; exists { + ty, def, err := typeAttr.DecodeVarType() + if err != nil { + return cty.NilVal, err + } + valType = 
ty
+ defaults = def
+ }
+
+ var val cty.Value
+
+ if override, exists := e.inputVars[b.Label()]; exists {
+ val = override
+ } else if def, exists := attributes["default"]; exists {
+ val = def.NullableValue()
+ } else {
+ return cty.NilVal, errors.New("no value found")
+ }
+
+ if valType != cty.NilType {
+ if defaults != nil {
+ val = defaults.Apply(val)
+ }
+
+ typedVal, err := convert.Convert(val, valType)
+ if err != nil {
+ return cty.NilVal, err
+ }
+ return typedVal, nil
+ }
+
+ return val, nil
+
+}
+
+func (e *evaluator) evaluateOutput(b *terraform.Block) (cty.Value, error) {
+ if b.Label() == "" {
+ return cty.NilVal, errors.New("empty label - cannot resolve")
+ }
+
+ attribute := b.GetAttribute("value")
+ if attribute.IsNil() {
+ return cty.NilVal, errors.New("cannot resolve output with no attributes")
+ }
+ return attribute.Value(), nil
+}
+
+// returns true if all evaluations were successful
+func (e *evaluator) getValuesByBlockType(blockType string) cty.Value {
+
+ blocksOfType := e.blocks.OfType(blockType)
+ values := make(map[string]cty.Value)
+
+ for _, b := range blocksOfType {
+
+ switch b.Type() {
+ case "variable": // variables are special in that their value comes from the "default" attribute
+ val, err := e.evaluateVariable(b)
+ if err != nil {
+ continue
+ }
+ values[b.Label()] = val
+ case "output":
+ val, err := e.evaluateOutput(b)
+ if err != nil {
+ continue
+ }
+ values[b.Label()] = val
+ case "locals", "moved", "import":
+ for key, val := range b.Values().AsValueMap() {
+ values[key] = val
+ }
+ case "provider", "module", "check":
+ if b.Label() == "" {
+ continue
+ }
+ values[b.Label()] = b.Values()
+ case "resource", "data":
+ if len(b.Labels()) < 2 {
+ continue
+ }
+
+ blockMap, ok := values[b.Labels()[0]]
+ if !ok {
+ values[b.Labels()[0]] = cty.ObjectVal(make(map[string]cty.Value))
+ blockMap = values[b.Labels()[0]]
+ }
+
+ valueMap := blockMap.AsValueMap()
+ if valueMap == nil {
+ valueMap = make(map[string]cty.Value)
+ }
+
+ valueMap[b.Labels()[1]] = b.Values()
+ values[b.Labels()[0]] = cty.ObjectVal(valueMap)
+ }
+ }
+
+ return cty.ObjectVal(values)
+}
diff --git a/pkg/scanners/terraform/parser/evaluator_test.go b/pkg/scanners/terraform/parser/evaluator_test.go
new file mode 100644
index 000000000000..8d3ef7b0f6e0
--- /dev/null
+++ b/pkg/scanners/terraform/parser/evaluator_test.go
@@ -0,0 +1,89 @@
+package parser
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/zclconf/go-cty/cty"
+)
+
+func TestValidateForEachArg(t *testing.T) {
+ tests := []struct {
+ name string
+ arg cty.Value
+ expectedError string
+ }{
+ {
+ name: "empty set",
+ arg: cty.SetValEmpty(cty.String),
+ },
+ {
+ name: "set of strings",
+ arg: cty.SetVal([]cty.Value{cty.StringVal("val1"), cty.StringVal("val2")}),
+ },
+ {
+ name: "set of non-strings",
+ arg: cty.SetVal([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}),
+ expectedError: "is not set of strings",
+ },
+ {
+ name: "set with null",
+ arg: cty.SetVal([]cty.Value{cty.StringVal("val1"), cty.NullVal(cty.String)}),
+ expectedError: "arg is set of strings, but contains null",
+ },
+ {
+ name: "set with unknown",
+ arg: cty.SetVal([]cty.Value{cty.StringVal("val1"), cty.UnknownVal(cty.String)}),
+ expectedError: "arg is set of strings, but contains unknown",
+ },
+ {
+ name: "non empty map",
+ arg:
cty.MapVal(map[string]cty.Value{ + "val1": cty.StringVal("..."), + "val2": cty.StringVal("..."), + }), + }, + { + name: "map with unknown", + arg: cty.MapVal(map[string]cty.Value{ + "val1": cty.UnknownVal(cty.String), + "val2": cty.StringVal("..."), + }), + }, + { + name: "empty obj", + arg: cty.EmptyObjectVal, + }, + { + name: "obj with strings", + arg: cty.ObjectVal(map[string]cty.Value{ + "val1": cty.StringVal("..."), + "val2": cty.StringVal("..."), + }), + }, + { + name: "null", + arg: cty.NullVal(cty.Set(cty.String)), + expectedError: "arg is null", + }, + { + name: "unknown", + arg: cty.UnknownVal(cty.Set(cty.String)), + }, + { + name: "dynamic", + arg: cty.DynamicVal, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validateForEachArg(tt.arg) + if tt.expectedError != "" && err != nil { + assert.ErrorContains(t, err, tt.expectedError) + return + } + assert.NoError(t, err) + }) + } +} diff --git a/pkg/scanners/terraform/parser/funcs/cidr.go b/pkg/scanners/terraform/parser/funcs/cidr.go new file mode 100644 index 000000000000..5f1504c0a8a1 --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/cidr.go @@ -0,0 +1,212 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/funcs +package funcs + +import ( + "fmt" + "math/big" + "net" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// CidrHostFunc constructs a function that calculates a full host IP address +// within a given IP network address prefix. +var CidrHostFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "hostnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var hostNum *big.Int + if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { + return cty.UnknownVal(cty.String), err + } + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + ip, err := cidr.HostBig(network, hostNum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ip.String()), nil + }, +}) + +// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given +// in CIDR notation into a subnet mask address. +var CidrNetmaskFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + return cty.StringVal(net.IP(network.Mask).String()), nil + }, +}) + +// CidrSubnetFunc constructs a function that calculates a subnet address within +// a given IP network address prefix. 
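+// For example, cidrsubnet("10.0.0.0/16", 8, 2) yields "10.0.2.0/24": the
+// prefix is extended by 8 bits and the new subnet number is 2.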
+var CidrSubnetFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "newbits", + Type: cty.Number, + }, + { + Name: "netnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var newbits int + if err := gocty.FromCtyValue(args[1], &newbits); err != nil { + return cty.UnknownVal(cty.String), err + } + var netnum *big.Int + if err := gocty.FromCtyValue(args[2], &netnum); err != nil { + return cty.UnknownVal(cty.String), err + } + + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + newNetwork, err := cidr.SubnetBig(network, newbits, netnum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(newNetwork.String()), nil + }, +}) + +// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive +// subnet addresses at once, rather than just a single subnet extension. +var CidrSubnetsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "newbits", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) + } + startPrefixLen, _ := network.Mask.Size() + + prefixLengthArgs := args[1:] + if len(prefixLengthArgs) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + var firstLength int + if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(1, err) + } + firstLength += startPrefixLen + + retVals := make([]cty.Value, len(prefixLengthArgs)) + + current, _ := cidr.PreviousSubnet(network, firstLength) + for i, lengthArg := range prefixLengthArgs { + var length int + if err := gocty.FromCtyValue(lengthArg, &length); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) + } + + if length < 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") + } + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) + if length > 32 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") + } + length += startPrefixLen + if length > (len(network.IP) * 8) { + protocol := "IP" + switch len(network.IP) * 8 { + case 32: + protocol = "IPv4" + case 128: + protocol = "IPv6" + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) + } + + next, rollover := cidr.NextSubnet(current, length) + if rollover || !network.Contains(next.IP) { + // If we run out of suffix bits in the base CIDR prefix then + // NextSubnet will start incrementing the prefix bits, which + // we don't allow because it would then allocate addresses + // outside of the caller's given prefix. 
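+ // For example, requesting four /26 blocks from a 10.0.0.0/25 base
+ // exhausts the space after two allocations and fails here.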
+ return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) + } + + current = next + retVals[i] = cty.StringVal(current.String()) + } + + return cty.ListVal(retVals), nil + }, +}) + +// CidrHost calculates a full host IP address within a given IP network address prefix. +func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { + return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) +} + +// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. +func CidrNetmask(prefix cty.Value) (cty.Value, error) { + return CidrNetmaskFunc.Call([]cty.Value{prefix}) +} + +// CidrSubnet calculates a subnet address within a given IP network address prefix. +func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} + +// CidrSubnets calculates a sequence of consecutive subnet prefixes that may +// be of different prefix lengths under a common base prefix. +func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(newbits)+1) + args[0] = prefix + copy(args[1:], newbits) + return CidrSubnetsFunc.Call(args) +} diff --git a/pkg/scanners/terraform/parser/funcs/collection.go b/pkg/scanners/terraform/parser/funcs/collection.go new file mode 100644 index 000000000000..693b8912f618 --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/collection.go @@ -0,0 +1,711 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/funcs +package funcs + +import ( + "errors" + "fmt" + "math/big" + "sort" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + "github.com/zclconf/go-cty/cty/gocty" +) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + AllowMarked: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + collTy := args[0].Type() + switch { + case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: + return cty.Number, nil + default: + return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + coll := args[0] + collTy := args[0].Type() + marks := coll.Marks() + switch { + case collTy == cty.DynamicPseudoType: + return cty.UnknownVal(cty.Number).WithMarks(marks), nil + case collTy.IsTupleType(): + l := len(collTy.TupleElementTypes()) + return cty.NumberIntVal(int64(l)).WithMarks(marks), nil + case collTy.IsObjectType(): + l := len(collTy.AttributeTypes()) + return cty.NumberIntVal(int64(l)).WithMarks(marks), nil + case collTy == cty.String: + // We'll delegate to the cty stdlib strlen function here, because + // it deals with all of the complexities of tokenizing unicode + // grapheme clusters. 
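+ // For example, length("héllo") is 5: one per grapheme cluster, not
+ // one per UTF-8 byte.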
+ return stdlib.Strlen(coll) + case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): + return coll.Length(), nil + default: + // Should never happen, because of the checks in our Type func above + return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") + } + }, +}) + +// AllTrueFunc constructs a function that returns true if all elements of the +// list are true. If the list is empty, return true. +var AllTrueFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.Bool), + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result := cty.True + for it := args[0].ElementIterator(); it.Next(); { + _, v := it.Element() + if !v.IsKnown() { + return cty.UnknownVal(cty.Bool), nil + } + if v.IsNull() { + return cty.False, nil + } + result = result.And(v) + if result.False() { + return cty.False, nil + } + } + return result, nil + }, +}) + +// AnyTrueFunc constructs a function that returns true if any element of the +// list is true. If the list is empty, return false. +var AnyTrueFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.Bool), + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result := cty.False + var hasUnknown bool + for it := args[0].ElementIterator(); it.Next(); { + _, v := it.Element() + if !v.IsKnown() { + hasUnknown = true + continue + } + if v.IsNull() { + continue + } + result = result.Or(v) + if result.True() { + return cty.True, nil + } + } + if hasUnknown { + return cty.UnknownVal(cty.Bool), nil + } + return result, nil + }, +}) + +// CoalesceFunc constructs a function that takes any number of arguments and +// returns the first one that isn't empty. This function was copied from go-cty +// stdlib and modified so that it returns the first *non-empty* non-null element +// from a sequence, instead of merely the first non-null. +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + // We already know this will succeed because of the checks in our Type func above + argVal, _ = convert.Convert(argVal, retType) + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { + continue + } + + return argVal, nil + } + return cty.NilVal, errors.New("no non-null, non-empty-string arguments") + }, +}) + +// IndexFunc constructs a function that finds the element index for a given value in a list. 
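+// For example, index(["a", "b", "c"], "b") returns 1; a value absent from
+// the list yields the "item not found" error below.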
+var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { + return cty.NilVal, errors.New("argument must be a list or tuple") + } + + if !args[0].IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, errors.New("cannot search an empty list") + } + + for it := args[0].ElementIterator(); it.Next(); { + i, v := it.Element() + eq, err := stdlib.Equal(v, args[1]) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + if eq.True() { + return i, nil + } + } + return cty.NilVal, errors.New("item not found") + + }, +}) + +// LookupFunc constructs a function that performs dynamic lookups of map types. +var LookupFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + AllowMarked: true, + }, + { + Name: "key", + Type: cty.String, + AllowMarked: true, + }, + }, + VarParam: &function.Parameter{ + Name: "default", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + AllowMarked: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 1 || len(args) > 3 { + return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) + } + + ty := args[0].Type() + + switch { + case ty.IsObjectType(): + if !args[1].IsKnown() { + return cty.DynamicPseudoType, nil + } + + keyVal, _ := args[1].Unmark() + key := keyVal.AsString() + if ty.HasAttribute(key) { + return args[0].GetAttr(key).Type(), nil + } else if len(args) == 3 { + // if the key isn't found but a default is provided, + // return the default type + return args[2].Type(), nil + } + return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) + case ty.IsMapType(): + if len(args) == 3 { + _, err = convert.Convert(args[2], ty.ElementType()) + if err != nil { + return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") + } + } + return ty.ElementType(), nil + default: + return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var defaultVal cty.Value + defaultValueSet := false + + if len(args) == 3 { + // intentionally leave default value marked + defaultVal = args[2] + defaultValueSet = true + } + + // keep track of marks from the collection and key + var markses []cty.ValueMarks + + // unmark collection, retain marks to reapply later + mapVar, mapMarks := args[0].Unmark() + markses = append(markses, mapMarks) + + // include marks on the key in the result + keyVal, keyMarks := args[1].Unmark() + if len(keyMarks) > 0 { + markses = append(markses, keyMarks) + } + lookupKey := keyVal.AsString() + + if !mapVar.IsKnown() { + return cty.UnknownVal(retType).WithMarks(markses...), nil + } + + if mapVar.Type().IsObjectType() { + if mapVar.Type().HasAttribute(lookupKey) { + return mapVar.GetAttr(lookupKey).WithMarks(markses...), nil + } + } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { + return 
mapVar.Index(cty.StringVal(lookupKey)).WithMarks(markses...), nil + } + + if defaultValueSet { + defaultVal, err = convert.Convert(defaultVal, retType) + if err != nil { + return cty.NilVal, err + } + return defaultVal.WithMarks(markses...), nil + } + + return cty.UnknownVal(cty.DynamicPseudoType).WithMarks(markses...), fmt.Errorf( + "lookup failed to find '%s'", lookupKey) + }, +}) + +// MatchkeysFunc constructs a function that constructs a new list by taking a +// subset of elements from one list whose indexes match the corresponding +// indexes of values in another list. +var MatchkeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "keys", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "searchset", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + if ty == cty.NilType { + return cty.NilType, errors.New("keys and searchset must be of the same type") + } + + // the return type is based on args[0] (values) + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !args[0].IsKnown() { + return cty.UnknownVal(cty.List(retType.ElementType())), nil + } + + if args[0].LengthInt() != args[1].LengthInt() { + return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") + } + + output := make([]cty.Value, 0) + values := args[0] + + // Keys and searchset must be the same type. + // We can skip error checking here because we've already verified that + // they can be unified in the Type function + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + keys, _ := convert.Convert(args[1], ty) + searchset, _ := convert.Convert(args[2], ty) + + // if searchset is empty, return an empty list. + if searchset.LengthInt() == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, key := it.Element() + for iter := searchset.ElementIterator(); iter.Next(); { + _, search := iter.Element() + eq, err := stdlib.Equal(key, search) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.ListValEmpty(retType.ElementType()), nil + } + if eq.True() { + v := values.Index(cty.NumberIntVal(int64(i))) + output = append(output, v) + break + } + } + i++ + } + + // if we haven't matched any key, then output is an empty list. + if len(output) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(output), nil + }, +}) + +// OneFunc returns either the first element of a one-element list, or null +// if given a zero-element list. +var OneFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty := args[0].Type() + switch { + case ty.IsListType() || ty.IsSetType(): + return ty.ElementType(), nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + switch len(etys) { + case 0: + // No specific type information, so we'll ultimately return + // a null value of unknown type. 
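+ // For example, one([]) is typed as DynamicPseudoType here and then
+ // evaluates to null in Impl below.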
+ return cty.DynamicPseudoType, nil + case 1: + return etys[0], nil + } + } + return cty.NilType, function.NewArgErrorf(0, "must be a list, set, or tuple value with either zero or one elements") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + val := args[0] + ty := val.Type() + + // Our parameter spec above doesn't set AllowUnknown or AllowNull, + // so we can assume our top-level collection is both known and non-null + // in here. + + switch { + case ty.IsListType() || ty.IsSetType(): + lenVal := val.Length() + if !lenVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + var l int + err := gocty.FromCtyValue(lenVal, &l) + if err != nil { + // It would be very strange to get here, because that would + // suggest that the length is either not a number or isn't + // an integer, which would suggest a bug in cty. + return cty.NilVal, fmt.Errorf("invalid collection length: %s", err) + } + switch l { + case 0: + return cty.NullVal(retType), nil + case 1: + var ret cty.Value + // We'll use an iterator here because that works for both lists + // and sets, whereas indexing directly would only work for lists. + // Since we've just checked the length, we should only actually + // run this loop body once. + for it := val.ElementIterator(); it.Next(); { + _, ret = it.Element() + } + return ret, nil + } + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + switch len(etys) { + case 0: + return cty.NullVal(retType), nil + case 1: + ret := val.Index(cty.NumberIntVal(0)) + return ret, nil + } + } + return cty.NilVal, function.NewArgErrorf(0, "must be a list, set, or tuple value with either zero or one elements") + }, +}) + +// SumFunc constructs a function that returns the sum of all +// numbers provided in a list +var SumFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + + if !args[0].CanIterateElements() { + return cty.NilVal, function.NewArgErrorf(0, "cannot sum noniterable") + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, function.NewArgErrorf(0, "cannot sum an empty list") + } + + arg := args[0].AsValueSlice() + ty := args[0].Type() + + if !ty.IsListType() && !ty.IsSetType() && !ty.IsTupleType() { + return cty.NilVal, function.NewArgErrorf(0, fmt.Sprintf("argument must be list, set, or tuple. Received %s", ty.FriendlyName())) + } + + if !args[0].IsWhollyKnown() { + return cty.UnknownVal(cty.Number), nil + } + + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. 
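+ // For example, summing cty.PositiveInfinity and cty.NegativeInfinity
+ // would otherwise escape as a big.ErrNaN panic.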
+ defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + s := arg[0] + if s.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + for _, v := range arg[1:] { + if v.IsNull() { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + v, err = convert.Convert(v, cty.Number) + if err != nil { + return cty.NilVal, function.NewArgErrorf(0, "argument must be list, set, or tuple of number values") + } + s = s.Add(v) + } + + return s, nil + }, +}) + +// TransposeFunc constructs a function that takes a map of lists of strings and +// swaps the keys and values to produce a new map of lists of strings. +var TransposeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.Map(cty.List(cty.String)), + }, + }, + Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputMap := args[0] + if !inputMap.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + outputMap := make(map[string]cty.Value) + tmpMap := make(map[string][]string) + + for it := inputMap.ElementIterator(); it.Next(); { + inKey, inVal := it.Element() + for iter := inVal.ElementIterator(); iter.Next(); { + _, val := iter.Element() + if !val.Type().Equals(cty.String) { + return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") + } + + outKey := val.AsString() + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey.AsString()) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]cty.Value, 0) + for _, v := range outVal { + values = append(values, cty.StringVal(v)) + } + outputMap[outKey] = cty.ListVal(values) + } + + if len(outputMap) == 0 { + return cty.MapValEmpty(cty.List(cty.String)), nil + } + + return cty.MapVal(outputMap), nil + }, +}) + +// ListFunc constructs a function that takes an arbitrary number of arguments +// and returns a list containing those values in the same order. +// +// This function is deprecated in Terraform v0.12 +var ListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + return cty.DynamicPseudoType, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is no longer available; use tolist([ ... ]) syntax to write a literal list") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.DynamicVal, fmt.Errorf("the \"list\" function was deprecated in Terraform v0.12 and is no longer available; use tolist([ ... ]) syntax to write a literal list") + }, +}) + +// MapFunc constructs a function that takes an even number of arguments and +// returns a map whose elements are constructed from consecutive pairs of arguments. 
+//
+// This function is deprecated in Terraform v0.12
+var MapFunc = function.New(&function.Spec{
+ Params: []function.Parameter{},
+ VarParam: &function.Parameter{
+ Name: "vals",
+ Type: cty.DynamicPseudoType,
+ AllowUnknown: true,
+ AllowDynamicType: true,
+ AllowNull: true,
+ },
+ Type: func(args []cty.Value) (ret cty.Type, err error) {
+ return cty.DynamicPseudoType, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is no longer available; use tomap({ ... }) syntax to write a literal map")
+ },
+ Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+ return cty.DynamicVal, fmt.Errorf("the \"map\" function was deprecated in Terraform v0.12 and is no longer available; use tomap({ ... }) syntax to write a literal map")
+ },
+})
+
+// Length returns the number of elements in the given collection or number of
+// Unicode characters in the given string.
+func Length(collection cty.Value) (cty.Value, error) {
+ return LengthFunc.Call([]cty.Value{collection})
+}
+
+// AllTrue returns true if all elements of the list are true. If the list is empty,
+// return true.
+func AllTrue(collection cty.Value) (cty.Value, error) {
+ return AllTrueFunc.Call([]cty.Value{collection})
+}
+
+// AnyTrue returns true if any element of the list is true. If the list is empty,
+// return false.
+func AnyTrue(collection cty.Value) (cty.Value, error) {
+ return AnyTrueFunc.Call([]cty.Value{collection})
+}
+
+// Coalesce takes any number of arguments and returns the first one that isn't empty.
+func Coalesce(args ...cty.Value) (cty.Value, error) {
+ return CoalesceFunc.Call(args)
+}
+
+// Index finds the element index for a given value in a list.
+func Index(list, value cty.Value) (cty.Value, error) {
+ return IndexFunc.Call([]cty.Value{list, value})
+}
+
+// List takes any number of list arguments and returns a list containing
+// those values in the same order. Like ListFunc above, calling it now
+// returns a deprecation error.
+func List(args ...cty.Value) (cty.Value, error) {
+ return ListFunc.Call(args)
+}
+
+// Lookup performs a dynamic lookup into a map.
+// There are two required arguments, map and key, plus an optional default,
+// which is a value to return if no key is found in map.
+func Lookup(args ...cty.Value) (cty.Value, error) {
+ return LookupFunc.Call(args)
+}
+
+// Map takes an even number of arguments and returns a map whose elements are constructed
+// from consecutive pairs of arguments.
+func Map(args ...cty.Value) (cty.Value, error) {
+ return MapFunc.Call(args)
+}
+
+// Matchkeys constructs a new list by taking a subset of elements from one list
+// whose indexes match the corresponding indexes of values in another list.
+func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) {
+ return MatchkeysFunc.Call([]cty.Value{values, keys, searchset})
+}
+
+// One returns either the first element of a one-element list, or null
+// if given a zero-element list.
+func One(list cty.Value) (cty.Value, error) {
+ return OneFunc.Call([]cty.Value{list})
+}
+
+// Sum adds numbers in a list, set, or tuple.
+func Sum(list cty.Value) (cty.Value, error) {
+ return SumFunc.Call([]cty.Value{list})
+}
+
+// Transpose takes a map of lists of strings and swaps the keys and values to
+// produce a new map of lists of strings.
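+// For example, transpose({a = ["1", "2"], b = ["2", "3"]}) produces
+// {"1" = ["a"], "2" = ["a", "b"], "3" = ["b"]}.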
+func Transpose(values cty.Value) (cty.Value, error) { + return TransposeFunc.Call([]cty.Value{values}) +} diff --git a/pkg/scanners/terraform/parser/funcs/conversion.go b/pkg/scanners/terraform/parser/funcs/conversion.go new file mode 100644 index 000000000000..02fb3164a6f0 --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/conversion.go @@ -0,0 +1,223 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/funcs +package funcs + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeToFunc constructs a "to..." function, like "tostring", which converts +// its argument to a specific type or type kind. +// +// The given type wantTy can be any type constraint that cty's "convert" package +// would accept. In particular, this means that you can pass +// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which +// will then cause cty to attempt to unify all of the element types when given +// a tuple. +func MakeToFunc(wantTy cty.Type) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "v", + // We use DynamicPseudoType rather than wantTy here so that + // all values will pass through the function API verbatim and + // we can handle the conversion logic within the Type and + // Impl functions. This allows us to customize the error + // messages to be more appropriate for an explicit type + // conversion, whereas the cty function system produces + // messages aimed at _implicit_ type conversions. + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowMarked: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + gotTy := args[0].Type() + if gotTy.Equals(wantTy) { + return wantTy, nil + } + conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) + if conv == nil { + // We'll use some specialized errors for some trickier cases, + // but most we can handle in a simple way. + switch { + case gotTy.IsTupleType() && wantTy.IsTupleType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + case gotTy.IsObjectType() && wantTy.IsObjectType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + default: + return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + // If a conversion is available then everything is fine. + return wantTy, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // We didn't set "AllowUnknown" on our argument, so it is guaranteed + // to be known here but may still be null. + ret, err := convert.Convert(args[0], retType) + if err != nil { + val, _ := args[0].UnmarkDeep() + // Because we used GetConversionUnsafe above, conversion can + // still potentially fail in here. For example, if the user + // asks to convert the string "a" to bool then we'll + // optimistically permit it during type checking but fail here + // once we note that the value isn't either "true" or "false". + gotTy := val.Type() + switch { + case Contains(args[0], MarkedSensitive): + // Generic message so we won't inadvertently disclose + // information about sensitive values. 
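+ // For example, converting a sensitive string with tobool fails here
+ // without quoting the offending value, unlike the branches below.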
+ return cty.NilVal, function.NewArgErrorf(0, "cannot convert this sensitive %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + + case gotTy == cty.String && wantTy == cty.Bool: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) + case gotTy == cty.String && wantTy == cty.Number: + what := "string" + if !val.IsNull() { + what = strconv.Quote(val.AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) + default: + return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + return ret, nil + }, + }) +} + +var TypeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(TypeString(args[0].Type())).Mark(MarkedRaw), nil + }, +}) + +// Modified copy of TypeString from go-cty: +// https://github.com/zclconf/go-cty-debug/blob/master/ctydebug/type_string.go +// +// TypeString returns a string representation of a given type that is +// reminiscent of Go syntax calling into the cty package but is mainly +// intended for easy human inspection of values in tests, debug output, etc. +// +// The resulting string will include newlines and indentation in order to +// increase the readability of complex structures. It always ends with a +// newline, so you can print this result directly to your output. +func TypeString(ty cty.Type) string { + var b strings.Builder + writeType(ty, &b, 0) + return b.String() +} + +func writeType(ty cty.Type, b *strings.Builder, indent int) { + switch { + case ty == cty.NilType: + b.WriteString("nil") + return + case ty.IsObjectType(): + atys := ty.AttributeTypes() + if len(atys) == 0 { + b.WriteString("object({})") + return + } + attrNames := make([]string, 0, len(atys)) + for name := range atys { + attrNames = append(attrNames, name) + } + sort.Strings(attrNames) + b.WriteString("object({\n") + indent++ + for _, name := range attrNames { + aty := atys[name] + b.WriteString(indentSpaces(indent)) + fmt.Fprintf(b, "%s: ", name) + writeType(aty, b, indent) + b.WriteString(",\n") + } + indent-- + b.WriteString(indentSpaces(indent)) + b.WriteString("})") + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + if len(etys) == 0 { + b.WriteString("tuple([])") + return + } + b.WriteString("tuple([\n") + indent++ + for _, ety := range etys { + b.WriteString(indentSpaces(indent)) + writeType(ety, b, indent) + b.WriteString(",\n") + } + indent-- + b.WriteString(indentSpaces(indent)) + b.WriteString("])") + case ty.IsCollectionType(): + ety := ty.ElementType() + switch { + case ty.IsListType(): + b.WriteString("list(") + case ty.IsMapType(): + b.WriteString("map(") + case ty.IsSetType(): + b.WriteString("set(") + default: + // At the time of writing there are no other collection types, + // but we'll be robust here and just pass through the GoString + // of anything we don't recognize. 
+ b.WriteString(ty.FriendlyName()) + return + } + // Because object and tuple types render split over multiple + // lines, a collection type container around them can end up + // being hard to see when scanning, so we'll generate some extra + // indentation to make a collection of structural type more visually + // distinct from the structural type alone. + complexElem := ety.IsObjectType() || ety.IsTupleType() + if complexElem { + indent++ + b.WriteString("\n") + b.WriteString(indentSpaces(indent)) + } + writeType(ty.ElementType(), b, indent) + if complexElem { + indent-- + b.WriteString(",\n") + b.WriteString(indentSpaces(indent)) + } + b.WriteString(")") + default: + // For any other type we'll just use its GoString and assume it'll + // follow the usual GoString conventions. + b.WriteString(ty.FriendlyName()) + } +} + +func indentSpaces(level int) string { + return strings.Repeat(" ", level) +} + +func Type(input []cty.Value) (cty.Value, error) { + return TypeFunc.Call(input) +} diff --git a/pkg/scanners/terraform/parser/funcs/crypto.go b/pkg/scanners/terraform/parser/funcs/crypto.go new file mode 100644 index 000000000000..424c4c184763 --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/crypto.go @@ -0,0 +1,335 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/funcs +package funcs + +import ( + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + "io/fs" + "strings" + + uuidv5 "github.com/google/uuid" + uuid "github.com/hashicorp/go-uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/ssh" +) + +var UUIDFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result, err := uuid.GenerateUUID() + if err != nil { + return cty.UnknownVal(cty.String), err + } + return cty.StringVal(result), nil + }, +}) + +var UUIDV5Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "namespace", + Type: cty.String, + }, + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var namespace uuidv5.UUID + switch { + case args[0].AsString() == "dns": + namespace = uuidv5.NameSpaceDNS + case args[0].AsString() == "url": + namespace = uuidv5.NameSpaceURL + case args[0].AsString() == "oid": + namespace = uuidv5.NameSpaceOID + case args[0].AsString() == "x500": + namespace = uuidv5.NameSpaceX500 + default: + if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) + } + } + val := args[1].AsString() + return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil + }, +}) + +// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. +var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the +// contents of a file rather than hashing a given literal string. 
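+// For example (illustrative): filebase64sha256("main.tf") would return the
+// Base64-encoded SHA-256 digest of that file's bytes.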
+func MakeFileBase64Sha256Func(target fs.FS, baseDir string) function.Function {
+	return makeFileHashFunction(target, baseDir, sha256.New, base64.StdEncoding.EncodeToString)
+}
+
+// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string
+// and encodes it with Base64.
+var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString)
+
+// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileBase64Sha512Func(target fs.FS, baseDir string) function.Function {
+	return makeFileHashFunction(target, baseDir, sha512.New, base64.StdEncoding.EncodeToString)
+}
+
+// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher.
+var BcryptFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name: "cost",
+		Type: cty.Number,
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		defaultCost := 10
+
+		if len(args) > 1 {
+			var val int
+			if err := gocty.FromCtyValue(args[1], &val); err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+			defaultCost = val
+		}
+
+		if len(args) > 2 {
+			return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments")
+		}
+
+		input := args[0].AsString()
+		out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password: %s", err.Error())
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
+
+// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits.
+var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString)
+
+// MakeFileMd5Func constructs a function that is like Md5Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileMd5Func(target fs.FS, baseDir string) function.Function {
+	return makeFileHashFunction(target, baseDir, md5.New, hex.EncodeToString)
+}
+
+// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext.
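+// The ciphertext must be base64-encoded and the private key PEM-encoded, e.g.
+// (illustrative): rsadecrypt(filebase64("cipher.bin"), file("private.pem")).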
+var RsaDecryptFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "ciphertext",
+			Type: cty.String,
+		},
+		{
+			Name: "privatekey",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		s := args[0].AsString()
+		key := args[1].AsString()
+
+		b, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "failed to decode input %q: cipher text must be base64-encoded", s)
+		}
+
+		rawKey, err := ssh.ParseRawPrivateKey([]byte(key))
+		if err != nil {
+			var errStr string
+			switch e := err.(type) {
+			case asn1.SyntaxError:
+				errStr = strings.ReplaceAll(e.Error(), "asn1: syntax error", "invalid ASN1 data in the given private key")
+			case asn1.StructuralError:
+				errStr = strings.ReplaceAll(e.Error(), "asn1: structure error", "invalid ASN1 data in the given private key")
+			default:
+				errStr = fmt.Sprintf("invalid private key: %s", e)
+			}
+			return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%s", errStr)
+		}
+		privateKey, ok := rawKey.(*rsa.PrivateKey)
+		if !ok {
+			return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "invalid private key type %T", rawKey)
+		}
+
+		out, err := rsa.DecryptPKCS1v15(nil, privateKey, b)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decrypt: %s", err)
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
+
+// Sha1Func constructs a function that computes the SHA1 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString)
+
+// MakeFileSha1Func constructs a function that is like Sha1Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha1Func(target fs.FS, baseDir string) function.Function {
+	return makeFileHashFunction(target, baseDir, sha1.New, hex.EncodeToString)
+}
+
+// Sha256Func constructs a function that computes the SHA256 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString)
+
+// MakeFileSha256Func constructs a function that is like Sha256Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha256Func(target fs.FS, baseDir string) function.Function {
+	return makeFileHashFunction(target, baseDir, sha256.New, hex.EncodeToString)
+}
+
+// Sha512Func constructs a function that computes the SHA512 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString)
+
+// MakeFileSha512Func constructs a function that is like Sha512Func but reads the
+// contents of a file rather than hashing a given literal string.
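+// For example (illustrative): filesha512("main.tf") would return the
+// hex-encoded SHA-512 digest of that file's contents.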
+func MakeFileSha512Func(target fs.FS, baseDir string) function.Function {
+	return makeFileHashFunction(target, baseDir, sha512.New, hex.EncodeToString)
+}
+
+func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "str",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+			s := args[0].AsString()
+			h := hf()
+			h.Write([]byte(s))
+			rv := enc(h.Sum(nil))
+			return cty.StringVal(rv), nil
+		},
+	})
+}
+
+func makeFileHashFunction(target fs.FS, baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "path",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+			path := args[0].AsString()
+			f, err := openFile(target, baseDir, path)
+			if err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+
+			h := hf()
+			_, err = io.Copy(h, f)
+			if err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+			rv := enc(h.Sum(nil))
+			return cty.StringVal(rv), nil
+		},
+	})
+}
+
+// UUID generates and returns a Type-4 UUID in the standard hexadecimal string
+// format.
+//
+// This is not a pure function: it will generate a different result for each
+// call. It must therefore be registered as an impure function in the function
+// table in the "lang" package.
+func UUID() (cty.Value, error) {
+	return UUIDFunc.Call(nil)
+}
+
+// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string
+// format.
+func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) {
+	return UUIDV5Func.Call([]cty.Value{namespace, name})
+}
+
+// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
+// Base64.
+//
+// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha256(str cty.Value) (cty.Value, error) {
+	return Base64Sha256Func.Call([]cty.Value{str})
+}
+
+// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
+// Base64.
+//
+// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha512(str cty.Value) (cty.Value, error) {
+	return Base64Sha512Func.Call([]cty.Value{str})
+}
+
+// Bcrypt computes a hash of the given string using the Blowfish cipher,
+// returning a string in the Modular Crypt Format
+// usually expected in the shadow password file on many Unix systems.
+func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(cost)+1)
+	args[0] = str
+	copy(args[1:], cost)
+	return BcryptFunc.Call(args)
+}
+
+// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
+func Md5(str cty.Value) (cty.Value, error) {
+	return Md5Func.Call([]cty.Value{str})
+}
+
+// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
+// cleartext.
+func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
+	return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
+}
+
+// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
+func Sha1(str cty.Value) (cty.Value, error) {
+	return Sha1Func.Call([]cty.Value{str})
+}
+
+// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
+func Sha256(str cty.Value) (cty.Value, error) {
+	return Sha256Func.Call([]cty.Value{str})
+}
+
+// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
+func Sha512(str cty.Value) (cty.Value, error) {
+	return Sha512Func.Call([]cty.Value{str})
+}
diff --git a/pkg/scanners/terraform/parser/funcs/datetime.go b/pkg/scanners/terraform/parser/funcs/datetime.go
new file mode 100644
index 000000000000..253e59eef018
--- /dev/null
+++ b/pkg/scanners/terraform/parser/funcs/datetime.go
@@ -0,0 +1,71 @@
+// Copied from github.com/hashicorp/terraform/internal/lang/funcs
+package funcs
+
+import (
+	"time"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// TimestampFunc constructs a function that returns a string representation of the current date and time.
+var TimestampFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	Type:   function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
+	},
+})
+
+// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
+var TimeAddFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "timestamp",
+			Type: cty.String,
+		},
+		{
+			Name: "duration",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		ts, err := time.Parse(time.RFC3339, args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		duration, err := time.ParseDuration(args[1].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
+	},
+})
+
+// Timestamp returns a string representation of the current date and time.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
+// returns a string in this format.
+func Timestamp() (cty.Value, error) {
+	return TimestampFunc.Call([]cty.Value{})
+}
+
+// TimeAdd adds a duration to a timestamp, returning a new timestamp.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
+// the timestamp argument to be a string conforming to this syntax.
+//
+// `duration` is a string representation of a time difference, consisting of
+// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
+// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The
+// first number may be negative to indicate a negative duration, like `"-2h5m"`.
+//
+// The result is a string, also in RFC 3339 format, representing the result
+// of adding the given duration to the given timestamp.
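+//
+// For example (illustrative):
+// timeadd("2024-05-01T00:00:00Z", "10m") returns "2024-05-01T00:10:00Z".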
+func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
+	return TimeAddFunc.Call([]cty.Value{timestamp, duration})
+}
diff --git a/pkg/scanners/terraform/parser/funcs/defaults.go b/pkg/scanners/terraform/parser/funcs/defaults.go
new file mode 100644
index 000000000000..4467b81e35ce
--- /dev/null
+++ b/pkg/scanners/terraform/parser/funcs/defaults.go
@@ -0,0 +1,288 @@
+// Copied from github.com/hashicorp/terraform/internal/lang/funcs
+package funcs
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// DefaultsFunc is a helper function for substituting default values in
+// place of null values in a given data structure.
+//
+// See the documentation for function Defaults for more information.
+var DefaultsFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:        "input",
+			Type:        cty.DynamicPseudoType,
+			AllowNull:   true,
+			AllowMarked: true,
+		},
+		{
+			Name:        "defaults",
+			Type:        cty.DynamicPseudoType,
+			AllowMarked: true,
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		// The result type is guaranteed to be the same as the input type,
+		// since all we're doing is replacing null values with non-null
+		// values of the same type.
+		retType := args[0].Type()
+		defaultsType := args[1].Type()
+
+		// This function is aimed at filling in object types or collections
+		// of object types where some of the attributes might be null, so
+		// it doesn't make sense to use a primitive type directly with it.
+		// (The "coalesce" function may be appropriate for such cases.)
+		if retType.IsPrimitiveType() {
+			// This error message is a bit of a fib because we can actually
+			// apply defaults to tuples too, but we expect that to be so
+			// unusual as to not be worth mentioning here, because mentioning
+			// it would require using some less-well-known Terraform language
+			// terminology in the message (tuple types, structural types).
+			return cty.DynamicPseudoType, function.NewArgErrorf(1, "only object types and collections of object types can have defaults applied")
+		}
+
+		defaultsPath := make(cty.Path, 0, 4) // some capacity so that most structures won't reallocate
+		if err := defaultsAssertSuitableFallback(retType, defaultsType, defaultsPath); err != nil {
+			errMsg := err.Error()
+			return cty.DynamicPseudoType, function.NewArgErrorf(1, "%s", errMsg)
+		}
+
+		return retType, nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		if args[0].Type().HasDynamicTypes() {
+			// If the types of our input object aren't known yet for some
+			// reason then we'll defer all of our work here, because our
+			// interpretation of the defaults depends on the types in
+			// the input.
+			return cty.UnknownVal(retType), nil
+		}
+
+		v := defaultsApply(args[0], args[1])
+		return v, nil
+	},
+})
+
+func defaultsApply(input, fallback cty.Value) cty.Value {
+	wantTy := input.Type()
+
+	umInput, inputMarks := input.Unmark()
+	umFb, fallbackMarks := fallback.Unmark()
+
+	// If either value is unknown, we very conservatively return an unknown
+	// value with the union of marks on both input and default.
+	if !(umInput.IsKnown() && umFb.IsKnown()) {
+		return cty.UnknownVal(wantTy).WithMarks(inputMarks).WithMarks(fallbackMarks)
+	}
+
+	// For the rest of this function we're assuming that the given defaults
+	// will always be valid, because we expect to have caught any problems
+	// during the type checking phase. 
Any inconsistencies that reach here are + // therefore considered to be implementation bugs, and so will panic. + + // Our strategy depends on the kind of type we're working with. + switch { + case wantTy.IsPrimitiveType(): + // For leaf primitive values the rule is relatively simple: use the + // input if it's non-null, or fallback if input is null. + if !umInput.IsNull() { + return input + } + v, err := convert.Convert(umFb, wantTy) + if err != nil { + // Should not happen because we checked in defaultsAssertSuitableFallback + panic(err.Error()) + } + return v.WithMarks(fallbackMarks) + + case wantTy.IsObjectType(): + // For structural types, a null input value must be passed through. We + // do not apply default values for missing optional structural values, + // only their contents. + // + // We also pass through the input if the fallback value is null. This + // can happen if the given defaults do not include a value for this + // attribute. + if umInput.IsNull() || umFb.IsNull() { + return input + } + atys := wantTy.AttributeTypes() + ret := map[string]cty.Value{} + for attr, aty := range atys { + inputSub := umInput.GetAttr(attr) + fallbackSub := cty.NullVal(aty) + if umFb.Type().HasAttribute(attr) { + fallbackSub = umFb.GetAttr(attr) + } + ret[attr] = defaultsApply(inputSub.WithMarks(inputMarks), fallbackSub.WithMarks(fallbackMarks)) + } + return cty.ObjectVal(ret) + + case wantTy.IsTupleType(): + // For structural types, a null input value must be passed through. We + // do not apply default values for missing optional structural values, + // only their contents. + // + // We also pass through the input if the fallback value is null. This + // can happen if the given defaults do not include a value for this + // attribute. + if umInput.IsNull() || umFb.IsNull() { + return input + } + + l := wantTy.Length() + ret := make([]cty.Value, l) + for i := 0; i < l; i++ { + inputSub := umInput.Index(cty.NumberIntVal(int64(i))) + fallbackSub := umFb.Index(cty.NumberIntVal(int64(i))) + ret[i] = defaultsApply(inputSub.WithMarks(inputMarks), fallbackSub.WithMarks(fallbackMarks)) + } + return cty.TupleVal(ret) + + case wantTy.IsCollectionType(): + // For collection types we apply a single fallback value to each + // element of the input collection, because in the situations this + // function is intended for we assume that the number of elements + // is the caller's decision, and so we'll just apply the same defaults + // to all of the elements. 
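+		// For example (illustrative): applying the default {a = 0} to the
+		// list [{a = null}, {a = 1}] yields [{a = 0}, {a = 1}].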
+ ety := wantTy.ElementType() + switch { + case wantTy.IsMapType(): + newVals := map[string]cty.Value{} + + if !umInput.IsNull() { + for it := umInput.ElementIterator(); it.Next(); { + k, v := it.Element() + newVals[k.AsString()] = defaultsApply(v.WithMarks(inputMarks), fallback.WithMarks(fallbackMarks)) + } + } + + if len(newVals) == 0 { + return cty.MapValEmpty(ety) + } + return cty.MapVal(newVals) + case wantTy.IsListType(), wantTy.IsSetType(): + var newVals []cty.Value + + if !umInput.IsNull() { + for it := umInput.ElementIterator(); it.Next(); { + _, v := it.Element() + newV := defaultsApply(v.WithMarks(inputMarks), fallback.WithMarks(fallbackMarks)) + newVals = append(newVals, newV) + } + } + + if len(newVals) == 0 { + if wantTy.IsSetType() { + return cty.SetValEmpty(ety) + } + return cty.ListValEmpty(ety) + } + if wantTy.IsSetType() { + return cty.SetVal(newVals) + } + return cty.ListVal(newVals) + default: + // There are no other collection types, so this should not happen + panic(fmt.Sprintf("invalid collection type %#v", wantTy)) + } + default: + // We should've caught anything else in defaultsAssertSuitableFallback, + // so this should not happen. + panic(fmt.Sprintf("invalid target type %#v", wantTy)) + } +} + +func defaultsAssertSuitableFallback(wantTy, fallbackTy cty.Type, fallbackPath cty.Path) error { + // If the type we want is a collection type then we need to keep peeling + // away collection type wrappers until we find the non-collection-type + // that's underneath, which is what the fallback will actually be applied + // to. + inCollection := false + for wantTy.IsCollectionType() { + wantTy = wantTy.ElementType() + inCollection = true + } + + switch { + case wantTy.IsPrimitiveType(): + // The fallback is valid if it's equal to or convertible to what we want. 
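+		// For example (illustrative): a number default of 5 is accepted where
+		// a string is wanted, since cty can convert 5 to "5".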
+		if fallbackTy.Equals(wantTy) {
+			return nil
+		}
+		conversion := convert.GetConversion(fallbackTy, wantTy)
+		if conversion == nil {
+			msg := convert.MismatchMessage(fallbackTy, wantTy)
+			return fallbackPath.NewErrorf("invalid default value for %s: %s", wantTy.FriendlyName(), msg)
+		}
+		return nil
+	case wantTy.IsObjectType():
+		if !fallbackTy.IsObjectType() {
+			if inCollection {
+				return fallbackPath.NewErrorf("the default value for a collection of an object type must itself be an object type, not %s", fallbackTy.FriendlyName())
+			}
+			return fallbackPath.NewErrorf("the default value for an object type must itself be an object type, not %s", fallbackTy.FriendlyName())
+		}
+		for attr, wantAty := range wantTy.AttributeTypes() {
+			if !fallbackTy.HasAttribute(attr) {
+				continue // it's always okay to not have a default value
+			}
+			fallbackSubpath := fallbackPath.GetAttr(attr)
+			fallbackSubTy := fallbackTy.AttributeType(attr)
+			err := defaultsAssertSuitableFallback(wantAty, fallbackSubTy, fallbackSubpath)
+			if err != nil {
+				return err
+			}
+		}
+		for attr := range fallbackTy.AttributeTypes() {
+			if !wantTy.HasAttribute(attr) {
+				fallbackSubpath := fallbackPath.GetAttr(attr)
+				return fallbackSubpath.NewErrorf("target type does not expect an attribute named %q", attr)
+			}
+		}
+		return nil
+	case wantTy.IsTupleType():
+		if !fallbackTy.IsTupleType() {
+			if inCollection {
+				return fallbackPath.NewErrorf("the default value for a collection of a tuple type must itself be a tuple type, not %s", fallbackTy.FriendlyName())
+			}
+			return fallbackPath.NewErrorf("the default value for a tuple type must itself be a tuple type, not %s", fallbackTy.FriendlyName())
+		}
+		wantEtys := wantTy.TupleElementTypes()
+		fallbackEtys := fallbackTy.TupleElementTypes()
+		if got, want := len(fallbackEtys), len(wantEtys); got != want {
+			return fallbackPath.NewErrorf("the default value for a tuple type of length %d must also have length %d, not %d", want, want, got)
+		}
+		for i := 0; i < len(wantEtys); i++ {
+			fallbackSubpath := fallbackPath.IndexInt(i)
+			wantSubTy := wantEtys[i]
+			fallbackSubTy := fallbackEtys[i]
+			err := defaultsAssertSuitableFallback(wantSubTy, fallbackSubTy, fallbackSubpath)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	default:
+		// No other types are supported right now.
+		return fallbackPath.NewErrorf("cannot apply defaults to %s", wantTy.FriendlyName())
+	}
+}
+
+// Defaults is a helper function for substituting default values in
+// place of null values in a given data structure.
+//
+// This is primarily intended for use with a module input variable that
+// has an object type constraint (or a collection thereof) that has optional
+// attributes, so that the receiver of a value that omits those attributes
+// can insert non-null default values in place of the null values caused by
+// omitting the attributes.
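+//
+// For example (illustrative, with a hypothetical variable var.settings):
+// defaults(var.settings, { retention_days = 30 }) fills in retention_days
+// wherever the caller left it null.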
+func Defaults(input, defaults cty.Value) (cty.Value, error) { + return DefaultsFunc.Call([]cty.Value{input, defaults}) +} diff --git a/pkg/scanners/terraform/parser/funcs/encoding.go b/pkg/scanners/terraform/parser/funcs/encoding.go new file mode 100644 index 000000000000..f74a508fb7ed --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/encoding.go @@ -0,0 +1,254 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/funcs +package funcs + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "log" + "net/url" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "golang.org/x/text/encoding/ianaindex" +) + +// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence. +var Base64DecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + s := args[0].AsString() + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s) + } + if !utf8.Valid([]byte(sDec)) { + log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec) + return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8") + } + return cty.StringVal(string(sDec)), nil + }, +}) + +// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence. +var Base64EncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil + }, +}) + +// TextEncodeBase64Func constructs a function that encodes a string to a target encoding and then to a base64 sequence. +var TextEncodeBase64Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "string", + Type: cty.String, + }, + { + Name: "encoding", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + encoding, err := ianaindex.IANA.Encoding(args[1].AsString()) + if err != nil || encoding == nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this Terraform version", args[1].AsString()) + } + + encName, err := ianaindex.IANA.Name(encoding) + if err != nil { // would be weird, since we just read this encoding out + encName = args[1].AsString() + } + + encoder := encoding.NewEncoder() + encodedInput, err := encoder.Bytes([]byte(args[0].AsString())) + if err != nil { + // The string representations of "err" disclose implementation + // details of the underlying library, and the main error we might + // like to return a special message for is unexported as + // golang.org/x/text/encoding/internal.RepertoireError, so this + // is just a generic error message for now. + // + // We also don't include the string itself in the message because + // it can typically be very large, contain newline characters, + // etc. 
+			return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains characters that cannot be represented in %s", encName)
+		}
+
+		return cty.StringVal(base64.StdEncoding.EncodeToString(encodedInput)), nil
+	},
+})
+
+// TextDecodeBase64Func constructs a function that decodes a base64 sequence to a target encoding.
+var TextDecodeBase64Func = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "source",
+			Type: cty.String,
+		},
+		{
+			Name: "encoding",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		encoding, err := ianaindex.IANA.Encoding(args[1].AsString())
+		if err != nil || encoding == nil {
+			return cty.UnknownVal(cty.String), function.NewArgErrorf(1, "%q is not a supported IANA encoding name or alias in this Terraform version", args[1].AsString())
+		}
+
+		encName, err := ianaindex.IANA.Name(encoding)
+		if err != nil { // would be weird, since we just read this encoding out
+			encName = args[1].AsString()
+		}
+
+		s := args[0].AsString()
+		sDec, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			switch err := err.(type) {
+			case base64.CorruptInputError:
+				return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given value has an invalid base64 symbol at offset %d", int(err))
+			default:
+				return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid source string: %T", err)
+			}
+		}
+
+		decoder := encoding.NewDecoder()
+		decoded, err := decoder.Bytes(sDec)
+		if err != nil || bytes.ContainsRune(decoded, '�') {
+			return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "the given string contains symbols that are not defined for %s", encName)
+		}
+
+		return cty.StringVal(string(decoded)), nil
+	},
+})
+
+// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in
+// Base64 encoding.
+var Base64GzipFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		s := args[0].AsString()
+
+		var b bytes.Buffer
+		gz := gzip.NewWriter(&b)
+		if _, err := gz.Write([]byte(s)); err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s)
+		}
+		if err := gz.Flush(); err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s)
+		}
+		if err := gz.Close(); err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s)
+		}
+		return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil
+	},
+})
+
+// URLEncodeFunc constructs a function that applies URL encoding to a given string.
+var URLEncodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(url.QueryEscape(args[0].AsString())), nil
+	},
+})
+
+// Base64Decode decodes a string containing a base64 sequence.
+//
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+//
+// Strings in the Terraform language are sequences of unicode characters rather
+// than bytes, so this function will also interpret the resulting bytes as
If the bytes after Base64 decoding are _not_ valid UTF-8, this function +// produces an error. +func Base64Decode(str cty.Value) (cty.Value, error) { + return Base64DecodeFunc.Call([]cty.Value{str}) +} + +// Base64Encode applies Base64 encoding to a string. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. +func Base64Encode(str cty.Value) (cty.Value, error) { + return Base64EncodeFunc.Call([]cty.Value{str}) +} + +// Base64Gzip compresses a string with gzip and then encodes the result in +// Base64 encoding. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. +func Base64Gzip(str cty.Value) (cty.Value, error) { + return Base64GzipFunc.Call([]cty.Value{str}) +} + +// URLEncode applies URL encoding to a given string. +// +// This function identifies characters in the given string that would have a +// special meaning when included as a query string argument in a URL and +// escapes them using RFC 3986 "percent encoding". +// +// If the given string contains non-ASCII characters, these are first encoded as +// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. +func URLEncode(str cty.Value) (cty.Value, error) { + return URLEncodeFunc.Call([]cty.Value{str}) +} + +// TextEncodeBase64 applies Base64 encoding to a string that was encoded before with a target encoding. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// First step is to apply the target IANA encoding (e.g. UTF-16LE). +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. +func TextEncodeBase64(str, enc cty.Value) (cty.Value, error) { + return TextEncodeBase64Func.Call([]cty.Value{str, enc}) +} + +// TextDecodeBase64 decodes a string containing a base64 sequence whereas a specific encoding of the string is expected. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// the target encoding. 
+func TextDecodeBase64(str, enc cty.Value) (cty.Value, error) { + return TextDecodeBase64Func.Call([]cty.Value{str, enc}) +} diff --git a/pkg/scanners/terraform/parser/funcs/filesystem.go b/pkg/scanners/terraform/parser/funcs/filesystem.go new file mode 100644 index 000000000000..910e17f325c6 --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/filesystem.go @@ -0,0 +1,467 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/funcs +package funcs + +import ( + "encoding/base64" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "unicode/utf8" + + "github.com/bmatcuk/doublestar/v4" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeFileFunc constructs a function that takes a file path and returns the +// contents of that file, either directly as a string (where valid UTF-8 is +// required) or as a string containing base64 bytes. +func MakeFileFunc(target fs.FS, baseDir string, encBase64 bool) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + src, err := readFileBytes(target, baseDir, path) + if err != nil { + err = function.NewArgError(0, err) + return cty.UnknownVal(cty.String), err + } + + switch { + case encBase64: + enc := base64.StdEncoding.EncodeToString(src) + return cty.StringVal(enc), nil + default: + if !utf8.Valid(src) { + return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path) + } + return cty.StringVal(string(src)), nil + } + }, + }) +} + +// MakeTemplateFileFunc constructs a function that takes a file path and +// an arbitrary object of named values and attempts to render the referenced +// file as a template using HCL template syntax. +// +// The template itself may recursively call other functions so a callback +// must be provided to get access to those functions. The template cannot, +// however, access any variables defined in the scope: it is restricted only to +// those variables provided in the second function argument, to ensure that all +// dependencies on other graph nodes can be seen before executing this function. +// +// As a special exception, a referenced template file may not recursively call +// the templatefile function, since that would risk the same file being +// included into itself indefinitely. +func MakeTemplateFileFunc(target fs.FS, baseDir string, funcsCb func() map[string]function.Function) function.Function { + + params := []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + { + Name: "vars", + Type: cty.DynamicPseudoType, + }, + } + + loadTmpl := func(fn string) (hcl.Expression, error) { + // We re-use File here to ensure the same filename interpretation + // as it does, along with its other safety checks. 
+		tmplVal, err := File(target, baseDir, cty.StringVal(fn))
+		if err != nil {
+			return nil, err
+		}
+
+		expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			return nil, diags
+		}
+
+		return expr, nil
+	}
+
+	renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
+		if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
+			return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
+		}
+
+		ctx := &hcl.EvalContext{
+			Variables: varsVal.AsValueMap(),
+		}
+
+		// We require all of the variables to be valid HCL identifiers, because
+		// otherwise there would be no way to refer to them in the template
+		// anyway. Rejecting this here gives better feedback to the user
+		// than a syntax error somewhere in the template itself.
+		for n := range ctx.Variables {
+			if !hclsyntax.ValidIdentifier(n) {
+				// This error message intentionally doesn't describe _all_ of
+				// the different permutations that are technically valid as an
+				// HCL identifier, but rather focuses on what we might
+				// consider to be an "idiomatic" variable name.
+				return cty.DynamicVal, function.NewArgErrorf(1, "invalid template variable name %q: must start with a letter, followed by zero or more letters, digits, and underscores", n)
+			}
+		}
+
+		// We'll pre-check references in the template here so we can give a
+		// more specialized error message than HCL would by default, so it's
+		// clearer that this problem is coming from a templatefile call.
+		for _, traversal := range expr.Variables() {
+			root := traversal.RootName()
+			if _, ok := ctx.Variables[root]; !ok {
+				return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
+			}
+		}
+
+		givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
+		funcs := make(map[string]function.Function, len(givenFuncs))
+		for name, fn := range givenFuncs {
+			if name == "templatefile" {
+				// We stub this one out to prevent recursive calls.
+				funcs[name] = function.New(&function.Spec{
+					Params: params,
+					Type: func(args []cty.Value) (cty.Type, error) {
+						return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
+					},
+				})
+				continue
+			}
+			funcs[name] = fn
+		}
+		ctx.Functions = funcs
+
+		val, diags := expr.Value(ctx)
+		if diags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+		return val, nil
+	}
+
+	return function.New(&function.Spec{
+		Params: params,
+		Type: func(args []cty.Value) (cty.Type, error) {
+			if !(args[0].IsKnown() && args[1].IsKnown()) {
+				return cty.DynamicPseudoType, nil
+			}
+
+			// We'll render our template now to see what result type it produces.
+			// A template consisting only of a single interpolation can potentially
+			// return any type.
+			expr, err := loadTmpl(args[0].AsString())
+			if err != nil {
+				return cty.DynamicPseudoType, err
+			}
+
+			// This is safe even if args[1] contains unknowns because the HCL
+			// template renderer itself knows how to short-circuit those.
+ val, err := renderTmpl(expr, args[1]) + return val.Type(), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + expr, err := loadTmpl(args[0].AsString()) + if err != nil { + return cty.DynamicVal, err + } + return renderTmpl(expr, args[1]) + }, + }) + +} + +// MakeFileExistsFunc constructs a function that takes a path +// and determines whether a file exists at that path +func MakeFileExistsFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + path, err := homedir.Expand(path) + if err != nil { + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return cty.False, nil + } + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path) + } + + if fi.Mode().IsRegular() { + return cty.True, nil + } + + return cty.False, fmt.Errorf("%s is not a regular file, but %q", + path, fi.Mode().String()) + }, + }) +} + +// MakeFileSetFunc constructs a function that takes a glob pattern +// and enumerates a file set from that pattern +func MakeFileSetFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + { + Name: "pattern", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Set(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + pattern := args[1].AsString() + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Join the path to the glob pattern, while ensuring the full + // pattern is canonical for the host OS. The joined path is + // automatically cleaned during this operation. + pattern = filepath.Join(path, pattern) + + matches, err := doublestar.Glob(os.DirFS(path), pattern) + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to glob pattern (%s): %s", pattern, err) + } + + var matchVals []cty.Value + for _, match := range matches { + fi, err := os.Stat(match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to stat (%s): %s", match, err) + } + + if !fi.Mode().IsRegular() { + continue + } + + // Remove the path and file separator from matches. + match, err = filepath.Rel(path, match) + + if err != nil { + return cty.UnknownVal(cty.Set(cty.String)), fmt.Errorf("failed to trim path of match (%s): %s", match, err) + } + + // Replace any remaining file separators with forward slash (/) + // separators for cross-system compatibility. + match = filepath.ToSlash(match) + + matchVals = append(matchVals, cty.StringVal(match)) + } + + if len(matchVals) == 0 { + return cty.SetValEmpty(cty.String), nil + } + + return cty.SetVal(matchVals), nil + }, + }) +} + +// BasenameFunc constructs a function that takes a string containing a filesystem path +// and removes all except the last portion from it. 
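+// For example (illustrative): basename("foo/bar/baz.txt") returns "baz.txt".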
+var BasenameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Base(args[0].AsString())), nil + }, +}) + +// DirnameFunc constructs a function that takes a string containing a filesystem path +// and removes the last portion from it. +var DirnameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Dir(args[0].AsString())), nil + }, +}) + +// AbsPathFunc constructs a function that converts a filesystem path to an absolute path +var AbsPathFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + absPath, err := filepath.Abs(args[0].AsString()) + return cty.StringVal(filepath.ToSlash(absPath)), err + }, +}) + +// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. +var PathExpandFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + + homePath, err := homedir.Expand(args[0].AsString()) + return cty.StringVal(homePath), err + }, +}) + +func openFile(target fs.FS, baseDir, path string) (fs.File, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + if target != nil { + return target.Open(path) + } + return os.Open(path) +} + +func readFileBytes(target fs.FS, baseDir, path string) ([]byte, error) { + f, err := openFile(target, baseDir, path) + if err != nil { + if os.IsNotExist(err) { + // An extra Terraform-specific hint for this situation + return nil, fmt.Errorf("no file exists at %s; this function works only with files that are distributed as part of the configuration source code, so if this file will be created by a resource in this configuration you must instead obtain this result from an attribute of that resource", path) + } + return nil, err + } + + src, err := io.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("failed to read %s", path) + } + + return src, nil +} + +// File reads the contents of the file at the given path. +// +// The file must contain valid UTF-8 bytes, or this function will return an error. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func File(target fs.FS, baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(target, baseDir, false) + return fn.Call([]cty.Value{path}) +} + +// FileExists determines whether a file exists at the given path. 
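+// Only regular files count: a path that exists but is a directory or other
+// special file yields false together with an error.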
+// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileExists(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileExistsFunc(baseDir) + return fn.Call([]cty.Value{path}) +} + +// FileSet enumerates a set of files given a glob pattern +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileSet(baseDir string, path, pattern cty.Value) (cty.Value, error) { + fn := MakeFileSetFunc(baseDir) + return fn.Call([]cty.Value{path, pattern}) +} + +// FileBase64 reads the contents of the file at the given path. +// +// The bytes from the file are encoded as base64 before returning. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileBase64(target fs.FS, baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(target, baseDir, true) + return fn.Call([]cty.Value{path}) +} + +// Basename takes a string containing a filesystem path and removes all except the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Basename(path cty.Value) (cty.Value, error) { + return BasenameFunc.Call([]cty.Value{path}) +} + +// Dirname takes a string containing a filesystem path and removes the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Dirname(path cty.Value) (cty.Value, error) { + return DirnameFunc.Call([]cty.Value{path}) +} + +// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with +// the current user's home directory path. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the leading segment in the path is not `~` then the given path is returned unmodified. +func Pathexpand(path cty.Value) (cty.Value, error) { + return PathExpandFunc.Call([]cty.Value{path}) +} diff --git a/pkg/scanners/terraform/parser/funcs/marks.go b/pkg/scanners/terraform/parser/funcs/marks.go new file mode 100644 index 000000000000..ca368c113c5c --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/marks.go @@ -0,0 +1,44 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/marks +package funcs + +import ( + "github.com/zclconf/go-cty/cty" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +// valueMarks allow creating strictly typed values for use as cty.Value marks. 
+// The variable name for new values should be the title-cased format of the
+// value to better match the GoString output for debugging.
type valueMark string

func (m valueMark) GoString() string {
	return "marks." + cases.Title(language.English).String(string(m))
}

// Has returns true if and only if the cty.Value has the given mark.
func Has(val cty.Value, mark valueMark) bool {
	return val.HasMark(mark)
}

// Contains returns true if the cty.Value or any value within it contains
// the given mark.
func Contains(val cty.Value, mark valueMark) bool {
	ret := false
	_ = cty.Walk(val, func(_ cty.Path, v cty.Value) (bool, error) {
		if v.HasMark(mark) {
			ret = true
			return false, nil
		}
		return true, nil
	})
	return ret
}

// MarkedSensitive indicates that this value is marked as sensitive in the context of
// Terraform.
var MarkedSensitive = valueMark("sensitive")

// MarkedRaw is used to indicate to the repl that the value should be written without
// any formatting.
var MarkedRaw = valueMark("raw")
diff --git a/pkg/scanners/terraform/parser/funcs/number.go b/pkg/scanners/terraform/parser/funcs/number.go
new file mode 100644
index 000000000000..6c8f5dc3b6d9
--- /dev/null
+++ b/pkg/scanners/terraform/parser/funcs/number.go
@@ -0,0 +1,170 @@
+// Copied from github.com/hashicorp/terraform/internal/lang/funcs
+package funcs
+
+import (
+	"math"
+	"math/big"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// LogFunc constructs a function that returns the logarithm of a given number in a given base.
+var LogFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "base",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var base float64
+		if err := gocty.FromCtyValue(args[1], &base); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
+	},
+})
+
+// PowFunc constructs a function that returns a given number raised to a given power.
+var PowFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "power",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var power float64
+		if err := gocty.FromCtyValue(args[1], &power); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Pow(num, power)), nil
+	},
+})
+
+// SignumFunc constructs a function that determines the sign of a number,
+// returning -1, 0, or 1 for negative, zero, and positive values respectively.
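+// For example (illustrative): signum(-5) returns -1, signum(0) returns 0,
+// and signum(7) returns 1.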
+var SignumFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num int
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		switch {
+		case num < 0:
+			return cty.NumberIntVal(-1), nil
+		case num > 0:
+			return cty.NumberIntVal(+1), nil
+		default:
+			return cty.NumberIntVal(0), nil
+		}
+	},
+})
+
+// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base.
+var ParseIntFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "number",
+			Type: cty.DynamicPseudoType,
+		},
+		{
+			Name: "base",
+			Type: cty.Number,
+		},
+	},
+
+	Type: func(args []cty.Value) (cty.Type, error) {
+		if !args[0].Type().Equals(cty.String) {
+			return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName())
+		}
+		return cty.Number, nil
+	},
+
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		var numstr string
+		var base int
+		var err error
+
+		if err = gocty.FromCtyValue(args[0], &numstr); err != nil {
+			return cty.UnknownVal(cty.String), function.NewArgError(0, err)
+		}
+
+		if err = gocty.FromCtyValue(args[1], &base); err != nil {
+			return cty.UnknownVal(cty.Number), function.NewArgError(1, err)
+		}
+
+		if base < 2 || base > 62 {
+			return cty.UnknownVal(cty.Number), function.NewArgErrorf(
+				1,
+				"base must be a whole number between 2 and 62 inclusive",
+			)
+		}
+
+		num, ok := (&big.Int{}).SetString(numstr, base)
+		if !ok {
+			return cty.UnknownVal(cty.Number), function.NewArgErrorf(
+				0,
+				"cannot parse %q as a base %d integer",
+				numstr,
+				base,
+			)
+		}
+
+		parsedNum := cty.NumberVal((&big.Float{}).SetInt(num))
+
+		return parsedNum, nil
+	},
+})
+
+// Log returns the logarithm of a given number in a given base.
+func Log(num, base cty.Value) (cty.Value, error) {
+	return LogFunc.Call([]cty.Value{num, base})
+}
+
+// Pow returns a given number raised to a given power.
+func Pow(num, power cty.Value) (cty.Value, error) {
+	return PowFunc.Call([]cty.Value{num, power})
+}
+
+// Signum determines the sign of a number, returning -1, 0, or 1 to
+// represent the sign.
+func Signum(num cty.Value) (cty.Value, error) {
+	return SignumFunc.Call([]cty.Value{num})
+}
+
+// ParseInt parses a string argument and returns an integer of the specified base.
+func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) {
+	return ParseIntFunc.Call([]cty.Value{num, base})
+}
diff --git a/pkg/scanners/terraform/parser/funcs/sensitive.go b/pkg/scanners/terraform/parser/funcs/sensitive.go
new file mode 100644
index 000000000000..c67ed13e6e7b
--- /dev/null
+++ b/pkg/scanners/terraform/parser/funcs/sensitive.go
@@ -0,0 +1,67 @@
+// Copied from github.com/hashicorp/terraform/internal/lang/funcs
+package funcs
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// SensitiveFunc returns a value identical to its argument except that
+// Terraform will consider it to be sensitive.
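+// For example (illustrative, with a hypothetical variable var.password):
+// sensitive(var.password) returns the same value with the sensitive mark
+// applied, so downstream rendering treats it as sensitive.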
+var SensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + val, _ := args[0].Unmark() + return val.Mark(MarkedSensitive), nil + }, +}) + +// NonsensitiveFunc takes a sensitive value and returns the same value without +// the sensitive marking, effectively exposing the value. +var NonsensitiveFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowNull: true, + AllowMarked: true, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + // This function only affects the value's marks, so the result + // type is always the same as the argument type. + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if args[0].IsKnown() && !args[0].HasMark(MarkedSensitive) { + return cty.DynamicVal, function.NewArgErrorf(0, "the given value is not sensitive, so this call is redundant") + } + v, m := args[0].Unmark() + delete(m, MarkedSensitive) // remove the sensitive marking + return v.WithMarks(m), nil + }, +}) + +func Sensitive(v cty.Value) (cty.Value, error) { + return SensitiveFunc.Call([]cty.Value{v}) +} + +func Nonsensitive(v cty.Value) (cty.Value, error) { + return NonsensitiveFunc.Call([]cty.Value{v}) +} diff --git a/pkg/scanners/terraform/parser/funcs/string.go b/pkg/scanners/terraform/parser/funcs/string.go new file mode 100644 index 000000000000..49696784e872 --- /dev/null +++ b/pkg/scanners/terraform/parser/funcs/string.go @@ -0,0 +1,54 @@ +// Copied from github.com/hashicorp/terraform/internal/lang/funcs +package funcs + +import ( + "regexp" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// ReplaceFunc constructs a function that searches a given string for another +// given substring, and replaces each occurrence with a given replacement string. +var ReplaceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + { + Name: "substr", + Type: cty.String, + }, + { + Name: "replace", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + str := args[0].AsString() + substr := args[1].AsString() + replace := args[2].AsString() + + // We search/replace using a regexp if the string is surrounded + // in forward slashes. + if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' { + re, err := regexp.Compile(substr[1 : len(substr)-1]) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(re.ReplaceAllString(str, replace)), nil + } + + return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil + }, +}) + +// Replace searches a given string for another given substring, +// and replaces all occurrences with a given replacement string. 
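+//
+// A minimal usage sketch (illustrative only, not part of the upstream copy):
+// wrapping the substring in forward slashes switches to regexp matching.
+//
+//	plain, _ := Replace(cty.StringVal("a-b-c"), cty.StringVal("-"), cty.StringVal("+"))
+//	// plain == cty.StringVal("a+b+c")
+//	re, _ := Replace(cty.StringVal("a1b2"), cty.StringVal("/[0-9]/"), cty.StringVal(""))
+//	// re == cty.StringVal("ab")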
+func Replace(str, substr, replace cty.Value) (cty.Value, error) {
+	return ReplaceFunc.Call([]cty.Value{str, substr, replace})
+}
diff --git a/pkg/scanners/terraform/parser/functions.go b/pkg/scanners/terraform/parser/functions.go
new file mode 100644
index 000000000000..72cb74e0246b
--- /dev/null
+++ b/pkg/scanners/terraform/parser/functions.go
@@ -0,0 +1,123 @@
+package parser
+
+import (
+	"io/fs"
+
+	"github.com/aquasecurity/trivy/pkg/scanners/terraform/parser/funcs"
+	"github.com/hashicorp/hcl/v2/ext/tryfunc"
+	ctyyaml "github.com/zclconf/go-cty-yaml"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/function/stdlib"
+)
+
+// Functions returns the set of functions that should be used when evaluating
+// expressions in the receiving scope.
+func Functions(target fs.FS, baseDir string) map[string]function.Function {
+	return map[string]function.Function{
+		"abs":              stdlib.AbsoluteFunc,
+		"abspath":          funcs.AbsPathFunc,
+		"basename":         funcs.BasenameFunc,
+		"base64decode":     funcs.Base64DecodeFunc,
+		"base64encode":     funcs.Base64EncodeFunc,
+		"base64gzip":       funcs.Base64GzipFunc,
+		"base64sha256":     funcs.Base64Sha256Func,
+		"base64sha512":     funcs.Base64Sha512Func,
+		"bcrypt":           funcs.BcryptFunc,
+		"can":              tryfunc.CanFunc,
+		"ceil":             stdlib.CeilFunc,
+		"chomp":            stdlib.ChompFunc,
+		"cidrhost":         funcs.CidrHostFunc,
+		"cidrnetmask":      funcs.CidrNetmaskFunc,
+		"cidrsubnet":       funcs.CidrSubnetFunc,
+		"cidrsubnets":      funcs.CidrSubnetsFunc,
+		"coalesce":         funcs.CoalesceFunc,
+		"coalescelist":     stdlib.CoalesceListFunc,
+		"compact":          stdlib.CompactFunc,
+		"concat":           stdlib.ConcatFunc,
+		"contains":         stdlib.ContainsFunc,
+		"csvdecode":        stdlib.CSVDecodeFunc,
+		"dirname":          funcs.DirnameFunc,
+		"distinct":         stdlib.DistinctFunc,
+		"element":          stdlib.ElementFunc,
+		"chunklist":        stdlib.ChunklistFunc,
+		"file":             funcs.MakeFileFunc(target, baseDir, false),
+		"fileexists":       funcs.MakeFileExistsFunc(baseDir),
+		"fileset":          funcs.MakeFileSetFunc(baseDir),
+		"filebase64":       funcs.MakeFileFunc(target, baseDir, true),
+		"filebase64sha256": funcs.MakeFileBase64Sha256Func(target, baseDir),
+		"filebase64sha512": funcs.MakeFileBase64Sha512Func(target, baseDir),
+		"filemd5":          funcs.MakeFileMd5Func(target, baseDir),
+		"filesha1":         funcs.MakeFileSha1Func(target, baseDir),
+		"filesha256":       funcs.MakeFileSha256Func(target, baseDir),
+		"filesha512":       funcs.MakeFileSha512Func(target, baseDir),
+		"flatten":          stdlib.FlattenFunc,
+		"floor":            stdlib.FloorFunc,
+		"format":           stdlib.FormatFunc,
+		"formatdate":       stdlib.FormatDateFunc,
+		"formatlist":       stdlib.FormatListFunc,
+		"indent":           stdlib.IndentFunc,
+		"index":            funcs.IndexFunc, // stdlib.IndexFunc is not compatible
+		"join":             stdlib.JoinFunc,
+		"jsondecode":       stdlib.JSONDecodeFunc,
+		"jsonencode":       stdlib.JSONEncodeFunc,
+		"keys":             stdlib.KeysFunc,
+		"length":           funcs.LengthFunc,
+		"list":             funcs.ListFunc,
+		"log":              stdlib.LogFunc,
+		"lookup":           funcs.LookupFunc,
+		"lower":            stdlib.LowerFunc,
+		"map":              funcs.MapFunc,
+		"matchkeys":        funcs.MatchkeysFunc,
+		"max":              stdlib.MaxFunc,
+		"md5":              funcs.Md5Func,
+		"merge":            stdlib.MergeFunc,
+		"min":              stdlib.MinFunc,
+		"parseint":         stdlib.ParseIntFunc,
+		"pathexpand":       funcs.PathExpandFunc,
+		"pow":              stdlib.PowFunc,
+		"range":            stdlib.RangeFunc,
+		"regex":            stdlib.RegexFunc,
+		"regexall":         stdlib.RegexAllFunc,
+		"replace":          funcs.ReplaceFunc,
+		"reverse":          stdlib.ReverseListFunc,
+		"rsadecrypt":       funcs.RsaDecryptFunc,
+		"setintersection":  stdlib.SetIntersectionFunc,
+		"setproduct":
stdlib.SetProductFunc, + "setsubtract": stdlib.SetSubtractFunc, + "setunion": stdlib.SetUnionFunc, + "sha1": funcs.Sha1Func, + "sha256": funcs.Sha256Func, + "sha512": funcs.Sha512Func, + "signum": stdlib.SignumFunc, + "slice": stdlib.SliceFunc, + "sort": stdlib.SortFunc, + "split": stdlib.SplitFunc, + "strrev": stdlib.ReverseFunc, + "substr": stdlib.SubstrFunc, + "timestamp": funcs.TimestampFunc, + "timeadd": stdlib.TimeAddFunc, + "title": stdlib.TitleFunc, + "tostring": funcs.MakeToFunc(cty.String), + "tonumber": funcs.MakeToFunc(cty.Number), + "tobool": funcs.MakeToFunc(cty.Bool), + "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), + "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), + "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), + "transpose": funcs.TransposeFunc, + "trim": stdlib.TrimFunc, + "trimprefix": stdlib.TrimPrefixFunc, + "trimspace": stdlib.TrimSpaceFunc, + "trimsuffix": stdlib.TrimSuffixFunc, + "try": tryfunc.TryFunc, + "upper": stdlib.UpperFunc, + "urlencode": funcs.URLEncodeFunc, + "uuid": funcs.UUIDFunc, + "uuidv5": funcs.UUIDV5Func, + "values": stdlib.ValuesFunc, + "yamldecode": ctyyaml.YAMLDecodeFunc, + "yamlencode": ctyyaml.YAMLEncodeFunc, + "zipmap": stdlib.ZipmapFunc, + } + +} diff --git a/pkg/scanners/terraform/parser/load_blocks.go b/pkg/scanners/terraform/parser/load_blocks.go new file mode 100644 index 000000000000..04454825ad58 --- /dev/null +++ b/pkg/scanners/terraform/parser/load_blocks.go @@ -0,0 +1,130 @@ +package parser + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" + "github.com/hashicorp/hcl/v2" +) + +func loadBlocksFromFile(file sourceFile, moduleSource string) (hcl.Blocks, []terraform.Ignore, error) { + ignores := parseIgnores(file.file.Bytes, file.path, moduleSource) + contents, diagnostics := file.file.Body.Content(terraform.Schema) + if diagnostics != nil && diagnostics.HasErrors() { + return nil, nil, diagnostics + } + if contents == nil { + return nil, nil, nil + } + return contents.Blocks, ignores, nil +} + +func parseIgnores(data []byte, path string, moduleSource string) []terraform.Ignore { + var ignores []terraform.Ignore + for i, line := range strings.Split(string(data), "\n") { + line = strings.TrimSpace(line) + lineIgnores := parseIgnoresFromLine(line) + for _, lineIgnore := range lineIgnores { + lineIgnore.Range = types.NewRange(path, i+1, i+1, moduleSource, nil) + ignores = append(ignores, lineIgnore) + } + } + for a, ignoreA := range ignores { + if !ignoreA.Block { + continue + } + for _, ignoreB := range ignores { + if !ignoreB.Block { + continue + } + if ignoreA.Range.GetStartLine()+1 == ignoreB.Range.GetStartLine() { + ignoreA.Range = ignoreB.Range + ignores[a] = ignoreA + } + } + } + return ignores + +} + +var commentPattern = regexp.MustCompile(`^\s*([/]+|/\*|#)+\s*tfsec:`) +var trivyCommentPattern = regexp.MustCompile(`^\s*([/]+|/\*|#)+\s*trivy:`) + +func parseIgnoresFromLine(input string) []terraform.Ignore { + + var ignores []terraform.Ignore + + input = commentPattern.ReplaceAllString(input, "tfsec:") + input = trivyCommentPattern.ReplaceAllString(input, "trivy:") + + bits := strings.Split(strings.TrimSpace(input), " ") + for i, bit := range bits { + bit := strings.TrimSpace(bit) + bit = strings.TrimPrefix(bit, "#") + bit = strings.TrimPrefix(bit, "//") + bit = strings.TrimPrefix(bit, "/*") + + if strings.HasPrefix(bit, "tfsec:") || strings.HasPrefix(bit, "trivy:") { + ignore, 
err := parseIgnoreFromComment(bit) + if err != nil { + continue + } + ignore.Block = i == 0 + ignores = append(ignores, *ignore) + } + } + + return ignores +} + +func parseIgnoreFromComment(input string) (*terraform.Ignore, error) { + var ignore terraform.Ignore + if !strings.HasPrefix(input, "tfsec:") && !strings.HasPrefix(input, "trivy:") { + return nil, fmt.Errorf("invalid ignore") + } + + input = input[6:] + + segments := strings.Split(input, ":") + + for i := 0; i < len(segments)-1; i += 2 { + key := segments[i] + val := segments[i+1] + switch key { + case "ignore": + ignore.RuleID, ignore.Params = parseIDWithParams(val) + case "exp": + parsed, err := time.Parse("2006-01-02", val) + if err != nil { + return &ignore, err + } + ignore.Expiry = &parsed + case "ws": + ignore.Workspace = val + } + } + + return &ignore, nil +} + +func parseIDWithParams(input string) (string, map[string]string) { + params := make(map[string]string) + if !strings.Contains(input, "[") { + return input, params + } + parts := strings.Split(input, "[") + id := parts[0] + paramStr := strings.TrimSuffix(parts[1], "]") + for _, pair := range strings.Split(paramStr, ",") { + parts := strings.Split(pair, "=") + if len(parts) != 2 { + continue + } + params[parts[0]] = parts[1] + } + return id, params +} diff --git a/pkg/scanners/terraform/parser/load_blocks_test.go b/pkg/scanners/terraform/parser/load_blocks_test.go new file mode 100644 index 000000000000..e32d19a75044 --- /dev/null +++ b/pkg/scanners/terraform/parser/load_blocks_test.go @@ -0,0 +1,13 @@ +package parser + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParsingDoubleComment(t *testing.T) { + ignores := parseIgnoresFromLine("## tfsec:ignore:abc") + assert.Equal(t, 1, len(ignores)) + assert.Truef(t, ignores[0].Block, "Expected ignore to be a block") +} diff --git a/pkg/scanners/terraform/parser/load_module.go b/pkg/scanners/terraform/parser/load_module.go new file mode 100644 index 000000000000..c46c9247b146 --- /dev/null +++ b/pkg/scanners/terraform/parser/load_module.go @@ -0,0 +1,183 @@ +package parser + +import ( + "context" + "errors" + "fmt" + "io/fs" + "path/filepath" + "strings" + + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser/resolvers" + "github.com/aquasecurity/trivy/pkg/terraform" + + "github.com/zclconf/go-cty/cty" +) + +type moduleLoadError struct { + source string + err error +} + +func (m *moduleLoadError) Error() string { + return fmt.Sprintf("failed to load module '%s': %s", m.source, m.err) +} + +type ModuleDefinition struct { + Name string + Path string + FileSystem fs.FS + Definition *terraform.Block + Parser *Parser + External bool +} + +// LoadModules reads all module blocks and loads the underlying modules, adding blocks to e.moduleBlocks +func (e *evaluator) loadModules(ctx context.Context) []*ModuleDefinition { + + blocks := e.blocks + + var moduleDefinitions []*ModuleDefinition + + expanded := e.expandBlocks(blocks.OfType("module")) + + var loadErrors []*moduleLoadError + + for _, moduleBlock := range expanded { + if moduleBlock.Label() == "" { + continue + } + moduleDefinition, err := e.loadModule(ctx, moduleBlock) + if err != nil { + var loadErr *moduleLoadError + if errors.As(err, &loadErr) { + var found bool + for _, fm := range loadErrors { + if fm.source == loadErr.source { + found = true + break + } + } + if !found { + loadErrors = append(loadErrors, loadErr) + } + continue + } + e.debug.Log("Failed to load module '%s'. 
Maybe try 'terraform init'?", err) + continue + } + e.debug.Log("Loaded module '%s' from '%s'.", moduleDefinition.Name, moduleDefinition.Path) + moduleDefinitions = append(moduleDefinitions, moduleDefinition) + } + + return moduleDefinitions +} + +// takes in a module "x" {} block and loads resources etc. into e.moduleBlocks - additionally returns variables to add to ["module.x.*"] variables +func (e *evaluator) loadModule(ctx context.Context, b *terraform.Block) (*ModuleDefinition, error) { + + metadata := b.GetMetadata() + + if b.Label() == "" { + return nil, fmt.Errorf("module without label at %s", metadata.Range()) + } + + var source string + attrs := b.Attributes() + for _, attr := range attrs { + if attr.Name() == "source" { + sourceVal := attr.Value() + if sourceVal.Type() == cty.String { + source = sourceVal.AsString() + } + } + } + if source == "" { + return nil, fmt.Errorf("could not read module source attribute at %s", metadata.Range().String()) + } + + if def, err := e.loadModuleFromTerraformCache(ctx, b, source); err == nil { + e.debug.Log("found module '%s' in .terraform/modules", source) + return def, nil + } + + // we don't have the module installed via 'terraform init' so we need to grab it... + return e.loadExternalModule(ctx, b, source) +} + +func (e *evaluator) loadModuleFromTerraformCache(ctx context.Context, b *terraform.Block, source string) (*ModuleDefinition, error) { + var modulePath string + if e.moduleMetadata != nil { + // if we have module metadata we can parse all the modules as they'll be cached locally! + name := b.ModuleName() + for _, module := range e.moduleMetadata.Modules { + if module.Key == name { + modulePath = filepath.Clean(filepath.Join(e.projectRootPath, module.Dir)) + break + } + } + } + if modulePath == "" { + return nil, fmt.Errorf("failed to load module from .terraform/modules") + } + if strings.HasPrefix(source, ".") { + source = "" + } + + if prefix, relativeDir, ok := strings.Cut(source, "//"); ok && !strings.HasSuffix(prefix, ":") && strings.Count(prefix, "/") == 2 { + if !strings.HasSuffix(modulePath, relativeDir) { + modulePath = fmt.Sprintf("%s/%s", modulePath, relativeDir) + } + } + + e.debug.Log("Module '%s' resolved to path '%s' in filesystem '%s' using modules.json", b.FullName(), modulePath, e.filesystem) + moduleParser := e.parentParser.newModuleParser(e.filesystem, source, modulePath, b.Label(), b) + if err := moduleParser.ParseFS(ctx, modulePath); err != nil { + return nil, err + } + return &ModuleDefinition{ + Name: b.Label(), + Path: modulePath, + Definition: b, + Parser: moduleParser, + FileSystem: e.filesystem, + }, nil +} + +func (e *evaluator) loadExternalModule(ctx context.Context, b *terraform.Block, source string) (*ModuleDefinition, error) { + + e.debug.Log("locating non-initialised module '%s'...", source) + + version := b.GetAttribute("version").AsStringValueOrDefault("", b).Value() + opt := resolvers.Options{ + Source: source, + OriginalSource: source, + Version: version, + OriginalVersion: version, + WorkingDir: e.projectRootPath, + Name: b.FullName(), + ModulePath: e.modulePath, + DebugLogger: e.debug.Extend("resolver"), + AllowDownloads: e.allowDownloads, + SkipCache: e.skipCachedModules, + } + + filesystem, prefix, path, err := resolveModule(ctx, e.filesystem, opt) + if err != nil { + return nil, err + } + prefix = filepath.Join(e.parentParser.moduleSource, prefix) + e.debug.Log("Module '%s' resolved to path '%s' in filesystem '%s' with prefix '%s'", b.FullName(), path, filesystem, prefix) + moduleParser := 
e.parentParser.newModuleParser(filesystem, prefix, path, b.Label(), b)
+	if err := moduleParser.ParseFS(ctx, path); err != nil {
+		return nil, err
+	}
+	return &ModuleDefinition{
+		Name:       b.Label(),
+		Path:       path,
+		Definition: b,
+		Parser:     moduleParser,
+		FileSystem: filesystem,
+		External:   true,
+	}, nil
+}
diff --git a/pkg/scanners/terraform/parser/load_module_metadata.go b/pkg/scanners/terraform/parser/load_module_metadata.go
new file mode 100644
index 000000000000..9d06402a76fc
--- /dev/null
+++ b/pkg/scanners/terraform/parser/load_module_metadata.go
@@ -0,0 +1,33 @@
+package parser
+
+import (
+	"encoding/json"
+	"io/fs"
+	"path/filepath"
+)
+
+type modulesMetadata struct {
+	Modules []struct {
+		Key     string `json:"Key"`
+		Source  string `json:"Source"`
+		Version string `json:"Version"`
+		Dir     string `json:"Dir"`
+	} `json:"Modules"`
+}
+
+func loadModuleMetadata(target fs.FS, fullPath string) (*modulesMetadata, string, error) {
+	metadataPath := filepath.Join(fullPath, ".terraform/modules/modules.json")
+
+	f, err := target.Open(metadataPath)
+	if err != nil {
+		return nil, metadataPath, err
+	}
+	defer func() { _ = f.Close() }()
+
+	var metadata modulesMetadata
+	if err := json.NewDecoder(f).Decode(&metadata); err != nil {
+		return nil, metadataPath, err
+	}
+
+	return &metadata, metadataPath, nil
+}
diff --git a/pkg/scanners/terraform/parser/load_vars.go b/pkg/scanners/terraform/parser/load_vars.go
new file mode 100644
index 000000000000..58f67ce93910
--- /dev/null
+++ b/pkg/scanners/terraform/parser/load_vars.go
@@ -0,0 +1,83 @@
+package parser
+
+import (
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hclsyntax"
+	hcljson "github.com/hashicorp/hcl/v2/json"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func loadTFVars(srcFS fs.FS, filenames []string) (map[string]cty.Value, error) {
+	combinedVars := make(map[string]cty.Value)
+
+	for _, env := range os.Environ() {
+		// Split on the first '=' only, so values that themselves contain '='
+		// (e.g. TF_VAR_x=a=b) are preserved intact.
+		split := strings.SplitN(env, "=", 2)
+		key := split[0]
+		if !strings.HasPrefix(key, "TF_VAR_") {
+			continue
+		}
+		key = strings.TrimPrefix(key, "TF_VAR_")
+		var val string
+		if len(split) > 1 {
+			val = split[1]
+		}
+		combinedVars[key] = cty.StringVal(val)
+	}
+
+	for _, filename := range filenames {
+		vars, err := loadTFVarsFile(srcFS, filename)
+		if err != nil {
+			return nil, fmt.Errorf("failed to load tfvars from %s: %w", filename, err)
+		}
+		for k, v := range vars {
+			combinedVars[k] = v
+		}
+	}
+
+	return combinedVars, nil
+}
+
+func loadTFVarsFile(srcFS fs.FS, filename string) (map[string]cty.Value, error) {
+	inputVars := make(map[string]cty.Value)
+	if filename == "" {
+		return inputVars, nil
+	}
+
+	src, err := fs.ReadFile(srcFS, filepath.ToSlash(filename))
+	if err != nil {
+		return nil, err
+	}
+
+	var attrs hcl.Attributes
+	if strings.HasSuffix(filename, ".json") {
+		variableFile, err := hcljson.Parse(src, filename)
+		if err != nil {
+			return nil, err
+		}
+		attrs, err = variableFile.Body.JustAttributes()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		variableFile, err := hclsyntax.ParseConfig(src, filename, hcl.Pos{Line: 1, Column: 1})
+		if err != nil {
+			return nil, err
+		}
+		attrs, err = variableFile.Body.JustAttributes()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	for _, attr := range attrs {
+		inputVars[attr.Name], _ = attr.Expr.Value(&hcl.EvalContext{})
+	}
+
+	return inputVars, nil
+}
diff --git a/pkg/scanners/terraform/parser/load_vars_test.go b/pkg/scanners/terraform/parser/load_vars_test.go
new file
mode 100644 index 000000000000..f6e6792206a8 --- /dev/null +++ b/pkg/scanners/terraform/parser/load_vars_test.go @@ -0,0 +1,46 @@ +package parser + +import ( + "testing" + + "github.com/aquasecurity/trivy/test/testutil" + + "github.com/zclconf/go-cty/cty" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_TFVarsFile(t *testing.T) { + t.Run("tfvars file", func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "test.tfvars": `instance_type = "t2.large"`, + }) + + vars, err := loadTFVars(fs, []string{"test.tfvars"}) + require.NoError(t, err) + assert.Equal(t, "t2.large", vars["instance_type"].AsString()) + }) + + t.Run("tfvars json file", func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "test.tfvars.json": `{ + "variable": { + "foo": { + "default": "bar" + }, + "baz": "qux" + }, + "foo2": true, + "foo3": 3 +}`, + }) + + vars, err := loadTFVars(fs, []string{"test.tfvars.json"}) + require.NoError(t, err) + assert.Equal(t, "bar", vars["variable"].GetAttr("foo").GetAttr("default").AsString()) + assert.Equal(t, "qux", vars["variable"].GetAttr("baz").AsString()) + assert.Equal(t, true, vars["foo2"].True()) + assert.Equal(t, true, vars["foo3"].Equals(cty.NumberIntVal(3)).True()) + }) +} diff --git a/pkg/scanners/terraform/parser/module_retrieval.go b/pkg/scanners/terraform/parser/module_retrieval.go new file mode 100644 index 000000000000..66127715f513 --- /dev/null +++ b/pkg/scanners/terraform/parser/module_retrieval.go @@ -0,0 +1,33 @@ +package parser + +import ( + "context" + "fmt" + "io/fs" + + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser/resolvers" +) + +type ModuleResolver interface { + Resolve(context.Context, fs.FS, resolvers.Options) (filesystem fs.FS, prefix string, downloadPath string, applies bool, err error) +} + +var defaultResolvers = []ModuleResolver{ + resolvers.Cache, + resolvers.Local, + resolvers.Remote, + resolvers.Registry, +} + +func resolveModule(ctx context.Context, current fs.FS, opt resolvers.Options) (filesystem fs.FS, sourcePrefix string, downloadPath string, err error) { + opt.Debug("Resolving module '%s' with source: '%s'...", opt.Name, opt.Source) + for _, resolver := range defaultResolvers { + if filesystem, prefix, path, applies, err := resolver.Resolve(ctx, current, opt); err != nil { + return nil, "", "", err + } else if applies { + opt.Debug("Module path is %s", path) + return filesystem, prefix, path, nil + } + } + return nil, "", "", fmt.Errorf("failed to resolve module '%s' with source: %s", opt.Name, opt.Source) +} diff --git a/pkg/scanners/terraform/parser/option.go b/pkg/scanners/terraform/parser/option.go new file mode 100644 index 000000000000..cdf065679f0a --- /dev/null +++ b/pkg/scanners/terraform/parser/option.go @@ -0,0 +1,67 @@ +package parser + +import ( + "io/fs" + + "github.com/aquasecurity/trivy/pkg/scanners/options" +) + +type ConfigurableTerraformParser interface { + options.ConfigurableParser + SetTFVarsPaths(...string) + SetStopOnHCLError(bool) + SetWorkspaceName(string) + SetAllowDownloads(bool) + SetSkipCachedModules(bool) + SetConfigsFS(fsys fs.FS) +} + +type Option func(p ConfigurableTerraformParser) + +func OptionWithTFVarsPaths(paths ...string) options.ParserOption { + return func(p options.ConfigurableParser) { + if tf, ok := p.(ConfigurableTerraformParser); ok { + tf.SetTFVarsPaths(paths...) 
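+			// The paths are not read here; EvaluateAll later loads them from the
+			// parser's configsFS (which defaults to the module FS).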
+ } + } +} + +func OptionStopOnHCLError(stop bool) options.ParserOption { + return func(p options.ConfigurableParser) { + if tf, ok := p.(ConfigurableTerraformParser); ok { + tf.SetStopOnHCLError(stop) + } + } +} + +func OptionWithWorkspaceName(workspaceName string) options.ParserOption { + return func(p options.ConfigurableParser) { + if tf, ok := p.(ConfigurableTerraformParser); ok { + tf.SetWorkspaceName(workspaceName) + } + } +} + +func OptionWithDownloads(allowed bool) options.ParserOption { + return func(p options.ConfigurableParser) { + if tf, ok := p.(ConfigurableTerraformParser); ok { + tf.SetAllowDownloads(allowed) + } + } +} + +func OptionWithSkipCachedModules(b bool) options.ParserOption { + return func(p options.ConfigurableParser) { + if tf, ok := p.(ConfigurableTerraformParser); ok { + tf.SetSkipCachedModules(b) + } + } +} + +func OptionWithConfigsFS(fsys fs.FS) options.ParserOption { + return func(s options.ConfigurableParser) { + if p, ok := s.(ConfigurableTerraformParser); ok { + p.SetConfigsFS(fsys) + } + } +} diff --git a/pkg/scanners/terraform/parser/parser.go b/pkg/scanners/terraform/parser/parser.go new file mode 100644 index 000000000000..af404ffd8318 --- /dev/null +++ b/pkg/scanners/terraform/parser/parser.go @@ -0,0 +1,349 @@ +package parser + +import ( + "context" + "io" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/terraform" + tfcontext "github.com/aquasecurity/trivy/pkg/terraform/context" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/zclconf/go-cty/cty" + + "github.com/aquasecurity/trivy/pkg/extrafs" +) + +type sourceFile struct { + file *hcl.File + path string +} + +type Metrics struct { + Timings struct { + DiskIODuration time.Duration + ParseDuration time.Duration + } + Counts struct { + Blocks int + Modules int + ModuleDownloads int + Files int + } +} + +var _ ConfigurableTerraformParser = (*Parser)(nil) + +// Parser is a tool for parsing terraform templates at a given file system location +type Parser struct { + projectRoot string + moduleName string + modulePath string + moduleSource string + moduleFS fs.FS + moduleBlock *terraform.Block + files []sourceFile + tfvarsPaths []string + stopOnHCLError bool + workspaceName string + underlying *hclparse.Parser + children []*Parser + metrics Metrics + options []options.ParserOption + debug debug.Logger + allowDownloads bool + skipCachedModules bool + fsMap map[string]fs.FS + skipRequired bool + configsFS fs.FS +} + +func (p *Parser) SetDebugWriter(writer io.Writer) { + p.debug = debug.New(writer, "terraform", "parser", "<"+p.moduleName+">") +} + +func (p *Parser) SetTFVarsPaths(s ...string) { + p.tfvarsPaths = s +} + +func (p *Parser) SetStopOnHCLError(b bool) { + p.stopOnHCLError = b +} + +func (p *Parser) SetWorkspaceName(s string) { + p.workspaceName = s +} + +func (p *Parser) SetAllowDownloads(b bool) { + p.allowDownloads = b +} + +func (p *Parser) SetSkipCachedModules(b bool) { + p.skipCachedModules = b +} + +func (p *Parser) SetSkipRequiredCheck(b bool) { + p.skipRequired = b +} + +func (p *Parser) SetConfigsFS(fsys fs.FS) { + p.configsFS = fsys +} + +// New creates a new Parser +func New(moduleFS fs.FS, moduleSource string, opts ...options.ParserOption) *Parser { + p := &Parser{ + workspaceName: "default", + underlying: hclparse.NewParser(), + 
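+		// allowDownloads defaults to true just below; OptionWithDownloads(false)
+		// can switch it off via SetAllowDownloads.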
options: opts, + moduleName: "root", + allowDownloads: true, + moduleFS: moduleFS, + moduleSource: moduleSource, + configsFS: moduleFS, + } + + for _, option := range opts { + option(p) + } + + return p +} + +func (p *Parser) newModuleParser(moduleFS fs.FS, moduleSource, modulePath, moduleName string, moduleBlock *terraform.Block) *Parser { + mp := New(moduleFS, moduleSource) + mp.modulePath = modulePath + mp.moduleBlock = moduleBlock + mp.moduleName = moduleName + mp.projectRoot = p.projectRoot + p.children = append(p.children, mp) + for _, option := range p.options { + option(mp) + } + return mp +} + +func (p *Parser) Metrics() Metrics { + total := p.metrics + for _, child := range p.children { + metrics := child.Metrics() + total.Counts.Files += metrics.Counts.Files + total.Counts.Blocks += metrics.Counts.Blocks + total.Timings.ParseDuration += metrics.Timings.ParseDuration + total.Timings.DiskIODuration += metrics.Timings.DiskIODuration + // NOTE: we don't add module count - this has already propagated to the top level + } + return total +} + +func (p *Parser) ParseFile(_ context.Context, fullPath string) error { + diskStart := time.Now() + + isJSON := strings.HasSuffix(fullPath, ".tf.json") + isHCL := strings.HasSuffix(fullPath, ".tf") + if !isJSON && !isHCL { + return nil + } + + p.debug.Log("Parsing '%s'...", fullPath) + f, err := p.moduleFS.Open(filepath.ToSlash(fullPath)) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + data, err := io.ReadAll(f) + if err != nil { + return err + } + p.metrics.Timings.DiskIODuration += time.Since(diskStart) + if dir := filepath.Dir(fullPath); p.projectRoot == "" { + p.debug.Log("Setting project/module root to '%s'", dir) + p.projectRoot = dir + p.modulePath = dir + } + + start := time.Now() + var file *hcl.File + var diag hcl.Diagnostics + + if isHCL { + file, diag = p.underlying.ParseHCL(data, fullPath) + } else { + file, diag = p.underlying.ParseJSON(data, fullPath) + } + if diag != nil && diag.HasErrors() { + return diag + } + p.files = append(p.files, sourceFile{ + file: file, + path: fullPath, + }) + p.metrics.Counts.Files++ + p.metrics.Timings.ParseDuration += time.Since(start) + p.debug.Log("Added file %s.", fullPath) + return nil +} + +// ParseFS parses a root module, where it exists at the root of the provided filesystem +func (p *Parser) ParseFS(ctx context.Context, dir string) error { + + dir = filepath.Clean(dir) + + if p.projectRoot == "" { + p.debug.Log("Setting project/module root to '%s'", dir) + p.projectRoot = dir + p.modulePath = dir + } + + slashed := filepath.ToSlash(dir) + p.debug.Log("Parsing FS from '%s'", slashed) + fileInfos, err := fs.ReadDir(p.moduleFS, slashed) + if err != nil { + return err + } + + var paths []string + for _, info := range fileInfos { + realPath := filepath.Join(dir, info.Name()) + if info.Type()&os.ModeSymlink != 0 { + extra, ok := p.moduleFS.(extrafs.FS) + if !ok { + // we can't handle symlinks in this fs type for now + p.debug.Log("Cannot resolve symlink '%s' in '%s' for this fs type", info.Name(), dir) + continue + } + realPath, err = extra.ResolveSymlink(info.Name(), dir) + if err != nil { + p.debug.Log("Failed to resolve symlink '%s' in '%s': %s", info.Name(), dir, err) + continue + } + info, err := extra.Stat(realPath) + if err != nil { + p.debug.Log("Failed to stat resolved symlink '%s': %s", realPath, err) + continue + } + if info.IsDir() { + continue + } + p.debug.Log("Resolved symlink '%s' in '%s' to '%s'", info.Name(), dir, realPath) + } else if info.IsDir() { + 
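+			// Plain sub-directories are skipped: ParseFS only parses the files at
+			// this level, and nested modules are loaded separately by the evaluator.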
continue + } + paths = append(paths, realPath) + } + sort.Strings(paths) + for _, path := range paths { + if err := p.ParseFile(ctx, path); err != nil { + if p.stopOnHCLError { + return err + } + p.debug.Log("error parsing '%s': %s", path, err) + continue + } + } + + return nil +} + +func (p *Parser) EvaluateAll(ctx context.Context) (terraform.Modules, cty.Value, error) { + + p.debug.Log("Evaluating module...") + + if len(p.files) == 0 { + p.debug.Log("No files found, nothing to do.") + return nil, cty.NilVal, nil + } + + blocks, ignores, err := p.readBlocks(p.files) + if err != nil { + return nil, cty.NilVal, err + } + p.debug.Log("Read %d block(s) and %d ignore(s) for module '%s' (%d file[s])...", len(blocks), len(ignores), p.moduleName, len(p.files)) + + p.metrics.Counts.Blocks = len(blocks) + + var inputVars map[string]cty.Value + if p.moduleBlock != nil { + inputVars = p.moduleBlock.Values().AsValueMap() + p.debug.Log("Added %d input variables from module definition.", len(inputVars)) + } else { + inputVars, err = loadTFVars(p.configsFS, p.tfvarsPaths) + if err != nil { + return nil, cty.NilVal, err + } + p.debug.Log("Added %d variables from tfvars.", len(inputVars)) + } + + modulesMetadata, metadataPath, err := loadModuleMetadata(p.moduleFS, p.projectRoot) + if err != nil { + p.debug.Log("Error loading module metadata: %s.", err) + } else { + p.debug.Log("Loaded module metadata for %d module(s) from '%s'.", len(modulesMetadata.Modules), metadataPath) + } + + workingDir, err := os.Getwd() + if err != nil { + return nil, cty.NilVal, err + } + p.debug.Log("Working directory for module evaluation is '%s'", workingDir) + evaluator := newEvaluator( + p.moduleFS, + p, + p.projectRoot, + p.modulePath, + workingDir, + p.moduleName, + blocks, + inputVars, + modulesMetadata, + p.workspaceName, + ignores, + p.debug.Extend("evaluator"), + p.allowDownloads, + p.skipCachedModules, + ) + modules, fsMap, parseDuration := evaluator.EvaluateAll(ctx) + p.metrics.Counts.Modules = len(modules) + p.metrics.Timings.ParseDuration = parseDuration + p.debug.Log("Finished parsing module '%s'.", p.moduleName) + p.fsMap = fsMap + return modules, evaluator.exportOutputs(), nil +} + +func (p *Parser) GetFilesystemMap() map[string]fs.FS { + if p.fsMap == nil { + return make(map[string]fs.FS) + } + return p.fsMap +} + +func (p *Parser) readBlocks(files []sourceFile) (terraform.Blocks, terraform.Ignores, error) { + var blocks terraform.Blocks + var ignores terraform.Ignores + moduleCtx := tfcontext.NewContext(&hcl.EvalContext{}, nil) + for _, file := range files { + fileBlocks, fileIgnores, err := loadBlocksFromFile(file, p.moduleSource) + if err != nil { + if p.stopOnHCLError { + return nil, nil, err + } + p.debug.Log("Encountered HCL parse error: %s", err) + continue + } + for _, fileBlock := range fileBlocks { + blocks = append(blocks, terraform.NewBlock(fileBlock, moduleCtx, p.moduleBlock, nil, p.moduleSource, p.moduleFS)) + } + ignores = append(ignores, fileIgnores...) 
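+		// Ignores from every file are pooled so they can be applied against the
+		// whole module's block set once all files are read.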
+ } + + sortBlocksByHierarchy(blocks) + return blocks, ignores, nil +} diff --git a/pkg/scanners/terraform/parser/parser_integration_test.go b/pkg/scanners/terraform/parser/parser_integration_test.go new file mode 100644 index 000000000000..bbce2a151ce0 --- /dev/null +++ b/pkg/scanners/terraform/parser/parser_integration_test.go @@ -0,0 +1,51 @@ +package parser + +import ( + "context" + "testing" + + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/require" +) + +func Test_DefaultRegistry(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` +module "registry" { + source = "terraform-aws-modules/vpc/aws" +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true), OptionWithSkipCachedModules(true)) + if err := parser.ParseFS(context.TODO(), "code"); err != nil { + t.Fatal(err) + } + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + require.Len(t, modules, 2) +} + +func Test_SpecificRegistry(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` +module "registry" { + source = "registry.terraform.io/terraform-aws-modules/vpc/aws" +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true), OptionWithSkipCachedModules(true)) + if err := parser.ParseFS(context.TODO(), "code"); err != nil { + t.Fatal(err) + } + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + require.Len(t, modules, 2) +} diff --git a/pkg/scanners/terraform/parser/parser_test.go b/pkg/scanners/terraform/parser/parser_test.go new file mode 100644 index 000000000000..28a2d844a933 --- /dev/null +++ b/pkg/scanners/terraform/parser/parser_test.go @@ -0,0 +1,1141 @@ +package parser + +import ( + "context" + "os" + "sort" + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/zclconf/go-cty/cty" +) + +func Test_BasicParsing(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "test.tf": ` + +locals { + proxy = var.cats_mother +} + +variable "cats_mother" { + default = "boots" +} + +provider "cats" { + +} + +moved { + +} + +import { + to = cats_cat.mittens + id = "mittens" +} + +resource "cats_cat" "mittens" { + name = "mittens" + special = true +} + +resource "cats_kitten" "the-great-destroyer" { + name = "the great destroyer" + parent = cats_cat.mittens.name +} + +data "cats_cat" "the-cats-mother" { + name = local.proxy +} + +check "cats_mittens_is_special" { + data "cats_cat" "mittens" { + name = "mittens" + } + + assert { + condition = data.cats_cat.mittens.special == true + error_message = "${data.cats_cat.mittens.name} must be special" + } +} + +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + + blocks := modules[0].GetBlocks() + + // variable + variables := blocks.OfType("variable") + require.Len(t, variables, 1) + assert.Equal(t, "variable", variables[0].Type()) + require.Len(t, variables[0].Labels(), 1) + assert.Equal(t, "cats_mother", variables[0].TypeLabel()) + defaultVal := variables[0].GetAttribute("default") + require.NotNil(t, 
defaultVal) + assert.Equal(t, cty.String, defaultVal.Value().Type()) + assert.Equal(t, "boots", defaultVal.Value().AsString()) + + // provider + providerBlocks := blocks.OfType("provider") + require.Len(t, providerBlocks, 1) + assert.Equal(t, "provider", providerBlocks[0].Type()) + require.Len(t, providerBlocks[0].Labels(), 1) + assert.Equal(t, "cats", providerBlocks[0].TypeLabel()) + + // resources + resourceBlocks := blocks.OfType("resource") + + sort.Slice(resourceBlocks, func(i, j int) bool { + return resourceBlocks[i].TypeLabel() < resourceBlocks[j].TypeLabel() + }) + + require.Len(t, resourceBlocks, 2) + require.Len(t, resourceBlocks[0].Labels(), 2) + + assert.Equal(t, "resource", resourceBlocks[0].Type()) + assert.Equal(t, "cats_cat", resourceBlocks[0].TypeLabel()) + assert.Equal(t, "mittens", resourceBlocks[0].NameLabel()) + + assert.Equal(t, "mittens", resourceBlocks[0].GetAttribute("name").Value().AsString()) + assert.True(t, resourceBlocks[0].GetAttribute("special").Value().True()) + + assert.Equal(t, "resource", resourceBlocks[1].Type()) + assert.Equal(t, "cats_kitten", resourceBlocks[1].TypeLabel()) + assert.Equal(t, "the great destroyer", resourceBlocks[1].GetAttribute("name").Value().AsString()) + assert.Equal(t, "mittens", resourceBlocks[1].GetAttribute("parent").Value().AsString()) + + // import + importBlocks := blocks.OfType("import") + + assert.Equal(t, "import", importBlocks[0].Type()) + require.NotNil(t, importBlocks[0].GetAttribute("to")) + assert.Equal(t, "mittens", importBlocks[0].GetAttribute("id").Value().AsString()) + + // data + dataBlocks := blocks.OfType("data") + require.Len(t, dataBlocks, 1) + require.Len(t, dataBlocks[0].Labels(), 2) + + assert.Equal(t, "data", dataBlocks[0].Type()) + assert.Equal(t, "cats_cat", dataBlocks[0].TypeLabel()) + assert.Equal(t, "the-cats-mother", dataBlocks[0].NameLabel()) + + assert.Equal(t, "boots", dataBlocks[0].GetAttribute("name").Value().AsString()) + + // check + checkBlocks := blocks.OfType("check") + require.Len(t, checkBlocks, 1) + require.Len(t, checkBlocks[0].Labels(), 1) + + assert.Equal(t, "check", checkBlocks[0].Type()) + assert.Equal(t, "cats_mittens_is_special", checkBlocks[0].TypeLabel()) + + require.NotNil(t, checkBlocks[0].GetBlock("data")) + require.NotNil(t, checkBlocks[0].GetBlock("assert")) +} + +func Test_Modules(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` +module "my-mod" { + source = "../module" + input = "ok" +} + +output "result" { + value = module.my-mod.mod_result +} +`, + "module/module.tf": ` +variable "input" { + default = "?" 
+} + +output "mod_result" { + value = var.input +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true), options.ParserWithDebug(os.Stderr)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + + require.Len(t, modules, 2) + rootModule := modules[0] + childModule := modules[1] + + moduleBlocks := rootModule.GetBlocks().OfType("module") + require.Len(t, moduleBlocks, 1) + + assert.Equal(t, "module", moduleBlocks[0].Type()) + assert.Equal(t, "module.my-mod", moduleBlocks[0].FullName()) + inputAttr := moduleBlocks[0].GetAttribute("input") + require.NotNil(t, inputAttr) + require.Equal(t, cty.String, inputAttr.Value().Type()) + assert.Equal(t, "ok", inputAttr.Value().AsString()) + + rootOutputs := rootModule.GetBlocks().OfType("output") + require.Len(t, rootOutputs, 1) + assert.Equal(t, "output.result", rootOutputs[0].FullName()) + valAttr := rootOutputs[0].GetAttribute("value") + require.NotNil(t, valAttr) + require.Equal(t, cty.String, valAttr.Type()) + assert.Equal(t, "ok", valAttr.Value().AsString()) + + childOutputs := childModule.GetBlocks().OfType("output") + require.Len(t, childOutputs, 1) + assert.Equal(t, "module.my-mod.output.mod_result", childOutputs[0].FullName()) + childValAttr := childOutputs[0].GetAttribute("value") + require.NotNil(t, childValAttr) + require.Equal(t, cty.String, childValAttr.Type()) + assert.Equal(t, "ok", childValAttr.Value().AsString()) + +} + +func Test_NestedParentModule(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` +module "my-mod" { + source = "../." + input = "ok" +} + +output "result" { + value = module.my-mod.mod_result +} +`, + "root.tf": ` +variable "input" { + default = "?" 
+} + +output "mod_result" { + value = var.input +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + require.Len(t, modules, 2) + rootModule := modules[0] + childModule := modules[1] + + moduleBlocks := rootModule.GetBlocks().OfType("module") + require.Len(t, moduleBlocks, 1) + + assert.Equal(t, "module", moduleBlocks[0].Type()) + assert.Equal(t, "module.my-mod", moduleBlocks[0].FullName()) + inputAttr := moduleBlocks[0].GetAttribute("input") + require.NotNil(t, inputAttr) + require.Equal(t, cty.String, inputAttr.Value().Type()) + assert.Equal(t, "ok", inputAttr.Value().AsString()) + + rootOutputs := rootModule.GetBlocks().OfType("output") + require.Len(t, rootOutputs, 1) + assert.Equal(t, "output.result", rootOutputs[0].FullName()) + valAttr := rootOutputs[0].GetAttribute("value") + require.NotNil(t, valAttr) + require.Equal(t, cty.String, valAttr.Type()) + assert.Equal(t, "ok", valAttr.Value().AsString()) + + childOutputs := childModule.GetBlocks().OfType("output") + require.Len(t, childOutputs, 1) + assert.Equal(t, "module.my-mod.output.mod_result", childOutputs[0].FullName()) + childValAttr := childOutputs[0].GetAttribute("value") + require.NotNil(t, childValAttr) + require.Equal(t, cty.String, childValAttr.Type()) + assert.Equal(t, "ok", childValAttr.Value().AsString()) +} + +func Test_UndefinedModuleOutputReference(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` +resource "something" "blah" { + value = module.x.y +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + require.Len(t, modules, 1) + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("something") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("value") + require.NotNil(t, attr) + + assert.Equal(t, false, attr.IsResolvable()) +} + +func Test_UndefinedModuleOutputReferenceInSlice(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` +resource "something" "blah" { + value = ["first", module.x.y, "last"] +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + require.Len(t, modules, 1) + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("something") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("value") + require.NotNil(t, attr) + + assert.Equal(t, true, attr.IsResolvable()) + + values := attr.AsStringValueSliceOrEmpty() + require.Len(t, values, 3) + + assert.Equal(t, "first", values[0].Value()) + assert.Equal(t, true, values[0].GetMetadata().IsResolvable()) + + assert.Equal(t, false, values[1].GetMetadata().IsResolvable()) + + assert.Equal(t, "last", values[2].Value()) + assert.Equal(t, true, values[2].GetMetadata().IsResolvable()) +} + +func Test_TemplatedSliceValue(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` + +variable "x" { + default = "hello" +} + +resource "something" "blah" { + value = ["first", "${var.x}-${var.x}", "last"] +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + 
modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + require.Len(t, modules, 1) + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("something") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("value") + require.NotNil(t, attr) + + assert.Equal(t, true, attr.IsResolvable()) + + values := attr.AsStringValueSliceOrEmpty() + require.Len(t, values, 3) + + assert.Equal(t, "first", values[0].Value()) + assert.Equal(t, true, values[0].GetMetadata().IsResolvable()) + + assert.Equal(t, "hello-hello", values[1].Value()) + assert.Equal(t, true, values[1].GetMetadata().IsResolvable()) + + assert.Equal(t, "last", values[2].Value()) + assert.Equal(t, true, values[2].GetMetadata().IsResolvable()) +} + +func Test_SliceOfVars(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` + +variable "x" { + default = "1" +} + +variable "y" { + default = "2" +} + +resource "something" "blah" { + value = [var.x, var.y] +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + require.Len(t, modules, 1) + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("something") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("value") + require.NotNil(t, attr) + + assert.Equal(t, true, attr.IsResolvable()) + + values := attr.AsStringValueSliceOrEmpty() + require.Len(t, values, 2) + + assert.Equal(t, "1", values[0].Value()) + assert.Equal(t, true, values[0].GetMetadata().IsResolvable()) + + assert.Equal(t, "2", values[1].Value()) + assert.Equal(t, true, values[1].GetMetadata().IsResolvable()) +} + +func Test_VarSlice(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` + +variable "x" { + default = ["a", "b", "c"] +} + +resource "something" "blah" { + value = var.x +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + require.Len(t, modules, 1) + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("something") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("value") + require.NotNil(t, attr) + + assert.Equal(t, true, attr.IsResolvable()) + + values := attr.AsStringValueSliceOrEmpty() + require.Len(t, values, 3) + + assert.Equal(t, "a", values[0].Value()) + assert.Equal(t, true, values[0].GetMetadata().IsResolvable()) + + assert.Equal(t, "b", values[1].Value()) + assert.Equal(t, true, values[1].GetMetadata().IsResolvable()) + + assert.Equal(t, "c", values[2].Value()) + assert.Equal(t, true, values[2].GetMetadata().IsResolvable()) +} + +func Test_LocalSliceNested(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` + +variable "x" { + default = "a" +} + +locals { + y = [var.x, "b", "c"] +} + +resource "something" "blah" { + value = local.y +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + require.Len(t, modules, 1) + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("something") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("value") + 
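+	// local.y is [var.x, "b", "c"], so every element should resolve once the
+	// variable's default has been applied.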
require.NotNil(t, attr) + + assert.Equal(t, true, attr.IsResolvable()) + + values := attr.AsStringValueSliceOrEmpty() + require.Len(t, values, 3) + + assert.Equal(t, "a", values[0].Value()) + assert.Equal(t, true, values[0].GetMetadata().IsResolvable()) + + assert.Equal(t, "b", values[1].Value()) + assert.Equal(t, true, values[1].GetMetadata().IsResolvable()) + + assert.Equal(t, "c", values[2].Value()) + assert.Equal(t, true, values[2].GetMetadata().IsResolvable()) +} + +func Test_FunctionCall(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/test.tf": ` + +variable "x" { + default = ["a", "b"] +} + +resource "something" "blah" { + value = concat(var.x, ["c"]) +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), "code")) + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + + require.Len(t, modules, 1) + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("something") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("value") + require.NotNil(t, attr) + + assert.Equal(t, true, attr.IsResolvable()) + + values := attr.AsStringValueSliceOrEmpty() + require.Len(t, values, 3) + + assert.Equal(t, "a", values[0].Value()) + assert.Equal(t, true, values[0].GetMetadata().IsResolvable()) + + assert.Equal(t, "b", values[1].Value()) + assert.Equal(t, true, values[1].GetMetadata().IsResolvable()) + + assert.Equal(t, "c", values[2].Value()) + assert.Equal(t, true, values[2].GetMetadata().IsResolvable()) +} + +func Test_NullDefaultValueForVar(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "test.tf": ` +variable "bucket_name" { + type = string + default = null +} + +resource "aws_s3_bucket" "default" { + bucket = var.bucket_name != null ? 
var.bucket_name : "default" +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + require.Len(t, modules, 1) + + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("aws_s3_bucket") + require.Len(t, blocks, 1) + block := blocks[0] + + attr := block.GetAttribute("bucket") + require.NotNil(t, attr) + assert.Equal(t, "default", attr.Value().AsString()) +} + +func Test_MultipleInstancesOfSameResource(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "test.tf": ` + +resource "aws_kms_key" "key1" { + description = "Key #1" + enable_key_rotation = true +} + +resource "aws_kms_key" "key2" { + description = "Key #2" + enable_key_rotation = true +} + +resource "aws_s3_bucket" "this" { + bucket = "test" + } + + +resource "aws_s3_bucket_server_side_encryption_configuration" "this1" { + bucket = aws_s3_bucket.this.id + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.key1.arn + sse_algorithm = "aws:kms" + } + } +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "this2" { + bucket = aws_s3_bucket.this.id + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.key2.arn + sse_algorithm = "aws:kms" + } + } +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("aws_s3_bucket_server_side_encryption_configuration") + assert.Len(t, blocks, 2) + + for _, block := range blocks { + attr, parent := block.GetNestedAttribute("rule.apply_server_side_encryption_by_default.kms_master_key_id") + assert.Equal(t, "apply_server_side_encryption_by_default", parent.Type()) + assert.NotNil(t, attr) + assert.NotEmpty(t, attr.Value().AsString()) + } +} + +func Test_IfConfigFsIsNotSet_ThenUseModuleFsForVars(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +variable "bucket_name" { + type = string +} +resource "aws_s3_bucket" "main" { + bucket = var.bucket_name +} +`, + "main.tfvars": `bucket_name = "test_bucket"`, + }) + parser := New(fs, "", + OptionStopOnHCLError(true), + OptionWithTFVarsPaths("main.tfvars"), + ) + + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + rootModule := modules[0] + blocks := rootModule.GetResourcesByType("aws_s3_bucket") + require.Len(t, blocks, 1) + + block := blocks[0] + + assert.Equal(t, "test_bucket", block.GetAttribute("bucket").AsStringValueOrDefault("", block).Value()) +} + +func Test_ForEachRefToLocals(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +locals { + buckets = toset([ + "foo", + "bar", + ]) +} + +resource "aws_s3_bucket" "this" { + for_each = local.buckets + bucket = each.key +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("aws_s3_bucket") + assert.Len(t, blocks, 2) + + for _, block := range blocks { + attr := 
block.GetAttribute("bucket") + require.NotNil(t, attr) + assert.Contains(t, []string{"foo", "bar"}, attr.AsStringValueOrDefault("", block).Value()) + } +} + +func Test_ForEachRefToVariableWithDefault(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +variable "buckets" { + type = set(string) + default = ["foo", "bar"] +} + +resource "aws_s3_bucket" "this" { + for_each = var.buckets + bucket = each.key +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("aws_s3_bucket") + assert.Len(t, blocks, 2) + + for _, block := range blocks { + attr := block.GetAttribute("bucket") + require.NotNil(t, attr) + assert.Contains(t, []string{"foo", "bar"}, attr.AsStringValueOrDefault("", block).Value()) + } +} + +func Test_ForEachRefToVariableFromFile(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +variable "policy_rules" { + type = object({ + secure_tags = optional(map(object({ + session_matcher = optional(string) + priority = number + enabled = optional(bool, true) + })), {}) + }) +} + +resource "google_network_security_gateway_security_policy_rule" "secure_tag_rules" { + for_each = var.policy_rules.secure_tags + provider = google-beta + project = "test" + name = each.key + enabled = each.value.enabled + priority = each.value.priority + session_matcher = each.value.session_matcher +} +`, + "main.tfvars": ` +policy_rules = { + secure_tags = { + secure-tag-1 = { + session_matcher = "host() != 'google.com'" + priority = 1001 + } + } +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true), OptionWithTFVarsPaths("main.tfvars")) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + rootModule := modules[0] + + blocks := rootModule.GetResourcesByType("google_network_security_gateway_security_policy_rule") + assert.Len(t, blocks, 1) + + block := blocks[0] + + assert.Equal(t, "secure-tag-1", block.GetAttribute("name").AsStringValueOrDefault("", block).Value()) + assert.Equal(t, true, block.GetAttribute("enabled").AsBoolValueOrDefault(false, block).Value()) + assert.Equal(t, "host() != 'google.com'", block.GetAttribute("session_matcher").AsStringValueOrDefault("", block).Value()) + assert.Equal(t, 1001, block.GetAttribute("priority").AsIntValueOrDefault(0, block).Value()) +} + +func Test_ForEachRefersToMapThatContainsSameStringValues(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": `locals { + buckets = { + bucket1 = "test1" + bucket2 = "test1" + } +} + +resource "aws_s3_bucket" "this" { + for_each = local.buckets + bucket = each.key +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + bucketBlocks := modules.GetResourcesByType("aws_s3_bucket") + assert.Len(t, bucketBlocks, 2) + + var labels []string + + for _, b := range bucketBlocks { + labels = append(labels, b.Label()) + } + + expectedLabels := []string{ + `aws_s3_bucket.this["bucket1"]`, + `aws_s3_bucket.this["bucket2"]`, + } + assert.Equal(t, expectedLabels, labels) +} + +func TestDataSourceWithCountMetaArgument(t 
*testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +data "http" "example" { + count = 2 +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + rootModule := modules[0] + + httpDataSources := rootModule.GetDatasByType("http") + assert.Len(t, httpDataSources, 2) + + var labels []string + for _, b := range httpDataSources { + labels = append(labels, b.Label()) + } + + expectedLabels := []string{ + `http.example[0]`, + `http.example[1]`, + } + assert.Equal(t, expectedLabels, labels) +} + +func TestDataSourceWithForEachMetaArgument(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +locals { + ports = ["80", "8080"] +} +data "http" "example" { + for_each = toset(local.ports) + url = "localhost:${each.key}" +} +`, + }) + + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + rootModule := modules[0] + + httpDataSources := rootModule.GetDatasByType("http") + assert.Len(t, httpDataSources, 2) +} + +func TestForEach(t *testing.T) { + + tests := []struct { + name string + source string + expectedCount int + }{ + { + name: "arg is list of strings", + source: `locals { + buckets = ["bucket1", "bucket2"] +} + +resource "aws_s3_bucket" "this" { + for_each = local.buckets + bucket = each.key +}`, + expectedCount: 0, + }, + { + name: "arg is empty set", + source: `locals { + buckets = toset([]) +} + +resource "aws_s3_bucket" "this" { + for_each = local.buckets + bucket = each.key +}`, + expectedCount: 0, + }, + { + name: "arg is set of strings", + source: `locals { + buckets = ["bucket1", "bucket2"] +} + +resource "aws_s3_bucket" "this" { + for_each = toset(local.buckets) + bucket = each.key +}`, + expectedCount: 2, + }, + { + name: "arg is map", + source: `locals { + buckets = { + 1 = {} + 2 = {} + } +} + +resource "aws_s3_bucket" "this" { + for_each = local.buckets + bucket = each.key +}`, + expectedCount: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": tt.source, + }) + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) + + bucketBlocks := modules.GetResourcesByType("aws_s3_bucket") + assert.Len(t, bucketBlocks, tt.expectedCount) + }) + } +} + +func TestForEachRefToResource(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` + locals { + vpcs = { + "test1" = { + cidr_block = "192.168.0.0/28" + } + "test2" = { + cidr_block = "192.168.1.0/28" + } + } +} + +resource "aws_vpc" "example" { + for_each = local.vpcs + cidr_block = each.value.cidr_block +} + +resource "aws_internet_gateway" "example" { + for_each = aws_vpc.example + vpc_id = each.key +} +`, + }) + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + require.Len(t, modules, 1) + + blocks := modules.GetResourcesByType("aws_internet_gateway") + require.Len(t, blocks, 2) + + var vpcIds []string + for _, b := range blocks { + 
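+ // vpc_id was set to each.key, so each gateway resolves to the map key of the aws_vpc instance it references ("test1"/"test2")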
vpcIds = append(vpcIds, b.GetAttribute("vpc_id").Value().AsString()) + } + + expectedVpcIds := []string{"test1", "test2"} + assert.Equal(t, expectedVpcIds, vpcIds) +} + +func TestArnAttributeOfBucketIsCorrect(t *testing.T) { + + t.Run("the bucket doesn't have a name", func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": `resource "aws_s3_bucket" "this" {}`, + }) + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + require.Len(t, modules, 1) + + blocks := modules.GetResourcesByType("aws_s3_bucket") + assert.Len(t, blocks, 1) + + bucket := blocks[0] + + values := bucket.Values() + arnVal := values.GetAttr("arn") + assert.True(t, arnVal.Type().Equals(cty.String)) + + id := values.GetAttr("id").AsString() + + arn := arnVal.AsString() + assert.Equal(t, "arn:aws:s3:::"+id, arn) + }) + + t.Run("the bucket has a name", func(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": `resource "aws_s3_bucket" "this" { + bucket = "test" +} + +resource "aws_iam_role" "this" { + name = "test_role" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Sid = "" + Principal = { + Service = "s3.amazonaws.com" + } + }, + ] + }) +} + +resource "aws_iam_role_policy" "this" { + name = "test_policy" + role = aws_iam_role.this.id + policy = data.aws_iam_policy_document.this.json +} + +data "aws_iam_policy_document" "this" { + statement { + effect = "Allow" + actions = [ + "s3:GetObject" + ] + resources = ["${aws_s3_bucket.this.arn}/*"] + } +}`, + }) + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + require.NoError(t, err) + require.Len(t, modules, 1) + + blocks := modules[0].GetDatasByType("aws_iam_policy_document") + assert.Len(t, blocks, 1) + + policyDoc := blocks[0] + + statement := policyDoc.GetBlock("statement") + resources := statement.GetAttribute("resources").AsStringValueSliceOrEmpty() + + assert.Len(t, resources, 1) + assert.True(t, resources[0].EqualTo("arn:aws:s3:::test/*")) + }) +} + +func TestForEachWithObjectsOfDifferentTypes(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": `module "backups" { + bucket_name = each.key + client = each.value.client + path_writers = each.value.path_writers + + for_each = { + "bucket1" = { + client = "client1" + path_writers = ["writer1"] // tuple with string + }, + "bucket2" = { + client = "client2" + path_writers = [] // empty tuple + } + } +} +`, + }) + parser := New(fs, "", OptionStopOnHCLError(true)) + require.NoError(t, parser.ParseFS(context.TODO(), ".")) + + modules, _, err := parser.EvaluateAll(context.TODO()) + assert.NoError(t, err) + assert.Len(t, modules, 1) +} diff --git a/pkg/scanners/terraform/parser/resolvers/cache.go b/pkg/scanners/terraform/parser/resolvers/cache.go new file mode 100644 index 000000000000..1314d538a60a --- /dev/null +++ b/pkg/scanners/terraform/parser/resolvers/cache.go @@ -0,0 +1,62 @@ +package resolvers + +import ( + "context" + "crypto/md5" // nolint + "fmt" + "io/fs" + "os" + "path/filepath" +) + +type cacheResolver struct{} + +var Cache = &cacheResolver{} + +const tempDirName = ".aqua" + +func locateCacheFS() (fs.FS, error) { + dir, err := locateCacheDir() + if err != nil { + return nil, err + } + return os.DirFS(dir), 
nil +} + +func locateCacheDir() (string, error) { + cacheDir := filepath.Join(os.TempDir(), tempDirName, "cache") + if err := os.MkdirAll(cacheDir, 0o755); err != nil { + return "", err + } + if !isWritable(cacheDir) { + return "", fmt.Errorf("cache directory is not writable") + } + return cacheDir, nil +} + +func (r *cacheResolver) Resolve(_ context.Context, _ fs.FS, opt Options) (filesystem fs.FS, prefix string, downloadPath string, applies bool, err error) { + if opt.SkipCache { + opt.Debug("Cache is disabled.") + return nil, "", "", false, nil + } + cacheFS, err := locateCacheFS() + if err != nil { + opt.Debug("No cache filesystem is available on this machine.") + return nil, "", "", false, nil + } + key := cacheKey(opt.Source, opt.Version, opt.RelativePath) + opt.Debug("Trying to resolve: %s", key) + if info, err := fs.Stat(cacheFS, filepath.ToSlash(key)); err == nil && info.IsDir() { + opt.Debug("Module '%s' resolving via cache...", opt.Name) + cacheDir, err := locateCacheDir() + if err != nil { + return nil, "", "", true, err + } + return os.DirFS(filepath.Join(cacheDir, key)), opt.OriginalSource, ".", true, nil + } + return nil, "", "", false, nil +} + +func cacheKey(source, version, relativePath string) string { + return fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s:%s:%s", source, version, relativePath)))) // nolint +} diff --git a/pkg/scanners/terraform/parser/resolvers/local.go b/pkg/scanners/terraform/parser/resolvers/local.go new file mode 100644 index 000000000000..94d92099b6c3 --- /dev/null +++ b/pkg/scanners/terraform/parser/resolvers/local.go @@ -0,0 +1,26 @@ +package resolvers + +import ( + "context" + "io/fs" + "path/filepath" +) + +type localResolver struct{} + +var Local = &localResolver{} + +func (r *localResolver) Resolve(_ context.Context, target fs.FS, opt Options) (filesystem fs.FS, prefix string, downloadPath string, applies bool, err error) { + if !opt.hasPrefix(".", "..") { + return nil, "", "", false, nil + } + joined := filepath.Clean(filepath.Join(opt.ModulePath, opt.Source)) + if _, err := fs.Stat(target, filepath.ToSlash(joined)); err == nil { + opt.Debug("Module '%s' resolved locally to %s", opt.Name, joined) + return target, "", joined, true, nil + } + + clean := filepath.Clean(opt.Source) + opt.Debug("Module '%s' resolved locally to %s", opt.Name, clean) + return target, "", clean, true, nil +} diff --git a/pkg/scanners/terraform/parser/resolvers/options.go b/pkg/scanners/terraform/parser/resolvers/options.go new file mode 100644 index 000000000000..aeec3d9393d5 --- /dev/null +++ b/pkg/scanners/terraform/parser/resolvers/options.go @@ -0,0 +1,28 @@ +package resolvers + +import ( + "strings" + + "github.com/aquasecurity/trivy/pkg/debug" +) + +type Options struct { + Source, OriginalSource, Version, OriginalVersion, WorkingDir, Name, ModulePath string + DebugLogger debug.Logger + AllowDownloads bool + SkipCache bool + RelativePath string +} + +func (o *Options) hasPrefix(prefixes ...string) bool { + for _, prefix := range prefixes { + if strings.HasPrefix(o.Source, prefix) { + return true + } + } + return false +} + +func (o *Options) Debug(format string, args ...interface{}) { + o.DebugLogger.Log(format, args...) 
+} diff --git a/pkg/scanners/terraform/parser/resolvers/registry.go b/pkg/scanners/terraform/parser/resolvers/registry.go new file mode 100644 index 000000000000..5623e9064e06 --- /dev/null +++ b/pkg/scanners/terraform/parser/resolvers/registry.go @@ -0,0 +1,165 @@ +package resolvers + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "net/http" + "os" + "sort" + "strings" + "time" + + "github.com/Masterminds/semver" +) + +type registryResolver struct { + client *http.Client +} + +var Registry = ®istryResolver{ + client: &http.Client{ + // give it a maximum 5 seconds to resolve the module + Timeout: time.Second * 5, + }, +} + +type moduleVersions struct { + Modules []struct { + Versions []struct { + Version string `json:"version"` + } `json:"versions"` + } `json:"modules"` +} + +const registryHostname = "registry.terraform.io" + +// nolint +func (r *registryResolver) Resolve(ctx context.Context, target fs.FS, opt Options) (filesystem fs.FS, prefix string, downloadPath string, applies bool, err error) { + + if !opt.AllowDownloads { + return + } + + inputVersion := opt.Version + source, relativePath, _ := strings.Cut(opt.Source, "//") + parts := strings.Split(source, "/") + if len(parts) < 3 || len(parts) > 4 { + return + } + + hostname := registryHostname + var token string + if len(parts) == 4 { + hostname = parts[0] + parts = parts[1:] + + envVar := fmt.Sprintf("TF_TOKEN_%s", strings.ReplaceAll(hostname, ".", "_")) + token = os.Getenv(envVar) + if token != "" { + opt.Debug("Found a token for the registry at %s", hostname) + } else { + opt.Debug("No token was found for the registry at %s", hostname) + } + } + + moduleName := strings.Join(parts, "/") + + if opt.Version != "" { + versionUrl := fmt.Sprintf("https://%s/v1/modules/%s/versions", hostname, moduleName) + opt.Debug("Requesting module versions from registry using '%s'...", versionUrl) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, versionUrl, nil) + if err != nil { + return nil, "", "", true, err + } + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + resp, err := r.client.Do(req) + if err != nil { + return nil, "", "", true, err + } + defer func() { _ = resp.Body.Close() }() + if resp.StatusCode != http.StatusOK { + return nil, "", "", true, fmt.Errorf("unexpected status code for versions endpoint: %d", resp.StatusCode) + } + var availableVersions moduleVersions + if err := json.NewDecoder(resp.Body).Decode(&availableVersions); err != nil { + return nil, "", "", true, err + } + + opt.Version, err = resolveVersion(inputVersion, availableVersions) + if err != nil { + return nil, "", "", true, err + } + opt.Debug("Found version '%s' for constraint '%s'", opt.Version, inputVersion) + } + + var url string + if opt.Version == "" { + url = fmt.Sprintf("https://%s/v1/modules/%s/download", hostname, moduleName) + } else { + url = fmt.Sprintf("https://%s/v1/modules/%s/%s/download", hostname, moduleName, opt.Version) + } + + opt.Debug("Requesting module source from registry using '%s'...", url) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, "", "", true, err + } + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + if opt.Version != "" { + req.Header.Set("X-Terraform-Version", opt.Version) + } + + resp, err := r.client.Do(req) + if err != nil { + return nil, "", "", true, err + } + defer func() { _ = resp.Body.Close() }() + if resp.StatusCode != http.StatusNoContent { + return nil, "", "", true, 
fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + opt.Source = resp.Header.Get("X-Terraform-Get") + opt.Debug("Module '%s' resolved via registry to new source: '%s'", opt.Name, opt.Source) + opt.RelativePath = relativePath + filesystem, prefix, downloadPath, _, err = Remote.Resolve(ctx, target, opt) + if err != nil { + return nil, "", "", true, err + } + + return filesystem, prefix, downloadPath, true, nil +} + +func resolveVersion(input string, versions moduleVersions) (string, error) { + if len(versions.Modules) != 1 { + return "", fmt.Errorf("1 module expected, found %d", len(versions.Modules)) + } + if len(versions.Modules[0].Versions) == 0 { + return "", fmt.Errorf("no available versions for module") + } + constraints, err := semver.NewConstraint(input) + if err != nil { + return "", err + } + var realVersions semver.Collection + for _, rawVersion := range versions.Modules[0].Versions { + realVersion, err := semver.NewVersion(rawVersion.Version) + if err != nil { + continue + } + realVersions = append(realVersions, realVersion) + } + sort.Sort(sort.Reverse(realVersions)) + for _, realVersion := range realVersions { + if constraints.Check(realVersion) { + return realVersion.String(), nil + } + } + return "", fmt.Errorf("no available versions for module constraint '%s'", input) +} diff --git a/pkg/scanners/terraform/parser/resolvers/remote.go b/pkg/scanners/terraform/parser/resolvers/remote.go new file mode 100644 index 000000000000..4c1a96437e65 --- /dev/null +++ b/pkg/scanners/terraform/parser/resolvers/remote.go @@ -0,0 +1,92 @@ +package resolvers + +import ( + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "sync/atomic" + + "github.com/hashicorp/go-getter" +) + +type remoteResolver struct { + count int32 +} + +var Remote = &remoteResolver{ + count: 0, +} + +func (r *remoteResolver) incrementCount(o Options) { + o.Debug("Incrementing the download counter") + atomic.CompareAndSwapInt32(&r.count, r.count, r.count+1) + o.Debug("Download counter is now %d", r.count) +} + +func (r *remoteResolver) GetDownloadCount() int { + return int(atomic.LoadInt32(&r.count)) +} + +func (r *remoteResolver) Resolve(ctx context.Context, _ fs.FS, opt Options) (filesystem fs.FS, prefix string, downloadPath string, applies bool, err error) { + if !opt.hasPrefix("github.com/", "bitbucket.org/", "s3:", "git@", "git:", "hg:", "https:", "gcs:") { + return nil, "", "", false, nil + } + + if !opt.AllowDownloads { + return nil, "", "", false, nil + } + + key := cacheKey(opt.OriginalSource, opt.OriginalVersion, opt.RelativePath) + opt.Debug("Storing with cache key %s", key) + + baseCacheDir, err := locateCacheDir() + if err != nil { + return nil, "", "", true, fmt.Errorf("failed to locate cache directory: %w", err) + } + cacheDir := filepath.Join(baseCacheDir, key) + if err := r.download(ctx, opt, cacheDir); err != nil { + return nil, "", "", true, err + } + + r.incrementCount(opt) + opt.Debug("Successfully downloaded %s from %s", opt.Name, opt.Source) + opt.Debug("Module '%s' resolved via remote download.", opt.Name) + return os.DirFS(cacheDir), opt.Source, filepath.Join(".", opt.RelativePath), true, nil +} + +func (r *remoteResolver) download(ctx context.Context, opt Options, dst string) error { + _ = os.RemoveAll(dst) + if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil { + return err + } + + var opts []getter.ClientOption + + // Overwrite the file getter so that a file will be copied + getter.Getters["file"] = &getter.FileGetter{Copy: true} + + 
opt.Debug("Downloading %s...", opt.Source) + + // Build the client + client := &getter.Client{ + Ctx: ctx, + Src: opt.Source, + Dst: dst, + Pwd: opt.WorkingDir, + Getters: getter.Getters, + Mode: getter.ClientModeAny, + Options: opts, + } + + if err := client.Get(); err != nil { + return fmt.Errorf("failed to download: %w", err) + } + + return nil +} + +func (r *remoteResolver) GetSourcePrefix(source string) string { + return source +} diff --git a/pkg/scanners/terraform/parser/resolvers/writable.go b/pkg/scanners/terraform/parser/resolvers/writable.go new file mode 100644 index 000000000000..84f471f779c2 --- /dev/null +++ b/pkg/scanners/terraform/parser/resolvers/writable.go @@ -0,0 +1,36 @@ +//go:build !windows +// +build !windows + +package resolvers + +import ( + "os" + "syscall" +) + +func isWritable(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + if !info.IsDir() { + return false + } + + // Check if the user bit is enabled in file permission + if info.Mode().Perm()&(1<<(uint(7))) == 0 { + return false + } + + var stat syscall.Stat_t + if err = syscall.Stat(path, &stat); err != nil { + return false + } + + if uint32(os.Geteuid()) != stat.Uid { + return false + } + + return true +} diff --git a/pkg/scanners/terraform/parser/resolvers/writable_windows.go b/pkg/scanners/terraform/parser/resolvers/writable_windows.go new file mode 100644 index 000000000000..69cb3c7169b1 --- /dev/null +++ b/pkg/scanners/terraform/parser/resolvers/writable_windows.go @@ -0,0 +1,24 @@ +package resolvers + +import ( + "os" +) + +func isWritable(path string) bool { + + info, err := os.Stat(path) + if err != nil { + return false + } + + if !info.IsDir() { + return false + } + + // Check if the user bit is enabled in file permission + if info.Mode().Perm()&(1<<(uint(7))) == 0 { + return false + } + + return true +} diff --git a/pkg/scanners/terraform/parser/sort.go b/pkg/scanners/terraform/parser/sort.go new file mode 100644 index 000000000000..85aa4652fe3b --- /dev/null +++ b/pkg/scanners/terraform/parser/sort.go @@ -0,0 +1,58 @@ +package parser + +import ( + "sort" + + "github.com/aquasecurity/trivy/pkg/terraform" +) + +func sortBlocksByHierarchy(blocks terraform.Blocks) { + c := &counter{ + cache: make(map[string]int), + } + sort.Slice(blocks, func(i, j int) bool { + a := blocks[i] + b := blocks[j] + iDepth, jDepth := c.countBlockRecursion(a, blocks, 0), c.countBlockRecursion(b, blocks, 0) + switch { + case iDepth < jDepth: + return true + case iDepth > jDepth: + return false + default: + return blocks[i].FullName() < blocks[j].FullName() + } + }) +} + +type counter struct { + cache map[string]int +} + +func (c *counter) countBlockRecursion(block *terraform.Block, blocks terraform.Blocks, count int) int { + metadata := block.GetMetadata() + if cached, ok := c.cache[metadata.Reference()]; ok { + return cached + } + var maxCount int + var hasRecursion bool + for _, attrName := range []string{"for_each", "count"} { + if attr := block.GetAttribute(attrName); attr.IsNotNil() { + hasRecursion = true + for _, other := range blocks { + if attr.ReferencesBlock(other) { + depth := c.countBlockRecursion(other, blocks, count) + if depth > maxCount { + maxCount = depth + } + } + } + } + } + if hasRecursion { + maxCount++ + } + result := maxCount + count + c.cache[metadata.Reference()] = result + return result +} diff --git a/pkg/scanners/terraform/parser/testdata/tfvars/terraform.tfvars b/pkg/scanners/terraform/parser/testdata/tfvars/terraform.tfvars new file mode 100644 
index 000000000000..23fee69e2bb1 --- /dev/null +++ b/pkg/scanners/terraform/parser/testdata/tfvars/terraform.tfvars @@ -0,0 +1 @@ +instance_type = "t2.large" \ No newline at end of file diff --git a/pkg/scanners/terraform/parser/testdata/tfvars/terraform.tfvars.json b/pkg/scanners/terraform/parser/testdata/tfvars/terraform.tfvars.json new file mode 100644 index 000000000000..bde0e75763b1 --- /dev/null +++ b/pkg/scanners/terraform/parser/testdata/tfvars/terraform.tfvars.json @@ -0,0 +1,10 @@ +{ + "variable": { + "foo": { + "default": "bar" + }, + "baz": "qux" + }, + "foo2": true, + "foo3": 3 +} \ No newline at end of file diff --git a/pkg/scanners/terraform/scanner.go b/pkg/scanners/terraform/scanner.go new file mode 100644 index 000000000000..4d49ea029c5f --- /dev/null +++ b/pkg/scanners/terraform/scanner.go @@ -0,0 +1,379 @@ +package terraform + +import ( + "context" + "io" + "io/fs" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/aquasecurity/trivy/pkg/debug" + "github.com/aquasecurity/trivy/pkg/framework" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/pkg/types" + "golang.org/x/exp/slices" + + "github.com/aquasecurity/trivy/pkg/extrafs" + "github.com/aquasecurity/trivy/pkg/rego" + "github.com/aquasecurity/trivy/pkg/scanners" + "github.com/aquasecurity/trivy/pkg/scanners/terraform/executor" + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser" + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser/resolvers" +) + +var _ scanners.FSScanner = (*Scanner)(nil) +var _ options.ConfigurableScanner = (*Scanner)(nil) +var _ ConfigurableTerraformScanner = (*Scanner)(nil) + +type Scanner struct { + sync.Mutex + options []options.ScannerOption + parserOpt []options.ParserOption + executorOpt []executor.Option + dirs map[string]struct{} + forceAllDirs bool + policyDirs []string + policyReaders []io.Reader + regoScanner *rego.Scanner + execLock sync.RWMutex + debug debug.Logger + frameworks []framework.Framework + spec string + loadEmbeddedLibraries bool + loadEmbeddedPolicies bool +} + +func (s *Scanner) SetSpec(spec string) { + s.spec = spec +} + +func (s *Scanner) SetRegoOnly(regoOnly bool) { + s.executorOpt = append(s.executorOpt, executor.OptionWithRegoOnly(regoOnly)) +} + +func (s *Scanner) SetFrameworks(frameworks []framework.Framework) { + s.frameworks = frameworks +} + +func (s *Scanner) SetUseEmbeddedPolicies(b bool) { + s.loadEmbeddedPolicies = b +} + +func (s *Scanner) SetUseEmbeddedLibraries(b bool) { + s.loadEmbeddedLibraries = b +} + +func (s *Scanner) Name() string { + return "Terraform" +} + +func (s *Scanner) SetForceAllDirs(b bool) { + s.forceAllDirs = b +} + +func (s *Scanner) AddParserOptions(options ...options.ParserOption) { + s.parserOpt = append(s.parserOpt, options...) +} + +func (s *Scanner) AddExecutorOptions(options ...executor.Option) { + s.executorOpt = append(s.executorOpt, options...) 
+} + +func (s *Scanner) SetPolicyReaders(readers []io.Reader) { + s.policyReaders = readers +} + +func (s *Scanner) SetSkipRequiredCheck(skip bool) { + s.parserOpt = append(s.parserOpt, options.ParserWithSkipRequiredCheck(skip)) +} + +func (s *Scanner) SetDebugWriter(writer io.Writer) { + s.parserOpt = append(s.parserOpt, options.ParserWithDebug(writer)) + s.executorOpt = append(s.executorOpt, executor.OptionWithDebugWriter(writer)) + s.debug = debug.New(writer, "terraform", "scanner") +} + +func (s *Scanner) SetTraceWriter(_ io.Writer) { +} + +func (s *Scanner) SetPerResultTracingEnabled(_ bool) { +} + +func (s *Scanner) SetPolicyDirs(dirs ...string) { + s.policyDirs = dirs +} + +func (s *Scanner) SetDataDirs(_ ...string) {} +func (s *Scanner) SetPolicyNamespaces(_ ...string) {} + +func (s *Scanner) SetPolicyFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} + +func (s *Scanner) SetDataFilesystem(_ fs.FS) { + // handled by rego when option is passed on +} +func (s *Scanner) SetRegoErrorLimit(_ int) {} + +type Metrics struct { + Parser parser.Metrics + Executor executor.Metrics + Timings struct { + Total time.Duration + } +} + +func New(options ...options.ScannerOption) *Scanner { + s := &Scanner{ + dirs: make(map[string]struct{}), + options: options, + } + for _, opt := range options { + opt(s) + } + return s +} + +func (s *Scanner) ScanFS(ctx context.Context, target fs.FS, dir string) (scan.Results, error) { + results, _, err := s.ScanFSWithMetrics(ctx, target, dir) + return results, err +} + +func (s *Scanner) initRegoScanner(srcFS fs.FS) (*rego.Scanner, error) { + s.Lock() + defer s.Unlock() + if s.regoScanner != nil { + return s.regoScanner, nil + } + regoScanner := rego.NewScanner(types.SourceCloud, s.options...) + regoScanner.SetParentDebugLogger(s.debug) + + if err := regoScanner.LoadPolicies(s.loadEmbeddedLibraries, s.loadEmbeddedPolicies, srcFS, s.policyDirs, s.policyReaders); err != nil { + return nil, err + } + s.regoScanner = regoScanner + return regoScanner, nil +} + +// terraformRootModule represents the module to be used as the root module for Terraform deployment. +type terraformRootModule struct { + rootPath string + childs terraform.Modules + fsMap map[string]fs.FS +} + +func excludeNonRootModules(modules []terraformRootModule) []terraformRootModule { + var result []terraformRootModule + var childPaths []string + + for _, module := range modules { + childPaths = append(childPaths, module.childs.ChildModulesPaths()...) 
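+ // every path that appears as a child module of some candidate is recorded here so it can be filtered out below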
+ } + + for _, module := range modules { + // if the path of the root module matches the path of the child module, + // then we should not scan it + if !slices.Contains(childPaths, module.rootPath) { + result = append(result, module) + } + } + return result +} + +func (s *Scanner) ScanFSWithMetrics(ctx context.Context, target fs.FS, dir string) (scan.Results, Metrics, error) { + + var metrics Metrics + + s.debug.Log("Scanning [%s] at '%s'...", target, dir) + + // find directories which directly contain tf files (and have no parent containing tf files) + rootDirs := s.findRootModules(target, dir, dir) + sort.Strings(rootDirs) + + if len(rootDirs) == 0 { + s.debug.Log("no root modules found") + return nil, metrics, nil + } + + regoScanner, err := s.initRegoScanner(target) + if err != nil { + return nil, metrics, err + } + + s.execLock.Lock() + s.executorOpt = append(s.executorOpt, executor.OptionWithRegoScanner(regoScanner), executor.OptionWithFrameworks(s.frameworks...)) + s.execLock.Unlock() + + var allResults scan.Results + + // parse all root module directories + var rootModules []terraformRootModule + for _, dir := range rootDirs { + + s.debug.Log("Scanning root module '%s'...", dir) + + p := parser.New(target, "", s.parserOpt...) + + if err := p.ParseFS(ctx, dir); err != nil { + return nil, metrics, err + } + + modules, _, err := p.EvaluateAll(ctx) + if err != nil { + return nil, metrics, err + } + + parserMetrics := p.Metrics() + metrics.Parser.Counts.Blocks += parserMetrics.Counts.Blocks + metrics.Parser.Counts.Modules += parserMetrics.Counts.Modules + metrics.Parser.Counts.Files += parserMetrics.Counts.Files + metrics.Parser.Timings.DiskIODuration += parserMetrics.Timings.DiskIODuration + metrics.Parser.Timings.ParseDuration += parserMetrics.Timings.ParseDuration + + rootModules = append(rootModules, terraformRootModule{ + rootPath: dir, + childs: modules, + fsMap: p.GetFilesystemMap(), + }) + } + + rootModules = excludeNonRootModules(rootModules) + + for _, module := range rootModules { + s.execLock.RLock() + e := executor.New(s.executorOpt...) + s.execLock.RUnlock() + results, execMetrics, err := e.Execute(module.childs) + if err != nil { + return nil, metrics, err + } + + for i, result := range results { + if result.Metadata().Range().GetFS() != nil { + continue + } + key := result.Metadata().Range().GetFSKey() + if key == "" { + continue + } + if filesystem, ok := module.fsMap[key]; ok { + override := scan.Results{ + result, + } + override.SetSourceAndFilesystem(result.Range().GetSourcePrefix(), filesystem, false) + results[i] = override[0] + } + } + + metrics.Executor.Counts.Passed += execMetrics.Counts.Passed + metrics.Executor.Counts.Failed += execMetrics.Counts.Failed + metrics.Executor.Counts.Ignored += execMetrics.Counts.Ignored + metrics.Executor.Counts.Critical += execMetrics.Counts.Critical + metrics.Executor.Counts.High += execMetrics.Counts.High + metrics.Executor.Counts.Medium += execMetrics.Counts.Medium + metrics.Executor.Counts.Low += execMetrics.Counts.Low + metrics.Executor.Timings.Adaptation += execMetrics.Timings.Adaptation + metrics.Executor.Timings.RunningChecks += execMetrics.Timings.RunningChecks + + allResults = append(allResults, results...) 
+ } + + metrics.Parser.Counts.ModuleDownloads = resolvers.Remote.GetDownloadCount() + + metrics.Timings.Total += metrics.Parser.Timings.DiskIODuration + metrics.Timings.Total += metrics.Parser.Timings.ParseDuration + metrics.Timings.Total += metrics.Executor.Timings.Adaptation + metrics.Timings.Total += metrics.Executor.Timings.RunningChecks + + return allResults, metrics, nil +} + +func (s *Scanner) removeNestedDirs(dirs []string) []string { + if s.forceAllDirs { + return dirs + } + var clean []string + for _, dirA := range dirs { + dirOK := true + for _, dirB := range dirs { + if dirA == dirB { + continue + } + if str, err := filepath.Rel(dirB, dirA); err == nil && !strings.HasPrefix(str, "..") { + dirOK = false + break + } + } + if dirOK { + clean = append(clean, dirA) + } + } + return clean +} + +func (s *Scanner) findRootModules(target fs.FS, scanDir string, dirs ...string) []string { + + var roots []string + var others []string + + for _, dir := range dirs { + if s.isRootModule(target, dir) { + roots = append(roots, dir) + if !s.forceAllDirs { + continue + } + } + + // if this isn't a root module, look at directories inside it + files, err := fs.ReadDir(target, filepath.ToSlash(dir)) + if err != nil { + continue + } + for _, file := range files { + realPath := filepath.Join(dir, file.Name()) + if symFS, ok := target.(extrafs.ReadLinkFS); ok { + realPath, err = symFS.ResolveSymlink(realPath, scanDir) + if err != nil { + s.debug.Log("failed to resolve symlink '%s': %s", file.Name(), err) + continue + } + } + if file.IsDir() { + others = append(others, realPath) + } else if statFS, ok := target.(fs.StatFS); ok { + info, err := statFS.Stat(filepath.ToSlash(realPath)) + if err != nil { + continue + } + if info.IsDir() { + others = append(others, realPath) + } + } + } + } + + if (len(roots) == 0 || s.forceAllDirs) && len(others) > 0 { + roots = append(roots, s.findRootModules(target, scanDir, others...)...) 
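+ // keep recursing into subdirectories until root modules are found (or through all of them when forceAllDirs is set)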
+ } + + return s.removeNestedDirs(roots) +} + +func (s *Scanner) isRootModule(target fs.FS, dir string) bool { + files, err := fs.ReadDir(target, filepath.ToSlash(dir)) + if err != nil { + s.debug.Log("failed to read dir '%s' from filesystem [%s]: %s", dir, target, err) + return false + } + for _, file := range files { + if strings.HasSuffix(file.Name(), ".tf") || strings.HasSuffix(file.Name(), ".tf.json") { + return true + } + } + return false +} diff --git a/pkg/scanners/terraform/scanner_integration_test.go b/pkg/scanners/terraform/scanner_integration_test.go new file mode 100644 index 000000000000..20309b5139f3 --- /dev/null +++ b/pkg/scanners/terraform/scanner_integration_test.go @@ -0,0 +1,132 @@ +package terraform + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/test/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_ScanRemoteModule(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +module "s3_bucket" { + source = "terraform-aws-modules/s3-bucket/aws" + + bucket = "my-s3-bucket" +} +`, + "/trules/bucket_name.rego": ` +# METADATA +# schemas: +# - input: schema.input +# custom: +# avd_id: AVD-AWS-0001 +# input: +# selector: +# - type: cloud +# subtypes: +# - service: s3 +# provider: aws +package defsec.test.aws1 +deny[res] { + bucket := input.aws.s3.buckets[_] + bucket.name.value == "" + res := result.new("The name of the bucket must not be empty", bucket) +}`, + }) + + debugLog := bytes.NewBuffer([]byte{}) + + scanner := New( + options.ScannerWithDebug(debugLog), + options.ScannerWithPolicyFilesystem(fs), + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithEmbeddedPolicies(false), + options.ScannerWithEmbeddedLibraries(false), + options.ScannerWithRegoOnly(true), + ScannerWithAllDirectories(true), + ScannerWithSkipCachedModules(true), + ) + + results, err := scanner.ScanFS(context.TODO(), fs, ".") + require.NoError(t, err) + + assert.Len(t, results.GetPassed(), 1) + + if t.Failed() { + fmt.Printf("Debug logs:\n%s\n", debugLog.String()) + } +} + +func Test_ScanChildUseRemoteModule(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode") + } + fs := testutil.CreateFS(t, map[string]string{ + "main.tf": ` +module "this" { + source = "./modules/s3" + bucket = "my-s3-bucket" +} +`, + "modules/s3/main.tf": ` +variable "bucket" { + type = string +} + +module "s3_bucket" { + source = "github.com/terraform-aws-modules/terraform-aws-s3-bucket?ref=v3.15.1" + bucket = var.bucket +} +`, + "trules/bucket_name.rego": ` +# METADATA +# schemas: +# - input: schema.input +# custom: +# avd_id: AVD-AWS-0001 +# input: +# selector: +# - type: cloud +# subtypes: +# - service: s3 +# provider: aws +package defsec.test.aws1 +deny[res] { + bucket := input.aws.s3.buckets[_] + bucket.name.value == "" + res := result.new("The name of the bucket must not be empty", bucket) +}`, + }) + + debugLog := bytes.NewBuffer([]byte{}) + + scanner := New( + options.ScannerWithDebug(debugLog), + options.ScannerWithPolicyFilesystem(fs), + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithEmbeddedPolicies(false), + options.ScannerWithEmbeddedLibraries(false), + options.ScannerWithRegoOnly(true), + ScannerWithAllDirectories(true), + ScannerWithSkipCachedModules(true), + ) + + 
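+ // resolving the git-pinned remote module requires network access, hence the testing.Short() guard at the top of the test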
results, err := scanner.ScanFS(context.TODO(), fs, ".") + require.NoError(t, err) + + assert.Len(t, results.GetPassed(), 1) + + if t.Failed() { + fmt.Printf("Debug logs:\n%s\n", debugLog.String()) + } +} diff --git a/pkg/scanners/terraform/scanner_test.go b/pkg/scanners/terraform/scanner_test.go new file mode 100644 index 000000000000..a98d6eafc106 --- /dev/null +++ b/pkg/scanners/terraform/scanner_test.go @@ -0,0 +1,1360 @@ +package terraform + +import ( + "bytes" + "context" + "fmt" + "strconv" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/rules" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/state" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/test/testutil" +) + +var alwaysFailRule = scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc", + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredTypes: []string{}, + RequiredLabels: []string{}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + results.Add("oh no", resourceBlock) + return + }, + }, + }, +} + +const emptyBucketRule = ` +# METADATA +# schemas: +# - input: schema.input +# custom: +# avd_id: AVD-AWS-0001 +# input: +# selector: +# - type: cloud +# subtypes: +# - service: s3 +# provider: aws +package defsec.test.aws1 +deny[res] { + bucket := input.aws.s3.buckets[_] + bucket.name.value == "" + res := result.new("The name of the bucket must not be empty", bucket) +} +` + +func scanWithOptions(t *testing.T, code string, opt ...options.ScannerOption) scan.Results { + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": code, + }) + + scanner := New(opt...) + results, _, err := scanner.ScanFSWithMetrics(context.TODO(), fs, "project") + require.NoError(t, err) + return results +} + +func Test_OptionWithAlternativeIDProvider(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + options := []options.ScannerOption{ + ScannerWithAlternativeIDProvider(func(s string) []string { + return []string{"something", "altid", "blah"} + }), + } + results := scanWithOptions(t, ` +//tfsec:ignore:altid +resource "something" "else" {} +`, options...) + require.Len(t, results.GetFailed(), 0) + require.Len(t, results.GetIgnored(), 1) + +} + +func Test_TrivyOptionWithAlternativeIDProvider(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + options := []options.ScannerOption{ + ScannerWithAlternativeIDProvider(func(s string) []string { + return []string{"something", "altid", "blah"} + }), + } + results := scanWithOptions(t, ` +//trivy:ignore:altid +resource "something" "else" {} +`, options...) + require.Len(t, results.GetFailed(), 0) + require.Len(t, results.GetIgnored(), 1) + +} + +func Test_OptionWithSeverityOverrides(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + options := []options.ScannerOption{ + ScannerWithSeverityOverrides(map[string]string{"aws-service-abc": "LOW"}), + } + results := scanWithOptions(t, ` +resource "something" "else" {} +`, options...) 
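+ // alwaysFailRule is registered as HIGH severity; the override above should downgrade aws-service-abc to LOW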
+ require.Len(t, results.GetFailed(), 1) + assert.Equal(t, severity.Low, results.GetFailed()[0].Severity()) +} + +func Test_OptionWithDebugWriter(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + buffer := bytes.NewBuffer([]byte{}) + + scannerOpts := []options.ScannerOption{ + options.ScannerWithDebug(buffer), + } + _ = scanWithOptions(t, ` +resource "something" "else" {} +`, scannerOpts...) + require.Greater(t, buffer.Len(), 0) +} + +func Test_OptionNoIgnores(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + scannerOpts := []options.ScannerOption{ + ScannerWithNoIgnores(), + } + results := scanWithOptions(t, ` +//tfsec:ignore:aws-service-abc +resource "something" "else" {} +`, scannerOpts...) + require.Len(t, results.GetFailed(), 1) + require.Len(t, results.GetIgnored(), 0) + +} + +func Test_OptionExcludeRules(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + options := []options.ScannerOption{ + ScannerWithExcludedRules([]string{"aws-service-abc"}), + } + results := scanWithOptions(t, ` +resource "something" "else" {} +`, options...) + require.Len(t, results.GetFailed(), 0) + require.Len(t, results.GetIgnored(), 1) + +} + +func Test_OptionIncludeRules(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + scannerOpts := []options.ScannerOption{ + ScannerWithIncludedRules([]string{"this-only"}), + } + results := scanWithOptions(t, ` +resource "something" "else" {} +`, scannerOpts...) + require.Len(t, results.GetFailed(), 0) + require.Len(t, results.GetIgnored(), 1) + +} + +func Test_OptionWithMinimumSeverity(t *testing.T) { + reg := rules.Register(alwaysFailRule) + defer rules.Deregister(reg) + + scannerOpts := []options.ScannerOption{ + ScannerWithMinimumSeverity(severity.Critical), + } + results := scanWithOptions(t, ` +resource "something" "else" {} +`, scannerOpts...) 
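+ // the HIGH severity finding falls below the CRITICAL floor, so it lands in the ignored set rather than the failed set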
+ require.Len(t, results.GetFailed(), 0) + require.Len(t, results.GetIgnored(), 1) + +} + +func Test_OptionWithPolicyDirs(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "/code/main.tf": ` +resource "aws_s3_bucket" "my-bucket" { + bucket = "evil" +} +`, + "/trules/test.rego": ` +package defsec.abcdefg + +__rego_metadata__ := { + "id": "TEST123", + "avd_id": "AVD-TEST-0123", + "title": "Buckets should not be evil", + "short_code": "no-evil-buckets", + "severity": "CRITICAL", + "type": "DefSec Security Check", + "description": "You should not allow buckets to be evil", + "recommended_actions": "Use a good bucket instead", + "url": "https://google.com/search?q=is+my+bucket+evil", +} + +__rego_input__ := { + "combine": false, + "selector": [{"type": "defsec", "subtypes": [{"service": "s3", "provider": "aws"}]}], +} + +deny[cause] { + bucket := input.aws.s3.buckets[_] + bucket.name.value == "evil" + cause := bucket.name +} +`, + }) + + debugLog := bytes.NewBuffer([]byte{}) + scanner := New( + options.ScannerWithDebug(debugLog), + options.ScannerWithPolicyFilesystem(fs), + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithRegoOnly(true), + ) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + require.NoError(t, err) + + require.Len(t, results.GetFailed(), 1) + + failure := results.GetFailed()[0] + + assert.Equal(t, "AVD-TEST-0123", failure.Rule().AVDID) + + actualCode, err := failure.GetCode() + require.NoError(t, err) + for i := range actualCode.Lines { + actualCode.Lines[i].Highlighted = "" + } + assert.Equal(t, []scan.Line{ + { + Number: 2, + Content: "resource \"aws_s3_bucket\" \"my-bucket\" {", + IsCause: false, + FirstCause: false, + LastCause: false, + Annotation: "", + }, + { + Number: 3, + Content: "\tbucket = \"evil\"", + IsCause: true, + FirstCause: true, + LastCause: true, + Annotation: "", + }, + { + Number: 4, + Content: "}", + IsCause: false, + FirstCause: false, + LastCause: false, + Annotation: "", + }, + }, actualCode.Lines) + + if t.Failed() { + fmt.Printf("Debug logs:\n%s\n", debugLog.String()) + } + +} + +func Test_OptionWithPolicyNamespaces(t *testing.T) { + + tests := []struct { + includedNamespaces []string + policyNamespace string + wantFailure bool + }{ + { + includedNamespaces: nil, + policyNamespace: "blah", + wantFailure: false, + }, + { + includedNamespaces: nil, + policyNamespace: "appshield.something", + wantFailure: true, + }, + { + includedNamespaces: nil, + policyNamespace: "defsec.blah", + wantFailure: true, + }, + { + includedNamespaces: []string{"user"}, + policyNamespace: "users", + wantFailure: false, + }, + { + includedNamespaces: []string{"users"}, + policyNamespace: "something.users", + wantFailure: false, + }, + { + includedNamespaces: []string{"users"}, + policyNamespace: "users", + wantFailure: true, + }, + { + includedNamespaces: []string{"users"}, + policyNamespace: "users.my_rule", + wantFailure: true, + }, + { + includedNamespaces: []string{"a", "users", "b"}, + policyNamespace: "users", + wantFailure: true, + }, + { + includedNamespaces: []string{"user"}, + policyNamespace: "defsec", + wantFailure: true, + }, + } + + for i, test := range tests { + + t.Run(strconv.Itoa(i), func(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "/code/main.tf": ` +resource "aws_s3_bucket" "my-bucket" { + bucket = "evil" +} +`, + "/trules/test.rego": fmt.Sprintf(` +# METADATA +# custom: +# input: +# selector: +# - type: cloud +# subtypes: +# - service: s3 +# provider: aws +package %s + +deny[cause] { 
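+# a bucket only produces a deny result when its name is literally "evil"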
+bucket := input.aws.s3.buckets[_] +bucket.name.value == "evil" +cause := bucket.name +} + + `, test.policyNamespace), + }) + + scanner := New( + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithPolicyNamespaces(test.includedNamespaces...), + ) + + results, _, err := scanner.ScanFSWithMetrics(context.TODO(), fs, "code") + require.NoError(t, err) + + var found bool + for _, result := range results.GetFailed() { + if result.RegoNamespace() == test.policyNamespace && result.RegoRule() == "deny" { + found = true + break + } + } + assert.Equal(t, test.wantFailure, found) + + }) + } + +} + +func Test_OptionWithStateFunc(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "code/main.tf": ` +resource "aws_s3_bucket" "my-bucket" { + bucket = "evil" +} +`, + }) + + var actual state.State + + debugLog := bytes.NewBuffer([]byte{}) + scanner := New( + options.ScannerWithDebug(debugLog), + ScannerWithStateFunc(func(s *state.State) { + require.NotNil(t, s) + actual = *s + }), + ) + + _, _, err := scanner.ScanFSWithMetrics(context.TODO(), fs, "code") + require.NoError(t, err) + + assert.Equal(t, 1, len(actual.AWS.S3.Buckets)) + + if t.Failed() { + fmt.Printf("Debug logs:\n%s\n", debugLog.String()) + } + +} + +func Test_OptionWithRegoOnly(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "/code/main.tf": ` +resource "aws_s3_bucket" "my-bucket" { + bucket = "evil" +} +`, + "/trules/test.rego": ` +package defsec.abcdefg + +__rego_metadata__ := { + "id": "TEST123", + "avd_id": "AVD-TEST-0123", + "title": "Buckets should not be evil", + "short_code": "no-evil-buckets", + "severity": "CRITICAL", + "type": "DefSec Security Check", + "description": "You should not allow buckets to be evil", + "recommended_actions": "Use a good bucket instead", + "url": "https://google.com/search?q=is+my+bucket+evil", +} + +__rego_input__ := { + "combine": false, + "selector": [{"type": "defsec", "subtypes": [{"service": "s3", "provider": "aws"}]}], +} + +deny[cause] { + bucket := input.aws.s3.buckets[_] + bucket.name.value == "evil" + cause := bucket.name +} +`, + }) + + debugLog := bytes.NewBuffer([]byte{}) + scanner := New( + options.ScannerWithDebug(debugLog), + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithRegoOnly(true), + ) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + require.NoError(t, err) + + require.Len(t, results.GetFailed(), 1) + assert.Equal(t, "AVD-TEST-0123", results[0].Rule().AVDID) + + if t.Failed() { + fmt.Printf("Debug logs:\n%s\n", debugLog.String()) + } +} + +func Test_OptionWithRegoOnly_CodeHighlighting(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "/code/main.tf": ` +resource "aws_s3_bucket" "my-bucket" { + bucket = "evil" +} +`, + "/trules/test.rego": ` +package defsec.abcdefg + +__rego_metadata__ := { + "id": "TEST123", + "avd_id": "AVD-TEST-0123", + "title": "Buckets should not be evil", + "short_code": "no-evil-buckets", + "severity": "CRITICAL", + "type": "DefSec Security Check", + "description": "You should not allow buckets to be evil", + "recommended_actions": "Use a good bucket instead", + "url": "https://google.com/search?q=is+my+bucket+evil", +} + +__rego_input__ := { + "combine": false, + "selector": [{"type": "defsec", "subtypes": [{"service": "s3", "provider": "aws"}]}], +} + +deny[res] { + bucket := input.aws.s3.buckets[_] + bucket.name.value == "evil" + res := result.new("oh no", bucket.name) +} +`, + }) + + debugLog := bytes.NewBuffer([]byte{}) + scanner := New( + 
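+ // embedded libraries are loaded here as well; the test additionally asserts that the result carries its source filesystem for code highlighting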
options.ScannerWithDebug(debugLog), + options.ScannerWithPolicyDirs("trules"), + options.ScannerWithRegoOnly(true), + options.ScannerWithEmbeddedLibraries(true), + ) + + results, err := scanner.ScanFS(context.TODO(), fs, "code") + require.NoError(t, err) + + require.Len(t, results.GetFailed(), 1) + assert.Equal(t, "AVD-TEST-0123", results[0].Rule().AVDID) + assert.NotNil(t, results[0].Metadata().Range().GetFS()) + + if t.Failed() { + fmt.Printf("Debug logs:\n%s\n", debugLog.String()) + } +} + +func Test_OptionWithSkipDownloaded(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "test/main.tf": ` +module "s3-bucket" { + source = "terraform-aws-modules/s3-bucket/aws" + version = "3.14.0" + bucket = mybucket +} +`, + // creating our own rule for the reliability of the test + "/trules/test.rego": ` +package defsec.abcdefg + +__rego_input__ := { + "combine": false, + "selector": [{"type": "defsec", "subtypes": [{"service": "s3", "provider": "aws"}]}], +} + +deny[cause] { + bucket := input.aws.s3.buckets[_] + bucket.name.value == "mybucket" + cause := bucket.name +}`, + }) + + scanner := New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + results, err := scanner.ScanFS(context.TODO(), fs, "test") + assert.NoError(t, err) + assert.Greater(t, len(results.GetFailed()), 0) + + scanner = New(ScannerWithSkipDownloaded(true)) + results, err = scanner.ScanFS(context.TODO(), fs, "test") + assert.NoError(t, err) + assert.Len(t, results.GetFailed(), 0) + +} + +func Test_IAMPolicyRego(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "/code/main.tf": ` +resource "aws_sqs_queue_policy" "bad_example" { + queue_url = aws_sqs_queue.q.id + + policy = < 0 { + if t.Variables()[0].RootName() == "data" { + // we can't resolve data lookups at this time, so make unresolvable + return append(results, defsecTypes.StringUnresolvable(a.metadata)) + } + } + subVal, err := t.Value(ctx) + if err != nil { + return append(results, defsecTypes.StringUnresolvable(a.metadata)) + } + return a.valueToStrings(subVal) + default: + val, err := t.Value(a.ctx.Inner()) + if err != nil { + return append(results, defsecTypes.StringUnresolvable(a.metadata)) + } + results = a.valueToStrings(val) + } + return results +} + +func (a *Attribute) valueToStrings(value cty.Value) (results []defsecTypes.StringValue) { + defer func() { + if err := recover(); err != nil { + results = []defsecTypes.StringValue{defsecTypes.StringUnresolvable(a.metadata)} + } + }() + if value.IsNull() { + return []defsecTypes.StringValue{defsecTypes.StringUnresolvable(a.metadata)} + } + if !value.IsKnown() { + return []defsecTypes.StringValue{defsecTypes.StringUnresolvable(a.metadata)} + } + if value.Type().IsListType() || value.Type().IsTupleType() || value.Type().IsSetType() { + for _, val := range value.AsValueSlice() { + results = append(results, a.valueToString(val)) + } + } + return results +} + +func (a *Attribute) valueToString(value cty.Value) (result defsecTypes.StringValue) { + defer func() { + if err := recover(); err != nil { + result = defsecTypes.StringUnresolvable(a.metadata) + } + }() + + result = defsecTypes.StringUnresolvable(a.metadata) + + if value.IsNull() || !value.IsKnown() { + return result + } + + switch value.Type() { + case cty.String: + return defsecTypes.String(value.AsString(), a.metadata) + default: + return result + } +} + +func (a *Attribute) listContains(val cty.Value, stringToLookFor string, ignoreCase bool) bool { + if a == nil { + return false + } + + valueSlice 
:= val.AsValueSlice() + for _, value := range valueSlice { + if value.IsNull() || !value.IsKnown() { + // there is nothing we can do with this value + continue + } + stringToTest := value + if value.Type().IsObjectType() || value.Type().IsMapType() { + valueMap := value.AsValueMap() + stringToTest = valueMap["key"] + } + if value.Type().HasDynamicTypes() { + for _, extracted := range a.extractListValues() { + if extracted == stringToLookFor { + return true + } + } + return false + } + if !value.IsKnown() { + continue + } + if ignoreCase && strings.EqualFold(stringToTest.AsString(), stringToLookFor) { + return true + } + if stringToTest.AsString() == stringToLookFor { + return true + } + } + return false +} + +func (a *Attribute) extractListValues() []string { + var values []string + if a.hclAttribute == nil || a.hclAttribute.Expr == nil || a.hclAttribute.Expr.Variables() == nil { + return values + } + for _, v := range a.hclAttribute.Expr.Variables() { + values = append(values, v.RootName()) + } + return values +} + +func (a *Attribute) mapContains(checkValue interface{}, val cty.Value) bool { + if a == nil { + return false + } + valueMap := val.AsValueMap() + switch t := checkValue.(type) { + case map[interface{}]interface{}: + for k, v := range t { + for key, value := range valueMap { + rawValue := getRawValue(value) + if key == k && evaluate(v, rawValue) { + return true + } + } + } + return false + case map[string]interface{}: + for k, v := range t { + for key, value := range valueMap { + rawValue := getRawValue(value) + if key == k && evaluate(v, rawValue) { + return true + } + } + } + return false + default: + for key := range valueMap { + if key == checkValue { + return true + } + } + return false + } +} + +func (a *Attribute) NotContains(checkValue interface{}, equalityOptions ...EqualityOption) bool { + return !a.Contains(checkValue, equalityOptions...) 
+} + +func (a *Attribute) Contains(checkValue interface{}, equalityOptions ...EqualityOption) bool { + if a == nil { + return false + } + ignoreCase := false + for _, option := range equalityOptions { + if option == IgnoreCase { + ignoreCase = true + } + } + val := a.Value() + if val.IsNull() { + return false + } + + if val.Type().IsObjectType() || val.Type().IsMapType() { + return a.mapContains(checkValue, val) + } + + stringToLookFor := fmt.Sprintf("%v", checkValue) + + if val.Type().IsListType() || val.Type().IsTupleType() { + return a.listContains(val, stringToLookFor, ignoreCase) + } + + if ignoreCase && containsIgnoreCase(val.AsString(), stringToLookFor) { + return true + } + + return strings.Contains(val.AsString(), stringToLookFor) +} + +func (a *Attribute) OnlyContains(checkValue interface{}) bool { + if a == nil { + return false + } + val := a.Value() + if val.IsNull() { + return false + } + + checkSlice, ok := checkValue.([]interface{}) + if !ok { + return false + } + + if val.Type().IsListType() || val.Type().IsTupleType() { + for _, value := range val.AsValueSlice() { + found := false + for _, cVal := range checkSlice { + switch t := cVal.(type) { + case string: + if t == value.AsString() { + found = true + break + } + case bool: + if t == value.True() { + found = true + break + } + case int, int8, int16, int32, int64: + // t keeps its interface type in a multi-type case, so normalise to int64 before comparing + i, _ := value.AsBigFloat().Int64() + if reflect.ValueOf(t).Int() == i { + found = true + break + } + case float32, float64: + f, _ := value.AsBigFloat().Float64() + if reflect.ValueOf(t).Float() == f { + found = true + break + } + } + + } + if !found { + return false + } + } + return true + } + + return false +} + +func containsIgnoreCase(left, substring string) bool { + return strings.Contains(strings.ToLower(left), strings.ToLower(substring)) +} + +func (a *Attribute) StartsWith(prefix interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.String { + return strings.HasPrefix(a.Value().AsString(), fmt.Sprintf("%v", prefix)) + } + return false +} + +func (a *Attribute) EndsWith(suffix interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.String { + return strings.HasSuffix(a.Value().AsString(), fmt.Sprintf("%v", suffix)) + } + return false +} + +type EqualityOption int + +const ( + IgnoreCase EqualityOption = iota +) + +func (a *Attribute) Equals(checkValue interface{}, equalityOptions ...EqualityOption) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.String { + for _, option := range equalityOptions { + if option == IgnoreCase { + return strings.EqualFold(a.Value().AsString(), fmt.Sprintf("%v", checkValue)) + } + } + // case-sensitive by default; callers must pass IgnoreCase explicitly for a folded comparison + return a.Value().AsString() == fmt.Sprintf("%v", checkValue) + } + if a.Value().Type() == cty.Bool { + return a.Value().True() == checkValue + } + if a.Value().Type() == cty.Number { + checkNumber, err := gocty.ToCtyValue(checkValue, cty.Number) + if err != nil { + return false + } + return a.Value().RawEquals(checkNumber) + } + + return false +} + +func (a *Attribute) NotEqual(checkValue interface{}, equalityOptions ...EqualityOption) bool { + return !a.Equals(checkValue, equalityOptions...) +} + +func (a *Attribute) RegexMatches(re regexp.Regexp) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.String { + match := re.MatchString(a.Value().AsString()) + return match + } + return false +} + +func (a *Attribute) IsNotAny(options ...interface{}) bool { + return !a.IsAny(options...) 
+} + +func (a *Attribute) IsAny(options ...interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.String { + value := a.Value().AsString() + for _, option := range options { + if option == value { + return true + } + } + } + if a.Value().Type() == cty.Number { + for _, option := range options { + checkValue, err := gocty.ToCtyValue(option, cty.Number) + if err != nil { + return false + } + if a.Value().RawEquals(checkValue) { + return true + } + } + } + return false +} + +func (a *Attribute) IsNone(options ...interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.String { + for _, option := range options { + if option == a.Value().AsString() { + return false + } + } + } + if a.Value().Type() == cty.Number { + for _, option := range options { + checkValue, err := gocty.ToCtyValue(option, cty.Number) + if err != nil { + return false + } + if a.Value().RawEquals(checkValue) { + return false + } + + } + } + + return true +} + +func (a *Attribute) IsTrue() bool { + if a == nil { + return false + } + switch a.Value().Type() { + case cty.Bool: + return a.Value().True() + case cty.String: + val := a.Value().AsString() + val = strings.Trim(val, "\"") + return strings.ToLower(val) == "true" + case cty.Number: + val := a.Value().AsBigFloat() + f, _ := val.Float64() + return f > 0 + } + return false +} + +func (a *Attribute) IsFalse() bool { + if a == nil { + return false + } + switch a.Value().Type() { + case cty.Bool: + return a.Value().False() + case cty.String: + val := a.Value().AsString() + val = strings.Trim(val, "\"") + return strings.ToLower(val) == "false" + case cty.Number: + val := a.Value().AsBigFloat() + f, _ := val.Float64() + return f == 0 + } + return false +} + +func (a *Attribute) IsEmpty() bool { + if a == nil { + return false + } + if a.Value().Type() == cty.String { + return len(a.Value().AsString()) == 0 + } + if a.Type().IsListType() || a.Type().IsTupleType() { + return len(a.Value().AsValueSlice()) == 0 + } + if a.Type().IsMapType() || a.Type().IsObjectType() { + return len(a.Value().AsValueMap()) == 0 + } + if a.Value().Type() == cty.Number { + // a number can't ever be empty + return false + } + if a.Value().IsNull() { + return a.isNullAttributeEmpty() + } + return true +} + +func (a *Attribute) IsNotEmpty() bool { + return !a.IsEmpty() +} + +func (a *Attribute) isNullAttributeEmpty() bool { + if a == nil { + return false + } + switch t := a.hclAttribute.Expr.(type) { + case *hclsyntax.FunctionCallExpr, *hclsyntax.ScopeTraversalExpr, + *hclsyntax.ConditionalExpr, *hclsyntax.LiteralValueExpr: + return false + case *hclsyntax.TemplateExpr: + // walk the parts of the expression to ensure that it has a literal value + for _, p := range t.Parts { + switch pt := p.(type) { + case *hclsyntax.LiteralValueExpr: + if pt != nil && !pt.Val.IsNull() { + return false + } + case *hclsyntax.ScopeTraversalExpr: + return false + } + } + } + return true +} + +func (a *Attribute) MapValue(mapKey string) cty.Value { + if a == nil { + return cty.NilVal + } + if a.Type().IsObjectType() || a.Type().IsMapType() { + attrMap := a.Value().AsValueMap() + for key, value := range attrMap { + if key == mapKey { + return value + } + } + } + return cty.NilVal +} + +func (a *Attribute) AsMapValue() defsecTypes.MapValue { + if a.IsNil() || a.IsNotResolvable() || !a.IsMapOrObject() { + return defsecTypes.MapValue{} + } + + values := make(map[string]string) + _ = a.Each(func(key, val cty.Value) { + if key.Type() == cty.String && val.Type() == cty.String { 
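+ // only flat string-to-string pairs survive the conversion; non-string keys or values are silently skipped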
+ values[key.AsString()] = val.AsString() + } + }) + + return defsecTypes.Map(values, a.GetMetadata()) +} + +func (a *Attribute) LessThan(checkValue interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.Number { + checkNumber, err := gocty.ToCtyValue(checkValue, cty.Number) + if err != nil { + return false + } + + return a.Value().LessThan(checkNumber).True() + } + return false +} + +func (a *Attribute) LessThanOrEqualTo(checkValue interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.Number { + checkNumber, err := gocty.ToCtyValue(checkValue, cty.Number) + if err != nil { + return false + } + + return a.Value().LessThanOrEqualTo(checkNumber).True() + } + return false +} + +func (a *Attribute) GreaterThan(checkValue interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.Number { + checkNumber, err := gocty.ToCtyValue(checkValue, cty.Number) + if err != nil { + return false + } + + return a.Value().GreaterThan(checkNumber).True() + } + return false +} + +func (a *Attribute) GreaterThanOrEqualTo(checkValue interface{}) bool { + if a == nil { + return false + } + if a.Value().Type() == cty.Number { + checkNumber, err := gocty.ToCtyValue(checkValue, cty.Number) + if err != nil { + return false + } + + return a.Value().GreaterThanOrEqualTo(checkNumber).True() + } + return false +} + +func (a *Attribute) IsDataBlockReference() bool { + if a == nil { + return false + } + switch t := a.hclAttribute.Expr.(type) { + case *hclsyntax.ScopeTraversalExpr: + split := t.Traversal.SimpleSplit() + return split.Abs.RootName() == "data" + } + return false +} + +func createDotReferenceFromTraversal(parentRef string, traversals ...hcl.Traversal) (*Reference, error) { + var refParts []string + var key cty.Value + for _, x := range traversals { + for _, p := range x { + switch part := p.(type) { + case hcl.TraverseRoot: + refParts = append(refParts, part.Name) + case hcl.TraverseAttr: + refParts = append(refParts, part.Name) + case hcl.TraverseIndex: + key = part.Key + } + } + } + ref, err := newReference(refParts, parentRef) + if err != nil { + return nil, err + } + ref.SetKey(key) + return ref, nil +} + +func (a *Attribute) ReferencesBlock(b *Block) bool { + if a == nil { + return false + } + for _, ref := range a.AllReferences() { + if ref.RefersTo(b.reference) { + return true + } + } + return false +} + +func (a *Attribute) AllReferences(blocks ...*Block) []*Reference { + if a == nil { + return nil + } + refs := a.extractReferences() + for _, block := range blocks { + for _, ref := range refs { + if ref.TypeLabel() == "each" && block.HasChild("for_each") { + refs = append(refs, block.GetAttribute("for_each").AllReferences()...) 
+ } + } + } + return refs +} + +// nolint +func (a *Attribute) referencesFromExpression(expression hcl.Expression) []*Reference { + var refs []*Reference + switch t := expression.(type) { + case *hclsyntax.ConditionalExpr: + if ref, err := createDotReferenceFromTraversal(a.module, t.TrueResult.Variables()...); err == nil { + refs = append(refs, ref) + } + if ref, err := createDotReferenceFromTraversal(a.module, t.FalseResult.Variables()...); err == nil { + refs = append(refs, ref) + } + if ref, err := createDotReferenceFromTraversal(a.module, t.Condition.Variables()...); err == nil { + refs = append(refs, ref) + } + case *hclsyntax.ScopeTraversalExpr: + if ref, err := createDotReferenceFromTraversal(a.module, t.Variables()...); err == nil { + refs = append(refs, ref) + } + case *hclsyntax.TemplateWrapExpr: + refs = a.referencesFromExpression(t.Wrapped) + case *hclsyntax.TemplateExpr: + for _, part := range t.Parts { + ref, err := createDotReferenceFromTraversal(a.module, part.Variables()...) + if err != nil { + continue + } + refs = append(refs, ref) + } + case *hclsyntax.TupleConsExpr: + for _, v := range t.Variables() { + if ref, err := createDotReferenceFromTraversal(a.module, v); err == nil { + refs = append(refs, ref) + } + } + case *hclsyntax.RelativeTraversalExpr: + switch s := t.Source.(type) { + case *hclsyntax.IndexExpr: + if collectionRef, err := createDotReferenceFromTraversal(a.module, s.Collection.Variables()...); err == nil { + key, _ := s.Key.Value(a.ctx.Inner()) + collectionRef.SetKey(key) + refs = append(refs, collectionRef) + } + default: + if ref, err := createDotReferenceFromTraversal(a.module, t.Source.Variables()...); err == nil { + refs = append(refs, ref) + } + } + default: + if reflect.TypeOf(expression).String() == "*json.expression" { + if ref, err := createDotReferenceFromTraversal(a.module, expression.Variables()...); err == nil { + refs = append(refs, ref) + } + } + } + return refs +} + +func (a *Attribute) extractReferences() []*Reference { + if a == nil { + return nil + } + return a.referencesFromExpression(a.hclAttribute.Expr) +} + +func (a *Attribute) IsResourceBlockReference(resourceType string) bool { + if a == nil { + return false + } + switch t := a.hclAttribute.Expr.(type) { + case *hclsyntax.ScopeTraversalExpr: + split := t.Traversal.SimpleSplit() + return split.Abs.RootName() == resourceType + } + return false +} + +func (a *Attribute) References(r Reference) bool { + if a == nil { + return false + } + for _, ref := range a.AllReferences() { + if ref.RefersTo(r) { + return true + } + } + return false +} + +func getRawValue(value cty.Value) interface{} { + if value.IsNull() || !value.IsKnown() { + return value + } + + typeName := value.Type().FriendlyName() + + switch typeName { + case "string": + return value.AsString() + case "number": + return value.AsBigFloat() + case "bool": + return value.True() + } + + return value +} + +func (a *Attribute) IsNil() bool { + return a == nil +} + +func (a *Attribute) IsNotNil() bool { + return !a.IsNil() +} + +func (a *Attribute) HasIntersect(checkValues ...interface{}) bool { + if !a.Type().IsListType() && !a.Type().IsTupleType() { + return false + } + + for _, item := range checkValues { + if a.Contains(item) { + return true + } + } + return false + +} + +func (a *Attribute) AsNumber() float64 { + if a.Value().Type() == cty.Number { + v, _ := a.Value().AsBigFloat().Float64() + return v + } + if a.Value().Type() == cty.String { + v, _ := strconv.ParseFloat(a.Value().AsString(), 64) + return v + } + 
panic("Attribute is not a number") +} diff --git a/pkg/terraform/block.go b/pkg/terraform/block.go new file mode 100644 index 000000000000..cffa82b5bf10 --- /dev/null +++ b/pkg/terraform/block.go @@ -0,0 +1,459 @@ +package terraform + +import ( + "fmt" + "io/fs" + "strings" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/aquasecurity/trivy/pkg/terraform/context" + + "github.com/google/uuid" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +type Block struct { + id string + hclBlock *hcl.Block + context *context.Context + moduleBlock *Block + parentBlock *Block + expanded bool + cloneIndex int + childBlocks []*Block + attributes []*Attribute + metadata defsecTypes.MisconfigMetadata + moduleSource string + moduleFS fs.FS + reference Reference +} + +func NewBlock(hclBlock *hcl.Block, ctx *context.Context, moduleBlock *Block, parentBlock *Block, moduleSource string, + moduleFS fs.FS, index ...cty.Value) *Block { + if ctx == nil { + ctx = context.NewContext(&hcl.EvalContext{}, nil) + } + + var r hcl.Range + switch body := hclBlock.Body.(type) { + case *hclsyntax.Body: + r = body.SrcRange + default: + r = hclBlock.DefRange + r.End = hclBlock.Body.MissingItemRange().End + } + moduleName := "root" + if moduleBlock != nil { + moduleName = moduleBlock.FullName() + } + rng := defsecTypes.NewRange( + r.Filename, + r.Start.Line, + r.End.Line, + moduleSource, + moduleFS, + ) + + var parts []string + // if there are no labels then use the block type + // this is for the case where "special" keywords like "resource" are used + // as normal block names in top level blocks - see issue tfsec#1528 for an example + if hclBlock.Type != "resource" || len(hclBlock.Labels) == 0 { + parts = append(parts, hclBlock.Type) + } + parts = append(parts, hclBlock.Labels...) 
+ + var parent string + if moduleBlock != nil { + parent = moduleBlock.FullName() + } + ref, _ := newReference(parts, parent) + if len(index) > 0 { + key := index[0] + ref.SetKey(key) + } + + metadata := defsecTypes.NewMisconfigMetadata(rng, ref.String()) + + if parentBlock != nil { + metadata = metadata.WithParent(parentBlock.metadata) + } else if moduleBlock != nil { + metadata = metadata.WithParent(moduleBlock.GetMetadata()) + } + + b := Block{ + id: uuid.New().String(), + context: ctx, + hclBlock: hclBlock, + moduleBlock: moduleBlock, + moduleSource: moduleSource, + moduleFS: moduleFS, + parentBlock: parentBlock, + metadata: metadata, + reference: *ref, + } + + var children Blocks + switch body := hclBlock.Body.(type) { + case *hclsyntax.Body: + for _, b2 := range body.Blocks { + children = append(children, NewBlock(b2.AsHCLBlock(), ctx, moduleBlock, &b, moduleSource, moduleFS)) + } + default: + content, _, diag := hclBlock.Body.PartialContent(Schema) + if diag == nil { + for _, hb := range content.Blocks { + children = append(children, NewBlock(hb, ctx, moduleBlock, &b, moduleSource, moduleFS)) + } + } + } + + b.childBlocks = children + + for _, attr := range b.createAttributes() { + b.attributes = append(b.attributes, NewAttribute(attr, ctx, moduleName, metadata, *ref, moduleSource, moduleFS)) + } + + return &b +} + +func (b *Block) ID() string { + return b.id +} + +func (b *Block) Reference() Reference { + return b.reference +} + +func (b *Block) GetMetadata() defsecTypes.MisconfigMetadata { + return b.metadata +} + +func (b *Block) GetRawValue() interface{} { + return nil +} + +func (b *Block) InjectBlock(block *Block, name string) { + block.hclBlock.Labels = []string{} + block.hclBlock.Type = name + for attrName, attr := range block.Attributes() { + b.context.Root().SetByDot(attr.Value(), fmt.Sprintf("%s.%s.%s", b.reference.String(), name, attrName)) + } + b.childBlocks = append(b.childBlocks, block) +} + +func (b *Block) markCountExpanded() { + b.expanded = true +} + +func (b *Block) IsCountExpanded() bool { + return b.expanded +} + +func (b *Block) Clone(index cty.Value) *Block { + var childCtx *context.Context + if b.context != nil { + childCtx = b.context.NewChild() + } else { + childCtx = context.NewContext(&hcl.EvalContext{}, nil) + } + + cloneHCL := *b.hclBlock + + clone := NewBlock(&cloneHCL, childCtx, b.moduleBlock, b.parentBlock, b.moduleSource, b.moduleFS, index) + if len(clone.hclBlock.Labels) > 0 { + position := len(clone.hclBlock.Labels) - 1 + labels := make([]string, len(clone.hclBlock.Labels)) + for i := 0; i < len(labels); i++ { + labels[i] = clone.hclBlock.Labels[i] + } + if index.IsKnown() && !index.IsNull() { + switch index.Type() { + case cty.Number: + f, _ := index.AsBigFloat().Float64() + labels[position] = fmt.Sprintf("%s[%d]", clone.hclBlock.Labels[position], int(f)) + case cty.String: + labels[position] = fmt.Sprintf("%s[%q]", clone.hclBlock.Labels[position], index.AsString()) + default: + labels[position] = fmt.Sprintf("%s[%#v]", clone.hclBlock.Labels[position], index) + } + } else { + labels[position] = fmt.Sprintf("%s[%d]", clone.hclBlock.Labels[position], b.cloneIndex) + } + clone.hclBlock.Labels = labels + } + indexVal, _ := gocty.ToCtyValue(index, cty.Number) + clone.context.SetByDot(indexVal, "count.index") + clone.markCountExpanded() + b.cloneIndex++ + return clone +} + +func (b *Block) Context() *context.Context { + return b.context +} + +func (b *Block) OverrideContext(ctx *context.Context) { + b.context = ctx + for _, block := range 
b.childBlocks { + block.OverrideContext(ctx.NewChild()) + } + for _, attr := range b.attributes { + attr.ctx = ctx + } +} + +func (b *Block) Type() string { + return b.hclBlock.Type +} + +func (b *Block) Labels() []string { + return b.hclBlock.Labels +} + +func (b *Block) GetFirstMatchingBlock(names ...string) *Block { + var returnBlock *Block + for _, name := range names { + childBlock := b.GetBlock(name) + if childBlock.IsNotNil() { + return childBlock + } + } + return returnBlock +} + +func (b *Block) createAttributes() hcl.Attributes { + switch body := b.hclBlock.Body.(type) { + case *hclsyntax.Body: + attributes := make(hcl.Attributes) + for _, a := range body.Attributes { + attributes[a.Name] = a.AsHCLAttribute() + } + return attributes + default: + _, body, diag := b.hclBlock.Body.PartialContent(Schema) + if diag != nil { + return nil + } + attrs, diag := body.JustAttributes() + if diag != nil { + return nil + } + return attrs + } +} + +func (b *Block) GetBlock(name string) *Block { + var returnBlock *Block + if b == nil || b.hclBlock == nil { + return returnBlock + } + for _, child := range b.childBlocks { + if child.Type() == name { + return child + } + } + return returnBlock +} + +func (b *Block) AllBlocks() Blocks { + if b == nil || b.hclBlock == nil { + return nil + } + return b.childBlocks +} + +func (b *Block) GetBlocks(name string) Blocks { + if b == nil || b.hclBlock == nil { + return nil + } + var results []*Block + for _, child := range b.childBlocks { + if child.Type() == name { + results = append(results, child) + } + } + return results +} + +func (b *Block) GetAttributes() []*Attribute { + if b == nil { + return nil + } + return b.attributes +} + +func (b *Block) GetAttribute(name string) *Attribute { + if b == nil || b.hclBlock == nil { + return nil + } + for _, attr := range b.attributes { + if attr.Name() == name { + return attr + } + } + return nil +} + +func (b *Block) GetNestedAttribute(name string) (*Attribute, *Block) { + + parts := strings.Split(name, ".") + blocks := parts[:len(parts)-1] + attrName := parts[len(parts)-1] + + working := b + for _, subBlock := range blocks { + if checkBlock := working.GetBlock(subBlock); checkBlock == nil { + return nil, working + } else { + working = checkBlock + } + } + + if working != nil { + return working.GetAttribute(attrName), working + } + + return nil, b +} + +func MapNestedAttribute[T any](block *Block, path string, f func(attr *Attribute, parent *Block) T) T { + return f(block.GetNestedAttribute(path)) +} + +// LocalName is the name relative to the current module +func (b *Block) LocalName() string { + return b.reference.String() +} + +func (b *Block) FullName() string { + + if b.moduleBlock != nil { + return fmt.Sprintf( + "%s.%s", + b.moduleBlock.FullName(), + b.LocalName(), + ) + } + + return b.LocalName() +} + +func (b *Block) ModuleName() string { + name := strings.TrimPrefix(b.LocalName(), "module.") + if b.moduleBlock != nil { + module := strings.TrimPrefix(b.moduleBlock.FullName(), "module.") + name = fmt.Sprintf( + "%s.%s", + module, + name, + ) + } + var parts []string + for _, part := range strings.Split(name, ".") { + part = strings.Split(part, "[")[0] + parts = append(parts, part) + } + return strings.Join(parts, ".") +} + +func (b *Block) UniqueName() string { + if b.moduleBlock != nil { + return fmt.Sprintf("%s:%s:%s", b.FullName(), b.metadata.Range().GetFilename(), b.moduleBlock.UniqueName()) + } + return fmt.Sprintf("%s:%s", b.FullName(), b.metadata.Range().GetFilename()) +} + +func (b *Block) 
TypeLabel() string { + if len(b.Labels()) > 0 { + return b.Labels()[0] + } + return "" +} + +func (b *Block) NameLabel() string { + if len(b.Labels()) > 1 { + return b.Labels()[1] + } + return "" +} + +func (b *Block) HasChild(childElement string) bool { + return b.GetAttribute(childElement).IsNotNil() || b.GetBlock(childElement).IsNotNil() +} + +func (b *Block) MissingChild(childElement string) bool { + if b == nil { + return true + } + + return !b.HasChild(childElement) +} + +func (b *Block) MissingNestedChild(name string) bool { + if b == nil { + return true + } + + parts := strings.Split(name, ".") + blocks := parts[:len(parts)-1] + last := parts[len(parts)-1] + + working := b + for _, subBlock := range blocks { + if checkBlock := working.GetBlock(subBlock); checkBlock == nil { + return true + } else { + working = checkBlock + } + } + return !working.HasChild(last) + +} + +func (b *Block) InModule() bool { + if b == nil { + return false + } + return b.moduleBlock != nil +} + +func (b *Block) Label() string { + return strings.Join(b.hclBlock.Labels, ".") +} + +func (b *Block) IsResourceType(resourceType string) bool { + return b.TypeLabel() == resourceType +} + +func (b *Block) IsEmpty() bool { + return len(b.AllBlocks()) == 0 && len(b.GetAttributes()) == 0 +} + +func (b *Block) Attributes() map[string]*Attribute { + attributes := make(map[string]*Attribute) + for _, attr := range b.GetAttributes() { + attributes[attr.Name()] = attr + } + return attributes +} + +func (b *Block) Values() cty.Value { + values := createPresetValues(b) + for _, attribute := range b.GetAttributes() { + values[attribute.Name()] = attribute.Value() + } + return cty.ObjectVal(postProcessValues(b, values)) +} + +func (b *Block) IsNil() bool { + return b == nil +} + +func (b *Block) IsNotNil() bool { + return !b.IsNil() +} diff --git a/pkg/terraform/blocks.go b/pkg/terraform/blocks.go new file mode 100644 index 000000000000..311e83583d26 --- /dev/null +++ b/pkg/terraform/blocks.go @@ -0,0 +1,22 @@ +package terraform + +type Blocks []*Block + +func (blocks Blocks) OfType(t string) Blocks { + var results []*Block + for _, block := range blocks { + if block.Type() == t { + results = append(results, block) + } + } + return results +} + +func (blocks Blocks) WithID(id string) *Block { + for _, block := range blocks { + if block.ID() == id { + return block + } + } + return nil +} diff --git a/pkg/terraform/context/context.go b/pkg/terraform/context/context.go new file mode 100644 index 000000000000..496aad1cb920 --- /dev/null +++ b/pkg/terraform/context/context.go @@ -0,0 +1,134 @@ +package context + +import ( + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +type Context struct { + ctx *hcl.EvalContext + parent *Context +} + +func NewContext(ctx *hcl.EvalContext, parent *Context) *Context { + if ctx.Variables == nil { + ctx.Variables = make(map[string]cty.Value) + } + return &Context{ + ctx: ctx, + parent: parent, + } +} + +func (c *Context) NewChild() *Context { + return NewContext(c.ctx.NewChild(), c) +} + +func (c *Context) Parent() *Context { + return c.parent +} + +func (c *Context) Inner() *hcl.EvalContext { + return c.ctx +} + +func (c *Context) Root() *Context { + root := c + for root.Parent() != nil { + root = root.Parent() + } + return root +} + +func (c *Context) Get(parts ...string) cty.Value { + if len(parts) == 0 { + return cty.NilVal + } + src := c.ctx.Variables + for i, part := range parts { + if i == len(parts)-1 { + return src[part] + } + nextPart 
:= src[part] + if nextPart == cty.NilVal { + return cty.NilVal + } + src = nextPart.AsValueMap() + } + return cty.NilVal +} + +func (c *Context) GetByDot(path string) cty.Value { + return c.Get(strings.Split(path, ".")...) +} + +func (c *Context) SetByDot(val cty.Value, path string) { + c.Set(val, strings.Split(path, ".")...) +} + +func (c *Context) Set(val cty.Value, parts ...string) { + if len(parts) == 0 { + return + } + + v := mergeVars(c.ctx.Variables[parts[0]], parts[1:], val) + c.ctx.Variables[parts[0]] = v +} + +func (c *Context) Replace(val cty.Value, path string) { + parts := strings.Split(path, ".") + if len(parts) == 0 { + return + } + + delete(c.ctx.Variables, parts[0]) + c.Set(val, parts...) +} + +func mergeVars(src cty.Value, parts []string, value cty.Value) cty.Value { + + if len(parts) == 0 { + if isNotEmptyObject(src) && isNotEmptyObject(value) { + return mergeObjects(src, value) + } + return value + } + + data := make(map[string]cty.Value) + if src.Type().IsObjectType() && !src.IsNull() && src.LengthInt() > 0 { + data = src.AsValueMap() + tmp, ok := src.AsValueMap()[parts[0]] + if !ok { + src = cty.ObjectVal(make(map[string]cty.Value)) + } else { + src = tmp + } + } + + data[parts[0]] = mergeVars(src, parts[1:], value) + + return cty.ObjectVal(data) +} + +func mergeObjects(a cty.Value, b cty.Value) cty.Value { + output := make(map[string]cty.Value) + + for key, val := range a.AsValueMap() { + output[key] = val + } + for key, val := range b.AsValueMap() { + old, exists := output[key] + if exists && isNotEmptyObject(old) && isNotEmptyObject(val) { + output[key] = mergeObjects(old, val) + } else { + output[key] = val + } + } + return cty.ObjectVal(output) +} + +func isNotEmptyObject(val cty.Value) bool { + return !val.IsNull() && val.IsKnown() && val.Type().IsObjectType() && val.LengthInt() > 0 +} diff --git a/pkg/terraform/context/context_test.go b/pkg/terraform/context/context_test.go new file mode 100644 index 000000000000..8185d7b9892d --- /dev/null +++ b/pkg/terraform/context/context_test.go @@ -0,0 +1,238 @@ +package context + +import ( + "testing" + + "github.com/hashicorp/hcl/v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +func Test_ContextVariables(t *testing.T) { + underlying := &hcl.EvalContext{} + ctx := NewContext(underlying, nil) + + val, err := gocty.ToCtyValue("hello", cty.String) + if err != nil { + t.Fatal(err) + } + + ctx.Set(val, "my", "value") + value := underlying.Variables["my"].AsValueMap()["value"] + assert.Equal(t, "hello", value.AsString()) + +} + +func Test_ContextVariablesPreservation(t *testing.T) { + + underlying := &hcl.EvalContext{} + underlying.Variables = make(map[string]cty.Value) + underlying.Variables["x"], _ = gocty.ToCtyValue("does it work?", cty.String) + str, _ := gocty.ToCtyValue("something", cty.String) + underlying.Variables["my"] = cty.ObjectVal(map[string]cty.Value{ + "other": str, + "obj": cty.ObjectVal(map[string]cty.Value{ + "another": str, + }), + }) + ctx := NewContext(underlying, nil) + + val, err := gocty.ToCtyValue("hello", cty.String) + if err != nil { + t.Fatal(err) + } + + ctx.Set(val, "my", "value") + assert.Equal(t, "hello", underlying.Variables["my"].AsValueMap()["value"].AsString()) + assert.Equal(t, "something", underlying.Variables["my"].AsValueMap()["other"].AsString()) + assert.Equal(t, "something", 
underlying.Variables["my"].AsValueMap()["obj"].AsValueMap()["another"].AsString()) + assert.Equal(t, "does it work?", underlying.Variables["x"].AsString()) + +} + +func Test_ContextVariablesPreservationByDot(t *testing.T) { + + underlying := &hcl.EvalContext{} + underlying.Variables = make(map[string]cty.Value) + underlying.Variables["x"], _ = gocty.ToCtyValue("does it work?", cty.String) + str, _ := gocty.ToCtyValue("something", cty.String) + underlying.Variables["my"] = cty.ObjectVal(map[string]cty.Value{ + "other": str, + "obj": cty.ObjectVal(map[string]cty.Value{ + "another": str, + }), + }) + ctx := NewContext(underlying, nil) + + val, err := gocty.ToCtyValue("hello", cty.String) + if err != nil { + t.Fatal(err) + } + + ctx.SetByDot(val, "my.something.value") + assert.Equal(t, "hello", underlying.Variables["my"].AsValueMap()["something"].AsValueMap()["value"].AsString()) + assert.Equal(t, "something", underlying.Variables["my"].AsValueMap()["other"].AsString()) + assert.Equal(t, "something", underlying.Variables["my"].AsValueMap()["obj"].AsValueMap()["another"].AsString()) + assert.Equal(t, "does it work?", underlying.Variables["x"].AsString()) +} + +func Test_ContextSetThenImmediateGet(t *testing.T) { + + underlying := &hcl.EvalContext{} + + ctx := NewContext(underlying, nil) + + ctx.Set(cty.ObjectVal(map[string]cty.Value{ + "mod_result": cty.StringVal("ok"), + }), "module", "modulename") + + val := ctx.Get("module", "modulename", "mod_result") + assert.Equal(t, "ok", val.AsString()) +} + +func Test_ContextSetThenImmediateGetWithChild(t *testing.T) { + + underlying := &hcl.EvalContext{} + + ctx := NewContext(underlying, nil) + + childCtx := ctx.NewChild() + + childCtx.Root().Set(cty.ObjectVal(map[string]cty.Value{ + "mod_result": cty.StringVal("ok"), + }), "module", "modulename") + + val := ctx.Get("module", "modulename", "mod_result") + assert.Equal(t, "ok", val.AsString()) +} + +func Test_MergeObjects(t *testing.T) { + + tests := []struct { + name string + oldVal cty.Value + newVal cty.Value + expected cty.Value + }{ + { + name: "happy", + oldVal: cty.ObjectVal(map[string]cty.Value{ + "this": cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("some_id"), + "arn": cty.StringVal("some_arn"), + }), + }), + newVal: cty.ObjectVal(map[string]cty.Value{ + "this": cty.ObjectVal(map[string]cty.Value{ + "arn": cty.StringVal("some_new_arn"), + "bucket": cty.StringVal("test"), + }), + }), + expected: cty.ObjectVal(map[string]cty.Value{ + "this": cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("some_id"), + "arn": cty.StringVal("some_new_arn"), + "bucket": cty.StringVal("test"), + }), + }), + }, + { + name: "old value is empty", + oldVal: cty.EmptyObjectVal, + newVal: cty.ObjectVal(map[string]cty.Value{ + "this": cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + }), + }), + expected: cty.ObjectVal(map[string]cty.Value{ + "this": cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + }), + }), + }, + { + name: "new value is empty", + oldVal: cty.ObjectVal(map[string]cty.Value{ + "this": cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + }), + }), + newVal: cty.EmptyObjectVal, + expected: cty.ObjectVal(map[string]cty.Value{ + "this": cty.ObjectVal(map[string]cty.Value{ + "bucket": cty.StringVal("test"), + }), + }), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, mergeObjects(tt.oldVal, tt.newVal)) + }) + } + +} + +func Test_IsNotEmptyObject(t *testing.T) { + 
tests := []struct { + name string + val cty.Value + expected bool + }{ + { + name: "happy", + val: cty.ObjectVal(map[string]cty.Value{ + "field": cty.NilVal, + }), + expected: true, + }, + { + name: "empty object", + val: cty.EmptyObjectVal, + expected: false, + }, + { + name: "nil value", + val: cty.NilVal, + expected: false, + }, + { + name: "dynamic value", + val: cty.DynamicVal, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, isNotEmptyObject(tt.val)) + }) + } +} + +func TestReplace(t *testing.T) { + t.Run("replacement of an existing value", func(t *testing.T) { + underlying := &hcl.EvalContext{} + ctx := NewContext(underlying, nil) + ctx.SetByDot(cty.StringVal("some-value"), "my.value") + require.NotEqual(t, cty.NilVal, ctx.GetByDot("my.value")) + ctx.Replace(cty.NumberIntVal(-1), "my.value") + assert.Equal(t, cty.NumberIntVal(-1), ctx.GetByDot("my.value")) + }) + + t.Run("replacement of a non-existing value", func(t *testing.T) { + underlying := &hcl.EvalContext{} + ctx := NewContext(underlying, nil) + ctx.Replace(cty.NumberIntVal(-1), "my.value") + assert.Equal(t, cty.NumberIntVal(-1), ctx.GetByDot("my.value")) + }) + + t.Run("empty path", func(t *testing.T) { + underlying := &hcl.EvalContext{} + ctx := NewContext(underlying, nil) + ctx.Replace(cty.NumberIntVal(-1), "") + }) +} diff --git a/pkg/terraform/ignore.go b/pkg/terraform/ignore.go new file mode 100644 index 000000000000..c0a91b69c95f --- /dev/null +++ b/pkg/terraform/ignore.go @@ -0,0 +1,100 @@ +package terraform + +import ( + "fmt" + "time" + + defsecTypes "github.com/aquasecurity/trivy/pkg/types" + + "github.com/zclconf/go-cty/cty" +) + +type Ignore struct { + Range defsecTypes.Range + RuleID string + Expiry *time.Time + Workspace string + Block bool + Params map[string]string +} + +type Ignores []Ignore + +func (ignores Ignores) Covering(modules Modules, m defsecTypes.MisconfigMetadata, workspace string, ids ...string) *Ignore { + for _, ignore := range ignores { + if ignore.Covering(modules, m, workspace, ids...) 
{ + return &ignore + } + } + return nil +} + +func (ignore Ignore) Covering(modules Modules, m defsecTypes.MisconfigMetadata, workspace string, ids ...string) bool { + if ignore.Expiry != nil && time.Now().After(*ignore.Expiry) { + return false + } + if ignore.Workspace != "" && ignore.Workspace != workspace { + return false + } + idMatch := ignore.RuleID == "*" || len(ids) == 0 + for _, id := range ids { + if id == ignore.RuleID { + idMatch = true + break + } + } + if !idMatch { + return false + } + + metaHierarchy := &m + for metaHierarchy != nil { + if ignore.Range.GetFilename() != metaHierarchy.Range().GetFilename() { + metaHierarchy = metaHierarchy.Parent() + continue + } + if metaHierarchy.Range().GetStartLine() == ignore.Range.GetStartLine()+1 || metaHierarchy.Range().GetStartLine() == ignore.Range.GetStartLine() { + return ignore.MatchParams(modules, metaHierarchy) + } + metaHierarchy = metaHierarchy.Parent() + } + return false + +} + +func (ignore Ignore) MatchParams(modules Modules, blockMetadata *defsecTypes.MisconfigMetadata) bool { + if len(ignore.Params) == 0 { + return true + } + block := modules.GetBlockByIgnoreRange(blockMetadata) + if block == nil { + return true + } + for key, val := range ignore.Params { + attr := block.GetAttribute(key) + if attr.IsNil() || !attr.Value().IsKnown() { + return false + } + switch attr.Type() { + case cty.String: + if !attr.Equals(val) { + return false + } + case cty.Number: + bf := attr.Value().AsBigFloat() + f64, _ := bf.Float64() + comparableInt := fmt.Sprintf("%d", int(f64)) + comparableFloat := fmt.Sprintf("%f", f64) + if val != comparableInt && val != comparableFloat { + return false + } + case cty.Bool: + if fmt.Sprintf("%t", attr.IsTrue()) != val { + return false + } + default: + return false + } + } + return true +} diff --git a/pkg/terraform/module.go b/pkg/terraform/module.go new file mode 100644 index 000000000000..673e3ac7f625 --- /dev/null +++ b/pkg/terraform/module.go @@ -0,0 +1,188 @@ +package terraform + +import ( + "fmt" + "strings" +) + +type Module struct { + blocks Blocks + blockMap map[string]Blocks + rootPath string + modulePath string + ignores Ignores + parent *Module + local bool +} + +func NewModule(rootPath string, modulePath string, blocks Blocks, ignores Ignores, local bool) *Module { + + blockMap := make(map[string]Blocks) + + for _, b := range blocks { + if b.NameLabel() != "" { + blockMap[b.TypeLabel()] = append(blockMap[b.TypeLabel()], b) + } + } + + return &Module{ + blocks: blocks, + ignores: ignores, + blockMap: blockMap, + rootPath: rootPath, + modulePath: modulePath, + local: local, + } +} + +func (c *Module) SetParent(parent *Module) { + c.parent = parent +} + +func (c *Module) RootPath() string { + return c.rootPath +} + +func (c *Module) Ignores() Ignores { + return c.ignores +} + +func (c *Module) GetBlocks() Blocks { + return c.blocks +} + +func (h *Module) GetBlocksByTypeLabel(typeLabel string) Blocks { + return h.blockMap[typeLabel] +} + +func (c *Module) getBlocksByType(blockType string, labels ...string) Blocks { + if blockType == "module" { + return c.getModuleBlocks() + } + var results Blocks + for _, label := range labels { + for _, block := range c.blockMap[label] { + if block.Type() == blockType { + results = append(results, block) + } + } + } + return results +} + +func (c *Module) getModuleBlocks() Blocks { + var results Blocks + for _, block := range c.blocks { + if block.Type() == "module" { + results = append(results, block) + } + } + return results +} + +func (c *Module) 
GetResourcesByType(labels ...string) Blocks { + return c.getBlocksByType("resource", labels...) +} + +func (c *Module) GetResourcesByIDs(ids ...string) Blocks { + var blocks Blocks + + for _, id := range ids { + if block := c.blocks.WithID(id); block != nil { + blocks = append(blocks, block) + } + } + return blocks +} + +func (c *Module) GetDatasByType(label string) Blocks { + return c.getBlocksByType("data", label) +} + +func (c *Module) GetProviderBlocksByProvider(providerName string, alias string) Blocks { + var results Blocks + for _, block := range c.blocks { + if block.Type() == "provider" && len(block.Labels()) > 0 && block.TypeLabel() == providerName { + if alias != "" { + if block.HasChild("alias") && block.GetAttribute("alias").Equals(strings.ReplaceAll(alias, fmt.Sprintf("%s.", providerName), "")) { + results = append(results, block) + + } + } else if block.MissingChild("alias") { + results = append(results, block) + } + } + } + return results +} + +func (c *Module) GetReferencedBlock(referringAttr *Attribute, parentBlock *Block) (*Block, error) { + for _, ref := range referringAttr.AllReferences() { + if ref.TypeLabel() == "each" { + if forEachAttr := parentBlock.GetAttribute("for_each"); forEachAttr.IsNotNil() { + if b, err := c.GetReferencedBlock(forEachAttr, parentBlock); err == nil { + return b, nil + } + } + } + for _, block := range c.blocks { + if ref.RefersTo(block.reference) { + return block, nil + } + kref := *ref + kref.SetKey(parentBlock.reference.RawKey()) + if kref.RefersTo(block.reference) { + return block, nil + } + } + } + return nil, fmt.Errorf("no referenced block found in '%s'", referringAttr.Name()) +} + +func (c *Module) GetBlockByID(id string) (*Block, error) { + found := c.blocks.WithID(id) + if found == nil { + return nil, fmt.Errorf("no block found with id '%s'", id) + } + return found, nil +} + +func (c *Module) GetReferencingResources(originalBlock *Block, referencingLabel string, referencingAttributeName string) Blocks { + return c.GetReferencingBlocks(originalBlock, "resource", referencingLabel, referencingAttributeName) +} + +func (c *Module) GetsModulesBySource(moduleSource string) (Blocks, error) { + var results Blocks + + modules := c.getModuleBlocks() + for _, module := range modules { + if module.HasChild("source") && module.GetAttribute("source").Equals(moduleSource) { + results = append(results, module) + } + } + return results, nil +} + +func (c *Module) GetReferencingBlocks(originalBlock *Block, referencingType string, referencingLabel string, referencingAttributeName string) Blocks { + blocks := c.getBlocksByType(referencingType, referencingLabel) + var results Blocks + for _, block := range blocks { + attr := block.GetAttribute(referencingAttributeName) + if attr == nil { + continue + } + if attr.References(originalBlock.reference) { + results = append(results, block) + } else { + for _, ref := range attr.AllReferences() { + if ref.TypeLabel() == "each" { + fe := block.GetAttribute("for_each") + if fe.References(originalBlock.reference) { + results = append(results, block) + } + } + } + } + } + return results +} diff --git a/pkg/terraform/modules.go b/pkg/terraform/modules.go new file mode 100644 index 000000000000..825c62b16225 --- /dev/null +++ b/pkg/terraform/modules.go @@ -0,0 +1,118 @@ +package terraform + +import ( + "fmt" + + "github.com/aquasecurity/trivy/pkg/types" +) + +type Modules []*Module + +func (m Modules) ChildModulesPaths() []string { + var result []string + for _, module := range m { + if module.parent != nil 
&& module.local { + result = append(result, module.modulePath) + } + } + return result +} + +type ResourceIDResolutions map[string]bool + +func (r ResourceIDResolutions) Resolve(id string) { + r[id] = true +} + +func (r ResourceIDResolutions) Orphans() (orphanIDs []string) { + for id, resolved := range r { + if !resolved { + orphanIDs = append(orphanIDs, id) + } + } + return orphanIDs +} + +func (m Modules) GetResourcesByType(typeLabel ...string) Blocks { + var blocks Blocks + for _, module := range m { + blocks = append(blocks, module.GetResourcesByType(typeLabel...)...) + } + + return blocks +} + +func (m Modules) GetChildResourceIDMapByType(typeLabels ...string) ResourceIDResolutions { + blocks := m.GetResourcesByType(typeLabels...) + + idMap := make(map[string]bool) + for _, block := range blocks { + idMap[block.ID()] = false + } + + return idMap +} + +func (m Modules) GetReferencedBlock(referringAttr *Attribute, parentBlock *Block) (*Block, error) { + var bestMatch *Block + for _, module := range m { + b, err := module.GetReferencedBlock(referringAttr, parentBlock) + if err == nil { + if bestMatch == nil || b.moduleBlock == parentBlock.moduleBlock { + bestMatch = b + } + } + } + if bestMatch != nil { + return bestMatch, nil + } + return nil, fmt.Errorf("block not found") +} + +func (m Modules) GetReferencingResources(originalBlock *Block, referencingLabel string, referencingAttributeName string) Blocks { + var blocks Blocks + for _, module := range m { + blocks = append(blocks, module.GetReferencingResources(originalBlock, referencingLabel, referencingAttributeName)...) + } + + return blocks +} + +func (m Modules) GetBlocks() Blocks { + var blocks Blocks + for _, module := range m { + blocks = append(blocks, module.GetBlocks()...) + } + return blocks +} + +func (m Modules) GetBlockById(id string) (*Block, error) { + for _, module := range m { + if found := module.blocks.WithID(id); found != nil { + return found, nil + } + + } + return nil, fmt.Errorf("block not found") +} + +func (m Modules) GetResourceByIDs(id ...string) Blocks { + var blocks Blocks + for _, module := range m { + blocks = append(blocks, module.GetResourcesByIDs(id...)...) + } + + return blocks +} + +func (m Modules) GetBlockByIgnoreRange(blockMetadata *types.MisconfigMetadata) *Block { + for _, module := range m { + for _, block := range module.GetBlocks() { + metadata := block.GetMetadata() + if blockMetadata.Reference() == metadata.Reference() { + return block + } + } + } + return nil +} diff --git a/pkg/terraform/presets.go b/pkg/terraform/presets.go new file mode 100644 index 000000000000..6dff625a29c7 --- /dev/null +++ b/pkg/terraform/presets.go @@ -0,0 +1,56 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/google/uuid" + "github.com/zclconf/go-cty/cty" +) + +func createPresetValues(b *Block) map[string]cty.Value { + presets := make(map[string]cty.Value) + + // here we set up common "id" values that are set by the provider - this ensures all blocks have a default + // referencable id/arn. this isn't perfect, but the only way to link blocks in certain circumstances. 
+ presets["id"] = cty.StringVal(b.ID()) + + if strings.HasPrefix(b.TypeLabel(), "aws_") { + presets["arn"] = cty.StringVal(b.ID()) + } + + // workaround for weird iam feature + switch b.TypeLabel() { + case "aws_iam_policy_document": + presets["json"] = cty.StringVal(b.ID()) + // If the user leaves the name blank, Terraform will automatically generate a unique name + case "aws_launch_template": + presets["name"] = cty.StringVal(uuid.New().String()) + } + + return presets + +} + +func postProcessValues(b *Block, input map[string]cty.Value) map[string]cty.Value { + + // alias id to "bucket" (bucket name) for s3 bucket resources + if strings.HasPrefix(b.TypeLabel(), "aws_s3_bucket") { + if bucket, ok := input["bucket"]; ok { + input["id"] = bucket + } else { + input["bucket"] = cty.StringVal(b.ID()) + } + } + + switch b.TypeLabel() { + case "aws_s3_bucket": + var bucketName string + if bucket := input["bucket"]; bucket.Type().Equals(cty.String) { + bucketName = bucket.AsString() + } + input["arn"] = cty.StringVal(fmt.Sprintf("arn:aws:s3:::%s", bucketName)) + } + + return input +} diff --git a/pkg/terraform/reference.go b/pkg/terraform/reference.go new file mode 100644 index 000000000000..978773da5010 --- /dev/null +++ b/pkg/terraform/reference.go @@ -0,0 +1,177 @@ +package terraform + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +type Reference struct { + blockType Type + typeLabel string + nameLabel string + remainder []string + key cty.Value + parent string +} + +func extendReference(ref Reference, name string) Reference { + child := ref + child.remainder = make([]string, len(ref.remainder)) + if len(ref.remainder) > 0 { + copy(child.remainder, ref.remainder) + } + child.remainder = append(child.remainder, name) + return child +} + +func newReference(parts []string, parentKey string) (*Reference, error) { + + var ref Reference + + if len(parts) == 0 { + return nil, fmt.Errorf("cannot create empty reference") + } + + blockType, err := TypeFromRefName(parts[0]) + if err != nil { + blockType = &TypeResource + } + + ref.blockType = *blockType + + if ref.blockType.removeTypeInReference && parts[0] != blockType.name { + ref.typeLabel = parts[0] + if len(parts) > 1 { + ref.nameLabel = parts[1] + } + } else if len(parts) > 1 { + ref.typeLabel = parts[1] + if len(parts) > 2 { + ref.nameLabel = parts[2] + } else { + ref.nameLabel = ref.typeLabel + ref.typeLabel = "" + } + } + if len(parts) > 3 { + ref.remainder = parts[3:] + } + + if parentKey != "root" { + ref.parent = parentKey + } + + return &ref, nil +} + +func (r Reference) BlockType() Type { + return r.blockType +} + +func (r Reference) TypeLabel() string { + return r.typeLabel +} + +func (r Reference) NameLabel() string { + return r.nameLabel +} + +func (r Reference) HumanReadable() string { + if r.parent == "" { + return r.String() + } + return fmt.Sprintf("%s:%s", r.parent, r.String()) +} + +func (r Reference) LogicalID() string { + return r.String() +} + +func (r Reference) String() string { + + base := r.typeLabel + if r.nameLabel != "" { + base = fmt.Sprintf("%s.%s", base, r.nameLabel) + } + + if !r.blockType.removeTypeInReference { + base = r.blockType.Name() + if r.typeLabel != "" { + base += "." + r.typeLabel + } + if r.nameLabel != "" { + base += "." + r.nameLabel + } + } + + base += r.KeyBracketed() + + for _, rem := range r.remainder { + base += "." 
+ rem + } + + return base +} + +func (r Reference) RefersTo(other Reference) bool { + + if r.BlockType() != other.BlockType() { + return false + } + if r.TypeLabel() != other.TypeLabel() { + return false + } + if r.NameLabel() != other.NameLabel() { + return false + } + if (r.Key() != "" || other.Key() != "") && r.Key() != other.Key() { + return false + } + return true +} + +func (r *Reference) SetKey(key cty.Value) { + if key.IsNull() || !key.IsKnown() { + return + } + r.key = key +} + +func (r Reference) KeyBracketed() string { + switch v := key(r).(type) { + case int: + return fmt.Sprintf("[%d]", v) + case string: + if v == "" { + return "" + } + return fmt.Sprintf("[%q]", v) + default: + return "" + } +} + +func (r Reference) RawKey() cty.Value { + return r.key +} + +func (r Reference) Key() string { + return fmt.Sprintf("%v", key(r)) +} + +func key(r Reference) interface{} { + if r.key.IsNull() || !r.key.IsKnown() { + return "" + } + switch r.key.Type() { + case cty.Number: + f := r.key.AsBigFloat() + f64, _ := f.Float64() + return int(f64) + case cty.String: + return r.key.AsString() + default: + return "" + } +} diff --git a/pkg/terraform/reference_test.go b/pkg/terraform/reference_test.go new file mode 100644 index 000000000000..1b6a7b59be73 --- /dev/null +++ b/pkg/terraform/reference_test.go @@ -0,0 +1,171 @@ +package terraform + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/zclconf/go-cty/cty" +) + +func Test_ReferenceParsing(t *testing.T) { + cases := []struct { + input []string + expected string + }{ + { + input: []string{"module", "my-mod"}, + expected: "module.my-mod", + }, + { + input: []string{"aws_s3_bucket", "test"}, + expected: "aws_s3_bucket.test", + }, + { + input: []string{"resource", "aws_s3_bucket", "test"}, + expected: "aws_s3_bucket.test", + }, + { + input: []string{"module", "my-mod"}, + expected: "module.my-mod", + }, + { + input: []string{"data", "aws_iam_policy_document", "s3_policy"}, + expected: "data.aws_iam_policy_document.s3_policy", + }, + { + input: []string{"provider", "aws"}, + expected: "provider.aws", + }, + { + input: []string{"output", "something"}, + expected: "output.something", + }, + } + + for _, test := range cases { + t.Run(test.expected, func(t *testing.T) { + ref, err := newReference(test.input, "") + assert.NoError(t, err) + assert.Equal(t, test.expected, ref.String()) + }) + } +} + +func Test_SetKey(t *testing.T) { + tests := []struct { + name string + key cty.Value + want cty.Value + }{ + { + name: "happy", + key: cty.StringVal("str"), + want: cty.StringVal("str"), + }, + { + name: "null key", + key: cty.NullVal(cty.String), + want: cty.Value{}, + }, + { + name: "unknown key", + key: cty.UnknownVal(cty.String), + want: cty.Value{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Run(tt.name, func(t *testing.T) { + ref, err := newReference([]string{"resource", "test"}, "") + require.NoError(t, err) + + ref.SetKey(tt.key) + + assert.Equal(t, tt.want, ref.RawKey()) + }) + }) + } +} + +func Test_Key(t *testing.T) { + + tests := []struct { + name string + key cty.Value + want string + }{ + { + name: "empty key", + want: "", + }, + { + name: "str key", + key: cty.StringVal("some_value"), + want: "some_value", + }, + { + name: "number key", + key: cty.NumberIntVal(122), + want: "122", + }, + { + name: "bool key", + key: cty.BoolVal(true), + want: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + t.Run(tt.name, func(t *testing.T) { + ref, err := newReference([]string{"resource", "test"}, "") + require.NoError(t, err) + + ref.SetKey(tt.key) + + assert.Equal(t, tt.want, ref.Key()) + }) + }) + } +} + +func Test_KeyBracketed(t *testing.T) { + tests := []struct { + name string + key cty.Value + want string + }{ + { + name: "empty key", + want: "", + }, + { + name: "str key", + key: cty.StringVal("some_value"), + want: "[\"some_value\"]", + }, + { + name: "number key", + key: cty.NumberIntVal(122), + want: "[122]", + }, + { + name: "bool key", + key: cty.BoolVal(true), + want: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ref, err := newReference([]string{"resource", "test"}, "") + require.NoError(t, err) + + ref.SetKey(tt.key) + + assert.Equal(t, tt.want, ref.KeyBracketed()) + }) + } +} diff --git a/pkg/terraform/resource_block.go b/pkg/terraform/resource_block.go new file mode 100644 index 000000000000..cc50c8d9b872 --- /dev/null +++ b/pkg/terraform/resource_block.go @@ -0,0 +1,160 @@ +package terraform + +import ( + "bytes" + "fmt" + "strings" + "text/template" +) + +type PlanReference struct { + Value interface{} +} + +type PlanBlock struct { + Type string + Name string + BlockType string + Blocks map[string]map[string]interface{} + Attributes map[string]interface{} +} + +func NewPlanBlock(blockType, resourceType, resourceName string) *PlanBlock { + if blockType == "managed" { + blockType = "resource" + } + + return &PlanBlock{ + Type: resourceType, + Name: resourceName, + BlockType: blockType, + Blocks: make(map[string]map[string]interface{}), + Attributes: make(map[string]interface{}), + } +} + +func (rb *PlanBlock) HasAttribute(attribute string) bool { + for k := range rb.Attributes { + if k == attribute { + return true + } + } + return false +} + +func (rb *PlanBlock) ToHCL() string { + + resourceTmpl, err := template.New("resource").Funcs(template.FuncMap{ + "RenderValue": renderTemplateValue, + "RenderPrimitive": renderPrimitive, + }).Parse(resourceTemplate) + if err != nil { + panic(err) + } + + var res bytes.Buffer + if err := resourceTmpl.Execute(&res, map[string]interface{}{ + "BlockType": rb.BlockType, + "Type": rb.Type, + "Name": rb.Name, + "Attributes": rb.Attributes, + "Blocks": rb.Blocks, + }); err != nil { + return "" + } + return res.String() +} + +var resourceTemplate = `{{ .BlockType }} "{{ .Type }}" "{{ .Name }}" { + {{ range $name, $value := .Attributes }}{{ if $value }}{{ $name }} {{ RenderValue $value }} + {{end}}{{ end }}{{ range $name, $block := .Blocks }}{{ $name }} { + {{ range $name, $value := $block }}{{ if $value }}{{ $name }} {{ RenderValue $value }} + {{end}}{{ end }}} +{{end}}}` + +func renderTemplateValue(val interface{}) string { + switch t := val.(type) { + case map[string]interface{}: + return fmt.Sprintf("= %s", renderMap(t)) + case []interface{}: + if isMapSlice(t) { + return renderSlice(t) + } + return fmt.Sprintf("= %s", renderSlice(t)) + default: + return fmt.Sprintf("= %s", renderPrimitive(val)) + } +} + +func renderPrimitive(val interface{}) string { + switch t := val.(type) { + case PlanReference: + return fmt.Sprintf("%v", t.Value) + case string: + if strings.Contains(t, "\n") { + return fmt.Sprintf(`< i +} + +func (s IntValue) ToRego() interface{} { + m := s.misconfigmetadata.ToRego().(map[string]interface{}) + m["value"] = s.Value() + return m +} diff --git a/pkg/types/int_test.go b/pkg/types/int_test.go new file mode 100644 index 000000000000..f37fc8e280d2 --- /dev/null +++ 
b/pkg/types/int_test.go @@ -0,0 +1,21 @@ +package types + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_IntJSON(t *testing.T) { + val := Int(0x66, NewMisconfigMetadata(NewRange("main.tf", 123, 123, "", nil), "")) + data, err := json.Marshal(val) + require.NoError(t, err) + + var restored IntValue + err = json.Unmarshal(data, &restored) + require.NoError(t, err) + + assert.Equal(t, val, restored) +} diff --git a/pkg/types/map.go b/pkg/types/map.go new file mode 100755 index 000000000000..7bf4916a3c42 --- /dev/null +++ b/pkg/types/map.go @@ -0,0 +1,92 @@ +package types + +import ( + "encoding/json" +) + +type MapValue struct { + BaseAttribute + value map[string]string +} + +func (b MapValue) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "value": b.value, + "misconfigmetadata": b.misconfigmetadata, + }) +} + +func (b *MapValue) UnmarshalJSON(data []byte) error { + var keys map[string]interface{} + if err := json.Unmarshal(data, &keys); err != nil { + return err + } + if keys["value"] != nil { + var target map[string]string + raw, err := json.Marshal(keys["value"]) + if err != nil { + return err + } + if err := json.Unmarshal(raw, &target); err != nil { + return err + } + b.value = target + } + if keys["misconfigmetadata"] != nil { + raw, err := json.Marshal(keys["misconfigmetadata"]) + if err != nil { + return err + } + var m MisconfigMetadata + if err := json.Unmarshal(raw, &m); err != nil { + return err + } + b.misconfigmetadata = m + } + return nil +} + +func Map(value map[string]string, m MisconfigMetadata) MapValue { + return MapValue{ + value: value, + BaseAttribute: BaseAttribute{misconfigmetadata: m}, + } +} + +func MapDefault(value map[string]string, m MisconfigMetadata) MapValue { + b := Map(value, m) + b.BaseAttribute.misconfigmetadata.isDefault = true + return b +} + +func MapExplicit(value map[string]string, m MisconfigMetadata) MapValue { + b := Map(value, m) + b.BaseAttribute.misconfigmetadata.isExplicit = true + return b +} + +func (b MapValue) Value() map[string]string { + return b.value +} + +func (b MapValue) GetRawValue() interface{} { + return b.value +} + +func (b MapValue) Len() int { + return len(b.value) +} + +func (b MapValue) HasKey(key string) bool { + if b.value == nil { + return false + } + _, ok := b.value[key] + return ok +} + +func (s MapValue) ToRego() interface{} { + m := s.misconfigmetadata.ToRego().(map[string]interface{}) + m["value"] = s.Value() + return m +} diff --git a/pkg/types/map_test.go b/pkg/types/map_test.go new file mode 100644 index 000000000000..fdc57cd1a2b3 --- /dev/null +++ b/pkg/types/map_test.go @@ -0,0 +1,25 @@ +package types + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_MapJSON(t *testing.T) { + val := Map(map[string]string{ + "yeah": "it", + "seems": "to", + "work": "fine", + }, NewMisconfigMetadata(NewRange("main.tf", 123, 123, "", nil), "")) + data, err := json.Marshal(val) + require.NoError(t, err) + + var restored MapValue + err = json.Unmarshal(data, &restored) + require.NoError(t, err) + + assert.Equal(t, val, restored) +} diff --git a/pkg/types/metadata.go b/pkg/types/metadata.go new file mode 100755 index 000000000000..479c8c8ba651 --- /dev/null +++ b/pkg/types/metadata.go @@ -0,0 +1,222 @@ +package types + +import ( + "encoding/json" + "fmt" + "strings" +) + +type MisconfigMetadata 
struct { + rnge Range + ref string + isManaged bool + isDefault bool + isExplicit bool + isUnresolvable bool + parent *MisconfigMetadata + internal interface{} +} + +func (m MisconfigMetadata) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "range": m.rnge, + "ref": m.ref, + "managed": m.isManaged, + "default": m.isDefault, + "explicit": m.isExplicit, + "unresolvable": m.isUnresolvable, + "parent": m.parent, + }) +} + +func (m *MisconfigMetadata) UnmarshalJSON(data []byte) error { + var keys map[string]interface{} + if err := json.Unmarshal(data, &keys); err != nil { + return err + } + if keys["range"] != nil { + raw, err := json.Marshal(keys["range"]) + if err != nil { + return err + } + var r Range + if err := json.Unmarshal(raw, &r); err != nil { + return err + } + m.rnge = r + } + if keys["ref"] != nil { + m.ref = keys["ref"].(string) + } + if keys["managed"] != nil { + m.isManaged = keys["managed"].(bool) + } + if keys["default"] != nil { + m.isDefault = keys["default"].(bool) + } + if keys["explicit"] != nil { + m.isExplicit = keys["explicit"].(bool) + } + if keys["unresolvable"] != nil { + m.isUnresolvable = keys["unresolvable"].(bool) + } + if keys["parent"] != nil { + if _, ok := keys["parent"].(map[string]interface{}); ok { + raw, err := json.Marshal(keys["parent"]) + if err != nil { + return err + } + var parent MisconfigMetadata + if err := json.Unmarshal(raw, &parent); err != nil { + return err + } + m.parent = &parent + } + } + return nil +} + +func (m *MisconfigMetadata) ToRego() interface{} { + input := map[string]interface{}{ + "filepath": m.Range().GetLocalFilename(), + "startline": m.Range().GetStartLine(), + "endline": m.Range().GetEndLine(), + "sourceprefix": m.Range().GetSourcePrefix(), + "managed": m.isManaged, + "explicit": m.isExplicit, + "fskey": CreateFSKey(m.Range().GetFS()), + "resource": m.Reference(), + } + if m.parent != nil { + input["parent"] = m.parent.ToRego() + } + return input +} + +func NewMisconfigMetadata(r Range, ref string) MisconfigMetadata { + return MisconfigMetadata{ + rnge: r, + ref: ref, + isManaged: true, + } +} + +func NewUnresolvableMisconfigMetadata(r Range, ref string) MisconfigMetadata { + unres := NewMisconfigMetadata(r, ref) + unres.isUnresolvable = true + return unres +} + +func NewExplicitMisconfigMetadata(r Range, ref string) MisconfigMetadata { + m := NewMisconfigMetadata(r, ref) + m.isExplicit = true + return m +} + +func (m MisconfigMetadata) WithParent(p MisconfigMetadata) MisconfigMetadata { + m.parent = &p + return m +} + +func (m *MisconfigMetadata) SetParentPtr(p *MisconfigMetadata) { + m.parent = p +} + +func (m MisconfigMetadata) Parent() *MisconfigMetadata { + return m.parent +} + +func (m MisconfigMetadata) Root() MisconfigMetadata { + meta := &m + for meta.Parent() != nil { + meta = meta.Parent() + } + return *meta +} + +func (m MisconfigMetadata) WithInternal(internal interface{}) MisconfigMetadata { + m.internal = internal + return m +} + +func (m MisconfigMetadata) Internal() interface{} { + return m.internal +} + +func (m MisconfigMetadata) IsMultiLine() bool { + return m.rnge.GetStartLine() < m.rnge.GetEndLine() +} + +func NewUnmanagedMisconfigMetadata() MisconfigMetadata { + m := NewMisconfigMetadata(NewRange("", 0, 0, "", nil), "") + m.isManaged = false + return m +} + +func NewTestMisconfigMetadata() MisconfigMetadata { + return NewMisconfigMetadata(NewRange("test.test", 123, 123, "", nil), "") +} + +func NewApiMisconfigMetadata(provider string, parts ...string) 
MisconfigMetadata { + return NewMisconfigMetadata(NewRange(fmt.Sprintf("/%s/%s", provider, strings.Join(parts, "/")), 0, 0, "", nil), "") +} + +func NewRemoteMisconfigMetadata(id string) MisconfigMetadata { + return NewMisconfigMetadata(NewRange(id, 0, 0, "remote", nil), id) +} + +func (m MisconfigMetadata) IsDefault() bool { + return m.isDefault +} + +func (m MisconfigMetadata) IsResolvable() bool { + return !m.isUnresolvable +} + +func (m MisconfigMetadata) IsExplicit() bool { + return m.isExplicit +} + +func (m MisconfigMetadata) String() string { + return m.ref +} + +func (m MisconfigMetadata) Reference() string { + return m.ref +} + +func (m MisconfigMetadata) Range() Range { + return m.rnge +} + +func (m MisconfigMetadata) IsManaged() bool { + return m.isManaged +} + +func (m MisconfigMetadata) IsUnmanaged() bool { + return !m.isManaged +} + +type BaseAttribute struct { + misconfigmetadata MisconfigMetadata +} + +func (b BaseAttribute) GetMetadata() MisconfigMetadata { + return b.misconfigmetadata +} + +func (m MisconfigMetadata) GetMisconfigMetadata() MisconfigMetadata { + return m +} + +func (m MisconfigMetadata) GetRawValue() interface{} { + return nil +} + +func (m *MisconfigMetadata) SetReference(ref string) { + m.ref = ref +} + +func (m *MisconfigMetadata) SetRange(r Range) { + m.rnge = r +} diff --git a/pkg/types/metadata_test.go b/pkg/types/metadata_test.go new file mode 100644 index 000000000000..b8a4a1e48cc4 --- /dev/null +++ b/pkg/types/metadata_test.go @@ -0,0 +1,35 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_MetadataToRego(t *testing.T) { + m1 := NewTestMisconfigMetadata() + expected := map[string]interface{}{ + "endline": 123, + "explicit": false, + "filepath": "test.test", + "fskey": "", + "managed": true, + "resource": "", + "sourceprefix": "", + "startline": 123, + } + assert.Equal(t, expected, m1.ToRego()) + m2 := NewTestMisconfigMetadata() + m1.SetParentPtr(&m2) + expected["parent"] = map[string]interface{}{ + "endline": 123, + "explicit": false, + "filepath": "test.test", + "fskey": "", + "managed": true, + "resource": "", + "sourceprefix": "", + "startline": 123, + } + assert.Equal(t, expected, m1.ToRego()) +} diff --git a/pkg/types/range.go b/pkg/types/range.go new file mode 100755 index 000000000000..bbcb94a8a57e --- /dev/null +++ b/pkg/types/range.go @@ -0,0 +1,148 @@ +package types + +import ( + "encoding/json" + "fmt" + "io/fs" + "path/filepath" +) + +func NewRange(filename string, startLine int, endLine int, sourcePrefix string, srcFS fs.FS) Range { + r := Range{ + filename: filename, + startLine: startLine, + endLine: endLine, + fs: srcFS, + fsKey: CreateFSKey(srcFS), + sourcePrefix: sourcePrefix, + } + return r +} + +func NewRangeWithLogicalSource(filename string, startLine int, endLine int, sourcePrefix string, + srcFS fs.FS) Range { + r := Range{ + filename: filename, + startLine: startLine, + endLine: endLine, + fs: srcFS, + fsKey: CreateFSKey(srcFS), + sourcePrefix: sourcePrefix, + isLogicalSource: true, + } + return r +} + +func NewRangeWithFSKey(filename string, startLine int, endLine int, sourcePrefix string, fsKey string, fs fs.FS) Range { + r := Range{ + filename: filename, + startLine: startLine, + endLine: endLine, + fs: fs, + fsKey: fsKey, + sourcePrefix: sourcePrefix, + } + return r +} + +type Range struct { + filename string + startLine int + endLine int + sourcePrefix string + isLogicalSource bool + fs fs.FS + fsKey string +} + +func (r Range) MarshalJSON() ([]byte, error) { + 
return json.Marshal(map[string]interface{}{ + "filename": r.filename, + "startLine": r.startLine, + "endLine": r.endLine, + "sourcePrefix": r.sourcePrefix, + "fsKey": r.fsKey, + "isLogicalSource": r.isLogicalSource, + }) +} + +func (r *Range) UnmarshalJSON(data []byte) error { + var keys map[string]interface{} + if err := json.Unmarshal(data, &keys); err != nil { + return err + } + if keys["filename"] != nil { + r.filename = keys["filename"].(string) + } + if keys["startLine"] != nil { + r.startLine = int(keys["startLine"].(float64)) + } + if keys["endLine"] != nil { + r.endLine = int(keys["endLine"].(float64)) + } + if keys["sourcePrefix"] != nil { + r.sourcePrefix = keys["sourcePrefix"].(string) + } + if keys["fsKey"] != nil { + r.fsKey = keys["fsKey"].(string) + } + if keys["isLogicalSource"] != nil { + r.isLogicalSource = keys["isLogicalSource"].(bool) + } + return nil +} + +func (r Range) GetFSKey() string { + return r.fsKey +} + +func (r Range) LineCount() int { + if r.endLine == 0 { + return 0 + } + return (r.endLine - r.startLine) + 1 +} + +func (r Range) GetFilename() string { + if r.sourcePrefix == "" { + return r.filename + } + if r.isLogicalSource { + return fmt.Sprintf("%s:%s", r.sourcePrefix, r.filename) + } + return filepath.Join(r.sourcePrefix, r.filename) +} + +func (r Range) GetLocalFilename() string { + return r.filename +} + +func (r Range) GetStartLine() int { + return r.startLine +} + +func (r Range) GetEndLine() int { + return r.endLine +} + +func (r Range) IsMultiLine() bool { + return r.startLine < r.endLine +} + +func (r Range) String() string { + if r.startLine != r.endLine { + return fmt.Sprintf("%s:%d-%d", r.GetFilename(), r.startLine, r.endLine) + } + if r.startLine == 0 && r.endLine == 0 { + return r.GetFilename() + } + return fmt.Sprintf("%s:%d", r.GetFilename(), r.startLine) +} + +func (r Range) GetFS() fs.FS { + return r.fs +} + +func (r Range) GetSourcePrefix() string { + return r.sourcePrefix +} diff --git a/pkg/types/rules/rule.go b/pkg/types/rules/rule.go new file mode 100644 index 000000000000..a074649516fe --- /dev/null +++ b/pkg/types/rules/rule.go @@ -0,0 +1,18 @@ +package rules + +import ( + "github.com/aquasecurity/trivy/pkg/scan" +) + +type RegisteredRule struct { + scan.Rule + Number int +} + +func (r *RegisteredRule) GetRule() scan.Rule { + return r.Rule +} + +func (r *RegisteredRule) AddLink(link string) { + r.Rule.Links = append([]string{link}, r.Rule.Links...) 
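A companion sketch (again, not part of the change) of how the two Range constructors above differ when rendering filenames; the output assumes a Unix-style filepath.Join:

package types_test

import (
	"fmt"

	"github.com/aquasecurity/trivy/pkg/types"
)

func ExampleRange_filenames() {
	physical := types.NewRange("main.tf", 1, 3, "git", nil)
	logical := types.NewRangeWithLogicalSource("main.tf", 1, 3, "git", nil)

	fmt.Println(physical.GetFilename()) // prefix joined as a path
	fmt.Println(logical.GetFilename())  // prefix rendered as a scheme-style label
	fmt.Println(physical.String())      // multi-line ranges append start-end
	// Output:
	// git/main.tf
	// git:main.tf
	// git/main.tf:1-3
}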
+} diff --git a/pkg/types/sources.go b/pkg/types/sources.go new file mode 100644 index 000000000000..02e43dac3a35 --- /dev/null +++ b/pkg/types/sources.go @@ -0,0 +1,14 @@ +package types + +type Source string + +const ( + SourceDockerfile Source = "dockerfile" + SourceKubernetes Source = "kubernetes" + SourceRbac Source = "rbac" // deprecated - please use "kubernetes" instead + SourceDefsec Source = "defsec" // deprecated - please use "cloud" instead + SourceCloud Source = "cloud" + SourceYAML Source = "yaml" + SourceJSON Source = "json" + SourceTOML Source = "toml" +) diff --git a/pkg/types/string.go b/pkg/types/string.go new file mode 100755 index 000000000000..597e7270d7ec --- /dev/null +++ b/pkg/types/string.go @@ -0,0 +1,189 @@ +package types + +import ( + "encoding/json" + "strings" +) + +type StringEqualityOption int + +const ( + IgnoreCase StringEqualityOption = iota + IsPallindrome + IgnoreWhitespace +) + +func String(str string, m MisconfigMetadata) StringValue { + return StringValue{ + value: str, + BaseAttribute: BaseAttribute{misconfigmetadata: m}, + } +} +func StringDefault(value string, m MisconfigMetadata) StringValue { + b := String(value, m) + b.BaseAttribute.misconfigmetadata.isDefault = true + return b +} + +func StringUnresolvable(m MisconfigMetadata) StringValue { + b := String("", m) + b.BaseAttribute.misconfigmetadata.isUnresolvable = true + return b +} + +func StringExplicit(value string, m MisconfigMetadata) StringValue { + b := String(value, m) + b.BaseAttribute.misconfigmetadata.isExplicit = true + return b +} + +type StringValueList []StringValue + +type StringValue struct { + BaseAttribute + value string +} + +func (l StringValueList) AsStrings() (output []string) { + for _, item := range l { + output = append(output, item.Value()) + } + return output +} + +type stringCheckFunc func(string, string) bool + +func (b StringValue) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "value": b.value, + "misconfigmetadata": b.misconfigmetadata, + }) +} + +func (b *StringValue) UnmarshalJSON(data []byte) error { + var keys map[string]interface{} + if err := json.Unmarshal(data, &keys); err != nil { + return err + } + if keys["value"] != nil { + b.value = keys["value"].(string) + } + if keys["misconfigmetadata"] != nil { + raw, err := json.Marshal(keys["misconfigmetadata"]) + if err != nil { + return err + } + var m MisconfigMetadata + if err := json.Unmarshal(raw, &m); err != nil { + return err + } + b.misconfigmetadata = m + } + return nil +} + +func (s StringValue) ToRego() interface{} { + m := s.misconfigmetadata.ToRego().(map[string]interface{}) + m["value"] = s.Value() + return m +} + +func (s StringValue) IsOneOf(values ...string) bool { + if s.misconfigmetadata.isUnresolvable { + return false + } + for _, value := range values { + if value == s.value { + return true + } + } + return false +} + +func (s StringValue) GetMetadata() MisconfigMetadata { + return s.misconfigmetadata +} + +func (s StringValue) Value() string { + return s.value +} + +func (b StringValue) GetRawValue() interface{} { + return b.value +} + +func (s StringValue) IsEmpty() bool { + if s.misconfigmetadata.isUnresolvable { + return false + } + return s.value == "" +} + +func (s StringValue) IsNotEmpty() bool { + if s.misconfigmetadata.isUnresolvable { + return false + } + return s.value != "" +} + +func (s StringValue) EqualTo(value string, equalityOptions ...StringEqualityOption) bool { + if s.misconfigmetadata.isUnresolvable { + return false + } + + return 
s.executePredicate(value, func(a, b string) bool { return a == b }, equalityOptions...)
+}
+
+func (s StringValue) NotEqualTo(value string, equalityOptions ...StringEqualityOption) bool {
+	if s.misconfigmetadata.isUnresolvable {
+		return false
+	}
+
+	return !s.EqualTo(value, equalityOptions...)
+}
+
+func (s StringValue) StartsWith(prefix string, equalityOptions ...StringEqualityOption) bool {
+	if s.misconfigmetadata.isUnresolvable {
+		return false
+	}
+
+	return s.executePredicate(prefix, strings.HasPrefix, equalityOptions...)
+}
+
+func (s StringValue) EndsWith(suffix string, equalityOptions ...StringEqualityOption) bool {
+	if s.misconfigmetadata.isUnresolvable {
+		return false
+	}
+	return s.executePredicate(suffix, strings.HasSuffix, equalityOptions...)
+}
+
+func (s StringValue) Contains(value string, equalityOptions ...StringEqualityOption) bool {
+	if s.misconfigmetadata.isUnresolvable {
+		return false
+	}
+	return s.executePredicate(value, strings.Contains, equalityOptions...)
+}
+
+func (s StringValue) executePredicate(value string, fn stringCheckFunc, equalityOptions ...StringEqualityOption) bool {
+	subjectString := s.value
+	searchString := value
+
+	for _, eqOpt := range equalityOptions {
+		switch eqOpt {
+		case IgnoreCase:
+			subjectString = strings.ToLower(subjectString)
+			searchString = strings.ToLower(searchString)
+		case IsPallindrome:
+			var result string
+			for _, v := range subjectString {
+				result = string(v) + result
+			}
+			subjectString = result
+		case IgnoreWhitespace:
+			subjectString = strings.ReplaceAll(subjectString, " ", "")
+			searchString = strings.ReplaceAll(searchString, " ", "")
+		}
+	}
+
+	return fn(subjectString, searchString)
+}
diff --git a/pkg/types/string_test.go b/pkg/types/string_test.go
new file mode 100755
index 000000000000..9bf2d86d594f
--- /dev/null
+++ b/pkg/types/string_test.go
@@ -0,0 +1,107 @@
+package types
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_StringValueEqualTo(t *testing.T) {
+	testCases := []struct {
+		desc       string
+		input      string
+		check      string
+		ignoreCase bool
+		expected   bool
+	}{
+		{
+			desc:     "return true when string is equal",
+			input:    "something",
+			check:    "something",
+			expected: true,
+		},
+		{
+			desc:     "return false when string is not equal",
+			input:    "something",
+			check:    "",
+			expected: false,
+		},
+	}
+	for _, tC := range testCases {
+		t.Run(tC.desc, func(t *testing.T) {
+			val := String(tC.input, fakeMetadata)
+
+			var options []StringEqualityOption
+			if tC.ignoreCase {
+				options = append(options, IgnoreCase)
+			}
+
+			assert.Equal(t, tC.expected, val.EqualTo(tC.check, options...))
+		})
+	}
+}
+
+func Test_StringValueStartsWith(t *testing.T) {
+	testCases := []struct {
+		desc       string
+		input      string
+		prefix     string
+		ignoreCase bool
+		expected   bool
+	}{
+		{
+			desc:     "return true when starts with",
+			input:    "something",
+			prefix:   "some",
+			expected: true,
+		},
+		{
+			desc:     "return false when does not start with",
+			input:    "something",
+			prefix:   "nothing",
+			expected: false,
+		},
+		{
+			desc:       "return true when starts with ignoring case",
+			input:      "something",
+			prefix:     "SOME",
+			ignoreCase: true,
+			expected:   true,
+		},
+		{
+			desc:     "return false when case differs and case is not ignored",
+			input:    "something",
+			prefix:   "SOME",
+			expected: false,
+		},
+	}
+	for _, tC := range testCases {
+		t.Run(tC.desc, func(t *testing.T) {
+
+			val := String(tC.input, fakeMetadata)
+
+			var options []StringEqualityOption
+
+			if tC.ignoreCase {
+				options = append(options, IgnoreCase)
+			}
+
+			assert.Equal(t, tC.expected, val.StartsWith(tC.prefix, options...))
+		})
+	}
+}
+
+func Test_StringJSON(t *testing.T) {
+	val := String("hello world", NewMisconfigMetadata(NewRange("main.tf", 123, 123, "", nil), ""))
+	data, err := json.Marshal(val)
+	require.NoError(t, err)
+
+	var restored StringValue
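Since executePredicate applies the options in order, they compose; a small sketch (not part of this file) with a placeholder metadata value:

package types_test

import (
	"fmt"

	"github.com/aquasecurity/trivy/pkg/types"
)

func ExampleStringValue_equalityOptions() {
	m := types.NewMisconfigMetadata(types.NewRange("main.tf", 1, 1, "", nil), "")
	s := types.String("Hello World", m)

	// IgnoreCase lowercases both sides first, then IgnoreWhitespace strips spaces.
	fmt.Println(s.EqualTo("helloworld", types.IgnoreCase, types.IgnoreWhitespace))
	// Output: true
}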
err = json.Unmarshal(data, &restored) + require.NoError(t, err) + + assert.Equal(t, val, restored) +} diff --git a/pkg/types/time.go b/pkg/types/time.go new file mode 100755 index 000000000000..f16c25f9abad --- /dev/null +++ b/pkg/types/time.go @@ -0,0 +1,102 @@ +package types + +import ( + "encoding/json" + "time" +) + +type TimeValue struct { + BaseAttribute + value time.Time +} + +func (b TimeValue) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "value": b.value.Format(time.RFC3339), + "misconfigmetadata": b.misconfigmetadata, + }) +} + +func (b *TimeValue) UnmarshalJSON(data []byte) error { + var keys map[string]interface{} + if err := json.Unmarshal(data, &keys); err != nil { + return err + } + if keys["value"] != nil { + if ti, err := time.Parse(time.RFC3339, keys["value"].(string)); err == nil { + b.value = ti + } + } + if keys["misconfigmetadata"] != nil { + raw, err := json.Marshal(keys["misconfigmetadata"]) + if err != nil { + return err + } + var m MisconfigMetadata + if err := json.Unmarshal(raw, &m); err != nil { + return err + } + b.misconfigmetadata = m + } + return nil +} + +func Time(value time.Time, m MisconfigMetadata) TimeValue { + return TimeValue{ + value: value, + BaseAttribute: BaseAttribute{misconfigmetadata: m}, + } +} + +func TimeDefault(value time.Time, m MisconfigMetadata) TimeValue { + b := Time(value, m) + b.BaseAttribute.misconfigmetadata.isDefault = true + return b +} + +func TimeExplicit(value time.Time, m MisconfigMetadata) TimeValue { + b := Time(value, m) + b.BaseAttribute.misconfigmetadata.isExplicit = true + return b +} + +func TimeUnresolvable(m MisconfigMetadata) TimeValue { + b := Time(time.Time{}, m) + b.BaseAttribute.misconfigmetadata.isUnresolvable = true + return b +} + +func (t TimeValue) Value() time.Time { + return t.value +} + +func (t TimeValue) GetRawValue() interface{} { + return t.value +} + +func (t TimeValue) IsNever() bool { + if t.GetMetadata().isUnresolvable { + return false + } + return t.value.IsZero() +} + +func (t TimeValue) Before(i time.Time) bool { + if t.misconfigmetadata.isUnresolvable { + return false + } + return t.value.Before(i) +} + +func (t TimeValue) After(i time.Time) bool { + if t.misconfigmetadata.isUnresolvable { + return false + } + return t.value.After(i) +} + +func (t TimeValue) ToRego() interface{} { + m := t.misconfigmetadata.ToRego().(map[string]interface{}) + m["value"] = t.Value().Format(time.RFC3339) + return m +} diff --git a/pkg/types/time_test.go b/pkg/types/time_test.go new file mode 100644 index 000000000000..4f306178489a --- /dev/null +++ b/pkg/types/time_test.go @@ -0,0 +1,23 @@ +package types + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_TimeJSON(t *testing.T) { + val := Time(time.Now(), NewMisconfigMetadata(NewRange("main.tf", 123, 123, "", nil), "")) + data, err := json.Marshal(val) + require.NoError(t, err) + + var restored TimeValue + err = json.Unmarshal(data, &restored) + require.NoError(t, err) + + assert.Equal(t, val.value.Format(time.RFC3339), restored.Value().Format(time.RFC3339)) + assert.Equal(t, val.misconfigmetadata, restored.misconfigmetadata) +} diff --git a/test/attribute_test.go b/test/attribute_test.go new file mode 100644 index 000000000000..7714a529790a --- /dev/null +++ b/test/attribute_test.go @@ -0,0 +1,712 @@ +package test + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/terraform" + 
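For completeness, a sketch of the unresolvable semantics that TimeValue shares with the other value types above: every predicate on an unresolvable value reports false.

package types_test

import (
	"fmt"
	"time"

	"github.com/aquasecurity/trivy/pkg/types"
)

func ExampleTimeValue_unresolvable() {
	m := types.NewMisconfigMetadata(types.NewRange("main.tf", 1, 1, "", nil), "")
	tv := types.TimeUnresolvable(m)

	// Before, After and IsNever all short-circuit to false when unresolvable.
	fmt.Println(tv.Before(time.Now()), tv.After(time.Now()), tv.IsNever())
	// Output: false false false
}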
"github.com/stretchr/testify/assert" +) + +func Test_AttributeStartsWith(t *testing.T) { + var tests = []struct { + name string + source string + checkAttribute string + checkValue string + expectedResult bool + }{ + { + name: "bucket name starts with bucket", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" +}`, + checkAttribute: "bucket_name", + checkValue: "bucket", + expectedResult: true, + }, + { + name: "bucket acl starts with public", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" +}`, + checkAttribute: "acl", + checkValue: "public", + expectedResult: true, + }, + { + name: "bucket name doesn't start with secret", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" + logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +}`, + checkAttribute: "bucket_name", + checkValue: "secret_", + expectedResult: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := createModulesFromSource(t, test.source, ".tf") + for _, module := range modules { + for _, block := range module.GetBlocks() { + if !block.HasChild(test.checkAttribute) { + t.FailNow() + } + attr := block.GetAttribute(test.checkAttribute) + assert.Equal(t, test.expectedResult, attr.StartsWith(test.checkValue)) + } + } + }) + } +} + +func Test_AttributeEndsWith(t *testing.T) { + var tests = []struct { + name string + source string + checkAttribute string + checkValue string + expectedResult bool + }{ + { + name: "bucket name ends with Name", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" +}`, + checkAttribute: "bucket_name", + checkValue: "Name", + expectedResult: true, + }, + { + name: "bucket acl ends with read not Read", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" +}`, + checkAttribute: "acl", + checkValue: "Read", + expectedResult: false, + }, + { + name: "bucket name doesn't end with bucket", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" + logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +}`, + checkAttribute: "bucket_name", + checkValue: "_bucket", + expectedResult: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := createModulesFromSource(t, test.source, ".tf") + for _, module := range modules { + for _, block := range module.GetBlocks() { + if !block.HasChild(test.checkAttribute) { + t.FailNow() + } + attr := block.GetAttribute(test.checkAttribute) + assert.Equal(t, test.expectedResult, attr.EndsWith(test.checkValue)) + } + } + }) + } +} + +func Test_AttributeContains(t *testing.T) { + var tests = []struct { + name string + source string + checkAttribute string + checkValue string + expectedResult bool + ignoreCase bool + }{ + { + name: "bucket name contains Name", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" +}`, + checkAttribute: "bucket_name", + checkValue: "etNa", + expectedResult: true, + }, + { + name: "bucket acl doesn't contain private", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" +}`, + checkAttribute: "acl", + checkValue: "private", + expectedResult: false, + }, + { + name: "tags attribute is a map with a Department key", + source: ` +resource "aws_s3_bucket" "my-bucket" 
{
+	bucket_name = "bucketName"
+	acl = "public-read"
+	tags = {
+		Department = "Finance"
+	}
+}`,
+			checkAttribute: "tags",
+			checkValue: "Department",
+			expectedResult: true,
+		},
+		{
+			name: "cidr_block has expected subnet",
+			source: `
+resource "aws_security_group" "my-security_group" {
+	cidr_block = ["10.0.0.0/16", "172.0.0.0/8" ]
+}`,
+			checkAttribute: "cidr_block",
+			checkValue: "172.0.0.0/8",
+			expectedResult: true,
+		},
+		{
+			name: "autoscaling group has propagated key defined 1st tag is present",
+			source: `
+resource "aws_autoscaling_group" "my-aws_autoscaling_group" {
+	tags = [
+		{
+			"key" = "Name"
+			"propagate_at_launch" = "true"
+			"value" = "couchbase-seb-develop-dev"
+		},
+		{
+			"key" = "app"
+			"propagate_at_launch" = "true"
+			"value" = "myapp"
+		}
+	]
+}`,
+			checkAttribute: "tags",
+			checkValue: "Name",
+			expectedResult: true,
+		},
+		{
+			name: "autoscaling group has propagated key defined 2nd tag is present",
+			source: `
+resource "aws_autoscaling_group" "my-aws_autoscaling_group" {
+	tags = [
+		{
+			"key" = "Name"
+			"propagate_at_launch" = "true"
+			"value" = "couchbase-seb-develop-dev"
+		},
+		{
+			"key" = "app"
+			"propagate_at_launch" = "true"
+			"value" = "myapp"
+		}
+	]
+}`,
+			checkAttribute: "tags",
+			checkValue: "app",
+			expectedResult: true,
+		},
+		{
+			name: "autoscaling group has propagated key defined and tag is not present",
+			source: `
+resource "aws_autoscaling_group" "my-aws_autoscaling_group" {
+	tags = [
+		{
+			"key" = "Name"
+			"propagate_at_launch" = "true"
+			"value" = "couchbase-seb-develop-dev"
+		},
+		{
+			"key" = "app"
+			"propagate_at_launch" = "true"
+			"value" = "myapp"
+		}
+	]
+}`,
+			checkAttribute: "tags",
+			checkValue: "NotThere",
+			expectedResult: false,
+		},
+		{
+			name: "contains array of strings ignores case",
+			source: `
+resource "aws_security_group" "my-security_group" {
+	cidr_block = ["Foo", "Bar" ]
+}`,
+			checkAttribute: "cidr_block",
+			checkValue: "foo",
+			expectedResult: true,
+			ignoreCase: true,
+		},
+		{
+			name: "contains array of strings without ignore case",
+			source: `
+resource "aws_security_group" "my-security_group" {
+	cidr_block = ["Foo", "Bar" ]
+}`,
+			checkAttribute: "cidr_block",
+			checkValue: "foo",
+			expectedResult: false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := createModulesFromSource(t, test.source, ".tf")
+			for _, module := range modules {
+				for _, b := range module.GetBlocks() {
+					if !b.HasChild(test.checkAttribute) {
+						t.FailNow()
+					}
+					attr := b.GetAttribute(test.checkAttribute)
+					if test.ignoreCase {
+						assert.Equal(t, test.expectedResult, attr.Contains(test.checkValue, terraform.IgnoreCase))
+					} else {
+						assert.Equal(t, test.expectedResult, attr.Contains(test.checkValue))
+					}
+				}
+			}
+		})
+	}
+}
+
+func Test_AttributeIsAny(t *testing.T) {
+	var tests = []struct {
+		name           string
+		source         string
+		checkAttribute string
+		checkValue     []interface{}
+		expectedResult bool
+	}{
+		{
+			name: "bucket acl is not one of the specified acls",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = "public-read"
+}`,
+			checkAttribute: "acl",
+			checkValue: []interface{}{"private", "authenticated-read"},
+			expectedResult: false,
+		},
+		{
+			name: "bucket acl is one of the specified acls",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = "private"
+}`,
+			checkAttribute: "acl",
+			checkValue: []interface{}{"private", "authenticated-read"},
+			expectedResult: true,
+		},
+		{
+			name: "count is one of the provided values",
+			
source: `
+resource "aws_security_group" "my-security_group" {
+	count = 1
+}`,
+			checkAttribute: "count",
+			checkValue: []interface{}{1, 2},
+			expectedResult: true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := createModulesFromSource(t, test.source, ".tf")
+			for _, module := range modules {
+				for _, block := range module.GetBlocks() {
+					if !block.HasChild(test.checkAttribute) {
+						t.FailNow()
+					}
+					attr := block.GetAttribute(test.checkAttribute)
+					assert.Equal(t, test.expectedResult, attr.IsAny(test.checkValue...))
+				}
+			}
+		})
+	}
+}
+
+func Test_AttributeIsNone(t *testing.T) {
+	var tests = []struct {
+		name           string
+		source         string
+		checkAttribute string
+		checkValue     []interface{}
+		expectedResult bool
+	}{
+		{
+			name: "bucket acl is not one of the specified acls",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = "public-read"
+}`,
+			checkAttribute: "acl",
+			checkValue: []interface{}{"private", "authenticated-read"},
+			expectedResult: true,
+		},
+		{
+			name: "bucket acl is one of the specified acls",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = "private"
+}`,
+			checkAttribute: "acl",
+			checkValue: []interface{}{"private", "authenticated-read"},
+			expectedResult: false,
+		},
+		{
+			name: "count is none of the provided values",
+			source: `
+resource "aws_security_group" "my-security_group" {
+	count = 0
+}`,
+			checkAttribute: "count",
+			checkValue: []interface{}{1, 2},
+			expectedResult: true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := createModulesFromSource(t, test.source, ".tf")
+			for _, module := range modules {
+				for _, block := range module.GetBlocks() {
+					if !block.HasChild(test.checkAttribute) {
+						t.FailNow()
+					}
+					attr := block.GetAttribute(test.checkAttribute)
+					assert.Equal(t, test.expectedResult, attr.IsNone(test.checkValue...))
+				}
+			}
+		})
+	}
+}
+
+func Test_AttributeIsEmpty(t *testing.T) {
+	var tests = []struct {
+		name           string
+		source         string
+		checkAttribute string
+		checkValue     []interface{}
+		expectedResult bool
+	}{
+		{
+			name: "bucket acl is not empty",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = "public-read"
+}`,
+			checkAttribute: "acl",
+			expectedResult: false,
+		},
+		{
+			name: "bucket acl is empty",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = ""
+}`,
+			checkAttribute: "acl",
+			expectedResult: true,
+		},
+		{
+			name: "tags is not empty",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = "public-read"
+	tags = {
+		Department = "Finance"
+	}
+}`,
+			checkAttribute: "tags",
+			expectedResult: false,
+		},
+		{
+			name: "tags is empty",
+			source: `
+resource "aws_s3_bucket" "my-bucket" {
+	bucket_name = "bucketName"
+	acl = "public-read"
+	tags = {
+	}
+}`,
+			checkAttribute: "tags",
+			expectedResult: true,
+		},
+		{
+			name: "cidr is not empty",
+			source: `
+resource "aws_security_group_rule" "example" {
+	type = "ingress"
+	from_port = 0
+	to_port = 65535
+	protocol = "tcp"
+	cidr_blocks = ["0.0.0.0/0"]
+	security_group_id = "sg-123456"
+}`,
+			checkAttribute: "cidr_blocks",
+			expectedResult: false,
+		},
+		{
+			name: "cidr is empty",
+			source: `
+resource "aws_security_group_rule" "example" {
+	type = "ingress"
+	from_port = 0
+	to_port = 65535
+	protocol = "tcp"
+	cidr_blocks = []
+	security_group_id = "sg-123456"
+}`,
+			checkAttribute: "cidr_blocks",
+			expectedResult: 
true,
+		},
+		{
+			name: "from_port is not empty",
+			source: `
+resource "aws_security_group_rule" "example" {
+	type = "ingress"
+	from_port = 0
+	to_port = 65535
+	protocol = "tcp"
+	cidr_blocks = []
+	security_group_id = "sg-123456"
+}`,
+			checkAttribute: "from_port",
+			expectedResult: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := createModulesFromSource(t, test.source, ".tf")
+			for _, module := range modules {
+				for _, block := range module.GetBlocks() {
+					if !block.HasChild(test.checkAttribute) {
+						t.FailNow()
+					}
+					attr := block.GetAttribute(test.checkAttribute)
+					assert.Equal(t, test.expectedResult, attr.IsEmpty())
+				}
+			}
+		})
+	}
+}
+
+func Test_AttributeIsLessThan(t *testing.T) {
+	var tests = []struct {
+		name           string
+		source         string
+		checkAttribute string
+		checkValue     int
+		expectedResult bool
+	}{
+		{
+			name: "check attribute is less than check value",
+			source: `
+resource "numerical_something" "my-bucket" {
+	value = 100
+}`,
+			checkAttribute: "value",
+			checkValue: 200,
+			expectedResult: true,
+		},
+		{
+			name: "check attribute is not less than check value",
+			source: `
+resource "numerical_something" "my-bucket" {
+	value = 100
+}`,
+			checkAttribute: "value",
+			checkValue: 50,
+			expectedResult: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := createModulesFromSource(t, test.source, ".tf")
+			for _, module := range modules {
+				for _, block := range module.GetBlocks() {
+					if !block.HasChild(test.checkAttribute) {
+						t.FailNow()
+					}
+					attr := block.GetAttribute(test.checkAttribute)
+					assert.Equal(t, test.expectedResult, attr.LessThan(test.checkValue))
+				}
+			}
+		})
+	}
+}
+
+func Test_AttributeIsLessThanOrEqual(t *testing.T) {
+	var tests = []struct {
+		name           string
+		source         string
+		checkAttribute string
+		checkValue     int
+		expectedResult bool
+	}{
+		{
+			name: "check attribute is less than or equal check value",
+			source: `
+resource "numerical_something" "my-bucket" {
+	value = 100
+}`,
+			checkAttribute: "value",
+			checkValue: 100,
+			expectedResult: true,
+		},
+		{
+			name: "check attribute is not less than check value",
+			source: `
+resource "numerical_something" "my-bucket" {
+	value = 100
+}`,
+			checkAttribute: "value",
+			checkValue: 50,
+			expectedResult: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := createModulesFromSource(t, test.source, ".tf")
+			for _, module := range modules {
+				for _, block := range module.GetBlocks() {
+					if !block.HasChild(test.checkAttribute) {
+						t.FailNow()
+					}
+					attr := block.GetAttribute(test.checkAttribute)
+					assert.Equal(t, test.expectedResult, attr.LessThanOrEqualTo(test.checkValue))
+				}
+			}
+		})
+	}
+}
+
+func Test_AttributeIsTrue(t *testing.T) {
+	var tests = []struct {
+		name           string
+		source         string
+		checkAttribute string
+		expectedResult bool
+	}{
+		{
+			name: "check attribute is true",
+			source: `
+resource "boolean_something" "my-something" {
+	value = true
+}`,
+			checkAttribute: "value",
+			expectedResult: true,
+		},
+		{
+			name: "check attribute as string is true",
+			source: `
+resource "boolean_something" "my-something" {
+	value = "true"
+}`,
+			checkAttribute: "value",
+			expectedResult: true,
+		},
+		{
+			name: "check attribute as string is false",
+			source: `
+resource "boolean_something" "my-something" {
+	value = "false"
+}`,
+			checkAttribute: "value",
+			expectedResult: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			modules := 
createModulesFromSource(t, test.source, ".tf") + for _, module := range modules { + for _, block := range module.GetBlocks() { + if !block.HasChild(test.checkAttribute) { + t.FailNow() + } + attr := block.GetAttribute(test.checkAttribute) + assert.Equal(t, test.expectedResult, attr.IsTrue()) + } + } + }) + } +} + +func Test_AttributeIsFalse(t *testing.T) { + var tests = []struct { + name string + source string + checkAttribute string + expectedResult bool + }{ + { + name: "check attribute is false", + source: ` +resource "boolean_something" "my-something" { + value = false +}`, + checkAttribute: "value", + expectedResult: true, + }, + { + name: "check attribute as string is false", + source: ` +resource "boolean_something" "my-something" { + value = "false" +}`, + checkAttribute: "value", + expectedResult: true, + }, + { + name: "check attribute true", + source: ` +resource "boolean_something" "my-something" { + value = "true" +}`, + checkAttribute: "value", + expectedResult: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := createModulesFromSource(t, test.source, ".tf") + for _, module := range modules { + for _, block := range module.GetBlocks() { + if !block.HasChild(test.checkAttribute) { + t.FailNow() + } + attr := block.GetAttribute(test.checkAttribute) + assert.Equal(t, test.expectedResult, attr.IsFalse()) + } + } + }) + } +} diff --git a/test/block_test.go b/test/block_test.go new file mode 100644 index 000000000000..9f806f5b2ca3 --- /dev/null +++ b/test/block_test.go @@ -0,0 +1,138 @@ +package test + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_IsPresentCheckOnBlock(t *testing.T) { + var tests = []struct { + name string + source string + expectedAttribute string + }{ + { + name: "expected attribute is present", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" +}`, + expectedAttribute: "bucket_name", + }, + { + name: "expected acl attribute is present", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" +}`, + expectedAttribute: "acl", + }, + { + name: "expected acl attribute is present", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" + logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +}`, + expectedAttribute: "logging", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := createModulesFromSource(t, test.source, ".tf") + for _, module := range modules { + for _, block := range module.GetBlocks() { + assert.Equal(t, block.HasChild(test.expectedAttribute), true) + assert.Equal(t, !block.HasChild(test.expectedAttribute), false) + } + } + }) + } +} + +func Test_IsNotPresentCheckOnBlock(t *testing.T) { + var tests = []struct { + name string + source string + expectedAttribute string + }{ + { + name: "expected attribute is not present", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + +}`, + expectedAttribute: "acl", + }, + { + name: "expected acl attribute is not present", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" + +}`, + expectedAttribute: "logging", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := createModulesFromSource(t, test.source, ".tf") + for _, module := range modules { + for _, block := range module.GetBlocks() { + assert.Equal(t, 
block.HasChild(test.expectedAttribute), false) + assert.Equal(t, !block.HasChild(test.expectedAttribute), true) + } + } + }) + } +} + +func Test_MissingChildNotFoundOnBlock(t *testing.T) { + var tests = []struct { + name string + source string + expectedAttribute string + }{ + { + name: "expected attribute is not present", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + +}`, + expectedAttribute: "acl", + }, + { + name: "expected acl attribute is not present", + source: ` +resource "aws_s3_bucket" "my-bucket" { + bucket_name = "bucketName" + acl = "public-read" + +}`, + expectedAttribute: "logging", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + modules := createModulesFromSource(t, test.source, ".tf") + for _, module := range modules { + for _, block := range module.GetBlocks() { + assert.Equal(t, block.MissingChild(test.expectedAttribute), true) + assert.Equal(t, !block.HasChild(test.expectedAttribute), true) + } + } + }) + } +} diff --git a/test/count_test.go b/test/count_test.go new file mode 100644 index 000000000000..62e15d9d4416 --- /dev/null +++ b/test/count_test.go @@ -0,0 +1,193 @@ +package test + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/stretchr/testify/assert" + + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_ResourcesWithCount(t *testing.T) { + var tests = []struct { + name string + source string + expectedResults int + }{ + { + name: "unspecified count defaults to 1", + source: ` + resource "bad" "this" {} +`, + expectedResults: 1, + }, + { + name: "count is literal 1", + source: ` + resource "bad" "this" { + count = 1 + } +`, + expectedResults: 1, + }, + { + name: "count is literal 99", + source: ` + resource "bad" "this" { + count = 99 + } +`, + expectedResults: 99, + }, + { + name: "count is literal 0", + source: ` + resource "bad" "this" { + count = 0 + } +`, + expectedResults: 0, + }, + { + name: "count is 0 from variable", + source: ` + variable "count" { + default = 0 + } + resource "bad" "this" { + count = var.count + } +`, + expectedResults: 0, + }, + { + name: "count is 1 from variable", + source: ` + variable "count" { + default = 1 + } + resource "bad" "this" { + count = var.count + } +`, + expectedResults: 1, + }, + { + name: "count is 1 from variable without default", + source: ` + variable "count" { + } + resource "bad" "this" { + count = var.count + } +`, + expectedResults: 1, + }, + { + name: "count is 0 from conditional", + source: ` + variable "enabled" { + default = false + } + resource "bad" "this" { + count = var.enabled ? 1 : 0 + } +`, + expectedResults: 0, + }, + { + name: "count is 1 from conditional", + source: ` + variable "enabled" { + default = true + } + resource "bad" "this" { + count = var.enabled ? 
1 : 0 + } +`, + expectedResults: 1, + }, + { + name: "issue 962", + source: ` + resource "something" "else" { + count = 2 + ok = true + } + + resource "bad" "bad" { + secure = something.else[0].ok + } +`, + expectedResults: 0, + }, + { + name: "Test use of count.index", + source: ` +resource "bad" "thing" { + count = 1 + secure = var.things[count.index]["ok"] +} + +variable "things" { + description = "A list of maps that creates a number of sg" + type = list(map(string)) + + default = [ + { + ok = true + } + ] +} + `, + expectedResults: 0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + r1 := scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc123", + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredLabels: []string{"bad"}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + if resourceBlock.GetAttribute("secure").IsTrue() { + return + } + results.Add( + "example problem", + resourceBlock, + ) + return + }, + }, + }, + } + reg := rules.Register(r1) + defer rules.Deregister(reg) + results := scanHCL(t, test.source) + var include string + var exclude string + if test.expectedResults > 0 { + include = r1.LongID() + } else { + exclude = r1.LongID() + } + assert.Equal(t, test.expectedResults, len(results.GetFailed())) + if include != "" { + testutil.AssertRuleFound(t, include, results, "false negative found") + } + if exclude != "" { + testutil.AssertRuleNotFound(t, exclude, results, "false positive found") + } + }) + } +} diff --git a/test/deterministic_test.go b/test/deterministic_test.go new file mode 100644 index 000000000000..1de14723476e --- /dev/null +++ b/test/deterministic_test.go @@ -0,0 +1,50 @@ +package test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/terraform/executor" + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_DeterministicResults(t *testing.T) { + + reg := rules.Register(badRule) + defer rules.Deregister(reg) + + fs := testutil.CreateFS(t, map[string]string{ + "first.tf": ` +resource "problem" "uhoh" { + bad = true + for_each = other.thing +} + `, + "second.tf": ` +resource "other" "thing" { + for_each = local.list +} + `, + "third.tf": ` +locals { + list = { + a = 1, + b = 2, + } +} + `, + }) + + for i := 0; i < 100; i++ { + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), ".") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + require.Len(t, results.GetFailed(), 2) + } +} diff --git a/test/docker_test.go b/test/docker_test.go new file mode 100644 index 000000000000..f732dda9b742 --- /dev/null +++ b/test/docker_test.go @@ -0,0 +1,138 @@ +package test + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/dockerfile" +) + +// func addFilesToMemFS(memfs *memoryfs.FS, typePolicy bool, folderName string) error { +// base := filepath.Base(folderName) +// if err := memfs.MkdirAll(base, 0o700); err != nil { +// 
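The count, determinism and ignore tests all lean on the same register-scan-deregister pattern. A condensed sketch of it, assuming the rules registry package these files reference (its import is not visible in the hunks above) and the package's existing scanHCL helper; the test name is invented:

func Test_TemporaryRuleSketch(t *testing.T) {
	reg := rules.Register(badRule) // badRule is the check defined in module_test.go
	defer rules.Deregister(reg)    // keep the shared registry clean for other tests

	results := scanHCL(t, `
resource "problem" "this" {
  bad = true
}`)
	assert.Len(t, results.GetFailed(), 1)
}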
return err +// } +// err := filepath.Walk(filepath.FromSlash(folderName), +// func(fpath string, info os.FileInfo, err error) error { +// if err != nil { +// return err +// } +// if info.IsDir() { +// return nil +// } +// if typePolicy && !rego.IsRegoFile(info.Name()) { +// return nil +// } +// data, err := os.ReadFile(fpath) +// if err != nil { +// return err +// } +// fileName := getFileName(fpath, info, typePolicy) +// if err := memfs.WriteFile(path.Join(base, fileName), data, 0o644); err != nil { +// return err +// } +// return nil +// }) +// +// if err != nil { +// return err +// } +// return nil +//} + +// TODO: Evaluate usefulness of this test +// func Test_Docker_RegoPoliciesFromDisk(t *testing.T) { +// t.Parallel() +// +// entries, err := os.ReadDir("./testdata/dockerfile") +// require.NoError(t, err) +// +// policiesPath, err := filepath.Abs("../trules") +// require.NoError(t, err) +// scanner := dockerfile.NewScanner( +// options.ScannerWithPolicyDirs(filepath.Base(policiesPath)), +// ) +// memfs := memoryfs.New() +// // add policies +// err = addFilesToMemFS(memfs, true, policiesPath) +// require.NoError(t, err) +// +// // add test data +// testDataPath, err := filepath.Abs("./testdata/dockerfile") +// require.NoError(t, err) +// err = addFilesToMemFS(memfs, false, testDataPath) +// require.NoError(t, err) +// +// results, err := scanner.ScanFS(context.TODO(), memfs, filepath.Base(testDataPath)) +// require.NoError(t, err) +// +// for _, entry := range entries { +// if !entry.IsDir() { +// continue +// } +// t.Run(entry.Name(), func(t *testing.T) { +// require.NoError(t, err) +// t.Run(entry.Name(), func(t *testing.T) { +// var matched int +// for _, result := range results { +// if result.Rule().HasID(entry.Name()) && result.Status() == scan.StatusFailed { +// if result.Description() != "Specify at least 1 USER command in Dockerfile with non-root user as argument" { +// assert.Greater(t, result.Range().GetStartLine(), 0) +// assert.Greater(t, result.Range().GetEndLine(), 0) +// } +// if !strings.HasSuffix(result.Range().GetFilename(), entry.Name()) { +// continue +// } +// matched++ +// } +// } +// assert.Equal(t, 1, matched, "Rule should be matched once") +// }) +// +// }) +// } +//} + +func Test_Docker_RegoPoliciesEmbedded(t *testing.T) { + t.Parallel() + + entries, err := os.ReadDir("./testdata/dockerfile") + require.NoError(t, err) + + scanner := dockerfile.NewScanner(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + srcFS := os.DirFS("../") + + results, err := scanner.ScanFS(context.TODO(), srcFS, "test/testdata/dockerfile") + require.NoError(t, err) + + for _, entry := range entries { + if !entry.IsDir() { + continue + } + t.Run(entry.Name(), func(t *testing.T) { + require.NoError(t, err) + t.Run(entry.Name(), func(t *testing.T) { + var matched bool + for _, result := range results { + if result.Rule().HasID(entry.Name()) && result.Status() == scan.StatusFailed { + if result.Description() != "Specify at least 1 USER command in Dockerfile with non-root user as argument" { + assert.Greater(t, result.Range().GetStartLine(), 0) + assert.Greater(t, result.Range().GetEndLine(), 0) + } + assert.Equal(t, fmt.Sprintf("test/testdata/dockerfile/%s/Dockerfile.denied", entry.Name()), result.Range().GetFilename()) + matched = true + } + } + assert.True(t, matched) + }) + + }) + } +} diff --git a/test/fs_test.go b/test/fs_test.go new file mode 100644 index 000000000000..4b628e4d41e6 --- /dev/null +++ b/test/fs_test.go @@ -0,0 +1,24 @@ +package 
test + +import ( + "context" + "os" + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy/pkg/scanners/terraform" +) + +func Test_OS_FS(t *testing.T) { + s := terraform.New( + options.ScannerWithDebug(os.Stderr), + options.ScannerWithEmbeddedPolicies(true), + options.ScannerWithEmbeddedLibraries(true), + ) + results, err := s.ScanFS(context.TODO(), os.DirFS("tf"), "fail") + require.NoError(t, err) + assert.Greater(t, len(results.GetFailed()), 0) +} diff --git a/test/ignore_test.go b/test/ignore_test.go new file mode 100644 index 000000000000..55bc1ac9e574 --- /dev/null +++ b/test/ignore_test.go @@ -0,0 +1,528 @@ +package test + +import ( + "fmt" + "strings" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/stretchr/testify/assert" +) + +var exampleRule = scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc123", + AVDID: "AWS-ABC-123", + Aliases: []string{"aws-other-abc123"}, + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredLabels: []string{"bad"}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + attr := resourceBlock.GetAttribute("secure") + if attr.IsNil() { + results.Add("example problem", resourceBlock) + } + if attr.IsFalse() { + results.Add("example problem", attr) + } + return + }, + }, + }, +} + +func Test_IgnoreAll(t *testing.T) { + + var testCases = []struct { + name string + inputOptions string + assertLength int + }{ + {name: "IgnoreAll", inputOptions: ` +resource "bad" "my-rule" { + secure = false // tfsec:ignore:* +} +`, assertLength: 0}, + {name: "IgnoreLineAboveTheBlock", inputOptions: ` +// tfsec:ignore:* +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "IgnoreLineAboveTheBlockMatchingParamBool", inputOptions: ` +// tfsec:ignore:*[secure=false] +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "IgnoreLineAboveTheBlockNotMatchingParamBool", inputOptions: ` +// tfsec:ignore:*[secure=true] +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 1}, + {name: "IgnoreLineAboveTheBlockMatchingParamString", inputOptions: ` +// tfsec:ignore:*[name=myrule] +resource "bad" "my-rule" { + name = "myrule" + secure = false +} +`, assertLength: 0}, + {name: "IgnoreLineAboveTheBlockNotMatchingParamString", inputOptions: ` +// tfsec:ignore:*[name=myrule2] +resource "bad" "my-rule" { + name = "myrule" + secure = false +} +`, assertLength: 1}, + {name: "IgnoreLineAboveTheBlockMatchingParamInt", inputOptions: ` +// tfsec:ignore:*[port=123] +resource "bad" "my-rule" { + secure = false + port = 123 +} +`, assertLength: 0}, + {name: "IgnoreLineAboveTheBlockNotMatchingParamInt", inputOptions: ` +// tfsec:ignore:*[port=456] +resource "bad" "my-rule" { + secure = false + port = 123 +} +`, assertLength: 1}, + {name: "IgnoreLineStackedAboveTheBlock", inputOptions: ` +// tfsec:ignore:* +// tfsec:ignore:a +// tfsec:ignore:b +// tfsec:ignore:c +// tfsec:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "IgnoreLineStackedAboveTheBlockWithoutMatch", inputOptions: ` +#tfsec:ignore:* + +#tfsec:ignore:x 
+#tfsec:ignore:a +#tfsec:ignore:b +#tfsec:ignore:c +#tfsec:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 1}, + {name: "IgnoreLineStackedAboveTheBlockWithHashesWithoutSpaces", inputOptions: ` +#tfsec:ignore:* +#tfsec:ignore:a +#tfsec:ignore:b +#tfsec:ignore:c +#tfsec:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "IgnoreLineStackedAboveTheBlockWithoutSpaces", inputOptions: ` +//tfsec:ignore:* +//tfsec:ignore:a +//tfsec:ignore:b +//tfsec:ignore:c +//tfsec:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "IgnoreLineAboveTheLine", inputOptions: ` +resource "bad" "my-rule" { + # tfsec:ignore:aws-service-abc123 + secure = false +} +`, assertLength: 0}, + {name: "IgnoreWithExpDateIfDateBreachedThenDontIgnore", inputOptions: ` +resource "bad" "my-rule" { + secure = false # tfsec:ignore:aws-service-abc123:exp:2000-01-02 +} +`, assertLength: 1}, + {name: "IgnoreWithExpDateIfDateNotBreachedThenIgnoreIgnore", inputOptions: ` +resource "bad" "my-rule" { + secure = false # tfsec:ignore:aws-service-abc123:exp:2221-01-02 +} +`, assertLength: 0}, + {name: "IgnoreWithExpDateIfDateInvalidThenDropTheIgnore", inputOptions: ` +resource "bad" "my-rule" { + secure = false # tfsec:ignore:aws-service-abc123:exp:2221-13-02 +} +`, assertLength: 1}, + {name: "IgnoreAboveResourceBlockWithExpDateIfDateNotBreachedThenIgnoreIgnore", inputOptions: ` +#tfsec:ignore:aws-service-abc123:exp:2221-01-02 +resource "bad" "my-rule" { +} +`, assertLength: 0}, + {name: "IgnoreAboveResourceBlockWithExpDateAndMultipleIgnoresIfDateNotBreachedThenIgnoreIgnore", inputOptions: ` +# tfsec:ignore:aws-service-abc123:exp:2221-01-02 +resource "bad" "my-rule" { + +} +`, assertLength: 0}, + {name: "IgnoreForImpliedIAMResource", inputOptions: ` +terraform { +required_version = "~> 1.1.6" + +required_providers { +aws = { +source = "hashicorp/aws" +version = "~> 3.48" +} +} +} + +# Retrieve an IAM group defined outside of this Terraform config. + +# tfsec:ignore:aws-iam-enforce-mfa +data "aws_iam_group" "externally_defined_group" { +group_name = "group-name" # tfsec:ignore:aws-iam-enforce-mfa +} + +# Create an IAM policy and attach it to the group. 
+ +# tfsec:ignore:aws-iam-enforce-mfa +resource "aws_iam_policy" "test_policy" { +name = "test-policy" # tfsec:ignore:aws-iam-enforce-mfa +policy = data.aws_iam_policy_document.test_policy.json # tfsec:ignore:aws-iam-enforce-mfa +} + +# tfsec:ignore:aws-iam-enforce-mfa +resource "aws_iam_group_policy_attachment" "test_policy_attachment" { +group = data.aws_iam_group.externally_defined_group.group_name # tfsec:ignore:aws-iam-enforce-mfa +policy_arn = aws_iam_policy.test_policy.arn # tfsec:ignore:aws-iam-enforce-mfa +} + +# tfsec:ignore:aws-iam-enforce-mfa +data "aws_iam_policy_document" "test_policy" { +statement { +sid = "PublishToCloudWatch" # tfsec:ignore:aws-iam-enforce-mfa +actions = [ +"cloudwatch:PutMetricData", # tfsec:ignore:aws-iam-enforce-mfa +] +resources = ["*"] # tfsec:ignore:aws-iam-enforce-mfa +} +} +`, assertLength: 0}, + {name: "TrivyIgnoreAll", inputOptions: ` +resource "bad" "my-rule" { + secure = false // trivy:ignore:* +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineAboveTheBlock", inputOptions: ` +// trivy:ignore:* +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineAboveTheBlockMatchingParamBool", inputOptions: ` +// trivy:ignore:*[secure=false] +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineAboveTheBlockNotMatchingParamBool", inputOptions: ` +// trivy:ignore:*[secure=true] +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 1}, + {name: "TrivyIgnoreLineAboveTheBlockMatchingParamString", inputOptions: ` +// trivy:ignore:*[name=myrule] +resource "bad" "my-rule" { + name = "myrule" + secure = false +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineAboveTheBlockNotMatchingParamString", inputOptions: ` +// trivy:ignore:*[name=myrule2] +resource "bad" "my-rule" { + name = "myrule" + secure = false +} +`, assertLength: 1}, + {name: "TrivyIgnoreLineAboveTheBlockMatchingParamInt", inputOptions: ` +// trivy:ignore:*[port=123] +resource "bad" "my-rule" { + secure = false + port = 123 +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineAboveTheBlockNotMatchingParamInt", inputOptions: ` +// trivy:ignore:*[port=456] +resource "bad" "my-rule" { + secure = false + port = 123 +} +`, assertLength: 1}, + {name: "TrivyIgnoreLineStackedAboveTheBlock", inputOptions: ` +// trivy:ignore:* +// trivy:ignore:a +// trivy:ignore:b +// trivy:ignore:c +// trivy:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineStackedAboveTheBlockWithoutMatch", inputOptions: ` +#trivy:ignore:* + +#trivy:ignore:x +#trivy:ignore:a +#trivy:ignore:b +#trivy:ignore:c +#trivy:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 1}, + {name: "TrivyIgnoreLineStackedAboveTheBlockWithHashesWithoutSpaces", inputOptions: ` +#trivy:ignore:* +#trivy:ignore:a +#trivy:ignore:b +#trivy:ignore:c +#trivy:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineStackedAboveTheBlockWithoutSpaces", inputOptions: ` +//trivy:ignore:* +//trivy:ignore:a +//trivy:ignore:b +//trivy:ignore:c +//trivy:ignore:d +resource "bad" "my-rule" { + secure = false +} +`, assertLength: 0}, + {name: "TrivyIgnoreLineAboveTheLine", inputOptions: ` +resource "bad" "my-rule" { + # trivy:ignore:aws-service-abc123 + secure = false +} +`, assertLength: 0}, + {name: "TrivyIgnoreWithExpDateIfDateBreachedThenDontIgnore", inputOptions: ` +resource "bad" "my-rule" { + secure = false # trivy:ignore:aws-service-abc123:exp:2000-01-02 +} +`, 
assertLength: 1}, + {name: "TrivyIgnoreWithExpDateIfDateNotBreachedThenIgnoreIgnore", inputOptions: ` +resource "bad" "my-rule" { + secure = false # trivy:ignore:aws-service-abc123:exp:2221-01-02 +} +`, assertLength: 0}, + {name: "TrivyIgnoreWithExpDateIfDateInvalidThenDropTheIgnore", inputOptions: ` +resource "bad" "my-rule" { + secure = false # trivy:ignore:aws-service-abc123:exp:2221-13-02 +} +`, assertLength: 1}, + {name: "TrivyIgnoreAboveResourceBlockWithExpDateIfDateNotBreachedThenIgnoreIgnore", inputOptions: ` +#trivy:ignore:aws-service-abc123:exp:2221-01-02 +resource "bad" "my-rule" { +} +`, assertLength: 0}, + {name: "TrivyIgnoreAboveResourceBlockWithExpDateAndMultipleIgnoresIfDateNotBreachedThenIgnoreIgnore", inputOptions: ` +# trivy:ignore:aws-service-abc123:exp:2221-01-02 +resource "bad" "my-rule" { + +} +`, assertLength: 0}, + {name: "TrivyIgnoreForImpliedIAMResource", inputOptions: ` +terraform { +required_version = "~> 1.1.6" + +required_providers { +aws = { +source = "hashicorp/aws" +version = "~> 3.48" +} +} +} + +# Retrieve an IAM group defined outside of this Terraform config. + +# trivy:ignore:aws-iam-enforce-mfa +data "aws_iam_group" "externally_defined_group" { +group_name = "group-name" # trivy:ignore:aws-iam-enforce-mfa +} + +# Create an IAM policy and attach it to the group. + +# trivy:ignore:aws-iam-enforce-mfa +resource "aws_iam_policy" "test_policy" { +name = "test-policy" # trivy:ignore:aws-iam-enforce-mfa +policy = data.aws_iam_policy_document.test_policy.json # trivy:ignore:aws-iam-enforce-mfa +} + +# trivy:ignore:aws-iam-enforce-mfa +resource "aws_iam_group_policy_attachment" "test_policy_attachment" { +group = data.aws_iam_group.externally_defined_group.group_name # trivy:ignore:aws-iam-enforce-mfa +policy_arn = aws_iam_policy.test_policy.arn # trivy:ignore:aws-iam-enforce-mfa +} + +# trivy:ignore:aws-iam-enforce-mfa +data "aws_iam_policy_document" "test_policy" { +statement { +sid = "PublishToCloudWatch" # trivy:ignore:aws-iam-enforce-mfa +actions = [ +"cloudwatch:PutMetricData", # trivy:ignore:aws-iam-enforce-mfa +] +resources = ["*"] # trivy:ignore:aws-iam-enforce-mfa +} +} +`, assertLength: 0}} + + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + results := scanHCL(t, tc.inputOptions) + assert.Len(t, results.GetFailed(), tc.assertLength) + }) + } +} + +func Test_IgnoreIgnoreWithExpiryAndWorkspaceAndWorkspaceSupplied(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + results := scanHCLWithWorkspace(t, ` +# tfsec:ignore:aws-service-abc123:exp:2221-01-02:ws:testworkspace +resource "bad" "my-rule" { +} +`, "testworkspace") + assert.Len(t, results.GetFailed(), 0) +} + +func Test_IgnoreInline(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + results := scanHCL(t, fmt.Sprintf(` + resource "bad" "sample" { + secure = false # tfsec:ignore:%s + } + `, exampleRule.LongID())) + assert.Len(t, results.GetFailed(), 0) +} + +func Test_IgnoreIgnoreWithExpiryAndWorkspaceButWrongWorkspaceSupplied(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + results := scanHCLWithWorkspace(t, ` +# tfsec:ignore:aws-service-abc123:exp:2221-01-02:ws:otherworkspace +resource "bad" "my-rule" { + +} +`, "testworkspace") + assert.Len(t, results.GetFailed(), 1) +} + +func Test_IgnoreWithAliasCodeStillIgnored(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + 
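Taken together, the cases above and below cover the whole ignore-comment grammar; schematically:

# trivy:ignore:<rule-id>                  ignore one rule (tfsec:ignore:... is also accepted)
# trivy:ignore:*                          ignore every rule for the annotated line/block
# trivy:ignore:*[name=myrule]             ignore only when the block has a matching attribute value
# trivy:ignore:<rule-id>:exp:2221-01-02   ignore until the expiry date has passed
# trivy:ignore:<rule-id>:ws:testworkspace ignore only in the named Terraform workspace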
results := scanHCLWithWorkspace(t, ` +# tfsec:ignore:aws-other-abc123 +resource "bad" "my-rule" { + +} +`, "testworkspace") + assert.Len(t, results.GetFailed(), 0) +} + +func Test_TrivyIgnoreIgnoreWithExpiryAndWorkspaceAndWorkspaceSupplied(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + results := scanHCLWithWorkspace(t, ` +# trivy:ignore:aws-service-abc123:exp:2221-01-02:ws:testworkspace +resource "bad" "my-rule" { +} +`, "testworkspace") + assert.Len(t, results.GetFailed(), 0) +} + +func Test_TrivyIgnoreIgnoreWithExpiryAndWorkspaceButWrongWorkspaceSupplied(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + results := scanHCLWithWorkspace(t, ` +# trivy:ignore:aws-service-abc123:exp:2221-01-02:ws:otherworkspace +resource "bad" "my-rule" { + +} +`, "testworkspace") + assert.Len(t, results.GetFailed(), 1) +} + +func Test_TrivyIgnoreWithAliasCodeStillIgnored(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + results := scanHCLWithWorkspace(t, ` +# trivy:ignore:aws-other-abc123 +resource "bad" "my-rule" { + +} +`, "testworkspace") + assert.Len(t, results.GetFailed(), 0) +} + +func Test_TrivyIgnoreInline(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + + results := scanHCL(t, fmt.Sprintf(` + resource "bad" "sample" { + secure = false # trivy:ignore:%s + } + `, exampleRule.LongID())) + assert.Len(t, results.GetFailed(), 0) +} + +func Test_IgnoreInlineByAVDID(t *testing.T) { + testCases := []struct { + input string + }{ + { + input: ` + resource "bad" "sample" { + secure = false # tfsec:ignore:%s + } + `, + }, + { + input: ` + resource "bad" "sample" { + secure = false # trivy:ignore:%s + } + `, + }, + } + + for _, tc := range testCases { + tc := tc + for _, id := range []string{exampleRule.AVDID, strings.ToLower(exampleRule.AVDID), exampleRule.ShortCode, exampleRule.LongID()} { + id := id + t.Run("", func(t *testing.T) { + reg := rules.Register(exampleRule) + defer rules.Deregister(reg) + results := scanHCL(t, fmt.Sprintf(tc.input, id)) + assert.Len(t, results.GetFailed(), 0) + }) + } + } +} diff --git a/test/json_test.go b/test/json_test.go new file mode 100644 index 000000000000..da053a776697 --- /dev/null +++ b/test/json_test.go @@ -0,0 +1,102 @@ +package test + +import ( + "testing" + + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/test/testutil" +) + +func TestScanningJSON(t *testing.T) { + + var tests = []struct { + name string + source string + shouldFail bool + }{ + { + name: "check results are picked up in tf json configs", + source: ` + { + "provider": { + "aws": { + "profile": null, + "region": "eu-west-1" + } + }, + "resource": { + "bad": { + "thing": { + "type": "ingress", + "cidr_blocks": ["0.0.0.0/0"], + "description": "testing" + } + } + } + }`, + shouldFail: true, + }, + { + name: "check attributes are checked in tf json configs", + source: ` + { + "provider": { + "aws": { + "profile": null, + "region": "eu-west-1" + } + }, + "resource": { + "bad": { + "or_not": { + "secure": true + } + } + } + }`, + shouldFail: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + r1 := scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc123", + Severity: severity.High, + 
CustomChecks: scan.CustomChecks{
+					Terraform: &scan.TerraformCustomCheck{
+						RequiredLabels: []string{"bad"},
+						Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) {
+							if resourceBlock.GetAttribute("secure").IsTrue() {
+								return
+							}
+							results.Add("something", resourceBlock)
+							return
+						},
+					},
+				},
+			}
+			reg := rules.Register(r1)
+			defer rules.Deregister(reg)
+
+			results := scanJSON(t, test.source)
+			var include, exclude string
+			if test.shouldFail {
+				include = r1.LongID()
+			} else {
+				exclude = r1.LongID()
+			}
+			if include != "" {
+				testutil.AssertRuleFound(t, include, results, "false negative found")
+			}
+			if exclude != "" {
+				testutil.AssertRuleNotFound(t, exclude, results, "false positive found")
+			}
+		})
+	}
+} diff --git a/test/kubernetes_test.go b/test/kubernetes_test.go new file mode 100644 index 000000000000..25c63e5bd490 --- /dev/null +++ b/test/kubernetes_test.go @@ -0,0 +1,131 @@ +package test
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/aquasecurity/trivy/pkg/scan"
+	"github.com/aquasecurity/trivy/pkg/scanners/options"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/aquasecurity/trivy/pkg/scanners/kubernetes"
+)
+
+func Test_Kubernetes_RegoPoliciesFromDisk(t *testing.T) {
+	t.Parallel()
+
+	entries, err := os.ReadDir("./testdata/kubernetes")
+	require.NoError(t, err)
+
+	scanner := kubernetes.NewScanner(
+		options.ScannerWithPerResultTracing(true),
+		options.ScannerWithEmbeddedPolicies(true),
+		options.ScannerWithEmbeddedLibraries(true),
+	)
+
+	srcFS := os.DirFS("../")
+
+	results, err := scanner.ScanFS(context.TODO(), srcFS, "test/testdata/kubernetes")
+	require.NoError(t, err)
+
+	for _, entry := range entries {
+		if !entry.IsDir() {
+			continue
+		}
+		if entry.Name() == "optional" {
+			continue
+		}
+		t.Run(entry.Name(), func(t *testing.T) {
+			var matched bool
+			for _, result := range results {
+				if result.Rule().HasID(entry.Name()) {
+
+					failCase := fmt.Sprintf("test/testdata/kubernetes/%s/denied.yaml", entry.Name())
+					passCase := fmt.Sprintf("test/testdata/kubernetes/%s/allowed.yaml", entry.Name())
+
+					switch result.Range().GetFilename() {
+					case failCase:
+						assert.Equal(t, scan.StatusFailed, result.Status(), "Rule should have failed, but didn't.")
+						assert.Greater(t, result.Range().GetStartLine(), 0, "We should have line numbers for a failure")
+						assert.Greater(t, result.Range().GetEndLine(), 0, "We should have line numbers for a failure")
+						matched = true
+					case passCase:
+						assert.Equal(t, scan.StatusPassed, result.Status(), "Rule should have passed, but didn't.")
+						matched = true
+					default:
+						if strings.Contains(result.Range().GetFilename(), entry.Name()) {
+							t.Fatal(result.Range().GetFilename())
+						}
+						continue
+					}
+
+					if t.Failed() {
+						fmt.Println("Test failed - rego trace follows:")
+						for _, trace := range result.Traces() {
+							fmt.Println(trace)
+						}
+					}
+				}
+			}
+			assert.True(t, matched, "Neither a pass or fail result was found for %s - did you add example code for it?", entry.Name())
+		})
+	}
+}
+
+func Test_Kubernetes_RegoPoliciesEmbedded(t *testing.T) {
+	t.Parallel()
+
+	entries, err := os.ReadDir("./testdata/kubernetes")
+	require.NoError(t, err)
+
+	scanner := kubernetes.NewScanner(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true))
+
+	srcFS := os.DirFS("../")
+
+	results, err := scanner.ScanFS(context.TODO(), srcFS,
"test/testdata/kubernetes") + require.NoError(t, err) + + for _, entry := range entries { + if !entry.IsDir() { + continue + } + if entry.Name() == "optional" { + continue + } + t.Run(entry.Name(), func(t *testing.T) { + var matched bool + for _, result := range results { + if result.Rule().HasID(entry.Name()) { + + failCase := fmt.Sprintf("test/testdata/kubernetes/%s/denied.yaml", entry.Name()) + passCase := fmt.Sprintf("test/testdata/kubernetes/%s/allowed.yaml", entry.Name()) + + switch result.Range().GetFilename() { + case failCase: + assert.Equal(t, scan.StatusFailed, result.Status(), "Rule should have failed, but didn't.") + assert.Greater(t, result.Range().GetStartLine(), 0, "We should have line numbers for a failure") + assert.Greater(t, result.Range().GetEndLine(), 0, "We should have line numbers for a failure") + matched = true + case passCase: + assert.Equal(t, scan.StatusPassed, result.Status(), "Rule should have passed, but didn't.") + matched = true + default: + continue + } + + if t.Failed() { + fmt.Println("Test failed - rego trace follows:") + for _, trace := range result.Traces() { + fmt.Println(trace) + } + } + } + } + assert.True(t, matched, "Neither a pass or fail result was found for %s - did you add example code for it?", entry.Name()) + }) + } +} diff --git a/test/module_test.go b/test/module_test.go new file mode 100644 index 000000000000..b47f0d48352b --- /dev/null +++ b/test/module_test.go @@ -0,0 +1,631 @@ +package test + +import ( + "bytes" + "context" + "fmt" + "os" + "testing" + + "github.com/aquasecurity/trivy/pkg/providers" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/scanners/options" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/stretchr/testify/require" + + "github.com/aquasecurity/trivy-policies/checks/cloud/aws/iam" + "github.com/aquasecurity/trivy/pkg/scanners/terraform/executor" + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +var badRule = scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc", + Summary: "A stupid example check for a test.", + Impact: "You will look stupid", + Resolution: "Don't do stupid stuff", + Explanation: "Bad should not be set.", + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredTypes: []string{"resource"}, + RequiredLabels: []string{"problem"}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + if attr := resourceBlock.GetAttribute("bad"); attr.IsTrue() { + results.Add("bad", attr) + } + return + }, + }, + }, +} + +// IMPORTANT: if this test is failing, you probably need to set the version of go-cty in go.mod to the same version that hcl uses. +func Test_GoCtyCompatibilityIssue(t *testing.T) { + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "/project/main.tf": ` +data "aws_vpc" "default" { + default = true +} + +module "test" { + source = "../modules/problem/" + cidr_block = data.aws_vpc.default.cidr_block +} +`, + "/modules/problem/main.tf": ` +variable "cidr_block" {} + +variable "open" { + default = false +} + +resource "aws_security_group" "this" { + name = "Test" + + ingress { + description = "HTTPs" + from_port = 443 + to_port = 443 + protocol = "tcp" + self = ! 
var.open + cidr_blocks = var.open ? [var.cidr_block] : null + } +} + +resource "problem" "uhoh" { + bad = true +} +`, + }) + + debug := bytes.NewBuffer([]byte{}) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true), options.ParserWithDebug(debug)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, err := executor.New().Execute(modules) + require.NoError(t, err) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + if t.Failed() { + fmt.Println(debug.String()) + } +} + +func Test_ProblemInModuleInSiblingDir(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "/project/main.tf": ` +module "something" { + source = "../modules/problem" +} +`, + "modules/problem/main.tf": ` +resource "problem" "uhoh" { + bad = true +} +`}, + ) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + +} + +func Test_ProblemInModuleIgnored(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "/project/main.tf": ` +#tfsec:ignore:aws-service-abc +module "something" { + source = "../modules/problem" +} +`, + "modules/problem/main.tf": ` +resource "problem" "uhoh" { + bad = true +} +`}, + ) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleNotFound(t, badRule.LongID(), results, "") + +} + +func Test_ProblemInModuleInSubdirectory(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +module "something" { + source = "./modules/problem" +} +`, + "project/modules/problem/main.tf": ` +resource "problem" "uhoh" { + bad = true +} +`}) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + +} + +func Test_ProblemInModuleInParentDir(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +module "something" { + source = "../problem" +} +`, + "problem/main.tf": ` +resource "problem" "uhoh" { + bad = true +} +`}) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + +} + +func Test_ProblemInModuleReuse(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` 
+module "something_good" { + source = "../modules/problem" + bad = false +} + +module "something_bad" { + source = "../modules/problem" + bad = true +} +`, + "modules/problem/main.tf": ` +variable "bad" { + default = false +} +resource "problem" "uhoh" { + bad = var.bad +} +`}) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + +} + +func Test_ProblemInNestedModule(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +module "something" { + source = "../modules/a" +} +`, + "modules/a/main.tf": ` + module "something" { + source = "../../modules/b" +} +`, + "modules/b/main.tf": ` +module "something" { + source = "../c" +} +`, + "modules/c/main.tf": ` +resource "problem" "uhoh" { + bad = true +} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true), options.ParserWithDebug(os.Stderr)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + +} + +func Test_ProblemInReusedNestedModule(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +module "something" { + source = "../modules/a" + bad = false +} + +module "something-bad" { + source = "../modules/a" + bad = true +} +`, + "modules/a/main.tf": ` +variable "bad" { + default = false +} +module "something" { + source = "../../modules/b" + bad = var.bad +} +`, + "modules/b/main.tf": ` +variable "bad" { + default = false +} +module "something" { + source = "../c" + bad = var.bad +} +`, + "modules/c/main.tf": ` +variable "bad" { + default = false +} +resource "problem" "uhoh" { + bad = var.bad +} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") +} + +func Test_ProblemInInitialisedModule(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +module "something" { + source = "../modules/somewhere" + bad = false +} +`, + "modules/somewhere/main.tf": ` +module "something_nested" { + count = 1 + source = "github.com/some/module.git" + bad = true +} + +variable "bad" { + default = false +} + +`, + "project/.terraform/modules/something.something_nested/main.tf": ` +variable "bad" { + default = false +} +resource "problem" "uhoh" { + bad = var.bad +} +`, + "project/.terraform/modules/modules.json": ` + {"Modules":[ + {"Key":"something","Source":"../modules/somewhere","Version":"2.35.0","Dir":"../modules/somewhere"}, + {"Key":"something.something_nested","Source":"git::https://github.com/some/module.git","Version":"2.35.0","Dir":".terraform/modules/something.something_nested"} + ]} +`, + }) + + p := parser.New(fs, "", 
parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") +} + +func Test_ProblemInReusedInitialisedModule(t *testing.T) { + + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +module "something" { + source = "/nowhere" + bad = false +} +module "something2" { + source = "/nowhere" + bad = true +} +`, + "project/.terraform/modules/a/main.tf": ` +variable "bad" { + default = false +} +resource "problem" "uhoh" { + bad = var.bad +} +`, + "project/.terraform/modules/modules.json": ` + {"Modules":[{"Key":"something","Source":"/nowhere","Version":"2.35.0","Dir":".terraform/modules/a"},{"Key":"something2","Source":"/nowhere","Version":"2.35.0","Dir":".terraform/modules/a"}]} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + +} + +func Test_ProblemInDuplicateModuleNameAndPath(t *testing.T) { + registered := rules.Register(badRule) + defer rules.Deregister(registered) + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +module "something" { + source = "../modules/a" + bad = 0 +} + +module "something-bad" { + source = "../modules/a" + bad = 1 +} +`, + "modules/a/main.tf": ` +variable "bad" { + default = 0 +} +module "something" { + source = "../b" + bad = var.bad +} +`, + "modules/b/main.tf": ` +variable "bad" { + default = 0 +} +module "something" { + source = "../c" + bad = var.bad +} +`, + "modules/c/main.tf": ` +variable "bad" { + default = 0 +} +resource "problem" "uhoh" { + count = var.bad + bad = true +} +`, + }) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, badRule.LongID(), results, "") + +} + +func Test_Dynamic_Variables(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +resource "something" "this" { + + dynamic "blah" { + for_each = ["a"] + + content { + ok = true + } + } +} + +resource "bad" "thing" { + secure = something.this.blah[0].ok +} +`}) + + r1 := scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc123", + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredLabels: []string{"bad"}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + if resourceBlock.GetAttribute("secure").IsTrue() { + return + } + results.Add("example problem", resourceBlock) + return + }, + }, + }, + } + reg := rules.Register(r1) + defer rules.Deregister(reg) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleFound(t, r1.LongID(), results, "") 
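+	// AssertRuleFound holds here because "secure" does not resolve to true for this fixture, so the custom check fires.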
+} + +func Test_Dynamic_Variables_FalsePositive(t *testing.T) { + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` +resource "something" "else" { + x = 1 + dynamic "blah" { + for_each = toset(["true"]) + + content { + ok = each.value + } + } +} + +resource "bad" "thing" { + secure = something.else.blah.ok +} +`}) + + r1 := scan.Rule{ + Provider: providers.AWSProvider, + Service: "service", + ShortCode: "abc123", + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredLabels: []string{"bad"}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + if resourceBlock.GetAttribute("secure").IsTrue() { + return + } + results.Add("example problem", resourceBlock) + return + }, + }, + }, + } + reg := rules.Register(r1) + defer rules.Deregister(reg) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleNotFound(t, r1.LongID(), results, "") +} + +func Test_ReferencesPassedToNestedModule(t *testing.T) { + + fs := testutil.CreateFS(t, map[string]string{ + "project/main.tf": ` + +resource "aws_iam_group" "developers" { + name = "developers" +} + +module "something" { + source = "../modules/a" + group = aws_iam_group.developers.name +} +`, + "modules/a/main.tf": ` +variable "group" { + type = string +} + +resource aws_iam_group_policy mfa { + group = var.group + policy = data.aws_iam_policy_document.policy.json +} + +data "aws_iam_policy_document" "policy" { + statement { + sid = "main" + effect = "Allow" + + actions = ["s3:*"] + resources = ["*"] + condition { + test = "Bool" + variable = "aws:MultiFactorAuthPresent" + values = ["true"] + } + } +} +`}) + + p := parser.New(fs, "", parser.OptionStopOnHCLError(true)) + err := p.ParseFS(context.TODO(), "project") + require.NoError(t, err) + modules, _, err := p.EvaluateAll(context.TODO()) + require.NoError(t, err) + results, _, _ := executor.New().Execute(modules) + testutil.AssertRuleNotFound(t, iam.CheckEnforceGroupMFA.LongID(), results, "") + +} diff --git a/test/performance_test.go b/test/performance_test.go new file mode 100644 index 000000000000..73d970091460 --- /dev/null +++ b/test/performance_test.go @@ -0,0 +1,57 @@ +package test + +import ( + "context" + "fmt" + "io/fs" + "testing" + + "github.com/aquasecurity/trivy/pkg/scanners/terraform/executor" + "github.com/aquasecurity/trivy/pkg/scanners/terraform/parser" + "github.com/aquasecurity/trivy/test/testutil" +) + +func BenchmarkCalculate(b *testing.B) { + + f, err := createBadBlocks() + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + p := parser.New(f, "", parser.OptionStopOnHCLError(true)) + if err := p.ParseFS(context.TODO(), "project"); err != nil { + b.Fatal(err) + } + modules, _, err := p.EvaluateAll(context.TODO()) + if err != nil { + b.Fatal(err) + } + _, _, _ = executor.New().Execute(modules) + } +} + +func createBadBlocks() (fs.FS, error) { + + files := make(map[string]string) + + files["/project/main.tf"] = ` +module "something" { + source = "../modules/problem" +} +` + + for _, rule := range rules.GetRegistered() { + if rule.GetRule().Terraform == nil { + continue + } + for i, bad := range rule.GetRule().Terraform.BadExamples { + filename := fmt.Sprintf("/modules/problem/%s-%d.tf", 
rule.GetRule().LongID(), i)
+			files[filename] = bad
+		}
+	}
+
+	f := testutil.CreateFS(&testing.T{}, files)
+	return f, nil
+} diff --git a/test/rules_test.go b/test/rules_test.go new file mode 100644 index 000000000000..4d295c1a7a5f --- /dev/null +++ b/test/rules_test.go @@ -0,0 +1,40 @@ +package test
+
+import (
+	"testing"
+
+	"github.com/aquasecurity/trivy/pkg/framework"
+)
+
+func TestAVDIDs(t *testing.T) {
+	existing := make(map[string]struct{})
+	for _, rule := range rules.GetRegistered(framework.ALL) {
+		t.Run(rule.LongID(), func(t *testing.T) {
+			if rule.GetRule().AVDID == "" {
+				t.Errorf("Rule has no AVD ID: %#v", rule)
+				return
+			}
+			if _, ok := existing[rule.GetRule().AVDID]; ok {
+				t.Errorf("Rule detected with duplicate AVD ID: %s", rule.GetRule().AVDID)
+			}
+		})
+		existing[rule.GetRule().AVDID] = struct{}{}
+	}
+}
+
+//func TestRulesAgainstExampleCode(t *testing.T) {
+//	for _, rule := range rules.GetRegistered(framework.ALL) {
+//		testName := fmt.Sprintf("%s/%s", rule.GetRule().AVDID, rule.LongID())
+//		t.Run(testName, func(t *testing.T) {
+//			rule := rule
+//			t.Parallel()
+//
+//			t.Run("avd docs", func(t *testing.T) {
+//				provider := strings.ToLower(rule.GetRule().Provider.ConstName())
+//				service := strings.ToLower(strings.ReplaceAll(rule.GetRule().Service, "-", ""))
+//				_, err := os.Stat(filepath.Join("..", "avd_docs", provider, service, rule.GetRule().AVDID, "docs.md"))
+//				require.NoError(t, err)
+//			})
+//		})
+//	}
+//}
diff --git a/test/setup_test.go b/test/setup_test.go new file mode 100644 index 000000000000..39a44e2bfe92 --- /dev/null +++ b/test/setup_test.go @@ -0,0 +1,59 @@ +package test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/aquasecurity/trivy/pkg/scan"
+	"github.com/aquasecurity/trivy/pkg/scanners/options"
+	"github.com/aquasecurity/trivy/pkg/terraform"
+	"github.com/stretchr/testify/require"
+
+	tfScanner "github.com/aquasecurity/trivy/pkg/scanners/terraform"
+	"github.com/aquasecurity/trivy/pkg/scanners/terraform/parser"
+	"github.com/aquasecurity/trivy/test/testutil"
+)
+
+func createModulesFromSource(t *testing.T, source string, ext string) terraform.Modules {
+	fs := testutil.CreateFS(t, map[string]string{
+		"source" + ext: source,
+	})
+
+	p := parser.New(fs, "", parser.OptionStopOnHCLError(true))
+	if err := p.ParseFS(context.TODO(), "."); err != nil {
+		t.Fatal(err)
+	}
+	modules, _, err := p.EvaluateAll(context.TODO())
+	if err != nil {
+		t.Fatalf("parse error: %s", err)
+	}
+	return modules
+}
+
+func scanHCLWithWorkspace(t *testing.T, source string, workspace string) scan.Results {
+	return scanHCL(t, source, tfScanner.ScannerWithWorkspaceName(workspace))
+}
+
+func scanHCL(t *testing.T, source string, opts ...options.ScannerOption) scan.Results {
+
+	fs := testutil.CreateFS(t, map[string]string{
+		"main.tf": source,
+	})
+
+	localScanner := tfScanner.New(append(opts, options.ScannerWithEmbeddedPolicies(false))...)
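+	// Embedded policies are disabled above, so the scan only exercises rules registered by the calling test.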
+ results, err := localScanner.ScanFS(context.TODO(), fs, ".") + require.NoError(t, err) + return results +} + +func scanJSON(t *testing.T, source string) scan.Results { + + fs := testutil.CreateFS(t, map[string]string{ + "main.tf.json": source, + }) + + s := tfScanner.New(options.ScannerWithEmbeddedPolicies(true), options.ScannerWithEmbeddedLibraries(true)) + results, _, err := s.ScanFSWithMetrics(context.TODO(), fs, ".") + require.NoError(t, err) + return results +} diff --git a/test/testdata/dockerfile/DS001/Dockerfile.allowed b/test/testdata/dockerfile/DS001/Dockerfile.allowed new file mode 100644 index 000000000000..ee5c6cc930dc --- /dev/null +++ b/test/testdata/dockerfile/DS001/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM debian:9 +RUN apt-get update && apt-get -y install vim && apt-get clean +USER foo diff --git a/test/testdata/dockerfile/DS001/Dockerfile.denied b/test/testdata/dockerfile/DS001/Dockerfile.denied new file mode 100644 index 000000000000..5e2b193a0d4e --- /dev/null +++ b/test/testdata/dockerfile/DS001/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM debian:latest +RUN apt-get update && apt-get -y install vim && apt-get clean +USER foo diff --git a/test/testdata/dockerfile/DS002/Dockerfile.allowed b/test/testdata/dockerfile/DS002/Dockerfile.allowed new file mode 100644 index 000000000000..8bb3de30ba3b --- /dev/null +++ b/test/testdata/dockerfile/DS002/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM debian:9 +RUN apt-get update && apt-get -y install vim && apt-get clean +USER foo \ No newline at end of file diff --git a/test/testdata/dockerfile/DS002/Dockerfile.denied b/test/testdata/dockerfile/DS002/Dockerfile.denied new file mode 100644 index 000000000000..9b996cc7b47d --- /dev/null +++ b/test/testdata/dockerfile/DS002/Dockerfile.denied @@ -0,0 +1,2 @@ +FROM debian:9 +RUN apt-get update && apt-get -y install vim && apt-get clean diff --git a/test/testdata/dockerfile/DS004/Dockerfile.allowed b/test/testdata/dockerfile/DS004/Dockerfile.allowed new file mode 100644 index 000000000000..8af97be727f6 --- /dev/null +++ b/test/testdata/dockerfile/DS004/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +EXPOSE 8080 diff --git a/test/testdata/dockerfile/DS004/Dockerfile.denied b/test/testdata/dockerfile/DS004/Dockerfile.denied new file mode 100644 index 000000000000..91016100d36f --- /dev/null +++ b/test/testdata/dockerfile/DS004/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +EXPOSE 22 \ No newline at end of file diff --git a/test/testdata/dockerfile/DS005/Dockerfile.allowed b/test/testdata/dockerfile/DS005/Dockerfile.allowed new file mode 100644 index 000000000000..28d89b4361f6 --- /dev/null +++ b/test/testdata/dockerfile/DS005/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +ADD "/target/resources.tar.gz" "resources" diff --git a/test/testdata/dockerfile/DS005/Dockerfile.denied b/test/testdata/dockerfile/DS005/Dockerfile.denied new file mode 100644 index 000000000000..98c1249f9f34 --- /dev/null +++ b/test/testdata/dockerfile/DS005/Dockerfile.denied @@ -0,0 +1,4 @@ +FROM alpine:3.13 +USER mike +ADD "/target/resources.tar.gz" "resources.jar" +ADD "/target/app.jar" "app.jar" \ No newline at end of file diff --git a/test/testdata/dockerfile/DS006/Dockerfile.allowed b/test/testdata/dockerfile/DS006/Dockerfile.allowed new file mode 100644 index 000000000000..529198acf3b7 --- /dev/null +++ b/test/testdata/dockerfile/DS006/Dockerfile.allowed @@ -0,0 +1,6 @@ +FROM golang:1.7.3 as dep +COPY /binary / + +FROM alpine:3.13 +USER mike +ENTRYPOINT [ 
"/opt/app/run.sh --port 8080" ] \ No newline at end of file diff --git a/test/testdata/dockerfile/DS006/Dockerfile.denied b/test/testdata/dockerfile/DS006/Dockerfile.denied new file mode 100644 index 000000000000..cdb11213d551 --- /dev/null +++ b/test/testdata/dockerfile/DS006/Dockerfile.denied @@ -0,0 +1,6 @@ +FROM golang:1.7.3 as dep +COPY --from=dep /binary / + +FROM alpine:3.13 +USER mike +ENTRYPOINT [ "/opt/app/run.sh --port 8080" ] \ No newline at end of file diff --git a/test/testdata/dockerfile/DS007/Dockerfile.allowed b/test/testdata/dockerfile/DS007/Dockerfile.allowed new file mode 100644 index 000000000000..37b3bb398312 --- /dev/null +++ b/test/testdata/dockerfile/DS007/Dockerfile.allowed @@ -0,0 +1,6 @@ +FROM golang:1.7.3 as dep +COPY /binary / + +FROM alpine:3.13 +USER mike +ENTRYPOINT [ "/opt/app/run.sh --port 8080" ] \ No newline at end of file diff --git a/test/testdata/dockerfile/DS007/Dockerfile.denied b/test/testdata/dockerfile/DS007/Dockerfile.denied new file mode 100644 index 000000000000..228966f1f1ad --- /dev/null +++ b/test/testdata/dockerfile/DS007/Dockerfile.denied @@ -0,0 +1,8 @@ +FROM golang:1.7.3 as dep +COPY dep /binary / +ENTRYPOINT [ "/opt/app/run.sh --port 8080" ] +ENTRYPOINT [ "/opt/app/run.sh --port 8080" ] + +FROM alpine:3.13 +USER mike +ENTRYPOINT [ "/opt/app/run.sh --port 8080" ] \ No newline at end of file diff --git a/test/testdata/dockerfile/DS008/Dockerfile.allowed b/test/testdata/dockerfile/DS008/Dockerfile.allowed new file mode 100644 index 000000000000..f66bb31d8ef7 --- /dev/null +++ b/test/testdata/dockerfile/DS008/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +EXPOSE 65530 8080 diff --git a/test/testdata/dockerfile/DS008/Dockerfile.denied b/test/testdata/dockerfile/DS008/Dockerfile.denied new file mode 100644 index 000000000000..89c465a66d44 --- /dev/null +++ b/test/testdata/dockerfile/DS008/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +EXPOSE 65536 8080 diff --git a/test/testdata/dockerfile/DS009/Dockerfile.allowed b/test/testdata/dockerfile/DS009/Dockerfile.allowed new file mode 100644 index 000000000000..1db32e18327e --- /dev/null +++ b/test/testdata/dockerfile/DS009/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +WORKDIR /path/to/workdir diff --git a/test/testdata/dockerfile/DS009/Dockerfile.denied b/test/testdata/dockerfile/DS009/Dockerfile.denied new file mode 100644 index 000000000000..422d65f083b7 --- /dev/null +++ b/test/testdata/dockerfile/DS009/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +WORKDIR path/to/workdir diff --git a/test/testdata/dockerfile/DS010/Dockerfile.allowed b/test/testdata/dockerfile/DS010/Dockerfile.allowed new file mode 100644 index 000000000000..67232624f130 --- /dev/null +++ b/test/testdata/dockerfile/DS010/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM alpine:3.13 +RUN pip install --upgrade pip +USER mike diff --git a/test/testdata/dockerfile/DS010/Dockerfile.denied b/test/testdata/dockerfile/DS010/Dockerfile.denied new file mode 100644 index 000000000000..cd63e40e132c --- /dev/null +++ b/test/testdata/dockerfile/DS010/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM alpine:3.13 +RUN sudo pip install --upgrade pip +USER mike diff --git a/test/testdata/dockerfile/DS011/Dockerfile.allowed b/test/testdata/dockerfile/DS011/Dockerfile.allowed new file mode 100644 index 000000000000..c5d7133a7b7e --- /dev/null +++ b/test/testdata/dockerfile/DS011/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +COPY ["package.json", "yarn.lock", "myapp/"] diff 
--git a/test/testdata/dockerfile/DS011/Dockerfile.denied b/test/testdata/dockerfile/DS011/Dockerfile.denied new file mode 100644 index 000000000000..72df0188ffb9 --- /dev/null +++ b/test/testdata/dockerfile/DS011/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM alpine:3.13 +USER mike +COPY ["package.json", "yarn.lock", "myapp"] diff --git a/test/testdata/dockerfile/DS012/Dockerfile.allowed b/test/testdata/dockerfile/DS012/Dockerfile.allowed new file mode 100644 index 000000000000..a3eeb0f4318a --- /dev/null +++ b/test/testdata/dockerfile/DS012/Dockerfile.allowed @@ -0,0 +1,10 @@ +FROM baseImage:1.1 +RUN test + +FROM debian:jesse2 as build2 +USER mike +RUN stuff + +FROM debian:jesse1 as build1 +USER mike +RUN more_stuff \ No newline at end of file diff --git a/test/testdata/dockerfile/DS012/Dockerfile.denied b/test/testdata/dockerfile/DS012/Dockerfile.denied new file mode 100644 index 000000000000..86e7882d3ada --- /dev/null +++ b/test/testdata/dockerfile/DS012/Dockerfile.denied @@ -0,0 +1,10 @@ +FROM baseImage:1.1 +RUN test + +FROM debian:jesse2 as build +USER mike +RUN stuff + +FROM debian:jesse1 as build +USER mike +RUN more_stuff \ No newline at end of file diff --git a/test/testdata/dockerfile/DS013/Dockerfile.allowed b/test/testdata/dockerfile/DS013/Dockerfile.allowed new file mode 100644 index 000000000000..c14262268770 --- /dev/null +++ b/test/testdata/dockerfile/DS013/Dockerfile.allowed @@ -0,0 +1,4 @@ +FROM nginx:2.2 +WORKDIR /usr/share/nginx/html +USER mike +CMD cd /usr/share/nginx/html && sed -e s/Docker/\"$AUTHOR\"/ Hello_docker.html > index.html ; nginx -g 'daemon off;' \ No newline at end of file diff --git a/test/testdata/dockerfile/DS013/Dockerfile.denied b/test/testdata/dockerfile/DS013/Dockerfile.denied new file mode 100644 index 000000000000..e5a769aadc86 --- /dev/null +++ b/test/testdata/dockerfile/DS013/Dockerfile.denied @@ -0,0 +1,4 @@ +FROM nginx:2.2 +RUN cd /usr/share/nginx/html +USER mike +CMD cd /usr/share/nginx/html && sed -e s/Docker/\"$AUTHOR\"/ Hello_docker.html > index.html ; nginx -g 'daemon off;' \ No newline at end of file diff --git a/test/testdata/dockerfile/DS014/Dockerfile.allowed b/test/testdata/dockerfile/DS014/Dockerfile.allowed new file mode 100644 index 000000000000..b46d24c9f879 --- /dev/null +++ b/test/testdata/dockerfile/DS014/Dockerfile.allowed @@ -0,0 +1,7 @@ +FROM debian:stable-20210621 +RUN curl http://bing.com +RUN curl http://google.com + +FROM baseimage:1.0 +USER mike +RUN curl http://bing.com diff --git a/test/testdata/dockerfile/DS014/Dockerfile.denied b/test/testdata/dockerfile/DS014/Dockerfile.denied new file mode 100644 index 000000000000..c5ec6eff395a --- /dev/null +++ b/test/testdata/dockerfile/DS014/Dockerfile.denied @@ -0,0 +1,7 @@ +FROM debian:stable-20210621 +RUN wget http://bing.com +RUN curl http://google.com + +FROM baseimage:1.0 +USER mike +RUN curl http://bing.com diff --git a/test/testdata/dockerfile/DS015/Dockerfile.allowed b/test/testdata/dockerfile/DS015/Dockerfile.allowed new file mode 100644 index 000000000000..5ab6a65688ef --- /dev/null +++ b/test/testdata/dockerfile/DS015/Dockerfile.allowed @@ -0,0 +1,5 @@ +FROM alpine:3.5 +RUN yum install && yum clean all +RUN pip install --no-cache-dir -r /usr/src/app/requirements.txt +USER mike +CMD python /usr/src/app/app.py \ No newline at end of file diff --git a/test/testdata/dockerfile/DS015/Dockerfile.denied b/test/testdata/dockerfile/DS015/Dockerfile.denied new file mode 100644 index 000000000000..e1ba5704d7d5 --- /dev/null +++ b/test/testdata/dockerfile/DS015/Dockerfile.denied 
@@ -0,0 +1,5 @@ +FROM alpine:3.5 +RUN yum install vim +RUN pip install --no-cache-dir -r /usr/src/app/requirements.txt +USER mike +CMD python /usr/src/app/app.py \ No newline at end of file diff --git a/test/testdata/dockerfile/DS016/Dockerfile.allowed b/test/testdata/dockerfile/DS016/Dockerfile.allowed new file mode 100644 index 000000000000..46f07fda1436 --- /dev/null +++ b/test/testdata/dockerfile/DS016/Dockerfile.allowed @@ -0,0 +1,5 @@ +FROM golang:1.7.3 +USER mike +CMD ./apps +FROM alpine:3.13 +CMD ./app diff --git a/test/testdata/dockerfile/DS016/Dockerfile.denied b/test/testdata/dockerfile/DS016/Dockerfile.denied new file mode 100644 index 000000000000..e861f0a0d284 --- /dev/null +++ b/test/testdata/dockerfile/DS016/Dockerfile.denied @@ -0,0 +1,6 @@ +FROM golang:1.7.3 +USER mike +CMD ./app +CMD ./apps +FROM alpine:3.13 +CMD ./app diff --git a/test/testdata/dockerfile/DS017/Dockerfile.allowed b/test/testdata/dockerfile/DS017/Dockerfile.allowed new file mode 100644 index 000000000000..d92984d79ae8 --- /dev/null +++ b/test/testdata/dockerfile/DS017/Dockerfile.allowed @@ -0,0 +1,4 @@ +FROM ubuntu:18.04 +RUN apt-get update && apt-get install -y --no-install-recommends mysql-client && rm -rf /var/lib/apt/lists/* && apt-get clean +USER mike +ENTRYPOINT mysql \ No newline at end of file diff --git a/test/testdata/dockerfile/DS017/Dockerfile.denied b/test/testdata/dockerfile/DS017/Dockerfile.denied new file mode 100644 index 000000000000..e9bf2a9abd1e --- /dev/null +++ b/test/testdata/dockerfile/DS017/Dockerfile.denied @@ -0,0 +1,5 @@ +FROM ubuntu:18.04 +RUN apt-get update +RUN apt-get install -y --no-install-recommends mysql-client && rm -rf /var/lib/apt/lists/* && apt-get clean +USER mike +ENTRYPOINT mysql \ No newline at end of file diff --git a/test/testdata/dockerfile/DS019/Dockerfile.allowed b/test/testdata/dockerfile/DS019/Dockerfile.allowed new file mode 100644 index 000000000000..0b97504659ec --- /dev/null +++ b/test/testdata/dockerfile/DS019/Dockerfile.allowed @@ -0,0 +1,5 @@ +FROM fedora:27 +USER mike +RUN set -uex && dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo && sed -i 's/\\$releasever/26/g' /etc/yum.repos.d/docker-ce.repo && dnf install -vy docker-ce && dnf clean all +HEALTHCHECK CMD curl --fail http://localhost:3000 || exit 1 + diff --git a/test/testdata/dockerfile/DS019/Dockerfile.denied b/test/testdata/dockerfile/DS019/Dockerfile.denied new file mode 100644 index 000000000000..47c2c25fd495 --- /dev/null +++ b/test/testdata/dockerfile/DS019/Dockerfile.denied @@ -0,0 +1,4 @@ +FROM fedora:27 +USER mike +RUN set -uex && dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo && sed -i 's/\\$releasever/26/g' /etc/yum.repos.d/docker-ce.repo && dnf install -vy docker-ce +HEALTHCHECK CMD curl --fail http://localhost:3000 || exit 1 diff --git a/test/testdata/dockerfile/DS020/Dockerfile.allowed b/test/testdata/dockerfile/DS020/Dockerfile.allowed new file mode 100644 index 000000000000..b76d238a9c8d --- /dev/null +++ b/test/testdata/dockerfile/DS020/Dockerfile.allowed @@ -0,0 +1,5 @@ +FROM alpine:3.5 +RUN zypper install bash && zypper clean +RUN pip install --no-cache-dir -r /usr/src/app/requirements.txt +USER mike +CMD python /usr/src/app/app.py \ No newline at end of file diff --git a/test/testdata/dockerfile/DS020/Dockerfile.denied b/test/testdata/dockerfile/DS020/Dockerfile.denied new file mode 100644 index 000000000000..22235094173c --- /dev/null +++ b/test/testdata/dockerfile/DS020/Dockerfile.denied @@ -0,0 
+1,5 @@ +FROM alpine:3.5 +RUN zypper install bash +RUN pip install --no-cache-dir -r /usr/src/app/requirements.txt +USER mike +CMD python /usr/src/app/app.py \ No newline at end of file diff --git a/test/testdata/dockerfile/DS021/Dockerfile.allowed b/test/testdata/dockerfile/DS021/Dockerfile.allowed new file mode 100644 index 000000000000..84d2c55941bd --- /dev/null +++ b/test/testdata/dockerfile/DS021/Dockerfile.allowed @@ -0,0 +1,3 @@ +FROM node:12 +USER mike +RUN apt-get -fmy install apt-utils && apt-get clean \ No newline at end of file diff --git a/test/testdata/dockerfile/DS021/Dockerfile.denied b/test/testdata/dockerfile/DS021/Dockerfile.denied new file mode 100644 index 000000000000..988e111d5d04 --- /dev/null +++ b/test/testdata/dockerfile/DS021/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM node:12 +USER mike +RUN apt-get install apt-utils && apt-get clean \ No newline at end of file diff --git a/test/testdata/dockerfile/DS022/Dockerfile.allowed b/test/testdata/dockerfile/DS022/Dockerfile.allowed new file mode 100644 index 000000000000..eaa7e488d692 --- /dev/null +++ b/test/testdata/dockerfile/DS022/Dockerfile.allowed @@ -0,0 +1,2 @@ +FROM busybox:1.33.1 +USER mike \ No newline at end of file diff --git a/test/testdata/dockerfile/DS022/Dockerfile.denied b/test/testdata/dockerfile/DS022/Dockerfile.denied new file mode 100644 index 000000000000..aebd38f065da --- /dev/null +++ b/test/testdata/dockerfile/DS022/Dockerfile.denied @@ -0,0 +1,3 @@ +FROM busybox:1.33.1 +USER mike +MAINTAINER Lukas Martinelli \ No newline at end of file diff --git a/test/testdata/dockerfile/DS023/Dockerfile.allowed b/test/testdata/dockerfile/DS023/Dockerfile.allowed new file mode 100644 index 000000000000..29c48f20c700 --- /dev/null +++ b/test/testdata/dockerfile/DS023/Dockerfile.allowed @@ -0,0 +1,7 @@ +FROM busybox:1.33.1 +HEALTHCHECK CMD /bin/healthcheck + +FROM alpine:3.13 +HEALTHCHECK CMD /bin/healthcheck +USER mike +CMD ./app diff --git a/test/testdata/dockerfile/DS023/Dockerfile.denied b/test/testdata/dockerfile/DS023/Dockerfile.denied new file mode 100644 index 000000000000..6dc49ab5f151 --- /dev/null +++ b/test/testdata/dockerfile/DS023/Dockerfile.denied @@ -0,0 +1,8 @@ +FROM busybox:1.33.1 +HEALTHCHECK CMD curl http://localhost:8080 +HEALTHCHECK CMD /bin/healthcheck + +FROM alpine:3.13 +HEALTHCHECK CMD /bin/healthcheck +USER mike +CMD ./app diff --git a/test/testdata/dockerfile/DS024/Dockerfile.allowed b/test/testdata/dockerfile/DS024/Dockerfile.allowed new file mode 100644 index 000000000000..b551287049e7 --- /dev/null +++ b/test/testdata/dockerfile/DS024/Dockerfile.allowed @@ -0,0 +1,4 @@ +FROM debian:9.13 +RUN apt-get update && apt-get install -y curl && apt-get clean +USER mike +CMD python /usr/src/app/app.py diff --git a/test/testdata/dockerfile/DS024/Dockerfile.denied b/test/testdata/dockerfile/DS024/Dockerfile.denied new file mode 100644 index 000000000000..7bc3ae8975a3 --- /dev/null +++ b/test/testdata/dockerfile/DS024/Dockerfile.denied @@ -0,0 +1,4 @@ +FROM debian:9.13 +RUN apt-get update && apt-get dist-upgrade && apt-get -y install curl && apt-get clean +USER mike +CMD python /usr/src/app/app.py diff --git a/test/testdata/kubernetes/KSV001/allowed.yaml b/test/testdata/kubernetes/KSV001/allowed.yaml new file mode 100644 index 000000000000..f40d17d24580 --- /dev/null +++ b/test/testdata/kubernetes/KSV001/allowed.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: 
hello + securityContext: + allowPrivilegeEscalation: false diff --git a/test/testdata/kubernetes/KSV001/denied.yaml b/test/testdata/kubernetes/KSV001/denied.yaml new file mode 100644 index 000000000000..3622b1bfbcd2 --- /dev/null +++ b/test/testdata/kubernetes/KSV001/denied.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + capabilities: + drop: + - all diff --git a/test/testdata/kubernetes/KSV002/allowed.yaml b/test/testdata/kubernetes/KSV002/allowed.yaml new file mode 100644 index 000000000000..c98da678b4bb --- /dev/null +++ b/test/testdata/kubernetes/KSV002/allowed.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/hello: runtime/default + name: hello-apparmor +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello AppArmor!' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV002/denied.yaml b/test/testdata/kubernetes/KSV002/denied.yaml new file mode 100644 index 000000000000..a127b4b47b9d --- /dev/null +++ b/test/testdata/kubernetes/KSV002/denied.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + annotations: + container.apparmor.security.beta.kubernetes.io/hello: custom + name: hello-apparmor +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello AppArmor!' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV003/allowed.yaml b/test/testdata/kubernetes/KSV003/allowed.yaml new file mode 100644 index 000000000000..3622b1bfbcd2 --- /dev/null +++ b/test/testdata/kubernetes/KSV003/allowed.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + capabilities: + drop: + - all diff --git a/test/testdata/kubernetes/KSV003/denied.yaml b/test/testdata/kubernetes/KSV003/denied.yaml new file mode 100644 index 000000000000..07754a354ca3 --- /dev/null +++ b/test/testdata/kubernetes/KSV003/denied.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV005/allowed.yaml b/test/testdata/kubernetes/KSV005/allowed.yaml new file mode 100644 index 000000000000..ff08b26f90bc --- /dev/null +++ b/test/testdata/kubernetes/KSV005/allowed.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-sys-admin-capabilities +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV005/denied.yaml b/test/testdata/kubernetes/KSV005/denied.yaml new file mode 100644 index 000000000000..c34e9fad024a --- /dev/null +++ b/test/testdata/kubernetes/KSV005/denied.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-sys-admin-capabilities +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + securityContext: + capabilities: + add: + - SYS_ADMIN diff --git a/test/testdata/kubernetes/KSV006/allowed.yaml b/test/testdata/kubernetes/KSV006/allowed.yaml new file mode 100644 index 000000000000..04f1710d7c51 --- /dev/null +++ 
b/test/testdata/kubernetes/KSV006/allowed.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-docker-socket +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + volumes: + - name: test-volume diff --git a/test/testdata/kubernetes/KSV006/denied.yaml b/test/testdata/kubernetes/KSV006/denied.yaml new file mode 100644 index 000000000000..d7335ac91d56 --- /dev/null +++ b/test/testdata/kubernetes/KSV006/denied.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-docker-socket +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + volumes: + - name: test-volume + hostPath: + path: "/var/run/docker.sock" + type: Directory diff --git a/test/testdata/kubernetes/KSV008/allowed.yaml b/test/testdata/kubernetes/KSV008/allowed.yaml new file mode 100644 index 000000000000..6dd4513d063a --- /dev/null +++ b/test/testdata/kubernetes/KSV008/allowed.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-ipc +spec: + hostIPC: false + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV008/denied.yaml b/test/testdata/kubernetes/KSV008/denied.yaml new file mode 100644 index 000000000000..826f58a65485 --- /dev/null +++ b/test/testdata/kubernetes/KSV008/denied.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-ipc +spec: + hostIPC: true + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV009/allowed.yaml b/test/testdata/kubernetes/KSV009/allowed.yaml new file mode 100644 index 000000000000..61d615b1a8b0 --- /dev/null +++ b/test/testdata/kubernetes/KSV009/allowed.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-network +spec: + hostNetwork: false + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV009/denied.yaml b/test/testdata/kubernetes/KSV009/denied.yaml new file mode 100644 index 000000000000..2b862ca596ef --- /dev/null +++ b/test/testdata/kubernetes/KSV009/denied.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-network +spec: + hostNetwork: true + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV010/allowed.yaml b/test/testdata/kubernetes/KSV010/allowed.yaml new file mode 100644 index 000000000000..b215b5c7faf3 --- /dev/null +++ b/test/testdata/kubernetes/KSV010/allowed.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-network +spec: + hostPID: false + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV010/denied.yaml b/test/testdata/kubernetes/KSV010/denied.yaml new file mode 100644 index 000000000000..69acff1a2d92 --- /dev/null +++ b/test/testdata/kubernetes/KSV010/denied.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-network +spec: + hostPID: true + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git 
a/test/testdata/kubernetes/KSV011/allowed.yaml b/test/testdata/kubernetes/KSV011/allowed.yaml new file mode 100644 index 000000000000..f271ed677908 --- /dev/null +++ b/test/testdata/kubernetes/KSV011/allowed.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + resources: + limits: + cpu: 500m diff --git a/test/testdata/kubernetes/KSV011/denied.yaml b/test/testdata/kubernetes/KSV011/denied.yaml new file mode 100644 index 000000000000..71287dea0a04 --- /dev/null +++ b/test/testdata/kubernetes/KSV011/denied.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV012/allowed.yaml b/test/testdata/kubernetes/KSV012/allowed.yaml new file mode 100644 index 000000000000..0811a40e50a0 --- /dev/null +++ b/test/testdata/kubernetes/KSV012/allowed.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + runAsNonRoot: true diff --git a/test/testdata/kubernetes/KSV012/denied.yaml b/test/testdata/kubernetes/KSV012/denied.yaml new file mode 100644 index 000000000000..07754a354ca3 --- /dev/null +++ b/test/testdata/kubernetes/KSV012/denied.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV013/allowed.yaml b/test/testdata/kubernetes/KSV013/allowed.yaml new file mode 100644 index 000000000000..f46dae03dfa3 --- /dev/null +++ b/test/testdata/kubernetes/KSV013/allowed.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-tag +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox:1.33.1 + name: hello diff --git a/test/testdata/kubernetes/KSV013/denied.yaml b/test/testdata/kubernetes/KSV013/denied.yaml new file mode 100644 index 000000000000..d6fd19396048 --- /dev/null +++ b/test/testdata/kubernetes/KSV013/denied.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-tag +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox:latest + name: hello diff --git a/test/testdata/kubernetes/KSV014/allowed.yaml b/test/testdata/kubernetes/KSV014/allowed.yaml new file mode 100644 index 000000000000..0ff96a444328 --- /dev/null +++ b/test/testdata/kubernetes/KSV014/allowed.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-fs-not-readonly +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + securityContext: + readOnlyRootFilesystem: true diff --git a/test/testdata/kubernetes/KSV014/denied.yaml b/test/testdata/kubernetes/KSV014/denied.yaml new file mode 100644 index 000000000000..c15b769f4c5e --- /dev/null +++ b/test/testdata/kubernetes/KSV014/denied.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-fs-not-readonly +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + securityContext: + readOnlyRootFilesystem: false diff --git a/test/testdata/kubernetes/KSV015/allowed.yaml 
b/test/testdata/kubernetes/KSV015/allowed.yaml new file mode 100644 index 000000000000..fd55236361b1 --- /dev/null +++ b/test/testdata/kubernetes/KSV015/allowed.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + resources: + requests: + cpu: 250m diff --git a/test/testdata/kubernetes/KSV015/denied.yaml b/test/testdata/kubernetes/KSV015/denied.yaml new file mode 100644 index 000000000000..71287dea0a04 --- /dev/null +++ b/test/testdata/kubernetes/KSV015/denied.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV016/allowed.yaml b/test/testdata/kubernetes/KSV016/allowed.yaml new file mode 100644 index 000000000000..c43f990f1ab8 --- /dev/null +++ b/test/testdata/kubernetes/KSV016/allowed.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + resources: + requests: + memory: 64Mi diff --git a/test/testdata/kubernetes/KSV016/denied.yaml b/test/testdata/kubernetes/KSV016/denied.yaml new file mode 100644 index 000000000000..71287dea0a04 --- /dev/null +++ b/test/testdata/kubernetes/KSV016/denied.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV017/allowed.yaml b/test/testdata/kubernetes/KSV017/allowed.yaml new file mode 100644 index 000000000000..b608e5c78113 --- /dev/null +++ b/test/testdata/kubernetes/KSV017/allowed.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-privileged +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV017/denied.yaml b/test/testdata/kubernetes/KSV017/denied.yaml new file mode 100644 index 000000000000..620f6497f9aa --- /dev/null +++ b/test/testdata/kubernetes/KSV017/denied.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-privileged +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + securityContext: + privileged: true diff --git a/test/testdata/kubernetes/KSV018/allowed.yaml b/test/testdata/kubernetes/KSV018/allowed.yaml new file mode 100644 index 000000000000..eb00e56e4c7e --- /dev/null +++ b/test/testdata/kubernetes/KSV018/allowed.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + resources: + limits: + memory: 128Mi diff --git a/test/testdata/kubernetes/KSV018/denied.yaml b/test/testdata/kubernetes/KSV018/denied.yaml new file mode 100644 index 000000000000..6bf001e3c075 --- /dev/null +++ b/test/testdata/kubernetes/KSV018/denied.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + diff --git a/test/testdata/kubernetes/KSV020/allowed.yaml b/test/testdata/kubernetes/KSV020/allowed.yaml new file mode 
100644 index 000000000000..36f7916bbea0 --- /dev/null +++ b/test/testdata/kubernetes/KSV020/allowed.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-gid +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + securityContext: + runAsUser: 10004 diff --git a/test/testdata/kubernetes/KSV020/denied.yaml b/test/testdata/kubernetes/KSV020/denied.yaml new file mode 100644 index 000000000000..e9dbef332273 --- /dev/null +++ b/test/testdata/kubernetes/KSV020/denied.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-gid +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV021/allowed.yaml b/test/testdata/kubernetes/KSV021/allowed.yaml new file mode 100644 index 000000000000..f176cb07ce5c --- /dev/null +++ b/test/testdata/kubernetes/KSV021/allowed.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-gid +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + securityContext: + runAsGroup: 10004 diff --git a/test/testdata/kubernetes/KSV021/denied.yaml b/test/testdata/kubernetes/KSV021/denied.yaml new file mode 100644 index 000000000000..e9dbef332273 --- /dev/null +++ b/test/testdata/kubernetes/KSV021/denied.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-gid +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV022/allowed.yaml b/test/testdata/kubernetes/KSV022/allowed.yaml new file mode 100644 index 000000000000..1e4b014e5ece --- /dev/null +++ b/test/testdata/kubernetes/KSV022/allowed.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-add-capabilities +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV022/denied.yaml b/test/testdata/kubernetes/KSV022/denied.yaml new file mode 100644 index 000000000000..3e5b7aec50a7 --- /dev/null +++ b/test/testdata/kubernetes/KSV022/denied.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-add-capabilities +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + securityContext: + capabilities: + add: + - NET_BIND_SERVICE \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV023/allowed.yaml b/test/testdata/kubernetes/KSV023/allowed.yaml new file mode 100644 index 000000000000..8c19827425fb --- /dev/null +++ b/test/testdata/kubernetes/KSV023/allowed.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-path +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV023/denied.yaml b/test/testdata/kubernetes/KSV023/denied.yaml new file mode 100644 index 000000000000..da474eb987d0 --- /dev/null +++ b/test/testdata/kubernetes/KSV023/denied.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-path +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + volumes: + - hostPath: + path: "/sys" + type: '' diff --git a/test/testdata/kubernetes/KSV024/allowed.yaml b/test/testdata/kubernetes/KSV024/allowed.yaml new file mode 100644 index 
000000000000..24b1c9757c12 --- /dev/null +++ b/test/testdata/kubernetes/KSV024/allowed.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-ports +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV024/denied.yaml b/test/testdata/kubernetes/KSV024/denied.yaml new file mode 100644 index 000000000000..f23d66ed4817 --- /dev/null +++ b/test/testdata/kubernetes/KSV024/denied.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-host-ports +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + ports: + - hostPort: 8080 \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV025/allowed.yaml b/test/testdata/kubernetes/KSV025/allowed.yaml new file mode 100644 index 000000000000..508ad7b2ec51 --- /dev/null +++ b/test/testdata/kubernetes/KSV025/allowed.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-selinux +spec: + securityContext: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV025/denied.yaml b/test/testdata/kubernetes/KSV025/denied.yaml new file mode 100644 index 000000000000..9fbaa41da184 --- /dev/null +++ b/test/testdata/kubernetes/KSV025/denied.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-selinux +spec: + securityContext: + seLinuxOptions: + type: custom + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV026/allowed.yaml b/test/testdata/kubernetes/KSV026/allowed.yaml new file mode 100644 index 000000000000..9ff2d7bcfdb5 --- /dev/null +++ b/test/testdata/kubernetes/KSV026/allowed.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-sysctls +spec: + securityContext: + sysctls: + - name: kernel.shm_rmid_forced + value: '0' + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV026/denied.yaml b/test/testdata/kubernetes/KSV026/denied.yaml new file mode 100644 index 000000000000..69eed5d69b03 --- /dev/null +++ b/test/testdata/kubernetes/KSV026/denied.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-sysctls +spec: + securityContext: + sysctls: + - name: net.core.somaxconn + value: '1024' + - name: kernel.msgmax + value: '65536' + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello diff --git a/test/testdata/kubernetes/KSV027/allowed.yaml b/test/testdata/kubernetes/KSV027/allowed.yaml new file mode 100644 index 000000000000..40b8c24aff06 --- /dev/null +++ b/test/testdata/kubernetes/KSV027/allowed.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-proc-mount +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + ports: + - hostPort: 8080 diff --git a/test/testdata/kubernetes/KSV027/denied.yaml b/test/testdata/kubernetes/KSV027/denied.yaml new file mode 100644 index 000000000000..40354e4e8427 --- /dev/null +++ b/test/testdata/kubernetes/KSV027/denied.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-proc-mount +spec: + containers: + - command: + - 
sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + ports: + - hostPort: 8080 + securityContext: + procMount: Unmasked diff --git a/test/testdata/kubernetes/KSV028/allowed.yaml b/test/testdata/kubernetes/KSV028/allowed.yaml new file mode 100644 index 000000000000..a2f93da0dde0 --- /dev/null +++ b/test/testdata/kubernetes/KSV028/allowed.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-volume-types +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + volumes: + - name: volume-a diff --git a/test/testdata/kubernetes/KSV028/denied.yaml b/test/testdata/kubernetes/KSV028/denied.yaml new file mode 100644 index 000000000000..57fc35cfc7a5 --- /dev/null +++ b/test/testdata/kubernetes/KSV028/denied.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-volume-types +spec: + containers: + - command: + - sh + - "-c" + - echo 'Hello' && sleep 1h + image: busybox + name: hello + volumes: + - name: volume-a + scaleIO: + gateway: https://localhost:443/api + system: scaleio + protectionDomain: sd0 + storagePool: sp1 + volumeName: vol-a + secretRef: + name: sio-secret + fsType: xfs diff --git a/test/testdata/kubernetes/KSV030/allowed.yaml b/test/testdata/kubernetes/KSV030/allowed.yaml new file mode 100644 index 000000000000..48b8c1d4ed3a --- /dev/null +++ b/test/testdata/kubernetes/KSV030/allowed.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + seccompProfile: + type: RuntimeDefault + localhostProfile: profiles/audit.json \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV030/denied.yaml b/test/testdata/kubernetes/KSV030/denied.yaml new file mode 100644 index 000000000000..45b3bd316652 --- /dev/null +++ b/test/testdata/kubernetes/KSV030/denied.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + seccompProfile: + type: LocalPort + localhostProfile: profiles/audit.json \ No newline at end of file diff --git a/test/testdata/kubernetes/KSV036/allowed.yaml b/test/testdata/kubernetes/KSV036/allowed.yaml new file mode 100644 index 000000000000..42a9ded8b589 --- /dev/null +++ b/test/testdata/kubernetes/KSV036/allowed.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: mypod + namespace: test + labels: + name: mypod +spec: + containers: + - name: mypod + image: nginx + diff --git a/test/testdata/kubernetes/KSV036/denied.yaml b/test/testdata/kubernetes/KSV036/denied.yaml new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/test/testdata/kubernetes/KSV037/allowed.yaml b/test/testdata/kubernetes/KSV037/allowed.yaml new file mode 100644 index 000000000000..99c22f0afff9 --- /dev/null +++ b/test/testdata/kubernetes/KSV037/allowed.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: mypod + namespace: test + labels: + name: mypod +spec: + automountServiceAccountToken: true + containers: + - name: mypod + image: nginx + diff --git a/test/testdata/kubernetes/KSV037/denied.yaml b/test/testdata/kubernetes/KSV037/denied.yaml new file mode 100644 index 000000000000..c42d41e0349c --- /dev/null +++ b/test/testdata/kubernetes/KSV037/denied.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: mypod + 
namespace: kube-system + labels: + name: mypod +spec: + containers: + - name: mypod + image: nginx + diff --git a/test/testdata/kubernetes/KSV038/allowed.yaml b/test/testdata/kubernetes/KSV038/allowed.yaml new file mode 100644 index 000000000000..ccdac794b92d --- /dev/null +++ b/test/testdata/kubernetes/KSV038/allowed.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: NetworkPolicy +metadata: + name: hello-cpu-limit +spec: + podSelector: + matchLabels: + role: db diff --git a/test/testdata/kubernetes/KSV038/denied.yaml b/test/testdata/kubernetes/KSV038/denied.yaml new file mode 100644 index 000000000000..ed554daccad6 --- /dev/null +++ b/test/testdata/kubernetes/KSV038/denied.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: NetworkPolicy +metadata: + name: hello-cpu-limit +spec: + something: true diff --git a/test/testdata/kubernetes/KSV102/allowed.yaml b/test/testdata/kubernetes/KSV102/allowed.yaml new file mode 100644 index 000000000000..3b6b9f49d5ba --- /dev/null +++ b/test/testdata/kubernetes/KSV102/allowed.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: apps/v1beta2 +kind: Deployment +metadata: + name: Onga +spec: + template: + spec: + containers: + - name: carts-db + image: mongo + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: true + initContainers: + - name: init-svc + image: busybox:1.28 + securityContext: + allowPrivilegeEscalation: false + metadata: + name: None + labels: + app: example + tier: backend diff --git a/test/testdata/kubernetes/KSV102/denied.yaml b/test/testdata/kubernetes/KSV102/denied.yaml new file mode 100644 index 000000000000..c760bc6880a2 --- /dev/null +++ b/test/testdata/kubernetes/KSV102/denied.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongo-deployment +spec: + template: + spec: + containers: + - name: carts-db + image: tiller + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: true + initContainers: + - name: init-svc + image: busybox:1.28 + securityContext: + allowPrivilegeEscalation: false diff --git a/test/testdata/kubernetes/optional/KSV004/allowed.yaml b/test/testdata/kubernetes/optional/KSV004/allowed.yaml new file mode 100644 index 000000000000..3622b1bfbcd2 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV004/allowed.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + capabilities: + drop: + - all diff --git a/test/testdata/kubernetes/optional/KSV004/denied.yaml b/test/testdata/kubernetes/optional/KSV004/denied.yaml new file mode 100644 index 000000000000..dc02a2664512 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV004/denied.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - command: ["sh", "-c", "echo 'Hello' && sleep 1h"] + image: busybox + name: hello + securityContext: + capabilities: diff --git a/test/testdata/kubernetes/optional/KSV007/allowed.yaml b/test/testdata/kubernetes/optional/KSV007/allowed.yaml new file mode 100644 index 000000000000..86b256077c21 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV007/allowed.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: diff --git a/test/testdata/kubernetes/optional/KSV007/denied.yaml b/test/testdata/kubernetes/optional/KSV007/denied.yaml new file mode 100644 index 000000000000..a9480234d151 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV007/denied.yaml 
@@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + hostAliases: + - ip: "127.0.0.1" + hostnames: + - "foo.local" + - "bar.local" diff --git a/test/testdata/kubernetes/optional/KSV032/allowed.yaml b/test/testdata/kubernetes/optional/KSV032/allowed.yaml new file mode 100644 index 000000000000..5809dcb0d328 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV032/allowed.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: azurecr.io/something diff --git a/test/testdata/kubernetes/optional/KSV032/denied.yaml b/test/testdata/kubernetes/optional/KSV032/denied.yaml new file mode 100644 index 000000000000..0d9857cad0a5 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV032/denied.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: blah/something diff --git a/test/testdata/kubernetes/optional/KSV033/allowed.yaml b/test/testdata/kubernetes/optional/KSV033/allowed.yaml new file mode 100644 index 000000000000..4c8bfa5783aa --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV033/allowed.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: gcr.io/something diff --git a/test/testdata/kubernetes/optional/KSV033/denied.yaml b/test/testdata/kubernetes/optional/KSV033/denied.yaml new file mode 100644 index 000000000000..0d9857cad0a5 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV033/denied.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: blah/something diff --git a/test/testdata/kubernetes/optional/KSV034/allowed.yaml b/test/testdata/kubernetes/optional/KSV034/allowed.yaml new file mode 100644 index 000000000000..5809dcb0d328 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV034/allowed.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: azurecr.io/something diff --git a/test/testdata/kubernetes/optional/KSV034/denied.yaml b/test/testdata/kubernetes/optional/KSV034/denied.yaml new file mode 100644 index 000000000000..b7f7eef038fd --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV034/denied.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: ghcr.io/something diff --git a/test/testdata/kubernetes/optional/KSV035/allowed.yaml b/test/testdata/kubernetes/optional/KSV035/allowed.yaml new file mode 100644 index 000000000000..feaa3199c175 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV035/allowed.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: ecr.us-east-2.amazonaws.com/something diff --git a/test/testdata/kubernetes/optional/KSV035/denied.yaml b/test/testdata/kubernetes/optional/KSV035/denied.yaml new file mode 100644 index 000000000000..0d9857cad0a5 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV035/denied.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: hello-cpu-limit +spec: + containers: + - name: hello + image: blah/something diff --git a/test/testdata/kubernetes/optional/KSV039/allowed.yaml b/test/testdata/kubernetes/optional/KSV039/allowed.yaml new file mode 100644 index 000000000000..7844f5db6d64 --- /dev/null +++ 
b/test/testdata/kubernetes/optional/KSV039/allowed.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: v1 +kind: LimitRange +metadata: + name: core-resource-limits +spec: + limits: + - type: Pod + default: + cpu: '2' + memory: 1Gi + defaultRequest: + cpu: '2' + memory: 1Gi + max: + cpu: '2' + memory: 1Gi + min: + cpu: 200m + memory: 6Mi + - type: Container + max: + cpu: '2' + memory: 1Gi + min: + cpu: 100m + memory: 4Mi + default: + cpu: 300m + memory: 200Mi + defaultRequest: + cpu: 200m + memory: 100Mi + maxLimitRequestRatio: + cpu: '10' \ No newline at end of file diff --git a/test/testdata/kubernetes/optional/KSV039/denied.yaml b/test/testdata/kubernetes/optional/KSV039/denied.yaml new file mode 100644 index 000000000000..b53d29718168 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV039/denied.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-limit-range +spec: + limits: + - default: + cpu: 1 + defaultRequest: + cpu: 0.5 + type: Container diff --git a/test/testdata/kubernetes/optional/KSV040/allowed.yaml b/test/testdata/kubernetes/optional/KSV040/allowed.yaml new file mode 100644 index 000000000000..cf9bbf2d7870 --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV040/allowed.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ResourceQuota +metadata: + name: mem-cpu-demo +spec: + hard: + requests.cpu: '1' + requests.memory: 1Gi + limits.cpu: '2' + limits.memory: 2Gi diff --git a/test/testdata/kubernetes/optional/KSV040/denied.yaml b/test/testdata/kubernetes/optional/KSV040/denied.yaml new file mode 100644 index 000000000000..b73d6e67d2ee --- /dev/null +++ b/test/testdata/kubernetes/optional/KSV040/denied.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: ResourceQuota +metadata: + name: mem-cpu-demo +spec: + hard: + requests.cpu: '1' + requests.memory: 1Gi + limits.cpu: '2' diff --git a/test/testutil/util.go b/test/testutil/util.go new file mode 100644 index 000000000000..d6f5e64a42d7 --- /dev/null +++ b/test/testutil/util.go @@ -0,0 +1,118 @@ +package testutil + +import ( + "encoding/json" + "io/fs" + "path/filepath" + "strings" + "testing" + + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/liamg/memoryfs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// AssertRuleFound asserts that the given rule ID appears in the failed results and that every level of its metadata chain carries a valid line range. +func AssertRuleFound(t *testing.T, ruleID string, results scan.Results, message string, args ...interface{}) { + found := ruleIDInResults(ruleID, results.GetFailed()) + assert.True(t, found, append([]interface{}{message}, args...)...) + for _, result := range results.GetFailed() { + if result.Rule().LongID() == ruleID { + m := result.Metadata() + meta := &m + for meta != nil { + assert.NotNil(t, meta.Range()) + assert.Greater(t, meta.Range().GetStartLine(), 0) + assert.Greater(t, meta.Range().GetEndLine(), 0) + meta = meta.Parent() + } + } + } +} + +// AssertRuleNotFound asserts that the given rule ID is absent from the failed results. +func AssertRuleNotFound(t *testing.T, ruleID string, results scan.Results, message string, args ...interface{}) { + found := ruleIDInResults(ruleID, results.GetFailed()) + assert.False(t, found, append([]interface{}{message}, args...)...)
+} + +func ruleIDInResults(ruleID string, results scan.Results) bool { + for _, res := range results { + if res.Rule().LongID() == ruleID { + return true + } + } + return false +} + +// CreateFS builds an in-memory filesystem from a map of file paths to file contents, failing the test on any setup error. +func CreateFS(t *testing.T, files map[string]string) fs.FS { + memfs := memoryfs.New() + for name, content := range files { + name := strings.TrimPrefix(name, "/") + err := memfs.MkdirAll(filepath.Dir(name), 0o700) + require.NoError(t, err) + err = memfs.WriteFile(name, []byte(content), 0o644) + require.NoError(t, err) + } + return memfs +} + +// AssertDefsecEqual compares the expected and actual values through their JSON encodings, ignoring all metadata keys. +func AssertDefsecEqual(t *testing.T, expected interface{}, actual interface{}) { + expectedJson, err := json.MarshalIndent(expected, "", "\t") + require.NoError(t, err) + actualJson, err := json.MarshalIndent(actual, "", "\t") + require.NoError(t, err) + + if expectedJson[0] == '[' { + var expectedSlice []map[string]interface{} + require.NoError(t, json.Unmarshal(expectedJson, &expectedSlice)) + var actualSlice []map[string]interface{} + require.NoError(t, json.Unmarshal(actualJson, &actualSlice)) + expectedSlice = purgeMetadataSlice(expectedSlice) + actualSlice = purgeMetadataSlice(actualSlice) + assert.Equal(t, expectedSlice, actualSlice, "defsec adapted and expected values do not match") + } else { + var expectedMap map[string]interface{} + require.NoError(t, json.Unmarshal(expectedJson, &expectedMap)) + var actualMap map[string]interface{} + require.NoError(t, json.Unmarshal(actualJson, &actualMap)) + expectedMap = purgeMetadata(expectedMap) + actualMap = purgeMetadata(actualMap) + assert.Equal(t, expectedMap, actualMap, "defsec adapted and expected values do not match") + } +} + +// purgeMetadata recursively strips "metadata"/"Metadata" keys from a decoded JSON object. +func purgeMetadata(input map[string]interface{}) map[string]interface{} { + for k, v := range input { + if k == "metadata" || k == "Metadata" { + delete(input, k) + continue + } + if v, ok := v.(map[string]interface{}); ok { + input[k] = purgeMetadata(v) + } + if v, ok := v.([]interface{}); ok { + if len(v) > 0 { + if _, ok := v[0].(map[string]interface{}); ok { + maps := make([]map[string]interface{}, len(v)) + for i := range v { + maps[i] = v[i].(map[string]interface{}) + } + input[k] = purgeMetadataSlice(maps) + } + } + } + } + return input +} + +func purgeMetadataSlice(input []map[string]interface{}) []map[string]interface{} { + for i := range input { + input[i] = purgeMetadata(input[i]) + } + return input +} diff --git a/test/tf/fail/main.tf b/test/tf/fail/main.tf new file mode 100644 index 000000000000..afa28405f67c --- /dev/null +++ b/test/tf/fail/main.tf @@ -0,0 +1,3 @@ +resource "aws_s3_bucket" "bad" { + +} diff --git a/test/wildcard_test.go b/test/wildcard_test.go new file mode 100644 index 000000000000..727745851571 --- /dev/null +++ b/test/wildcard_test.go @@ -0,0 +1,84 @@ +package test + +import ( + "fmt" + "testing" + + "github.com/aquasecurity/trivy/pkg/rules" + "github.com/aquasecurity/trivy/pkg/scan" + "github.com/aquasecurity/trivy/pkg/severity" + "github.com/aquasecurity/trivy/pkg/terraform" + "github.com/aquasecurity/trivy/test/testutil" +) + +func Test_WildcardMatchingOnRequiredLabels(t *testing.T) { + + tests := []struct { + input string + pattern string + expectedFailure bool + }{ + { + pattern: "aws_*", + input: `resource "aws_instance" "blah" {}`, + expectedFailure: true, + }, + { + pattern: "gcp_*", + input: `resource "aws_instance" "blah" {}`, + expectedFailure: false, + }, + { + pattern: "x_aws_*", + input: `resource "aws_instance" "blah" {}`, + expectedFailure: false, + }, + { + pattern: "aws_security_group*", + input: `resource "aws_security_group" "blah" {}`, +
expectedFailure: true, + }, + { + pattern: "aws_security_group*", + input: `resource "aws_security_group_rule" "blah" {}`, + expectedFailure: true, + }, + } + + for i, test := range tests { + + code := fmt.Sprintf("wild%d", i) + + t.Run(code, func(t *testing.T) { + + rule := scan.Rule{ + Service: "service", + ShortCode: code, + Summary: "blah", + Provider: "custom", + Severity: severity.High, + CustomChecks: scan.CustomChecks{ + Terraform: &scan.TerraformCustomCheck{ + RequiredTypes: []string{"resource"}, + RequiredLabels: []string{test.pattern}, + Check: func(resourceBlock *terraform.Block, _ *terraform.Module) (results scan.Results) { + results.Add("Custom check failed for resource.", resourceBlock) + return + }, + }, + }, + } + reg := rules.Register(rule) + defer rules.Deregister(reg) + + results := scanHCL(t, test.input) + + if test.expectedFailure { + testutil.AssertRuleFound(t, fmt.Sprintf("custom-service-%s", code), results, "") + } else { + testutil.AssertRuleNotFound(t, fmt.Sprintf("custom-service-%s", code), results, "") + } + }) + } + +}
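
For reviewers who want to exercise the new test helpers end to end, the sketch below mirrors the registration pattern from test/wildcard_test.go above: it registers a throwaway custom Terraform check against a wildcard resource label, scans a one-line HCL snippet, and asserts that the check fired. This is a minimal sketch and not part of the diff: scanHCL is assumed to be the package-local helper that wildcard_test.go already calls, and the pkg/rules import path is assumed from the Register/Deregister calls in that file.

package test

import (
	"testing"

	"github.com/aquasecurity/trivy/pkg/rules"
	"github.com/aquasecurity/trivy/pkg/scan"
	"github.com/aquasecurity/trivy/pkg/severity"
	"github.com/aquasecurity/trivy/pkg/terraform"
	"github.com/aquasecurity/trivy/test/testutil"
)

func Test_CustomRuleSketch(t *testing.T) {
	// Register a custom check that fails for any resource whose type
	// matches the wildcard pattern "aws_*", mirroring wildcard_test.go.
	rule := scan.Rule{
		Provider:  "custom",
		Service:   "service",
		ShortCode: "sketch",
		Summary:   "example wildcard check",
		Severity:  severity.High,
		CustomChecks: scan.CustomChecks{
			Terraform: &scan.TerraformCustomCheck{
				RequiredTypes:  []string{"resource"},
				RequiredLabels: []string{"aws_*"},
				Check: func(block *terraform.Block, _ *terraform.Module) (results scan.Results) {
					results.Add("Custom check failed for resource.", block)
					return
				},
			},
		},
	}
	reg := rules.Register(rule)
	defer rules.Deregister(reg)

	// scanHCL is assumed to be the package-local helper used by wildcard_test.go.
	results := scanHCL(t, `resource "aws_instance" "example" {}`)

	// The long rule ID is provider-service-shortcode, as in wildcard_test.go.
	testutil.AssertRuleFound(t, "custom-service-sketch", results, "expected the wildcard rule to match")
}

Deregistering in a defer matters here: Register adds the rule to shared package-level state, so a leaked registration would bleed into every other test in the package that scans input.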