diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 57d94c4175e3e..10f47dd90f059 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -305,6 +305,7 @@ variables: MACOS_GITHUB_APP_1: macos-github-app-one # agent-devx-infra MACOS_GITHUB_APP_2: macos-github-app-two # agent-devx-infra SLACK_AGENT: slack-agent-ci # agent-devx-infra + SMP_ACCOUNT: smp # single-machine-performance # End vault variables DD_PKG_VERSION: "latest" diff --git a/.gitlab/common/macos.yml b/.gitlab/common/macos.yml index bae0768e69706..452ce75fe067c 100644 --- a/.gitlab/common/macos.yml +++ b/.gitlab/common/macos.yml @@ -19,14 +19,19 @@ .select_python_env_commands: # Select the virtualenv using the current Python version. Create it if it doesn't exist. + - PYTHON_VERSION=$(python3 --version | awk '{print $2}') + - VENV_NAME="datadog-agent-python-$PYTHON_VERSION" + - VENV_PATH="$(pyenv root)/versions/$VENV_NAME" + - echo "Using Python $PYTHON_VERSION..." - | - PYTHON_VERSION=$(python3 --version | awk '{print $2}') - VENV_NAME="datadog-agent-python-$PYTHON_VERSION" - echo "Using Python $PYTHON_VERSION..." - if ! pyenv virtualenvs --bare | grep -q "${VENV_NAME}$"; then - pyenv virtualenv $PYTHON_VERSION $VENV_NAME + # Check if the virtual environment directory exists + if [ ! -d "$VENV_PATH" ]; then + echo "Creating virtual environment '$VENV_NAME'..." + pyenv virtualenv "$PYTHON_VERSION" "$VENV_NAME" + else + echo "Virtual environment '$VENV_NAME' already exists. Skipping creation." fi - pyenv activate $VENV_NAME + - pyenv activate $VENV_NAME .macos_gitlab: before_script: diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index 599082bebda50..4009cf1119e94 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -20,7 +20,7 @@ single-machine-performance-regression_detector: - outputs/junit.xml # for debugging, also on S3 when: always variables: - SMP_VERSION: 0.16.0 + SMP_VERSION: 0.18.1 # At present we require two artifacts to exist for the 'baseline' and the # 'comparison'. We are guaranteed by the structure of the pipeline that # 'comparison' exists, not so much with 'baseline' as it has to come from main @@ -35,7 +35,6 @@ single-machine-performance-regression_detector: script: # Ensure output files exist for artifact downloads step - mkdir outputs # Also needed for smp job sync step - - touch outputs/report.md # Will be emitted by smp job sync # Compute merge base of current commit and `main` - git fetch origin - SMP_BASE_BRANCH=$(inv release.get-release-json-value base_branch) @@ -44,12 +43,12 @@ single-machine-performance-regression_detector: - echo "Merge base is ${SMP_MERGE_BASE}" # Setup AWS credentials for single-machine-performance AWS account - AWS_NAMED_PROFILE="single-machine-performance" - - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT_ID) || exit $? + - SMP_ACCOUNT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT account_id) || exit $? - SMP_ECR_URL=${SMP_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com - - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_AGENT_TEAM_ID) || exit $? - - SMP_API=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_API) || exit $? - - SMP_BOT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY_ID) || exit $? - - SMP_BOT_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_BOT_ACCESS_KEY) || exit $? + - SMP_AGENT_TEAM_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT agent_team_id) || exit $? 
+ - SMP_API=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT api_url) || exit $? + - SMP_BOT_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT bot_login) || exit $? + - SMP_BOT_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $SMP_ACCOUNT bot_token) || exit $? - aws configure set aws_access_key_id "$SMP_BOT_ID" --profile ${AWS_NAMED_PROFILE} - aws configure set aws_secret_access_key "$SMP_BOT_KEY" --profile ${AWS_NAMED_PROFILE} - aws configure set region us-west-2 --profile ${AWS_NAMED_PROFILE} @@ -158,6 +157,14 @@ single-machine-performance-regression_detector-pr-comment: FF_KUBERNETES_HONOR_ENTRYPOINT: false allow_failure: true # allow_failure here should have same setting as in job above script: # ignore error message about no PR, because it happens for dev branches without PRs + # Prevent posting empty Regression Detector report if Markdown report is not found or + # has zero size. + - | + if [[ ! -s "outputs/report.md" ]] + then + echo "ERROR: Regression Detector report not found -- no PR comment posted" + exit 1 + fi # We need to transform the Markdown report into a valid JSON string (without # quotes) in order to pass a well-formed payload to the PR commenting # service. Note that on macOS, the "-z" flag is invalid for `sed` (but diff --git a/.gitlab/package_build/heroku.yml b/.gitlab/package_build/heroku.yml index 8b7549cd7a8e3..1231ac0eed8d4 100644 --- a/.gitlab/package_build/heroku.yml +++ b/.gitlab/package_build/heroku.yml @@ -51,7 +51,6 @@ agent_heroku_deb-x64-a7: extends: .heroku_build_base variables: - DESTINATION_DEB: "datadog-heroku-agent_7_amd64.deb" DESTINATION_DBG_DEB: "datadog-heroku-agent-dbg_7_amd64.deb" RELEASE_VERSION: $RELEASE_VERSION_7 AGENT_MAJOR_VERSION: 7 diff --git a/.gitlab/packaging/deb.yml b/.gitlab/packaging/deb.yml index 7b99527a6142a..55f7026d8df79 100644 --- a/.gitlab/packaging/deb.yml +++ b/.gitlab/packaging/deb.yml @@ -46,7 +46,6 @@ agent_deb-x64-a7: - when: on_success needs: ["datadog-agent-7-x64"] variables: - DESTINATION_DEB: "datadog-agent_7_amd64.deb" DD_PROJECT: "agent" agent_deb-arm64-a7: @@ -56,7 +55,6 @@ agent_deb-arm64-a7: - when: on_success needs: ["datadog-agent-7-arm64"] variables: - DESTINATION_DEB: "datadog-agent_7_arm64.deb" DD_PROJECT: "agent" .package_ot_deb_common: @@ -75,7 +73,6 @@ ot_agent_deb-x64-a7: - when: on_success needs: ["datadog-ot-agent-7-x64"] variables: - DESTINATION_DEB: "datadog-ot-agent_7_amd64.deb" DD_PROJECT: "agent" ot_agent_deb-arm64-a7: @@ -85,7 +82,6 @@ ot_agent_deb-arm64-a7: - when: on_success needs: ["datadog-ot-agent-7-arm64"] variables: - DESTINATION_DEB: "datadog-ot-agent_7_arm64.deb" DD_PROJECT: "agent" installer_deb-amd64: @@ -95,7 +91,6 @@ installer_deb-amd64: - when: on_success needs: ["installer-amd64"] variables: - DESTINATION_DEB: "datadog-installer_7_amd64.deb" DD_PROJECT: "installer" # There are currently no files to check for in the installer so we # explicitly disable the check @@ -108,7 +103,6 @@ installer_deb-arm64: - when: on_success needs: ["installer-arm64"] variables: - DESTINATION_DEB: "datadog-installer_7_arm64.deb" DD_PROJECT: "installer" PACKAGE_REQUIRED_FILES_LIST: "" @@ -140,14 +134,10 @@ installer_deb-arm64: iot_agent_deb-x64: extends: [.package_iot_deb_common, .package_deb_x86] needs: ["iot-agent-x64"] - variables: - DESTINATION_DEB: "datadog-iot-agent_7_amd64.deb" iot_agent_deb-arm64: extends: [.package_iot_deb_common, .package_deb_arm64] needs: ["iot-agent-arm64"] - variables: - DESTINATION_DEB: "datadog-iot-agent_7_arm64.deb" iot_agent_deb-armhf: extends: 
.package_iot_deb_common @@ -156,7 +146,6 @@ iot_agent_deb-armhf: needs: ["iot-agent-armhf"] variables: PACKAGE_ARCH: armhf - DESTINATION_DEB: "datadog-iot-agent_7_armhf.deb" DD_PKG_ARCH: "arm64" FORCED_PACKAGE_COMPRESSION_LEVEL: 5 @@ -168,7 +157,6 @@ dogstatsd_deb-x64: needs: ["dogstatsd-x64"] variables: DD_PROJECT: dogstatsd - DESTINATION_DEB: "datadog-dogstatsd_amd64.deb" PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-deb.txt" dogstatsd_deb-arm64: @@ -179,6 +167,5 @@ dogstatsd_deb-arm64: needs: ["dogstatsd-arm64"] variables: DD_PROJECT: dogstatsd - DESTINATION_DEB: "datadog-dogstatsd_arm64.deb" PACKAGE_REQUIRED_FILES_LIST: "test/required_files/dogstatsd-deb.txt" diff --git a/cmd/agent/subcommands/snmp/command.go b/cmd/agent/subcommands/snmp/command.go index 9ca08528338af..b8c86d15c4f11 100644 --- a/cmd/agent/subcommands/snmp/command.go +++ b/cmd/agent/subcommands/snmp/command.go @@ -135,7 +135,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Supply(core.BundleParams{ ConfigParams: config.NewAgentParams(globalParams.ConfFilePath, config.WithExtraConfFiles(globalParams.ExtraConfFilePath), config.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath)), SecretParams: secrets.NewEnabledParams(), - LogParams: log.ForOneShot(command.LoggerName, "info", true)}), + LogParams: log.ForOneShot(command.LoggerName, "off", true)}), core.Bundle(), snmpscanfx.Module(), demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams()), diff --git a/cmd/installer/subcommands/installer/command.go b/cmd/installer/subcommands/installer/command.go index bb380cbfd79ab..af06d875537df 100644 --- a/cmd/installer/subcommands/installer/command.go +++ b/cmd/installer/subcommands/installer/command.go @@ -9,6 +9,7 @@ package installer import ( "context" "fmt" + "net/url" "os" "runtime" "strings" @@ -46,6 +47,10 @@ const ( envAgentMajorVersion = "DD_AGENT_MAJOR_VERSION" envAgentMinorVersion = "DD_AGENT_MINOR_VERSION" envAgentDistChannel = "DD_AGENT_DIST_CHANNEL" + envRemoteUpdates = "DD_REMOTE_UPDATES" + envHTTPProxy = "HTTP_PROXY" + envHTTPSProxy = "HTTPS_PROXY" + envNoProxy = "NO_PROXY" ) // BootstrapCommand returns the bootstrap command. 
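Editor's note: the next hunk of cmd/installer/subcommands/installer/command.go tags the bootstrapper span with the HTTP_PROXY/HTTPS_PROXY values after passing them through a small redaction helper built on net/url. As a quick illustration of the standard-library behavior that helper relies on — this is a standalone sketch for reference, not the installer's code; the sample proxy URL and the `redact` name are made up — url.Redacted() keeps the scheme, host, and username but masks any embedded password:

```go
package main

import (
	"fmt"
	"net/url"
)

// redact mirrors the idea of the helper added in the hunk below: empty input
// stays empty, unparsable input is flagged, and anything else is printed with
// the password masked by the standard library.
func redact(raw string) string {
	if raw == "" {
		return ""
	}
	u, err := url.Parse(raw)
	if err != nil {
		return "invalid"
	}
	return u.Redacted() // password replaced with "xxxxx"
}

func main() {
	fmt.Println(redact("http://user:s3cret@proxy.internal:3128")) // http://user:xxxxx@proxy.internal:3128
	fmt.Println(redact(""))                                       // empty stays empty
	fmt.Println(redact("://bad"))                                 // invalid (missing protocol scheme)
}
```

This keeps proxy settings visible in telemetry without leaking credentials that may be embedded in the URL.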
@@ -161,11 +166,27 @@ func newBootstrapperCmd(operation string) *bootstrapperCmd { cmd.span.SetTag("env.DD_RPM_REPO_GPGCHECK", os.Getenv(envRPMRepoGPGCheck)) cmd.span.SetTag("env.DD_AGENT_MAJOR_VERSION", os.Getenv(envAgentMajorVersion)) cmd.span.SetTag("env.DD_AGENT_MINOR_VERSION", os.Getenv(envAgentMinorVersion)) + cmd.span.SetTag("env.DD_AGENT_DIST_CHANNEL", os.Getenv(envAgentDistChannel)) + cmd.span.SetTag("env.DD_REMOTE_UPDATES", os.Getenv(envRemoteUpdates)) + cmd.span.SetTag("env.HTTP_PROXY", redactURL(os.Getenv(envHTTPProxy))) + cmd.span.SetTag("env.HTTPS_PROXY", redactURL(os.Getenv(envHTTPSProxy))) + cmd.span.SetTag("env.NO_PROXY", os.Getenv(envNoProxy)) return &bootstrapperCmd{ cmd: cmd, } } +func redactURL(u string) string { + if u == "" { + return "" + } + url, err := url.Parse(u) + if err != nil { + return "invalid" + } + return url.Redacted() +} + type telemetryConfigFields struct { APIKey string `yaml:"api_key"` Site string `yaml:"site"` diff --git a/cmd/serverless-init/main.go b/cmd/serverless-init/main.go index 8540768bb1ca7..f1ae1daff5438 100644 --- a/cmd/serverless-init/main.go +++ b/cmd/serverless-init/main.go @@ -177,6 +177,7 @@ func setupTraceAgent(tags map[string]string, tagger tagger.Component) trace.Serv ColdStartSpanID: random.Random.Uint64(), AzureContainerAppTags: azureTags.String(), }) + traceAgent.SetTags(tags) go func() { for range time.Tick(3 * time.Second) { traceAgent.Flush() diff --git a/cmd/system-probe/modules/gpu.go b/cmd/system-probe/modules/gpu.go index 1b68038a4edf7..3f1a5f35b16bb 100644 --- a/cmd/system-probe/modules/gpu.go +++ b/cmd/system-probe/modules/gpu.go @@ -33,7 +33,7 @@ var GPUMonitoring = module.Factory{ ConfigNamespaces: gpuMonitoringConfigNamespaces, Fn: func(_ *sysconfigtypes.Config, deps module.FactoryDependencies) (module.Module, error) { - c := gpuconfig.NewConfig() + c := gpuconfig.New() probeDeps := gpu.ProbeDependencies{ Telemetry: deps.Telemetry, //if the config parameter doesn't exist or is empty string, the default value is used as defined in go-nvml library diff --git a/cmd/trace-agent/test/testsuite/cards_test.go b/cmd/trace-agent/test/testsuite/cards_test.go index f82896e8abd59..af67a1815f985 100644 --- a/cmd/trace-agent/test/testsuite/cards_test.go +++ b/cmd/trace-agent/test/testsuite/cards_test.go @@ -86,6 +86,17 @@ apm_config: out: "?", version: "v0.7", }, + { + conf: []byte(` +apm_config: + env: my-env + obfuscation: + credit_cards: + enabled: false + keep_values: ["credit_card_number"]`), + out: "4166 6766 6766 6746", + version: "v0.5", + }, } { t.Run(string(tt.version)+"/"+tt.out, func(t *testing.T) { if err := r.RunAgent(tt.conf); err != nil { diff --git a/docs/cloud-workload-security/linux_expressions.md b/docs/cloud-workload-security/linux_expressions.md index 437198fc2aaf0..fbc72d894b298 100644 --- a/docs/cloud-workload-security/linux_expressions.md +++ b/docs/cloud-workload-security/linux_expressions.md @@ -386,6 +386,7 @@ A bind was executed | -------- | ------------- | | [`bind.addr.family`](#bind-addr-family-doc) | Address family | | [`bind.addr.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`bind.addr.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`bind.addr.port`](#common-ipportcontext-port-doc) | Port number | | [`bind.retval`](#common-syscallevent-retval-doc) | Return value of the syscall | @@ -520,10 +521,12 @@ A connect was executed | -------- | ------------- | | [`connect.addr.family`](#connect-addr-family-doc) | Address family | | 
[`connect.addr.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`connect.addr.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`connect.addr.port`](#common-ipportcontext-port-doc) | Port number | | [`connect.retval`](#common-syscallevent-retval-doc) | Return value of the syscall | | [`connect.server.addr.family`](#connect-server-addr-family-doc) | Server address family | | [`connect.server.addr.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`connect.server.addr.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`connect.server.addr.port`](#common-ipportcontext-port-doc) | Port number | ### Event `dns` @@ -540,12 +543,14 @@ A DNS request was sent | [`dns.question.name.length`](#common-string-length-doc) | Length of the corresponding element | | [`dns.question.type`](#dns-question-type-doc) | a two octet code which specifies the DNS question type | | [`network.destination.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`network.destination.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`network.destination.port`](#common-ipportcontext-port-doc) | Port number | | [`network.device.ifname`](#common-networkdevicecontext-ifname-doc) | Interface ifname | | [`network.l3_protocol`](#common-networkcontext-l3_protocol-doc) | L3 protocol of the network packet | | [`network.l4_protocol`](#common-networkcontext-l4_protocol-doc) | L4 protocol of the network packet | | [`network.size`](#common-networkcontext-size-doc) | Size in bytes of the network packet | | [`network.source.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`network.source.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`network.source.port`](#common-ipportcontext-port-doc) | Port number | ### Event `exec` @@ -742,12 +747,14 @@ An IMDS event was captured | [`imds.url`](#imds-url-doc) | the queried IMDS URL | | [`imds.user_agent`](#imds-user_agent-doc) | the user agent of the HTTP client | | [`network.destination.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`network.destination.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`network.destination.port`](#common-ipportcontext-port-doc) | Port number | | [`network.device.ifname`](#common-networkdevicecontext-ifname-doc) | Interface ifname | | [`network.l3_protocol`](#common-networkcontext-l3_protocol-doc) | L3 protocol of the network packet | | [`network.l4_protocol`](#common-networkcontext-l4_protocol-doc) | L4 protocol of the network packet | | [`network.size`](#common-networkcontext-size-doc) | Size in bytes of the network packet | | [`network.source.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`network.source.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`network.source.port`](#common-ipportcontext-port-doc) | Port number | ### Event `link` @@ -960,6 +967,7 @@ A raw network packet captured | Property | Definition | | -------- | ------------- | | [`packet.destination.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`packet.destination.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`packet.destination.port`](#common-ipportcontext-port-doc) | Port number | | [`packet.device.ifname`](#common-networkdevicecontext-ifname-doc) | Interface ifname 
| | [`packet.filter`](#packet-filter-doc) | pcap filter expression | @@ -967,6 +975,7 @@ A raw network packet captured | [`packet.l4_protocol`](#common-networkcontext-l4_protocol-doc) | L4 protocol of the network packet | | [`packet.size`](#common-networkcontext-size-doc) | Size in bytes of the network packet | | [`packet.source.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`packet.source.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | | [`packet.source.port`](#common-ipportcontext-port-doc) | Port number | | [`packet.tls.version`](#packet-tls-version-doc) | TLS version | @@ -2157,6 +2166,15 @@ Definition: Indicates whether the process is a kworker `exec` `exit` `process` `process.ancestors` `process.parent` `ptrace.tracee` `ptrace.tracee.ancestors` `ptrace.tracee.parent` `signal.target` `signal.target.ancestors` `signal.target.parent` +### `*.is_public` {#common-ipportcontext-is_public-doc} +Type: bool + +Definition: Whether the IP address belongs to a public network + +`*.is_public` has 7 possible prefixes: +`bind.addr` `connect.addr` `connect.server.addr` `network.destination` `network.source` `packet.destination` `packet.source` + + ### `*.is_thread` {#common-process-is_thread-doc} Type: bool diff --git a/docs/cloud-workload-security/secl_linux.json b/docs/cloud-workload-security/secl_linux.json index 034d1b5c4f8ec..c6deb90ff4e13 100644 --- a/docs/cloud-workload-security/secl_linux.json +++ b/docs/cloud-workload-security/secl_linux.json @@ -1316,6 +1316,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "bind.addr.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "bind.addr.port", "definition": "Port number", @@ -1820,6 +1825,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "connect.addr.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "connect.addr.port", "definition": "Port number", @@ -1840,6 +1850,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "connect.server.addr.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "connect.server.addr.port", "definition": "Port number", @@ -1894,6 +1909,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "network.destination.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "network.destination.port", "definition": "Port number", @@ -1924,6 +1944,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "network.source.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "network.source.port", "definition": "Port number", @@ -2826,6 +2851,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "network.destination.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": 
"network.destination.port", "definition": "Port number", @@ -2856,6 +2886,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "network.source.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "network.source.port", "definition": "Port number", @@ -3698,6 +3733,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "packet.destination.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "packet.destination.port", "definition": "Port number", @@ -3733,6 +3773,11 @@ "definition": "IP address", "property_doc_link": "common-ipportcontext-ip-doc" }, + { + "name": "packet.source.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, { "name": "packet.source.port", "definition": "Port number", @@ -8379,6 +8424,24 @@ "constants_link": "", "examples": [] }, + { + "name": "*.is_public", + "link": "common-ipportcontext-is_public-doc", + "type": "bool", + "definition": "Whether the IP address belongs to a public network", + "prefixes": [ + "bind.addr", + "connect.addr", + "connect.server.addr", + "network.destination", + "network.source", + "packet.destination", + "packet.source" + ], + "constants": "", + "constants_link": "", + "examples": [] + }, { "name": "*.is_thread", "link": "common-process-is_thread-doc", diff --git a/pkg/config/setup/apm.go b/pkg/config/setup/apm.go index 370bdcae39ff9..225aa0607b7a7 100644 --- a/pkg/config/setup/apm.go +++ b/pkg/config/setup/apm.go @@ -152,6 +152,7 @@ func setupAPM(config pkgconfigmodel.Setup) { config.BindEnv("apm_config.install_time", "DD_INSTRUMENTATION_INSTALL_TIME") config.BindEnv("apm_config.obfuscation.credit_cards.enabled", "DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED") config.BindEnv("apm_config.obfuscation.credit_cards.luhn", "DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN") + config.BindEnv("apm_config.obfuscation.credit_cards.keep_values", "DD_APM_OBFUSCATION_CREDIT_CARDS_KEEP_VALUES") config.BindEnvAndSetDefault("apm_config.debug.port", 5012, "DD_APM_DEBUG_PORT") config.BindEnv("apm_config.features", "DD_APM_FEATURES") config.ParseEnvAsStringSlice("apm_config.features", func(s string) []string { @@ -183,7 +184,7 @@ func setupAPM(config pkgconfigmodel.Setup) { config.ParseEnvAsStringSlice("apm_config.filter_tags.reject", parseKVList("apm_config.filter_tags.reject")) config.ParseEnvAsStringSlice("apm_config.filter_tags_regex.require", parseKVList("apm_config.filter_tags_regex.require")) config.ParseEnvAsStringSlice("apm_config.filter_tags_regex.reject", parseKVList("apm_config.filter_tags_regex.reject")) - + config.ParseEnvAsStringSlice("apm_config.obfuscation.credit_cards.keep_values", parseKVList("apm_config.obfuscation.credit_cards.keep_values")) config.ParseEnvAsSliceMapString("apm_config.replace_tags", func(in string) []map[string]string { var out []map[string]string if err := json.Unmarshal([]byte(in), &out); err != nil { diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 9414bcda99cf1..7d1c6f69dcfc1 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -378,6 +378,8 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.enabled"), 
true) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.ingress.enabled"), false) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.raw_packet.enabled"), false) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.private_ip_ranges"), DefaultPrivateIPCIDRs) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.extra_private_ip_ranges"), []string{}) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "events_stats.polling_interval"), 20) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "syscalls_monitor.enabled"), false) cfg.BindEnvAndSetDefault(join(evNS, "socket"), defaultEventMonitorAddress) @@ -450,3 +452,26 @@ func eventMonitorBindEnv(config pkgconfigmodel.Config, key string) { config.BindEnv(key, emConfigKey, runtimeSecKey) } + +// DefaultPrivateIPCIDRs is a list of private IP CIDRs that are used to determine if an IP is private or not. +var DefaultPrivateIPCIDRs = []string{ + // IETF RPC 1918 + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + // IETF RFC 5735 + "0.0.0.0/8", + "127.0.0.0/8", + "169.254.0.0/16", + "192.0.0.0/24", + "192.0.2.0/24", + "198.18.0.0/15", + "198.51.100.0/24", + "203.0.113.0/24", + "224.0.0.0/4", + "240.0.0.0/4", + // IETF RFC 6598 + "100.64.0.0/10", + // IETF RFC 4193 + "fc00::/7", +} diff --git a/pkg/flare/envvars.go b/pkg/flare/envvars.go index 7f53fcbc2ceef..2d232cdbe5728 100644 --- a/pkg/flare/envvars.go +++ b/pkg/flare/envvars.go @@ -88,6 +88,7 @@ var allowedEnvvarNames = []string{ "DD_APM_SYMDB_DD_URL", "DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED", "DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN", + "DD_APM_OBFUSCATION_CREDIT_CARDS_KEEP_VALUES", "DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED", "DD_APM_OBFUSCATION_ELASTICSEARCH_KEEP_VALUES", "DD_APM_OBFUSCATION_ELASTICSEARCH_OBFUSCATE_SQL_VALUES", diff --git a/pkg/fleet/installer/installer.go b/pkg/fleet/installer/installer.go index 934ea1b2a877e..aa3121dc18b49 100644 --- a/pkg/fleet/installer/installer.go +++ b/pkg/fleet/installer/installer.go @@ -605,7 +605,7 @@ func (i *installerImpl) configurePackage(ctx context.Context, pkg string) (err e defer func() { span.Finish(tracer.WithError(err)) }() switch pkg { - case packageDatadogAgent: + case packageDatadogAgent, packageAPMInjector: config, err := i.cdn.Get(ctx, pkg) if err != nil { return fmt.Errorf("could not get %s CDN config: %w", pkg, err) diff --git a/pkg/fleet/internal/cdn/cdn_http.go b/pkg/fleet/internal/cdn/cdn_http.go index 1bcc8124112f3..825b448ca531e 100644 --- a/pkg/fleet/internal/cdn/cdn_http.go +++ b/pkg/fleet/internal/cdn/cdn_http.go @@ -23,6 +23,7 @@ import ( type cdnHTTP struct { client *remoteconfig.HTTPClient currentRootsVersion uint64 + hostTagsGetter hostTagsGetter } func newCDNHTTP(env *env.Env, configDBPath string) (CDN, error) { @@ -38,6 +39,7 @@ func newCDNHTTP(env *env.Env, configDBPath string) (CDN, error) { return &cdnHTTP{ client: client, currentRootsVersion: 1, + hostTagsGetter: newHostTagsGetter(), }, nil } @@ -57,6 +59,15 @@ func (c *cdnHTTP) Get(ctx context.Context, pkg string) (cfg Config, err error) { if err != nil { return nil, err } + case "datadog-apm-inject": + orderConfig, layers, err := c.get(ctx) + if err != nil { + return nil, err + } + cfg, err = newAPMConfig(c.hostTagsGetter.get(), orderConfig, layers...) 
+ if err != nil { + return nil, err + } default: return nil, ErrProductNotSupported } diff --git a/pkg/fleet/internal/cdn/cdn_local.go b/pkg/fleet/internal/cdn/cdn_local.go index b70e0c4499dd8..00b0b3e7b5a6e 100644 --- a/pkg/fleet/internal/cdn/cdn_local.go +++ b/pkg/fleet/internal/cdn/cdn_local.go @@ -65,6 +65,11 @@ func (c *cdnLocal) Get(_ context.Context, pkg string) (cfg Config, err error) { if err != nil { return nil, err } + case "datadog-apm-inject": + cfg, err = newAPMConfig([]string{}, orderConfig, layers...) + if err != nil { + return nil, err + } default: return nil, ErrProductNotSupported } diff --git a/pkg/fleet/internal/cdn/cdn_rc.go b/pkg/fleet/internal/cdn/cdn_rc.go index de3d21c1f11a7..4244fe8ba3168 100644 --- a/pkg/fleet/internal/cdn/cdn_rc.go +++ b/pkg/fleet/internal/cdn/cdn_rc.go @@ -31,6 +31,7 @@ type cdnRC struct { clientUUID string configDBPath string firstRequest bool + hostTagsGetter hostTagsGetter } // newCDNRC creates a new CDN with RC: it fetches the configuration from the remote config service instead of cloudfront @@ -84,6 +85,7 @@ func newCDNRC(env *env.Env, configDBPath string) (CDN, error) { clientUUID: uuid.New().String(), configDBPath: configDBPathTemp, firstRequest: true, + hostTagsGetter: ht, } service.Start() return cdn, nil @@ -104,6 +106,15 @@ func (c *cdnRC) Get(ctx context.Context, pkg string) (cfg Config, err error) { if err != nil { return nil, err } + case "datadog-apm-inject": + orderConfig, layers, err := c.get(ctx) + if err != nil { + return nil, err + } + cfg, err = newAPMConfig(c.hostTagsGetter.get(), orderConfig, layers...) + if err != nil { + return nil, err + } default: return nil, ErrProductNotSupported } diff --git a/pkg/fleet/internal/cdn/config_datadog_agent_test.go b/pkg/fleet/internal/cdn/config_datadog_agent_test.go index f24b95956886b..b931f710b0513 100644 --- a/pkg/fleet/internal/cdn/config_datadog_agent_test.go +++ b/pkg/fleet/internal/cdn/config_datadog_agent_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestConfig(t *testing.T) { +func TestAgentConfig(t *testing.T) { baseLayer := &agentConfigLayer{ ID: "base", AgentConfig: map[string]interface{}{ diff --git a/pkg/fleet/internal/cdn/config_datadog_apm.go b/pkg/fleet/internal/cdn/config_datadog_apm.go new file mode 100644 index 0000000000000..12add385bd377 --- /dev/null +++ b/pkg/fleet/internal/cdn/config_datadog_apm.go @@ -0,0 +1,112 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package cdn + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/vmihailenco/msgpack/v5" +) + +const ( + injectorConfigFilename = "injector.msgpack" +) + +// apmConfig represents the injector configuration from the CDN. +type apmConfig struct { + version string + injectorConfig []byte +} + +// apmConfigLayer is a config layer that can be merged with other layers into a config. +type apmConfigLayer struct { + ID string `json:"name"` + InjectorConfig map[string]interface{} `json:"apm_ssi_config"` +} + +// Version returns the version (hash) of the agent configuration. 
+func (i *apmConfig) Version() string { + return i.version +} + +func newAPMConfig(hostTags []string, configOrder *orderConfig, rawLayers ...[]byte) (*apmConfig, error) { + if configOrder == nil { + return nil, fmt.Errorf("order config is nil") + } + + // Unmarshal layers + layers := map[string]*apmConfigLayer{} + for _, rawLayer := range rawLayers { + layer := &apmConfigLayer{} + if err := json.Unmarshal(rawLayer, layer); err != nil { + log.Warnf("Failed to unmarshal layer: %v", err) + continue + } + + if layer.InjectorConfig != nil { + // Only add layers that have at least one config that matches + layers[layer.ID] = layer + } + } + + // Compile ordered layers into a single config + // TODO: maybe we don't want that and we should reject if there are more than one config? + compiledLayer := &apmConfigLayer{ + InjectorConfig: map[string]interface{}{}, + } + for i := len(configOrder.Order) - 1; i >= 0; i-- { + layerID := configOrder.Order[i] + layer, ok := layers[layerID] + if !ok { + continue + } + + if layer.InjectorConfig != nil { + agentConfig, err := merge(compiledLayer.InjectorConfig, layer.InjectorConfig) + if err != nil { + return nil, err + } + compiledLayer.InjectorConfig = agentConfig.(map[string]interface{}) + } + } + + hash := sha256.New() + version, err := json.Marshal(compiledLayer) + if err != nil { + return nil, err + } + hash.Write(version) + + // Add host tags AFTER compiling the version -- we don't want to trigger noop updates + compiledLayer.InjectorConfig["host_tags"] = hostTags + + // Marshal into msgpack configs + injectorConfig, err := msgpack.Marshal(compiledLayer.InjectorConfig) + if err != nil { + return nil, err + } + + return &apmConfig{ + version: fmt.Sprintf("%x", hash.Sum(nil)), + injectorConfig: injectorConfig, + }, nil +} + +// Write writes the agent configuration to the given directory. 
+func (i *apmConfig) Write(dir string) error { + if i.injectorConfig != nil { + err := os.WriteFile(filepath.Join(dir, injectorConfigFilename), []byte(i.injectorConfig), 0644) // Must be world readable + if err != nil { + return fmt.Errorf("could not write datadog.yaml: %w", err) + } + } + return nil +} diff --git a/pkg/fleet/internal/oci/download.go b/pkg/fleet/internal/oci/download.go index 40dd17b727a1f..7924395f76a46 100644 --- a/pkg/fleet/internal/oci/download.go +++ b/pkg/fleet/internal/oci/download.go @@ -73,7 +73,6 @@ var ( defaultRegistriesProd = []string{ "install.datadoghq.com", "gcr.io/datadoghq", - "public.ecr.aws/datadog", "docker.io/datadog", } ) @@ -210,7 +209,8 @@ func getRefAndKeychain(env *env.Env, url string) urlWithKeychain { } } ref := url - if registryOverride != "" { + // public.ecr.aws/datadog is ignored for now as there are issues with it + if registryOverride != "" && registryOverride != "public.ecr.aws/datadog" { if !strings.HasSuffix(registryOverride, "/") { registryOverride += "/" } diff --git a/pkg/fleet/internal/oci/download_test.go b/pkg/fleet/internal/oci/download_test.go index 55fcf673fb6e6..3a51426ced4c7 100644 --- a/pkg/fleet/internal/oci/download_test.go +++ b/pkg/fleet/internal/oci/download_test.go @@ -279,7 +279,6 @@ func TestGetRefAndKeychains(t *testing.T) { expectedRefAndKeychains: []urlWithKeychain{ {ref: "install.datadoghq.com/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, {ref: "gcr.io/datadoghq/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, - {ref: "public.ecr.aws/datadog/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, {ref: "docker.io/datadog/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, }, }, @@ -291,7 +290,6 @@ func TestGetRefAndKeychains(t *testing.T) { {ref: "mysuperregistry.tv/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, {ref: "install.datadoghq.com/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, {ref: "gcr.io/datadoghq/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, - {ref: "public.ecr.aws/datadog/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, {ref: "docker.io/datadog/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, }, }, @@ -305,7 +303,6 @@ func TestGetRefAndKeychains(t *testing.T) { {ref: "mysuperregistry.tv/agent-package@sha256:1234", keychain: google.Keychain}, {ref: "install.datadoghq.com/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, {ref: "gcr.io/datadoghq/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, - {ref: "public.ecr.aws/datadog/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, {ref: "docker.io/datadog/agent-package@sha256:1234", keychain: authn.DefaultKeychain}, }, }, diff --git a/pkg/gpu/config/config.go b/pkg/gpu/config/config.go index e9920e62a6e30..aa239a129d24e 100644 --- a/pkg/gpu/config/config.go +++ b/pkg/gpu/config/config.go @@ -58,8 +58,8 @@ type Config struct { NVMLLibraryPath string } -// NewConfig generates a new configuration for the GPU monitoring probe. -func NewConfig() *Config { +// New generates a new configuration for the GPU monitoring probe. 
+func New() *Config { spCfg := pkgconfigsetup.SystemProbe() return &Config{ Config: *ebpf.NewConfig(), diff --git a/pkg/gpu/consumer_test.go b/pkg/gpu/consumer_test.go index ef90d14624953..6530f2bb49738 100644 --- a/pkg/gpu/consumer_test.go +++ b/pkg/gpu/consumer_test.go @@ -21,7 +21,7 @@ import ( func TestConsumerCanStartAndStop(t *testing.T) { handler := ddebpf.NewRingBufferHandler(consumerChannelSize) - cfg := config.NewConfig() + cfg := config.New() ctx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot()) require.NoError(t, err) consumer := newCudaEventConsumer(ctx, handler, cfg) diff --git a/pkg/gpu/probe_test.go b/pkg/gpu/probe_test.go index 58e46ba0d4093..2a08ec30415fc 100644 --- a/pkg/gpu/probe_test.go +++ b/pkg/gpu/probe_test.go @@ -24,7 +24,7 @@ func TestProbeCanLoad(t *testing.T) { t.Skipf("minimum kernel version not met, %v", err) } - cfg := config.NewConfig() + cfg := config.New() cfg.InitialProcessSync = false nvmlMock := testutil.GetBasicNvmlMock() probe, err := NewProbe(cfg, ProbeDependencies{NvmlLib: nvmlMock}) @@ -46,7 +46,7 @@ func TestProbeCanReceiveEvents(t *testing.T) { require.NoError(t, procMon.Initialize(false)) t.Cleanup(procMon.Stop) - cfg := config.NewConfig() + cfg := config.New() cfg.InitialProcessSync = false cfg.BPFDebug = true @@ -99,7 +99,7 @@ func TestProbeCanGenerateStats(t *testing.T) { require.NoError(t, procMon.Initialize(false)) t.Cleanup(procMon.Stop) - cfg := config.NewConfig() + cfg := config.New() cfg.InitialProcessSync = false cfg.BPFDebug = true diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index 703be195d3b37..bd56759eb271f 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -52,6 +52,7 @@ const ( kafkaPort = "9092" kafkaTLSPort = "9093" kafkaSuccessErrorCode = 0 + ubuntuPlatform = "ubuntu" ) // testContext shares the context of a given test. @@ -100,6 +101,19 @@ type groupInfo struct { msgs []Message } +// isUnsupportedUbuntu checks if the test is running on an unsupported Ubuntu version. +// As of now, we don’t support Kafka TLS with Ubuntu 24.10, so this function identifies +// if the current platform and version match this unsupported configuration. 
+func isUnsupportedUbuntu(t *testing.T) bool { + platform, err := kernel.Platform() + require.NoError(t, err) + platformVersion, err := kernel.PlatformVersion() + require.NoError(t, err) + arch := kernel.Arch() + + return platform == ubuntuPlatform && platformVersion == "24.10" && arch == "x86" +} + func skipTestIfKernelNotSupported(t *testing.T) { currKernelVersion, err := kernel.HostVersion() require.NoError(t, err) @@ -156,6 +170,9 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProtocolParsing() { if mode && !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } + if mode && isUnsupportedUbuntu(t) { + t.Skip("Kafka TLS not supported on Ubuntu 24.10") + } for _, version := range versions { t.Run(versionName(version), func(t *testing.T) { s.testKafkaProtocolParsing(t, mode, version) @@ -1250,6 +1267,9 @@ func (s *KafkaProtocolParsingSuite) TestKafkaFetchRaw() { if !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } + if isUnsupportedUbuntu(t) { + t.Skip("Kafka TLS not supported on Ubuntu 24.10") + } for _, version := range versions { t.Run(fmt.Sprintf("api%d", version), func(t *testing.T) { @@ -1476,6 +1496,9 @@ func (s *KafkaProtocolParsingSuite) TestKafkaProduceRaw() { if !gotlsutils.GoTLSSupported(t, config.New()) { t.Skip("GoTLS not supported for this setup") } + if isUnsupportedUbuntu(t) { + t.Skip("Kafka TLS not supported on Ubuntu 24.10") + } for _, version := range versions { t.Run(fmt.Sprintf("api%d", version), func(t *testing.T) { diff --git a/pkg/networkpath/traceroute/runner.go b/pkg/networkpath/traceroute/runner.go index 316f2daa5a852..ef211ef98610c 100644 --- a/pkg/networkpath/traceroute/runner.go +++ b/pkg/networkpath/traceroute/runner.go @@ -266,6 +266,7 @@ func (r *Runner) processTCPResults(res *tcp.Results, hname string, destinationHo if !hop.IP.Equal(net.IP{}) { isReachable = true hopname = hop.IP.String() + hostname = hopname // setting to ip address for now, reverse DNS lookup will override hostname field later } npHop := payload.NetworkPathHop{ diff --git a/pkg/obfuscate/credit_cards.go b/pkg/obfuscate/credit_cards.go index b172b2c173e97..3246d6b0a4fdd 100644 --- a/pkg/obfuscate/credit_cards.go +++ b/pkg/obfuscate/credit_cards.go @@ -11,12 +11,18 @@ import ( // creditCard maintains credit card obfuscation state and processing. type creditCard struct { - luhn bool + luhn bool + keepValues map[string]struct{} } func newCCObfuscator(config *CreditCardsConfig) *creditCard { + keepValues := make(map[string]struct{}, len(config.KeepValues)) + for _, sk := range config.KeepValues { + keepValues[sk] = struct{}{} + } return &creditCard{ - luhn: config.Luhn, + luhn: config.Luhn, + keepValues: keepValues, } } @@ -57,6 +63,9 @@ func (o *Obfuscator) ObfuscateCreditCardNumber(key, val string) string { if strings.HasPrefix(key, "_") { return val } + if _, ok := o.ccObfuscator.keepValues[key]; ok { + return val + } if o.ccObfuscator.IsCardNumber(val) { return "?" 
} diff --git a/pkg/obfuscate/credit_cards_test.go b/pkg/obfuscate/credit_cards_test.go index c9237b359543a..7024b3b737d6c 100644 --- a/pkg/obfuscate/credit_cards_test.go +++ b/pkg/obfuscate/credit_cards_test.go @@ -273,6 +273,14 @@ func TestIINIsSensitive(t *testing.T) { }) } +func TestCCKeepValues(t *testing.T) { + possibleCard := "378282246310005" + o := NewObfuscator(Config{CreditCard: CreditCardsConfig{Enabled: true, KeepValues: []string{"skip_me"}}}) + + assert.Equal(t, possibleCard, o.ObfuscateCreditCardNumber("skip_me", possibleCard)) + assert.Equal(t, "?", o.ObfuscateCreditCardNumber("obfuscate_me", possibleCard)) +} + func BenchmarkIsSensitive(b *testing.B) { run := func(str string, luhn bool) func(b *testing.B) { cco := &creditCard{luhn: luhn} diff --git a/pkg/obfuscate/obfuscate.go b/pkg/obfuscate/obfuscate.go index 3032fb54f38a4..c9de3906ed2a2 100644 --- a/pkg/obfuscate/obfuscate.go +++ b/pkg/obfuscate/obfuscate.go @@ -264,6 +264,10 @@ type CreditCardsConfig struct { // https://dev.to/shiraazm/goluhn-a-simple-library-for-generating-calculating-and-verifying-luhn-numbers-588j // It reduces false positives, but increases the CPU time X3. Luhn bool `mapstructure:"luhn"` + + // KeepValues specifies tag keys that are known to not ever contain credit cards + // and therefore their values can be kept. + KeepValues []string `mapstructure:"keep_values"` } // NewObfuscator creates a new obfuscator diff --git a/pkg/security/probe/config/config.go b/pkg/security/probe/config/config.go index b008c587b2782..ab86af5410f4e 100644 --- a/pkg/security/probe/config/config.go +++ b/pkg/security/probe/config/config.go @@ -136,6 +136,12 @@ type Config struct { // NetworkRawPacketEnabled defines if the network raw packet is enabled NetworkRawPacketEnabled bool + // NetworkPrivateIPRanges defines the list of IP that should be considered private + NetworkPrivateIPRanges []string + + // NetworkExtraPrivateIPRanges defines the list of extra IP that should be considered private + NetworkExtraPrivateIPRanges []string + // StatsPollingInterval determines how often metrics should be polled StatsPollingInterval time.Duration @@ -174,6 +180,8 @@ func NewConfig() (*Config, error) { NetworkEnabled: getBool("network.enabled"), NetworkIngressEnabled: getBool("network.ingress.enabled"), NetworkRawPacketEnabled: getBool("network.raw_packet.enabled"), + NetworkPrivateIPRanges: getStringSlice("network.private_ip_ranges"), + NetworkExtraPrivateIPRanges: getStringSlice("network.extra_private_ip_ranges"), StatsPollingInterval: time.Duration(getInt("events_stats.polling_interval")) * time.Second, SyscallsMonitorEnabled: getBool("syscalls_monitor.enabled"), diff --git a/pkg/security/probe/field_handlers.go b/pkg/security/probe/field_handlers.go index 8f137aa235cdd..41a85131494ee 100644 --- a/pkg/security/probe/field_handlers.go +++ b/pkg/security/probe/field_handlers.go @@ -10,10 +10,12 @@ package probe import ( "cmp" + "fmt" "slices" "strings" "github.com/DataDog/datadog-agent/pkg/security/config" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" ) @@ -75,21 +77,60 @@ func getProcessService(config *config.Config, entry *model.ProcessCacheEntry) (s return config.RuntimeSecurity.HostServiceName, false } -type pceResolver interface { - ResolveProcessCacheEntry(ev *model.Event) (*model.ProcessCacheEntry, bool) +// BaseFieldHandlers holds the base field handlers +type BaseFieldHandlers struct { + config *config.Config + 
privateCIDRs eval.CIDRValues + hostname string } -func resolveService(cfg *config.Config, fh pceResolver, ev *model.Event, e *model.BaseEvent) string { +// NewBaseFieldHandlers creates a new BaseFieldHandlers +func NewBaseFieldHandlers(cfg *config.Config, hostname string) (*BaseFieldHandlers, error) { + bfh := &BaseFieldHandlers{ + config: cfg, + hostname: hostname, + } + + for _, cidr := range cfg.Probe.NetworkPrivateIPRanges { + if err := bfh.privateCIDRs.AppendCIDR(cidr); err != nil { + return nil, fmt.Errorf("error adding private IP range %s: %w", cidr, err) + } + } + for _, cidr := range cfg.Probe.NetworkExtraPrivateIPRanges { + if err := bfh.privateCIDRs.AppendCIDR(cidr); err != nil { + return nil, fmt.Errorf("error adding extra private IP range %s: %w", cidr, err) + } + } + + return bfh, nil +} + +// ResolveIsIPPublic resolves if the IP is public +func (bfh *BaseFieldHandlers) ResolveIsIPPublic(_ *model.Event, ipCtx *model.IPPortContext) bool { + if !ipCtx.IsPublicResolved { + ipCtx.IsPublic = !bfh.privateCIDRs.Contains(&ipCtx.IPNet) + ipCtx.IsPublicResolved = true + } + return ipCtx.IsPublic +} + +// ResolveHostname resolve the hostname +func (bfh *BaseFieldHandlers) ResolveHostname(_ *model.Event, _ *model.BaseEvent) string { + return bfh.hostname +} + +// ResolveService returns the service tag based on the process context +func (bfh *BaseFieldHandlers) ResolveService(ev *model.Event, e *model.BaseEvent) string { if e.Service != "" { return e.Service } - entry, _ := fh.ResolveProcessCacheEntry(ev) + entry, _ := ev.ResolveProcessCacheEntry() if entry == nil { return "" } - service, ok := getProcessService(cfg, entry) + service, ok := getProcessService(bfh.config, entry) if ok { e.Service = service } diff --git a/pkg/security/probe/field_handlers_ebpf.go b/pkg/security/probe/field_handlers_ebpf.go index 9aa9849dd2a6e..2a4628ac4413d 100644 --- a/pkg/security/probe/field_handlers_ebpf.go +++ b/pkg/security/probe/field_handlers_ebpf.go @@ -28,12 +28,25 @@ import ( // EBPFFieldHandlers defines a field handlers type EBPFFieldHandlers struct { - config *config.Config + *BaseFieldHandlers resolvers *resolvers.EBPFResolvers - hostname string onDemand *OnDemandProbesManager } +// NewEBPFFieldHandlers returns a new EBPFFieldHandlers +func NewEBPFFieldHandlers(config *config.Config, resolvers *resolvers.EBPFResolvers, hostname string, onDemand *OnDemandProbesManager) (*EBPFFieldHandlers, error) { + bfh, err := NewBaseFieldHandlers(config, hostname) + if err != nil { + return nil, err + } + + return &EBPFFieldHandlers{ + BaseFieldHandlers: bfh, + resolvers: resolvers, + onDemand: onDemand, + }, nil +} + // ResolveProcessCacheEntry queries the ProcessResolver to retrieve the ProcessContext of the event func (fh *EBPFFieldHandlers) ResolveProcessCacheEntry(ev *model.Event) (*model.ProcessCacheEntry, bool) { if ev.PIDContext.IsKworker { @@ -400,11 +413,6 @@ func (fh *EBPFFieldHandlers) ResolveEventTimestamp(ev *model.Event, e *model.Bas return int(fh.ResolveEventTime(ev, e).UnixNano()) } -// ResolveService returns the service tag based on the process context -func (fh *EBPFFieldHandlers) ResolveService(ev *model.Event, e *model.BaseEvent) string { - return resolveService(fh.config, fh, ev, e) -} - // ResolveEventTime resolves the monolitic kernel event timestamp to an absolute time func (fh *EBPFFieldHandlers) ResolveEventTime(ev *model.Event, _ *model.BaseEvent) time.Time { if ev.Timestamp.IsZero() { @@ -671,11 +679,6 @@ func (fh *EBPFFieldHandlers) ResolveSyscallCtxArgsInt3(ev *model.Event, e 
*model return int(e.IntArg3) } -// ResolveHostname resolve the hostname -func (fh *EBPFFieldHandlers) ResolveHostname(_ *model.Event, _ *model.BaseEvent) string { - return fh.hostname -} - // ResolveOnDemandName resolves the on-demand event name func (fh *EBPFFieldHandlers) ResolveOnDemandName(_ *model.Event, e *model.OnDemandEvent) string { if fh.onDemand == nil { diff --git a/pkg/security/probe/field_handlers_ebpfless.go b/pkg/security/probe/field_handlers_ebpfless.go index b2b66d75f99c9..0a6f9ac835c4d 100644 --- a/pkg/security/probe/field_handlers_ebpfless.go +++ b/pkg/security/probe/field_handlers_ebpfless.go @@ -23,14 +23,21 @@ import ( // EBPFLessFieldHandlers defines a field handlers type EBPFLessFieldHandlers struct { - config *config.Config + *BaseFieldHandlers resolvers *resolvers.EBPFLessResolvers - hostname string } -// ResolveService returns the service tag based on the process context -func (fh *EBPFLessFieldHandlers) ResolveService(ev *model.Event, e *model.BaseEvent) string { - return resolveService(fh.config, fh, ev, e) +// NewEBPFLessFieldHandlers returns a new EBPFLessFieldHandlers +func NewEBPFLessFieldHandlers(config *config.Config, resolvers *resolvers.EBPFLessResolvers, hostname string) (*EBPFLessFieldHandlers, error) { + bfh, err := NewBaseFieldHandlers(config, hostname) + if err != nil { + return nil, err + } + + return &EBPFLessFieldHandlers{ + BaseFieldHandlers: bfh, + resolvers: resolvers, + }, nil } // ResolveProcessCacheEntry queries the ProcessResolver to retrieve the ProcessContext of the event @@ -413,11 +420,6 @@ func (fh *EBPFLessFieldHandlers) ResolveSyscallCtxArgsInt3(_ *model.Event, e *mo return int(e.IntArg3) } -// ResolveHostname resolve the hostname -func (fh *EBPFLessFieldHandlers) ResolveHostname(_ *model.Event, _ *model.BaseEvent) string { - return fh.hostname -} - // ResolveOnDemandName resolves the on-demand event name func (fh *EBPFLessFieldHandlers) ResolveOnDemandName(_ *model.Event, _ *model.OnDemandEvent) string { return "" diff --git a/pkg/security/probe/field_handlers_test.go b/pkg/security/probe/field_handlers_test.go new file mode 100644 index 0000000000000..52da4fef61171 --- /dev/null +++ b/pkg/security/probe/field_handlers_test.go @@ -0,0 +1,84 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux || windows + +// Package probe holds probe related files +package probe + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" +) + +func TestIsIPPublic(t *testing.T) { + bfh := BaseFieldHandlers{} + for _, cidr := range setup.DefaultPrivateIPCIDRs { + if err := bfh.privateCIDRs.AppendCIDR(cidr); err != nil { + t.Fatalf("failed to append CIDR %s: %v", cidr, err) + } + } + + testCases := []struct { + name string + ip string + expected bool + }{ + { + name: "public 1", + ip: "11.1.1.1", + expected: true, + }, + { + name: "public 2", + ip: "172.48.1.1", + expected: true, + }, + { + name: "public 3", + ip: "192.167.1.1", + expected: true, + }, + { + name: "private in 24-bit block", + ip: "10.11.11.11", + expected: false, + }, + { + name: "private in 20-bit block", + ip: "172.24.11.11", + expected: false, + }, + { + name: "private in 16-bit block", + ip: "192.168.11.11", + expected: false, + }, + { + name: "IPv6 ULA", + ip: "fdf8:b35f:91b1::11", + expected: false, + }, + { + name: "IPv6 Global", + ip: "2001:0:0eab:dead::a0:abcd:4e", + expected: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + assert.Equal(t, testCase.expected, bfh.ResolveIsIPPublic(model.NewFakeEvent(), &model.IPPortContext{ + IPNet: *eval.IPNetFromIP(net.ParseIP(testCase.ip)), + })) + }) + } +} diff --git a/pkg/security/probe/field_handlers_windows.go b/pkg/security/probe/field_handlers_windows.go index 937b9104c85e4..6f8a8c77f0338 100644 --- a/pkg/security/probe/field_handlers_windows.go +++ b/pkg/security/probe/field_handlers_windows.go @@ -16,9 +16,21 @@ import ( // FieldHandlers defines a field handlers type FieldHandlers struct { - config *config.Config + *BaseFieldHandlers resolvers *resolvers.Resolvers - hostname string +} + +// NewFieldHandlers returns a new FieldHandlers +func NewFieldHandlers(config *config.Config, resolvers *resolvers.Resolvers, hostname string) (*FieldHandlers, error) { + bfh, err := NewBaseFieldHandlers(config, hostname) + if err != nil { + return nil, err + } + + return &FieldHandlers{ + BaseFieldHandlers: bfh, + resolvers: resolvers, + }, nil } // ResolveEventTime resolves the monolitic kernel event timestamp to an absolute time @@ -78,11 +90,6 @@ func (fh *FieldHandlers) ResolveProcessCacheEntry(ev *model.Event) (*model.Proce return ev.ProcessCacheEntry, true } -// ResolveService returns the service tag based on the process context -func (fh *FieldHandlers) ResolveService(ev *model.Event, e *model.BaseEvent) string { - return resolveService(fh.config, fh, ev, e) -} - // ResolveProcessCmdLineScrubbed returns a scrubbed version of the cmdline func (fh *FieldHandlers) ResolveProcessCmdLineScrubbed(_ *model.Event, e *model.Process) string { return fh.resolvers.ProcessResolver.GetProcessCmdLineScrubbed(e) @@ -150,8 +157,3 @@ func (fh *FieldHandlers) ResolveNewSecurityDescriptor(_ *model.Event, cp *model. 
} return hrsd } - -// ResolveHostname resolve the hostname -func (fh *FieldHandlers) ResolveHostname(_ *model.Event, _ *model.BaseEvent) string { - return fh.hostname -} diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 4d2506f4f0005..ae37c6f4f6b4e 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -2021,7 +2021,11 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts, telemetry tele } } - p.fieldHandlers = &EBPFFieldHandlers{config: config, resolvers: p.Resolvers, hostname: hostname, onDemand: p.onDemandManager} + fh, err := NewEBPFFieldHandlers(config, p.Resolvers, hostname, p.onDemandManager) + if err != nil { + return nil, err + } + p.fieldHandlers = fh if useRingBuffers { p.eventStream = ringbuffer.New(p.handleEvent) diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 4ca6146e185b9..380a0053410f3 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -694,9 +694,11 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts, telemetry hostname = "unknown" } - p.fieldHandlers = &EBPFLessFieldHandlers{config: config, resolvers: p.Resolvers, hostname: hostname} - - p.event = p.NewEvent() + fh, err := NewEBPFLessFieldHandlers(config, p.Resolvers, hostname) + if err != nil { + return nil, err + } + p.fieldHandlers = fh // be sure to zero the probe event before everything else p.zeroEvent() diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index ef3a1ae606844..2a0ee98f0dba3 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -1234,7 +1234,11 @@ func NewWindowsProbe(probe *Probe, config *config.Config, opts Opts, telemetry t hostname = "unknown" } - p.fieldHandlers = &FieldHandlers{config: config, resolvers: p.Resolvers, hostname: hostname} + fh, err := NewFieldHandlers(config, p.Resolvers, hostname) + if err != nil { + return nil, err + } + p.fieldHandlers = fh p.event = p.NewEvent() diff --git a/pkg/security/resolvers/sbom/resolver.go b/pkg/security/resolvers/sbom/resolver.go index d67f3bffa0bb9..4754634c2f9c3 100644 --- a/pkg/security/resolvers/sbom/resolver.go +++ b/pkg/security/resolvers/sbom/resolver.go @@ -242,7 +242,7 @@ func (r *Resolver) Start(ctx context.Context) error { if err := retry.Do(func() error { return r.analyzeWorkload(sbom) }, retry.Attempts(maxSBOMGenerationRetries), retry.Delay(200*time.Millisecond)); err != nil { - seclog.Errorf("%s", err.Error()) + seclog.Warnf("%s", err.Error()) } } } diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go index edbd810554799..1d7cd758a4dad 100644 --- a/pkg/security/secl/model/accessors_unix.go +++ b/pkg/security/secl/model/accessors_unix.go @@ -60,6 +60,8 @@ func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { switch field { case "network.destination.ip": return []eval.EventType{"dns", "imds"} + case "network.destination.is_public": + return []eval.EventType{"dns", "imds"} case "network.destination.port": return []eval.EventType{"dns", "imds"} case "network.device.ifname": @@ -72,6 +74,8 @@ func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { return []eval.EventType{"dns", "imds"} case "network.source.ip": return []eval.EventType{"dns", "imds"} + case "network.source.is_public": + return []eval.EventType{"dns", "imds"} case "network.source.port": return []eval.EventType{"dns", "imds"} } @@ 
-97,6 +101,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "bind.addr.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Bind.Addr) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil case "bind.addr.port": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -959,6 +972,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "connect.addr.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil case "connect.addr.port": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -995,6 +1017,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "connect.server.addr.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil case "connect.server.addr.port": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -4111,6 +4142,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "network.destination.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Destination) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil case "network.destination.port": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -4165,6 +4205,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "network.source.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Source) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil case "network.source.port": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -4502,6 +4551,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "packet.destination.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Destination) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil case "packet.destination.port": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -4566,6 +4624,15 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "packet.source.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Source) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil case 
"packet.source.port": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -20547,6 +20614,7 @@ func (ev *Event) GetFields() []eval.Field { return []eval.Field{ "bind.addr.family", "bind.addr.ip", + "bind.addr.is_public", "bind.addr.port", "bind.retval", "bpf.cmd", @@ -20641,10 +20709,12 @@ func (ev *Event) GetFields() []eval.Field { "chown.syscall.uid", "connect.addr.family", "connect.addr.ip", + "connect.addr.is_public", "connect.addr.port", "connect.retval", "connect.server.addr.family", "connect.server.addr.ip", + "connect.server.addr.is_public", "connect.server.addr.port", "container.created_at", "container.id", @@ -20963,12 +21033,14 @@ func (ev *Event) GetFields() []eval.Field { "mprotect.retval", "mprotect.vm_protection", "network.destination.ip", + "network.destination.is_public", "network.destination.port", "network.device.ifname", "network.l3_protocol", "network.l4_protocol", "network.size", "network.source.ip", + "network.source.is_public", "network.source.port", "ondemand.arg1.str", "ondemand.arg1.uint", @@ -21006,6 +21078,7 @@ func (ev *Event) GetFields() []eval.Field { "open.syscall.mode", "open.syscall.path", "packet.destination.ip", + "packet.destination.is_public", "packet.destination.port", "packet.device.ifname", "packet.filter", @@ -21013,6 +21086,7 @@ func (ev *Event) GetFields() []eval.Field { "packet.l4_protocol", "packet.size", "packet.source.ip", + "packet.source.is_public", "packet.source.port", "packet.tls.version", "process.ancestors.args", @@ -21958,6 +22032,8 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return int(ev.Bind.AddrFamily), nil case "bind.addr.ip": return ev.Bind.Addr.IPNet, nil + case "bind.addr.is_public": + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Bind.Addr), nil case "bind.addr.port": return int(ev.Bind.Addr.Port), nil case "bind.retval": @@ -22150,6 +22226,8 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return int(ev.Connect.AddrFamily), nil case "connect.addr.ip": return ev.Connect.Addr.IPNet, nil + case "connect.addr.is_public": + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr), nil case "connect.addr.port": return int(ev.Connect.Addr.Port), nil case "connect.retval": @@ -22158,6 +22236,8 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return int(ev.Connect.AddrFamily), nil case "connect.server.addr.ip": return ev.Connect.Addr.IPNet, nil + case "connect.server.addr.is_public": + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr), nil case "connect.server.addr.port": return int(ev.Connect.Addr.Port), nil case "container.created_at": @@ -23010,6 +23090,8 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return ev.MProtect.VMProtection, nil case "network.destination.ip": return ev.NetworkContext.Destination.IPNet, nil + case "network.destination.is_public": + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Destination), nil case "network.destination.port": return int(ev.NetworkContext.Destination.Port), nil case "network.device.ifname": @@ -23022,6 +23104,8 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return int(ev.NetworkContext.Size), nil case "network.source.ip": return ev.NetworkContext.Source.IPNet, nil + case "network.source.is_public": + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Source), nil case "network.source.port": return int(ev.NetworkContext.Source.Port), nil case "ondemand.arg1.str": @@ -23096,6 
+23180,8 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Open.SyscallContext), nil case "packet.destination.ip": return ev.RawPacket.NetworkContext.Destination.IPNet, nil + case "packet.destination.is_public": + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Destination), nil case "packet.destination.port": return int(ev.RawPacket.NetworkContext.Destination.Port), nil case "packet.device.ifname": @@ -23110,6 +23196,8 @@ func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { return int(ev.RawPacket.NetworkContext.Size), nil case "packet.source.ip": return ev.RawPacket.NetworkContext.Source.IPNet, nil + case "packet.source.is_public": + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Source), nil case "packet.source.port": return int(ev.RawPacket.NetworkContext.Source.Port), nil case "packet.tls.version": @@ -28650,6 +28738,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "bind", nil case "bind.addr.ip": return "bind", nil + case "bind.addr.is_public": + return "bind", nil case "bind.addr.port": return "bind", nil case "bind.retval": @@ -28838,6 +28928,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "connect", nil case "connect.addr.ip": return "connect", nil + case "connect.addr.is_public": + return "connect", nil case "connect.addr.port": return "connect", nil case "connect.retval": @@ -28846,6 +28938,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "connect", nil case "connect.server.addr.ip": return "connect", nil + case "connect.server.addr.is_public": + return "connect", nil case "connect.server.addr.port": return "connect", nil case "container.created_at": @@ -29482,6 +29576,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "mprotect", nil case "network.destination.ip": return "", nil + case "network.destination.is_public": + return "", nil case "network.destination.port": return "", nil case "network.device.ifname": @@ -29494,6 +29590,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "", nil case "network.source.ip": return "", nil + case "network.source.is_public": + return "", nil case "network.source.port": return "", nil case "ondemand.arg1.str": @@ -29568,6 +29666,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "open", nil case "packet.destination.ip": return "packet", nil + case "packet.destination.is_public": + return "packet", nil case "packet.destination.port": return "packet", nil case "packet.device.ifname": @@ -29582,6 +29682,8 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { return "packet", nil case "packet.source.ip": return "packet", nil + case "packet.source.is_public": + return "packet", nil case "packet.source.port": return "packet", nil case "packet.tls.version": @@ -31465,6 +31567,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Int, nil case "bind.addr.ip": return reflect.Struct, nil + case "bind.addr.is_public": + return reflect.Bool, nil case "bind.addr.port": return reflect.Int, nil case "bind.retval": @@ -31653,6 +31757,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Int, nil case "connect.addr.ip": return reflect.Struct, nil + case 
"connect.addr.is_public": + return reflect.Bool, nil case "connect.addr.port": return reflect.Int, nil case "connect.retval": @@ -31661,6 +31767,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Int, nil case "connect.server.addr.ip": return reflect.Struct, nil + case "connect.server.addr.is_public": + return reflect.Bool, nil case "connect.server.addr.port": return reflect.Int, nil case "container.created_at": @@ -32297,6 +32405,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Int, nil case "network.destination.ip": return reflect.Struct, nil + case "network.destination.is_public": + return reflect.Bool, nil case "network.destination.port": return reflect.Int, nil case "network.device.ifname": @@ -32309,6 +32419,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Int, nil case "network.source.ip": return reflect.Struct, nil + case "network.source.is_public": + return reflect.Bool, nil case "network.source.port": return reflect.Int, nil case "ondemand.arg1.str": @@ -32383,6 +32495,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.String, nil case "packet.destination.ip": return reflect.Struct, nil + case "packet.destination.is_public": + return reflect.Bool, nil case "packet.destination.port": return reflect.Int, nil case "packet.device.ifname": @@ -32397,6 +32511,8 @@ func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { return reflect.Int, nil case "packet.source.ip": return reflect.Struct, nil + case "packet.source.is_public": + return reflect.Bool, nil case "packet.source.port": return reflect.Int, nil case "packet.tls.version": @@ -34293,6 +34409,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.Bind.Addr.IPNet = rv return nil + case "bind.addr.is_public": + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "Bind.Addr.IsPublic"} + } + ev.Bind.Addr.IsPublic = rv + return nil case "bind.addr.port": rv, ok := value.(int) if !ok { @@ -34959,6 +35082,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.Connect.Addr.IPNet = rv return nil + case "connect.addr.is_public": + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.IsPublic"} + } + ev.Connect.Addr.IsPublic = rv + return nil case "connect.addr.port": rv, ok := value.(int) if !ok { @@ -34993,6 +35123,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.Connect.Addr.IPNet = rv return nil + case "connect.server.addr.is_public": + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.IsPublic"} + } + ev.Connect.Addr.IsPublic = rv + return nil case "connect.server.addr.port": rv, ok := value.(int) if !ok { @@ -37763,6 +37900,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.NetworkContext.Destination.IPNet = rv return nil + case "network.destination.is_public": + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Destination.IsPublic"} + } + ev.NetworkContext.Destination.IsPublic = rv + return nil case "network.destination.port": rv, ok := value.(int) if !ok { @@ -37814,6 +37958,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.NetworkContext.Source.IPNet = rv return nil + case "network.source.is_public": + rv, ok := value.(bool) + if !ok 
{ + return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Source.IsPublic"} + } + ev.NetworkContext.Source.IsPublic = rv + return nil case "network.source.port": rv, ok := value.(int) if !ok { @@ -38075,6 +38226,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.RawPacket.NetworkContext.Destination.IPNet = rv return nil + case "packet.destination.is_public": + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Destination.IsPublic"} + } + ev.RawPacket.NetworkContext.Destination.IsPublic = rv + return nil case "packet.destination.port": rv, ok := value.(int) if !ok { @@ -38133,6 +38291,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } ev.RawPacket.NetworkContext.Source.IPNet = rv return nil + case "packet.source.is_public": + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Source.IsPublic"} + } + ev.RawPacket.NetworkContext.Source.IsPublic = rv + return nil case "packet.source.port": rv, ok := value.(int) if !ok { diff --git a/pkg/security/secl/model/field_accessors_unix.go b/pkg/security/secl/model/field_accessors_unix.go index 9df3c15cb4cb9..9d7d7dea7488a 100644 --- a/pkg/security/secl/model/field_accessors_unix.go +++ b/pkg/security/secl/model/field_accessors_unix.go @@ -30,6 +30,14 @@ func (ev *Event) GetBindAddrIp() net.IPNet { return ev.Bind.Addr.IPNet } +// GetBindAddrIsPublic returns the value of the field, resolving if necessary +func (ev *Event) GetBindAddrIsPublic() bool { + if ev.GetEventType().String() != "bind" { + return false + } + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Bind.Addr) +} + // GetBindAddrPort returns the value of the field, resolving if necessary func (ev *Event) GetBindAddrPort() uint16 { if ev.GetEventType().String() != "bind" { @@ -914,6 +922,14 @@ func (ev *Event) GetConnectAddrIp() net.IPNet { return ev.Connect.Addr.IPNet } +// GetConnectAddrIsPublic returns the value of the field, resolving if necessary +func (ev *Event) GetConnectAddrIsPublic() bool { + if ev.GetEventType().String() != "connect" { + return false + } + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) +} + // GetConnectAddrPort returns the value of the field, resolving if necessary func (ev *Event) GetConnectAddrPort() uint16 { if ev.GetEventType().String() != "connect" { @@ -946,6 +962,14 @@ func (ev *Event) GetConnectServerAddrIp() net.IPNet { return ev.Connect.Addr.IPNet } +// GetConnectServerAddrIsPublic returns the value of the field, resolving if necessary +func (ev *Event) GetConnectServerAddrIsPublic() bool { + if ev.GetEventType().String() != "connect" { + return false + } + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) +} + // GetConnectServerAddrPort returns the value of the field, resolving if necessary func (ev *Event) GetConnectServerAddrPort() uint16 { if ev.GetEventType().String() != "connect" { @@ -4447,6 +4471,11 @@ func (ev *Event) GetNetworkDestinationIp() net.IPNet { return ev.NetworkContext.Destination.IPNet } +// GetNetworkDestinationIsPublic returns the value of the field, resolving if necessary +func (ev *Event) GetNetworkDestinationIsPublic() bool { + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Destination) +} + // GetNetworkDestinationPort returns the value of the field, resolving if necessary func (ev *Event) GetNetworkDestinationPort() uint16 { return ev.NetworkContext.Destination.Port @@ -4477,6 +4506,11 @@ func (ev 
*Event) GetNetworkSourceIp() net.IPNet { return ev.NetworkContext.Source.IPNet } +// GetNetworkSourceIsPublic returns the value of the field, resolving if necessary +func (ev *Event) GetNetworkSourceIsPublic() bool { + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Source) +} + // GetNetworkSourcePort returns the value of the field, resolving if necessary func (ev *Event) GetNetworkSourcePort() uint16 { return ev.NetworkContext.Source.Port @@ -4818,6 +4852,14 @@ func (ev *Event) GetPacketDestinationIp() net.IPNet { return ev.RawPacket.NetworkContext.Destination.IPNet } +// GetPacketDestinationIsPublic returns the value of the field, resolving if necessary +func (ev *Event) GetPacketDestinationIsPublic() bool { + if ev.GetEventType().String() != "packet" { + return false + } + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Destination) +} + // GetPacketDestinationPort returns the value of the field, resolving if necessary func (ev *Event) GetPacketDestinationPort() uint16 { if ev.GetEventType().String() != "packet" { @@ -4874,6 +4916,14 @@ func (ev *Event) GetPacketSourceIp() net.IPNet { return ev.RawPacket.NetworkContext.Source.IPNet } +// GetPacketSourceIsPublic returns the value of the field, resolving if necessary +func (ev *Event) GetPacketSourceIsPublic() bool { + if ev.GetEventType().String() != "packet" { + return false + } + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Source) +} + // GetPacketSourcePort returns the value of the field, resolving if necessary func (ev *Event) GetPacketSourcePort() uint16 { if ev.GetEventType().String() != "packet" { diff --git a/pkg/security/secl/model/field_handlers_unix.go b/pkg/security/secl/model/field_handlers_unix.go index 3a019888dbe7e..1140ebbef5def 100644 --- a/pkg/security/secl/model/field_handlers_unix.go +++ b/pkg/security/secl/model/field_handlers_unix.go @@ -37,7 +37,9 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent) } _ = ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent) + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Destination) _ = ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.NetworkContext.Device) + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Source) if !forADs { _ = ev.FieldHandlers.ResolveProcessArgs(ev, &ev.BaseEvent.ProcessContext.Process) } @@ -233,6 +235,7 @@ func (ev *Event) resolveFields(forADs bool) { // resolve event specific fields switch ev.GetEventType().String() { case "bind": + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Bind.Addr) case "bpf": case "capset": case "chdir": @@ -295,6 +298,8 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Chown.SyscallContext) } case "connect": + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) case "dns": case "exec": if ev.Exec.Process.IsNotKworker() { @@ -584,6 +589,8 @@ func (ev *Event) resolveFields(forADs bool) { } case "packet": _ = ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.RawPacket.NetworkContext.Device) + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Source) + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Destination) case "ptrace": if ev.PTrace.Tracee.Process.IsNotKworker() { _ = ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) @@ -1129,6 +1136,7 @@ type 
FieldHandlers interface { ResolveFilePath(ev *Event, e *FileEvent) string ResolveHashesFromEvent(ev *Event, e *FileEvent) []string ResolveHostname(ev *Event, e *BaseEvent) string + ResolveIsIPPublic(ev *Event, e *IPPortContext) bool ResolveK8SGroups(ev *Event, e *UserSessionContext) []string ResolveK8SUID(ev *Event, e *UserSessionContext) string ResolveK8SUsername(ev *Event, e *UserSessionContext) string @@ -1240,6 +1248,9 @@ func (dfh *FakeFieldHandlers) ResolveHashesFromEvent(ev *Event, e *FileEvent) [] func (dfh *FakeFieldHandlers) ResolveHostname(ev *Event, e *BaseEvent) string { return string(e.Hostname) } +func (dfh *FakeFieldHandlers) ResolveIsIPPublic(ev *Event, e *IPPortContext) bool { + return bool(e.IsPublic) +} func (dfh *FakeFieldHandlers) ResolveK8SGroups(ev *Event, e *UserSessionContext) []string { return []string(e.K8SGroups) } diff --git a/pkg/security/secl/model/model.go b/pkg/security/secl/model/model.go index fc2a06c89f794..196b13903b99e 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -90,8 +90,10 @@ type SecurityProfileContext struct { // IPPortContext is used to hold an IP and Port type IPPortContext struct { - IPNet net.IPNet `field:"ip"` // SECLDoc[ip] Definition:`IP address` - Port uint16 `field:"port"` // SECLDoc[port] Definition:`Port number` + IPNet net.IPNet `field:"ip"` // SECLDoc[ip] Definition:`IP address` + Port uint16 `field:"port"` // SECLDoc[port] Definition:`Port number` + IsPublic bool `field:"is_public,handler:ResolveIsIPPublic"` // SECLDoc[is_public] Definition:`Whether the IP address belongs to a public network` + IsPublicResolved bool `field:"-"` } // NetworkContext represents the network context of the event diff --git a/pkg/security/seclwin/model/model.go b/pkg/security/seclwin/model/model.go index fc2a06c89f794..196b13903b99e 100644 --- a/pkg/security/seclwin/model/model.go +++ b/pkg/security/seclwin/model/model.go @@ -90,8 +90,10 @@ type SecurityProfileContext struct { // IPPortContext is used to hold an IP and Port type IPPortContext struct { - IPNet net.IPNet `field:"ip"` // SECLDoc[ip] Definition:`IP address` - Port uint16 `field:"port"` // SECLDoc[port] Definition:`Port number` + IPNet net.IPNet `field:"ip"` // SECLDoc[ip] Definition:`IP address` + Port uint16 `field:"port"` // SECLDoc[port] Definition:`Port number` + IsPublic bool `field:"is_public,handler:ResolveIsIPPublic"` // SECLDoc[is_public] Definition:`Whether the IP address belongs to a public network` + IsPublicResolved bool `field:"-"` } // NetworkContext represents the network context of the event diff --git a/pkg/trace/agent/obfuscate.go b/pkg/trace/agent/obfuscate.go index 7d7d11ccaeb1b..00411697d49b7 100644 --- a/pkg/trace/agent/obfuscate.go +++ b/pkg/trace/agent/obfuscate.go @@ -32,6 +32,7 @@ func (a *Agent) obfuscateSpan(span *pb.Span) { for k, v := range span.Meta { newV := o.ObfuscateCreditCardNumber(k, v) if v != newV { + log.Debugf("obfuscating possible credit card under key %s from service %s", k, span.Service) span.Meta[k] = newV } } diff --git a/pkg/trace/api/otlp.go b/pkg/trace/api/otlp.go index 8badf01d92852..9dda5532f1338 100644 --- a/pkg/trace/api/otlp.go +++ b/pkg/trace/api/otlp.go @@ -61,6 +61,33 @@ type OTLPReceiver struct { // NewOTLPReceiver returns a new OTLPReceiver which sends any incoming traces down the out channel. 
func NewOTLPReceiver(out chan<- *Payload, cfg *config.AgentConfig, statsd statsd.ClientInterface, timing timing.Reporter) *OTLPReceiver { + operationAndResourceNamesV2GateEnabled := cfg.HasFeature("enable_operation_and_resource_name_logic_v2") + operationAndResourceNamesV2GateEnabledVal := 0.0 + if operationAndResourceNamesV2GateEnabled { + operationAndResourceNamesV2GateEnabledVal = 1.0 + } + _ = statsd.Gauge("datadog.trace_agent.otlp.operation_and_resource_names_v2_gate_enabled", operationAndResourceNamesV2GateEnabledVal, nil, 1) + + spanNameAsResourceNameEnabledVal := 0.0 + if cfg.OTLPReceiver.SpanNameAsResourceName { + if operationAndResourceNamesV2GateEnabled { + log.Warnf("Detected SpanNameAsResourceName in config - this feature will be deprecated in a future version, and overrides feature gate \"enable_operation_and_resource_name_logic_v2\". Please remove it and set \"operation.name\" attribute on your spans instead.") + } else { + log.Warnf("Detected SpanNameAsResourceName in config - this feature will be deprecated in a future version. Please remove it, enable feature gate \"enable_operation_and_resource_name_logic_v2\", and set \"operation.name\" attribute on your spans instead.") + } + spanNameAsResourceNameEnabledVal = 1.0 + } + _ = statsd.Gauge("datadog.trace_agent.otlp.span_name_as_resource_name_enabled", spanNameAsResourceNameEnabledVal, nil, 1) + spanNameRemappingsEnabledVal := 0.0 + if cfg.OTLPReceiver.SpanNameRemappings != nil && len(cfg.OTLPReceiver.SpanNameRemappings) > 0 { + if operationAndResourceNamesV2GateEnabled { + log.Warnf("Detected SpanNameRemappings in config - this feature will be deprecated in a future version. Please remove it to access functionality from feature gate \"enable_operation_and_resource_name_logic_v2\".") + } else { + log.Warnf("Detected SpanNameRemappings in config - this feature will be deprecated in a future version. Please remove it and enable feature gate \"enable_operation_and_resource_name_logic_v2\"") + } + spanNameRemappingsEnabledVal = 1.0 + } + _ = statsd.Gauge("datadog.trace_agent.otlp.span_name_remappings_enabled", spanNameRemappingsEnabledVal, nil, 1) computeTopLevelBySpanKindVal := 0.0 if cfg.HasFeature("enable_otlp_compute_top_level_by_span_kind") { computeTopLevelBySpanKindVal = 1.0 @@ -215,7 +242,13 @@ func (o *OTLPReceiver) receiveResourceSpansV2(ctx context.Context, rspans ptrace libspans := rspans.ScopeSpans().At(i) for j := 0; j < libspans.Spans().Len(); j++ { otelspan := libspans.Spans().At(j) - if _, exists := o.ignoreResNames[traceutil.GetOTelResource(otelspan, otelres)]; exists { + var resourceName string + if transform.OperationAndResourceNameV2Enabled(o.conf) { + resourceName = traceutil.GetOTelResourceV2(otelspan, otelres) + } else { + resourceName = traceutil.GetOTelResourceV1(otelspan, otelres) + } + if _, exists := o.ignoreResNames[resourceName]; exists { continue } @@ -583,29 +616,41 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume transform.SetMetaOTLP(span, semconv.OtelStatusDescription, msg) } transform.Status2Error(in.Status(), in.Events(), span) - if span.Name == "" { - name := in.Name() - if !o.conf.OTLPReceiver.SpanNameAsResourceName { - name = traceutil.OTelSpanKindName(in.Kind()) - if lib.Name() != "" { - name = lib.Name() + "." + name - } else { - name = "opentelemetry." 
+ name + if transform.OperationAndResourceNameV2Enabled(o.conf) { + span.Name = traceutil.GetOTelOperationNameV2(in) + } else { + if span.Name == "" { + name := in.Name() + if !o.conf.OTLPReceiver.SpanNameAsResourceName { + name = traceutil.OTelSpanKindName(in.Kind()) + if lib.Name() != "" { + name = lib.Name() + "." + name + } else { + name = "opentelemetry." + name + } } + if v, ok := o.conf.OTLPReceiver.SpanNameRemappings[name]; ok { + name = v + } + span.Name = name } - if v, ok := o.conf.OTLPReceiver.SpanNameRemappings[name]; ok { - name = v - } - span.Name = name } if span.Service == "" { span.Service = "OTLPResourceNoServiceName" } if span.Resource == "" { - if r := resourceFromTags(span.Meta); r != "" { - span.Resource = r + if transform.OperationAndResourceNameV2Enabled(o.conf) { + res := pcommon.NewResource() + for k, v := range rattr { + res.Attributes().PutStr(k, v) + } + span.Resource = traceutil.GetOTelResourceV2(in, res) } else { - span.Resource = in.Name() + if r := resourceFromTags(span.Meta); r != "" { + span.Resource = r + } else { + span.Resource = in.Name() + } } } if span.Type == "" { diff --git a/pkg/trace/api/otlp_test.go b/pkg/trace/api/otlp_test.go index 9ea2f39e3cfb4..15d911d2cc517 100644 --- a/pkg/trace/api/otlp_test.go +++ b/pkg/trace/api/otlp_test.go @@ -227,7 +227,9 @@ func TestOTLPNameRemapping(t *testing.T) { } func testOTLPNameRemapping(enableReceiveResourceSpansV2 bool, t *testing.T) { + // Verify that while EnableOperationAndResourceNamesV2 is in alpha, SpanNameRemappings overrides it cfg := NewTestConfig(t) + cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} if enableReceiveResourceSpansV2 { cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} } @@ -253,6 +255,446 @@ func testOTLPNameRemapping(enableReceiveResourceSpansV2 bool, t *testing.T) { } } +func TestOTLPSpanNameV2(t *testing.T) { + t.Run("ReceiveResourceSpansV1", func(t *testing.T) { + testOTLPSpanNameV2(false, t) + }) + + t.Run("ReceiveResourceSpansV2", func(t *testing.T) { + testOTLPSpanNameV2(true, t) + }) +} + +func testOTLPSpanNameV2(enableReceiveResourceSpansV2 bool, t *testing.T) { + cfg := NewTestConfig(t) + cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} + if enableReceiveResourceSpansV2 { + cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + } + out := make(chan *Payload, 1) + rcv := NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) + require := require.New(t) + for _, tt := range []struct { + in []testutil.OTLPResourceSpan + fn func(*pb.TracerPayload) + }{ + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{{ + Attributes: map[string]interface{}{semconv.AttributeContainerID: "http.method"}, + }}, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("Internal", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindServer, + Attributes: map[string]interface{}{semconv.AttributeHTTPMethod: "GET"}, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("http.server.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + 
Attributes: map[string]interface{}{semconv.AttributeHTTPMethod: "GET"}, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("http.client.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + Attributes: map[string]interface{}{semconv.AttributeDBSystem: "mysql"}, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("mysql.query", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Attributes: map[string]interface{}{semconv.AttributeDBSystem: "mysql"}, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("Internal", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Attributes: map[string]interface{}{semconv.AttributeMessagingSystem: "kafka"}, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("Internal", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Attributes: map[string]interface{}{ + semconv.AttributeMessagingSystem: "kafka", + semconv.AttributeMessagingOperation: "send", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("Internal", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + Attributes: map[string]interface{}{ + semconv.AttributeMessagingSystem: "kafka", + semconv.AttributeMessagingOperation: "send", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("kafka.send", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindServer, + Attributes: map[string]interface{}{ + semconv.AttributeRPCSystem: "aws-api", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("aws-api.server.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + Attributes: map[string]interface{}{ + semconv.AttributeRPCSystem: "aws-api", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("aws.client.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + Attributes: map[string]interface{}{ + semconv.AttributeRPCSystem: "aws-api", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("aws.client.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + 
Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + Attributes: map[string]interface{}{ + semconv.AttributeRPCSystem: "aws-api", + semconv.AttributeRPCService: "s3", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("aws.s3.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + Attributes: map[string]interface{}{ + semconv.AttributeRPCSystem: "grpc", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("grpc.client.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindServer, + Attributes: map[string]interface{}{ + semconv.AttributeRPCSystem: "grpc", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("grpc.server.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindClient, + Attributes: map[string]interface{}{ + semconv.AttributeFaaSInvokedProvider: "gcp", + semconv.AttributeFaaSInvokedName: "foo", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("gcp.foo.invoke", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Attributes: map[string]interface{}{ + semconv.AttributeFaaSInvokedProvider: "gcp", + semconv.AttributeFaaSInvokedName: "foo", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("Internal", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindServer, + Attributes: map[string]interface{}{ + semconv.AttributeFaaSTrigger: "timer", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("timer.invoke", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Attributes: map[string]interface{}{ + semconv.AttributeFaaSTrigger: "timer", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("Internal", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Attributes: map[string]interface{}{ + "graphql.operation.type": "query", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("graphql.server.request", out.Chunks[0].Spans[0].Name) + }, + }, + { + in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindServer, + Attributes: map[string]interface{}{ + "network.protocol.name": "tcp", + }, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("tcp.server.request", out.Chunks[0].Spans[0].Name) + }, + }, + { 
+ in: []testutil.OTLPResourceSpan{ + { + LibName: "libname", + LibVersion: "1.2", + Attributes: map[string]interface{}{}, + Spans: []*testutil.OTLPSpan{ + { + Kind: ptrace.SpanKindServer, + Attributes: map[string]interface{}{}, + }, + }, + }, + }, + fn: func(out *pb.TracerPayload) { + require.Equal("server.request", out.Chunks[0].Spans[0].Name) + }, + }, + } { + t.Run("", func(t *testing.T) { + rspans := testutil.NewOTLPTracesRequest(tt.in).Traces().ResourceSpans().At(0) + rcv.ReceiveResourceSpans(context.Background(), rspans, http.Header{}) + timeout := time.After(500 * time.Millisecond) + select { + case <-timeout: + t.Fatal("timed out") + case p := <-out: + tt.fn(p.TracerPayload) + } + }) + } +} + func TestCreateChunks(t *testing.T) { t.Run("ReceiveResourceSpansV1", func(t *testing.T) { testCreateChunk(false, t) @@ -1163,26 +1605,45 @@ func TestOTLPHelpers(t *testing.T) { func TestOTLPConvertSpan(t *testing.T) { t.Run("ReceiveResourceSpansV1", func(t *testing.T) { - testOTLPConvertSpan(false, t) + t.Run("OperationAndResourceNameV1", func(t *testing.T) { + testOTLPConvertSpan(false, false, t) + }) + + t.Run("OperationAndResourceNameV2", func(t *testing.T) { + testOTLPConvertSpan(false, true, t) + }) }) t.Run("ReceiveResourceSpansV2", func(t *testing.T) { - testOTLPConvertSpan(true, t) + t.Run("OperationAndResourceNameV1", func(t *testing.T) { + testOTLPConvertSpan(true, false, t) + }) + + t.Run("OperationAndResourceNameV2", func(t *testing.T) { + testOTLPConvertSpan(true, true, t) + }) }) } -func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { - now := uint64(otlpTestSpan.StartTimestamp()) +func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, enableOperationAndResourceNameV2 bool, t *testing.T) { cfg := NewTestConfig(t) + now := uint64(otlpTestSpan.StartTimestamp()) if enableReceiveResourceSpansV2 { cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} } + if enableOperationAndResourceNameV2 { + cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} + } o := NewOTLPReceiver(nil, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) for i, tt := range []struct { rattr map[string]string libname string libver string in ptrace.Span + operationNameV1 string + operationNameV2 string + resourceNameV1 string + resourceNameV2 string out *pb.Span outTags map[string]string topLevelOutMetrics map[string]float64 @@ -1193,13 +1654,15 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { "service.version": "v1.2.3", "env": "staging", }, - libname: "ddtracer", - libver: "v2", - in: otlpTestSpan, + libname: "ddtracer", + libver: "v2", + in: otlpTestSpan, + operationNameV1: "ddtracer.server", + operationNameV2: "server.request", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "pylons", - Name: "ddtracer.server", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1321,10 +1784,12 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { StatusMsg: "Error", StatusCode: ptrace.StatusCodeError, }), + operationNameV1: "ddtracer.server", + operationNameV2: "http.server.request", + resourceNameV1: "GET /path", + resourceNameV2: "GET /path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.server", - Resource: "GET /path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1449,10 +1914,12 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { StatusMsg: "Error", StatusCode: 
ptrace.StatusCodeError, }), + operationNameV1: "ddtracer.server", + operationNameV2: "http.server.request", + resourceNameV1: "GET /path", + resourceNameV2: "GET /path", out: &pb.Span{ Service: "pylons", - Name: "ddtracer.server", - Resource: "GET /path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1517,10 +1984,12 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { "analytics.event": true, }, }), + operationNameV1: "READ", + operationNameV2: "READ", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "mongo", - Name: "READ", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1577,10 +2046,12 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { "error.type": "WebSocketDisconnect", }, }), + operationNameV1: "ddtracer.server", + operationNameV2: "ddtracer.server", + resourceNameV1: "POST /uploads/:document_id", + resourceNameV2: "POST", out: &pb.Span{ Service: "document-uploader", - Name: "ddtracer.server", - Resource: "POST /uploads/:document_id", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1637,10 +2108,12 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { "error.type": "WebSocketDisconnect", }, }), + operationNameV1: "ddtracer.server", + operationNameV2: "ddtracer.server", + resourceNameV1: "POST /uploads/:document_id", + resourceNameV2: "POST", out: &pb.Span{ Service: "document-uploader", - Name: "ddtracer.server", - Resource: "POST /uploads/:document_id", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1695,10 +2168,12 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { "error.type": "WebSocketDisconnect", }, }), + operationNameV1: "ddtracer.server", + operationNameV2: "ddtracer.server", + resourceNameV1: "POST /uploads/:document_id", + resourceNameV2: "POST", out: &pb.Span{ Service: "document-uploader", - Name: "ddtracer.server", - Resource: "POST /uploads/:document_id", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1784,6 +2259,13 @@ func testOTLPConvertSpan(enableReceiveResourceSpansV2 bool, t *testing.T) { want.Metrics = nil got.Meta = nil got.Metrics = nil + if enableOperationAndResourceNameV2 { + want.Name = tt.operationNameV2 + want.Resource = tt.resourceNameV2 + } else { + want.Name = tt.operationNameV1 + want.Resource = tt.resourceNameV1 + } assert.Equal(want, got, i) // test new top-level identification feature flag @@ -1828,26 +2310,45 @@ func TestAppendTags(t *testing.T) { func TestOTLPConvertSpanSetPeerService(t *testing.T) { t.Run("ReceiveResourceSpansV1", func(t *testing.T) { - testOTLPConvertSpanSetPeerService(false, t) + t.Run("OperationAndResourceNameV1", func(t *testing.T) { + testOTLPConvertSpanSetPeerService(false, false, t) + }) + + t.Run("OperationAndResourceNameV2", func(t *testing.T) { + testOTLPConvertSpanSetPeerService(false, true, t) + }) }) t.Run("ReceiveResourceSpansV2", func(t *testing.T) { - testOTLPConvertSpanSetPeerService(true, t) + t.Run("OperationAndResourceNameV1", func(t *testing.T) { + testOTLPConvertSpanSetPeerService(true, false, t) + }) + + t.Run("OperationAndResourceNameV2", func(t *testing.T) { + testOTLPConvertSpanSetPeerService(true, true, t) + }) }) } -func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *testing.T) { +func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, 
enableOperationAndResourceNameV2 bool, t *testing.T) { now := uint64(otlpTestSpan.StartTimestamp()) cfg := NewTestConfig(t) if enableReceiveResourceSpansV2 { - cfg.Features["receive_resource_spans_v2"] = struct{}{} + cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + } + if enableOperationAndResourceNameV2 { + cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} } o := NewOTLPReceiver(nil, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) for i, tt := range []struct { - rattr map[string]string - libname string - libver string - in ptrace.Span - out *pb.Span + rattr map[string]string + libname string + libver string + in ptrace.Span + out *pb.Span + operationNameV1 string + operationNameV2 string + resourceNameV1 string + resourceNameV2 string }{ { rattr: map[string]string{ @@ -1868,10 +2369,12 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes "deployment.environment": "prod", }, }), + operationNameV1: "ddtracer.server", + operationNameV2: "server.request", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.server", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1913,10 +2416,12 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes "deployment.environment": "prod", }, }), + operationNameV1: "ddtracer.server", + operationNameV2: "server.request", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.server", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -1959,10 +2464,12 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes "deployment.environment": "prod", }, }), + operationNameV1: "ddtracer.client", + operationNameV2: "postgres.query", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.client", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -2005,10 +2512,12 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes "deployment.environment": "prod", }, }), + operationNameV1: "ddtracer.client", + operationNameV2: "client.request", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.client", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -2050,10 +2559,12 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes "deployment.environment": "prod", }, }), + operationNameV1: "ddtracer.server", + operationNameV2: "server.request", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.server", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -2094,10 +2605,12 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes "deployment.environment": "prod", }, }), + operationNameV1: "ddtracer.server", + operationNameV2: "server.request", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.server", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -2138,10 +2651,12 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes "deployment.environment": "prod", }, 
}), + operationNameV1: "ddtracer.server", + operationNameV2: "server.request", + resourceNameV1: "/path", + resourceNameV2: "/path", out: &pb.Span{ Service: "myservice", - Name: "ddtracer.server", - Resource: "/path", TraceID: 2594128270069917171, SpanID: 2594128270069917171, ParentID: 0, @@ -2179,7 +2694,15 @@ func testOTLPConvertSpanSetPeerService(enableReceiveResourceSpansV2 bool, t *tes } else { got = o.convertSpan(tt.rattr, lib, tt.in) } - assert.Equal(tt.out, got, i) + want := tt.out + if enableOperationAndResourceNameV2 { + want.Name = tt.operationNameV2 + want.Resource = tt.resourceNameV2 + } else { + want.Name = tt.operationNameV1 + want.Resource = tt.resourceNameV1 + } + assert.Equal(want, got, i) }) } } diff --git a/pkg/trace/stats/otel_util.go b/pkg/trace/stats/otel_util.go index d2107862bbfca..1fce97378a030 100644 --- a/pkg/trace/stats/otel_util.go +++ b/pkg/trace/stats/otel_util.go @@ -46,7 +46,13 @@ func OTLPTracesToConcentratorInputs( containerTagsByID := make(map[string][]string) for spanID, otelspan := range spanByID { otelres := resByID[spanID] - if _, exists := ignoreResNames[traceutil.GetOTelResource(otelspan, otelres)]; exists { + var resourceName string + if transform.OperationAndResourceNameV2Enabled(conf) { + resourceName = traceutil.GetOTelResourceV2(otelspan, otelres) + } else { + resourceName = traceutil.GetOTelResourceV1(otelspan, otelres) + } + if _, exists := ignoreResNames[resourceName]; exists { continue } // TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked diff --git a/pkg/trace/stats/otel_util_test.go b/pkg/trace/stats/otel_util_test.go index 1422226c8ecc5..939c27f8c9f77 100644 --- a/pkg/trace/stats/otel_util_test.go +++ b/pkg/trace/stats/otel_util_test.go @@ -214,6 +214,10 @@ func TestProcessOTLPTraces(t *testing.T) { conf.PeerTagsAggregation = tt.peerTagsAggr conf.OTLPReceiver.AttributesTranslator = attributesTranslator conf.OTLPReceiver.SpanNameAsResourceName = tt.spanNameAsResourceName + if conf.OTLPReceiver.SpanNameAsResourceName { + // Verify that while EnableOperationAndResourceNamesV2 is in alpha, SpanNameAsResourceName overrides it + conf.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} + } conf.OTLPReceiver.SpanNameRemappings = tt.spanNameRemappings conf.Ignore["resource"] = tt.ignoreRes if !tt.legacyTopLevel { diff --git a/pkg/trace/traceutil/otel_util.go b/pkg/trace/traceutil/otel_util.go index df0c93641732d..5f7fa7acfafa1 100644 --- a/pkg/trace/traceutil/otel_util.go +++ b/pkg/trace/traceutil/otel_util.go @@ -172,8 +172,8 @@ func GetOTelService(span ptrace.Span, res pcommon.Resource, normalize bool) stri return svc } -// GetOTelResource returns the DD resource name based on OTel span and resource attributes. -func GetOTelResource(span ptrace.Span, res pcommon.Resource) (resName string) { +// GetOTelResourceV1 returns the DD resource name based on OTel span and resource attributes. +func GetOTelResourceV1(span ptrace.Span, res pcommon.Resource) (resName string) { resName = GetOTelAttrValInResAndSpanAttrs(span, res, false, "resource.name") if resName == "" { if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, "http.request.method", semconv.AttributeHTTPMethod); m != "" { @@ -205,8 +205,151 @@ func GetOTelResource(span ptrace.Span, res pcommon.Resource) (resName string) { return } -// GetOTelOperationName returns the DD operation name based on OTel span and resource attributes and given configs. 
-func GetOTelOperationName( +// GetOTelResourceV2 returns the DD resource name based on OTel span and resource attributes. +func GetOTelResourceV2(span ptrace.Span, res pcommon.Resource) (resName string) { + defer func() { + if len(resName) > MaxResourceLen { + resName = resName[:MaxResourceLen] + } + }() + if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, "resource.name"); m != "" { + resName = m + return + } + + if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, "http.request.method", semconv.AttributeHTTPMethod); m != "" { + if m == "_OTHER" { + m = "HTTP" + } + // use the HTTP method + route (if available) + resName = m + if span.Kind() == ptrace.SpanKindServer { + if route := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeHTTPRoute); route != "" { + resName = resName + " " + route + } + } + return + } + + if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingOperation); m != "" { + resName = m + // use the messaging operation + if dest := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingDestination, semconv117.AttributeMessagingDestinationName); dest != "" { + resName = resName + " " + dest + } + return + } + + if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCMethod); m != "" { + resName = m + // use the RPC method + if svc := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCService); m != "" { + // ...and service if available + resName = resName + " " + svc + } + return + } + resName = span.Name() + + return +} + +// GetOTelOperationNameV2 returns the DD operation name based on OTel span and resource attributes and given configs. +func GetOTelOperationNameV2( + span ptrace.Span, +) string { + if operationName := GetOTelAttrVal(span.Attributes(), false, "operation.name"); operationName != "" { + return operationName + } + + isClient := span.Kind() == ptrace.SpanKindClient + isServer := span.Kind() == ptrace.SpanKindServer + + // http + if method := GetOTelAttrVal(span.Attributes(), false, "http.request.method", semconv.AttributeHTTPMethod); method != "" { + if isServer { + return "http.server.request" + } + if isClient { + return "http.client.request" + } + } + + // database + if v := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeDBSystem); v != "" && isClient { + return v + ".query" + } + + // messaging + system := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeMessagingSystem) + op := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeMessagingOperation) + if system != "" && op != "" { + switch span.Kind() { + case ptrace.SpanKindClient, ptrace.SpanKindServer, ptrace.SpanKindConsumer, ptrace.SpanKindProducer: + return system + "." + op + } + } + + // RPC & AWS + rpcValue := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeRPCSystem) + isRPC := rpcValue != "" + isAws := isRPC && (rpcValue == "aws-api") + // AWS client + if isAws && isClient { + if service := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeRPCService); service != "" { + return "aws." 
+ service + ".request" + } + return "aws.client.request" + } + // RPC client + if isRPC && isClient { + return rpcValue + ".client.request" + } + // RPC server + if isRPC && isServer { + return rpcValue + ".server.request" + } + + // FAAS client + provider := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeFaaSInvokedProvider) + invokedName := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeFaaSInvokedName) + if provider != "" && invokedName != "" && isClient { + return provider + "." + invokedName + ".invoke" + } + + // FAAS server + trigger := GetOTelAttrVal(span.Attributes(), true, semconv.AttributeFaaSTrigger) + if trigger != "" && isServer { + return trigger + ".invoke" + } + + // GraphQL + if GetOTelAttrVal(span.Attributes(), true, "graphql.operation.type") != "" { + return "graphql.server.request" + } + + // if nothing matches, checking for generic http server/client + protocol := GetOTelAttrVal(span.Attributes(), true, "network.protocol.name") + if isServer { + if protocol != "" { + return protocol + ".server.request" + } + return "server.request" + } else if isClient { + if protocol != "" { + return protocol + ".client.request" + } + return "client.request" + } + + if span.Kind() != ptrace.SpanKindUnspecified { + return span.Kind().String() + } + return ptrace.SpanKindInternal.String() +} + +// GetOTelOperationNameV1 returns the DD operation name based on OTel span and resource attributes and given configs. +func GetOTelOperationNameV1( span ptrace.Span, res pcommon.Resource, lib pcommon.InstrumentationScope, diff --git a/pkg/trace/traceutil/otel_util_test.go b/pkg/trace/traceutil/otel_util_test.go index 7eed064d83d94..dfa0fa0fd30f4 100644 --- a/pkg/trace/traceutil/otel_util_test.go +++ b/pkg/trace/traceutil/otel_util_test.go @@ -257,36 +257,42 @@ func TestGetOTelService(t *testing.T) { func TestGetOTelResource(t *testing.T) { for _, tt := range []struct { - name string - rattrs map[string]string - sattrs map[string]string - normalize bool - expected string + name string + rattrs map[string]string + sattrs map[string]string + normalize bool + expectedV1 string + expectedV2 string }{ { - name: "resource not set", - expected: "span_name", + name: "resource not set", + expectedV1: "span_name", + expectedV2: "span_name", }, { - name: "normal resource", - sattrs: map[string]string{"resource.name": "res"}, - expected: "res", + name: "normal resource", + sattrs: map[string]string{"resource.name": "res"}, + expectedV1: "res", + expectedV2: "res", }, { - name: "HTTP request method resource", - sattrs: map[string]string{"http.request.method": "GET"}, - expected: "GET", + name: "HTTP request method resource", + sattrs: map[string]string{"http.request.method": "GET"}, + expectedV1: "GET", + expectedV2: "GET", }, { - name: "HTTP method and route resource", - sattrs: map[string]string{semconv.AttributeHTTPMethod: "GET", semconv.AttributeHTTPRoute: "/"}, - expected: "GET /", + name: "HTTP method and route resource", + sattrs: map[string]string{semconv.AttributeHTTPMethod: "GET", semconv.AttributeHTTPRoute: "/"}, + expectedV1: "GET /", + expectedV2: "GET", }, { - name: "truncate long resource", - sattrs: map[string]string{"resource.name": strings.Repeat("a", MaxResourceLen+1)}, - normalize: true, - expected: strings.Repeat("a", MaxResourceLen), + name: "truncate long resource", + sattrs: map[string]string{"resource.name": strings.Repeat("a", MaxResourceLen+1)}, + normalize: true, + expectedV1: strings.Repeat("a", MaxResourceLen), + expectedV2: strings.Repeat("a", 
MaxResourceLen), }, } { t.Run(tt.name, func(t *testing.T) { @@ -299,8 +305,8 @@ func TestGetOTelResource(t *testing.T) { for k, v := range tt.rattrs { res.Attributes().PutStr(k, v) } - actual := GetOTelResource(span, res) - assert.Equal(t, tt.expected, actual) + assert.Equal(t, tt.expectedV1, GetOTelResourceV1(span, res)) + assert.Equal(t, tt.expectedV2, GetOTelResourceV2(span, res)) }) } } @@ -384,7 +390,7 @@ func TestGetOTelOperationName(t *testing.T) { } lib := pcommon.NewInstrumentationScope() lib.SetName(tt.libname) - actual := GetOTelOperationName(span, res, lib, tt.spanNameAsResourceName, tt.spanNameRemappings, tt.normalize) + actual := GetOTelOperationNameV1(span, res, lib, tt.spanNameAsResourceName, tt.spanNameRemappings, tt.normalize) assert.Equal(t, tt.expected, actual) }) } diff --git a/pkg/trace/transform/transform.go b/pkg/trace/transform/transform.go index f19af44b5d636..dd2d5a5f08367 100644 --- a/pkg/trace/transform/transform.go +++ b/pkg/trace/transform/transform.go @@ -23,6 +23,11 @@ import ( semconv "go.opentelemetry.io/collector/semconv/v1.6.1" ) +// OperationAndResourceNameV2Enabled checks if the new operation and resource name logic should be used +func OperationAndResourceNameV2Enabled(conf *config.AgentConfig) bool { + return !conf.OTLPReceiver.SpanNameAsResourceName && (conf.OTLPReceiver.SpanNameRemappings == nil || len(conf.OTLPReceiver.SpanNameRemappings) == 0) && conf.HasFeature("enable_operation_and_resource_name_logic_v2") +} + // OtelSpanToDDSpanMinimal otelSpanToDDSpan converts an OTel span to a DD span. // The converted DD span only has the minimal number of fields for APM stats calculation and is only meant // to be used in OTLPTracesToConcentratorInputs. Do not use them for other purposes. @@ -34,10 +39,20 @@ func OtelSpanToDDSpanMinimal( conf *config.AgentConfig, peerTagKeys []string, ) *pb.Span { + var operationName string + var resourceName string + if OperationAndResourceNameV2Enabled(conf) { + operationName = traceutil.GetOTelOperationNameV2(otelspan) + resourceName = traceutil.GetOTelResourceV2(otelspan, otelres) + } else { + operationName = traceutil.GetOTelOperationNameV1(otelspan, otelres, lib, conf.OTLPReceiver.SpanNameAsResourceName, conf.OTLPReceiver.SpanNameRemappings, true) + resourceName = traceutil.GetOTelResourceV1(otelspan, otelres) + } + ddspan := &pb.Span{ Service: traceutil.GetOTelService(otelspan, otelres, true), - Name: traceutil.GetOTelOperationName(otelspan, otelres, lib, conf.OTLPReceiver.SpanNameAsResourceName, conf.OTLPReceiver.SpanNameRemappings, true), - Resource: traceutil.GetOTelResource(otelspan, otelres), + Name: operationName, + Resource: resourceName, TraceID: traceutil.OTelTraceIDToUint64(otelspan.TraceID()), SpanID: traceutil.OTelSpanIDToUint64(otelspan.SpanID()), ParentID: traceutil.OTelSpanIDToUint64(otelspan.ParentSpanID()), diff --git a/pkg/util/winutil/iisconfig/apmtags.go b/pkg/util/winutil/iisconfig/apmtags.go index ee940ac2b7f5e..5964e2d89cf65 100644 --- a/pkg/util/winutil/iisconfig/apmtags.go +++ b/pkg/util/winutil/iisconfig/apmtags.go @@ -22,14 +22,20 @@ import ( // APMTags holds the APM tags type APMTags struct { - DDService string - DDEnv string - DDVersion string + DDService string `json:"DD_SERVICE"` + DDEnv string `json:"DD_ENV"` + DDVersion string `json:"DD_VERSION"` } +// keep a count of errors to avoid flooding the log +var ( + jsonLogCount = 0 + dotnetConfigLogCount = 0 + logErrorCountInterval = 500 +) + // ReadDatadogJSON reads a datadog.json file and returns the APM tags func 
ReadDatadogJSON(datadogJSONPath string) (APMTags, error) { - var datadogJSON map[string]string var apmtags APMTags file, err := os.Open(datadogJSONPath) @@ -39,13 +45,14 @@ func ReadDatadogJSON(datadogJSONPath string) (APMTags, error) { defer file.Close() decoder := json.NewDecoder(file) - err = decoder.Decode(&datadogJSON) + err = decoder.Decode(&apmtags) if err != nil { + if jsonLogCount%logErrorCountInterval == 0 { + log.Warnf("Error reading datadog.json file %s: %v", datadogJSONPath, err) + } + jsonLogCount++ return apmtags, err } - apmtags.DDService = datadogJSON["DD_SERVICE"] - apmtags.DDEnv = datadogJSON["DD_ENV"] - apmtags.DDVersion = datadogJSON["DD_VERSION"] return apmtags, nil } @@ -78,6 +85,10 @@ func ReadDotNetConfig(cfgpath string) (APMTags, error) { //(APMTags, error) { } err = xml.Unmarshal(f, &newcfg) if err != nil { + if dotnetConfigLogCount%logErrorCountInterval == 0 { + log.Warnf("Error reading dotnet config file %s: %v", cfgpath, err) + } + dotnetConfigLogCount++ return apmtags, err } for _, setting := range newcfg.AppSettings.Adds { @@ -105,8 +116,8 @@ func ReadDotNetConfig(cfgpath string) (APMTags, error) { //(APMTags, error) { apmtags.DDVersion = ddjson.DDVersion } } else { - // only log every 1000 occurrences because if this is misconfigured, it could flood the log + // only log every logErrorCountInterval occurrences because if this is misconfigured, it could flood the log - if errorlogcount%1000 == 0 { + if errorlogcount%logErrorCountInterval == 0 { log.Warnf("Error reading configured datadog.json file %s: %v", chasedatadogJSON, err) } errorlogcount++ diff --git a/pkg/util/winutil/iisconfig/testdata/app1/datadog.json b/pkg/util/winutil/iisconfig/testdata/app1/datadog.json index deb9f3e52db63..82dfd0b72c3e6 100644 --- a/pkg/util/winutil/iisconfig/testdata/app1/datadog.json +++ b/pkg/util/winutil/iisconfig/testdata/app1/datadog.json @@ -1,5 +1,6 @@ { "DD_SERVICE": "app1", "DD_ENV": "staging", - "DD_VERSION": "1.0-prerelease" + "DD_VERSION": "1.0-prerelease", + "DD_RUNTIME_METRICS_ENABLED": true } \ No newline at end of file diff --git a/pkg/util/winutil/iisconfig/testdata/app2/datadog.json b/pkg/util/winutil/iisconfig/testdata/app2/datadog.json index e1a8718f32fec..af7aa48b6d856 100644 --- a/pkg/util/winutil/iisconfig/testdata/app2/datadog.json +++ b/pkg/util/winutil/iisconfig/testdata/app2/datadog.json @@ -1,5 +1,6 @@ { "DD_SERVICE": "app2", "DD_ENV": "staging", - "DD_VERSION": "1.0-prerelease" + "DD_VERSION": "1.0-prerelease", + "DD_RUNTIME_METRICS_ENABLED": true } \ No newline at end of file diff --git a/pkg/util/winutil/iisconfig/testdata/app3/datadog.json b/pkg/util/winutil/iisconfig/testdata/app3/datadog.json index ee2153f9835f3..75f50b14ef15c 100644 --- a/pkg/util/winutil/iisconfig/testdata/app3/datadog.json +++ b/pkg/util/winutil/iisconfig/testdata/app3/datadog.json @@ -1,5 +1,6 @@ { "DD_SERVICE": "app3", "DD_ENV": "staging", - "DD_VERSION": "1.0-prerelease" + "DD_VERSION": "1.0-prerelease", + "DD_RUNTIME_METRICS_ENABLED": true } \ No newline at end of file diff --git a/pkg/util/winutil/iisconfig/testdata/app4/datadog.json b/pkg/util/winutil/iisconfig/testdata/app4/datadog.json index 5dd98fd3f7fea..8c60b6580d86a 100644 --- a/pkg/util/winutil/iisconfig/testdata/app4/datadog.json +++ b/pkg/util/winutil/iisconfig/testdata/app4/datadog.json @@ -1,5 +1,6 @@ { "DD_SERVICE": "app4", "DD_ENV": "staging", - "DD_VERSION": "1.0-prerelease" + "DD_VERSION": "1.0-prerelease", + "DD_RUNTIME_METRICS_ENABLED": true } \ No newline at end of file diff --git
a/releasenotes/notes/CCObfSkipKeys-b639ee1a05455030.yaml b/releasenotes/notes/CCObfSkipKeys-b639ee1a05455030.yaml new file mode 100644 index 0000000000000..63f5f861d7621 --- /dev/null +++ b/releasenotes/notes/CCObfSkipKeys-b639ee1a05455030.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- + +features: + - | + APM: New configuration apm_config.obfuscation.credit_cards.keep_values (DD_APM_OBFUSCATION_CREDIT_CARDS_KEEP_VALUES) + can be used to skip specific tag keys that are known to never contain credit card numbers. This is especially useful + in cases where a span tag value is a number that triggers false positives from the credit card obfuscator. diff --git a/releasenotes/notes/operation-and-resource-name-logic-v2-75929121247f2059.yaml b/releasenotes/notes/operation-and-resource-name-logic-v2-75929121247f2059.yaml new file mode 100644 index 0000000000000..14baea0aee210 --- /dev/null +++ b/releasenotes/notes/operation-and-resource-name-logic-v2-75929121247f2059.yaml @@ -0,0 +1,10 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - Added a new feature flag `enable_operation_and_resource_name_logic_v2` in DD_APM_FEATURES. Enabling this flag modifies the logic for computing operation and resource names from OTLP spans to produce shorter, more readable names and improve alignment with OpenTelemetry specifications. 
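For illustration, a minimal datadog.yaml sketch of the option described in the CCObfSkipKeys release note above. The key path and the environment variable come from the note itself; the `enabled` toggle and the specific tag keys listed under `keep_values` are assumptions shown only as examples.

```yaml
# Hedged sketch of apm_config.obfuscation.credit_cards.keep_values.
# The tag keys below (order_id, payment_id) are hypothetical examples.
apm_config:
  obfuscation:
    credit_cards:
      enabled: true       # assumed existing on/off switch for credit card obfuscation
      keep_values:        # tag keys whose values should never be treated as card numbers
        - order_id
        - payment_id
```

The same list can also be supplied through the DD_APM_OBFUSCATION_CREDIT_CARDS_KEEP_VALUES environment variable mentioned in the note.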
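Likewise, a short Go sketch of how the v2 naming rules added in pkg/trace/traceutil/otel_util.go above behave once `enable_operation_and_resource_name_logic_v2` is enabled via DD_APM_FEATURES. The expected names in the comments are inferred only from the branches visible in this diff, and the import path is assumed; treat this as an illustration rather than a definitive spec.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"

	// Assumed import path for the helper changed in this diff.
	"github.com/DataDog/datadog-agent/pkg/trace/traceutil"
)

func main() {
	// Server span carrying only a FaaS trigger: per the rules above, "<trigger>.invoke".
	faas := ptrace.NewSpan()
	faas.SetKind(ptrace.SpanKindServer)
	faas.Attributes().PutStr("faas.trigger", "http")
	fmt.Println(traceutil.GetOTelOperationNameV2(faas)) // expected: "http.invoke"

	// Span with a GraphQL operation type: "graphql.server.request".
	gql := ptrace.NewSpan()
	gql.SetKind(ptrace.SpanKindServer)
	gql.Attributes().PutStr("graphql.operation.type", "query")
	fmt.Println(traceutil.GetOTelOperationNameV2(gql)) // expected: "graphql.server.request"

	// No recognized attributes: fall back to the span kind string.
	plain := ptrace.NewSpan()
	plain.SetKind(ptrace.SpanKindInternal)
	fmt.Println(traceutil.GetOTelOperationNameV2(plain)) // expected: "Internal" (before any later normalization)
}
```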
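And a standalone sketch of the decoding pattern introduced in pkg/util/winutil/iisconfig/apmtags.go above: JSON struct tags map the DD_* keys of datadog.json directly onto the struct, replacing the intermediate map, and a modulo counter limits the warning to one log line per N failures. The names here (readDatadogJSON, logInterval) are local to the sketch, not the package's API.

```go
package main

import (
	"encoding/json"
	"log"
	"os"
)

// apmTags mirrors the struct-tag approach from the diff: keys not listed here
// (for example DD_RUNTIME_METRICS_ENABLED in the updated test data) are simply
// ignored by encoding/json.
type apmTags struct {
	DDService string `json:"DD_SERVICE"`
	DDEnv     string `json:"DD_ENV"`
	DDVersion string `json:"DD_VERSION"`
}

var (
	errCount    = 0   // counts every decode failure
	logInterval = 500 // warn only once per logInterval failures
)

func readDatadogJSON(path string) (apmTags, error) {
	var tags apmTags
	f, err := os.Open(path)
	if err != nil {
		return tags, err
	}
	defer f.Close()

	if err := json.NewDecoder(f).Decode(&tags); err != nil {
		// Rate-limited warning: logs on the 1st, 501st, 1001st, ... failure.
		if errCount%logInterval == 0 {
			log.Printf("error reading %s: %v", path, err)
		}
		errCount++
		return tags, err
	}
	return tags, nil
}

func main() {
	tags, err := readDatadogJSON("datadog.json")
	if err != nil {
		log.Printf("no tags: %v", err)
		return
	}
	log.Printf("service=%s env=%s version=%s", tags.DDService, tags.DDEnv, tags.DDVersion)
}
```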
diff --git a/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go b/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go index baf96bcea406e..cad8fab208066 100644 --- a/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go +++ b/test/new-e2e/tests/installer/windows/suites/agent-package/upgrade_test.go @@ -46,7 +46,7 @@ func (s *testAgentUpgradeSuite) TestDowngradeAgentPackage() { // Act _, err = s.Installer().InstallExperiment(installerwindows.AgentPackage, - installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithRegistry("install.datadoghq.com"), installer.WithVersion(s.StableAgentVersion().PackageVersion()), installer.WithAuthentication(""), ) @@ -69,7 +69,7 @@ func (s *testAgentUpgradeSuite) TestExperimentFailure() { // Act _, err := s.Installer().InstallExperiment(installerwindows.AgentPackage, - installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithRegistry("install.datadoghq.com"), installer.WithVersion("unknown-version"), installer.WithAuthentication(""), ) @@ -88,7 +88,7 @@ func (s *testAgentUpgradeSuite) TestExperimentCurrentVersion() { // Act _, err := s.Installer().InstallExperiment(installerwindows.AgentPackage, - installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithRegistry("install.datadoghq.com"), installer.WithVersion(s.StableAgentVersion().PackageVersion()), installer.WithAuthentication(""), ) @@ -121,7 +121,7 @@ func (s *testAgentUpgradeSuite) installStableAgent() { // Act output, err := s.Installer().InstallPackage(installerwindows.AgentPackage, - installer.WithRegistry("public.ecr.aws/datadog"), + installer.WithRegistry("install.datadoghq.com"), installer.WithVersion(s.StableAgentVersion().PackageVersion()), installer.WithAuthentication(""), ) diff --git a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go index a2fc1d9970507..83d67475a6daf 100644 --- a/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go +++ b/test/new-e2e/tests/installer/windows/suites/installer-package/upgrade_test.go @@ -6,13 +6,15 @@ package installertests import ( + "testing" + + "github.com/DataDog/datadog-agent/pkg/util/testutil/flake" agentVersion "github.com/DataDog/datadog-agent/pkg/version" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/e2e" "github.com/DataDog/datadog-agent/test/new-e2e/pkg/environments/aws/host/windows" installerwindows "github.com/DataDog/datadog-agent/test/new-e2e/tests/installer/windows" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/agent" "github.com/DataDog/datadog-agent/test/new-e2e/tests/windows/common/pipeline" - "testing" ) type testInstallerUpgradesSuite struct { @@ -26,6 +28,7 @@ func TestInstallerUpgrades(t *testing.T) { // TestUpgrades tests upgrading the stable version of the Datadog installer to the latest from the pipeline. 
func (s *testInstallerUpgradesSuite) TestUpgrades() { + flake.Mark(s.T()) // Arrange s.Require().NoError(s.Installer().Install( installerwindows.WithInstallerURLFromInstallersJSON(pipeline.StableURL, s.StableInstallerVersion().PackageVersion())), diff --git a/test/regression/cases/file_to_blackhole_0ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml b/test/regression/cases/file_to_blackhole_0ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml index 2737fc2dd4f7c..ec51a59de1c46 100644 --- a/test/regression/cases/file_to_blackhole_0ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml +++ b/test/regression/cases/file_to_blackhole_0ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml @@ -1,5 +1,5 @@ logs: - type: file - path: "/tmp/smp/*.log" + path: "/smp-shared/*.log" service: "my-service" source: "my-client-app" diff --git a/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml b/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml index 215dcfa86cc48..4884b1e7a2964 100644 --- a/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml +++ b/test/regression/cases/file_to_blackhole_0ms_latency/experiment.yaml @@ -31,3 +31,9 @@ checks: series: total_rss_bytes # The machine has 12GiB free. upper_bound: 1.2GiB + + - name: lost_bytes + description: "Allowable bytes not polled by log Agent" + bounds: + series: lost_bytes + upper_bound: 0KB diff --git a/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml index 68bf4bdd44bea..e5cf494648f87 100644 --- a/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml +++ b/test/regression/cases/file_to_blackhole_0ms_latency/lading/lading.yaml @@ -1,16 +1,16 @@ generator: - file_gen: - logrotate: + logrotate_fs: seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] - root: "/tmp/smp" concurrent_logs: 8 - maximum_bytes_per_log: "500MiB" + maximum_bytes_per_log: 500MB total_rotations: 5 - max_depth: 1 # flat, all logs are /tmp/smp/12345.log + max_depth: 0 variant: "ascii" - bytes_per_second: "10MiB" - maximum_prebuild_cache_size_bytes: "300MiB" + bytes_per_second: 10MB + maximum_prebuild_cache_size_bytes: 300MB + mount_point: /smp-shared blackhole: - http: diff --git a/test/regression/cases/file_to_blackhole_1000ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml b/test/regression/cases/file_to_blackhole_1000ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml index 2737fc2dd4f7c..ec51a59de1c46 100644 --- a/test/regression/cases/file_to_blackhole_1000ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml +++ b/test/regression/cases/file_to_blackhole_1000ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml @@ -1,5 +1,5 @@ logs: - type: file - path: "/tmp/smp/*.log" + path: "/smp-shared/*.log" service: "my-service" source: "my-client-app" diff --git a/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml index 7cff76eb2e818..cf3c07bf79121 100644 --- a/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml +++ b/test/regression/cases/file_to_blackhole_1000ms_latency/lading/lading.yaml @@ -1,16 +1,16 @@ generator: - file_gen: - logrotate: + logrotate_fs: seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
131] - root: "/tmp/smp" concurrent_logs: 8 - maximum_bytes_per_log: "500MiB" + maximum_bytes_per_log: 500MB total_rotations: 5 - max_depth: 1 # flat, all logs are /tmp/smp/12345.log + max_depth: 0 variant: "ascii" - bytes_per_second: "10MiB" - maximum_prebuild_cache_size_bytes: "300MiB" + bytes_per_second: 10MB + maximum_prebuild_cache_size_bytes: 300MB + mount_point: /smp-shared blackhole: - http: diff --git a/test/regression/cases/file_to_blackhole_100ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml b/test/regression/cases/file_to_blackhole_100ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml index 2737fc2dd4f7c..ec51a59de1c46 100644 --- a/test/regression/cases/file_to_blackhole_100ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml +++ b/test/regression/cases/file_to_blackhole_100ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml @@ -1,5 +1,5 @@ logs: - type: file - path: "/tmp/smp/*.log" + path: "/smp-shared/*.log" service: "my-service" source: "my-client-app" diff --git a/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml b/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml index 215dcfa86cc48..64fe41015db6f 100644 --- a/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml +++ b/test/regression/cases/file_to_blackhole_100ms_latency/experiment.yaml @@ -31,3 +31,9 @@ checks: series: total_rss_bytes # The machine has 12GiB free. upper_bound: 1.2GiB + + - name: lost_bytes + description: "Allowable bytes not polled by log Agent" + bounds: + series: lost_bytes + upper_bound: 0KB diff --git a/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml index 1cf21c8935ced..d1c1648be96c1 100644 --- a/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml +++ b/test/regression/cases/file_to_blackhole_100ms_latency/lading/lading.yaml @@ -1,16 +1,16 @@ generator: - file_gen: - logrotate: + logrotate_fs: seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] - root: "/tmp/smp" concurrent_logs: 8 - maximum_bytes_per_log: "500MiB" + maximum_bytes_per_log: 500MB total_rotations: 5 - max_depth: 1 # flat, all logs are /tmp/smp/12345.log + max_depth: 0 variant: "ascii" - bytes_per_second: "10MiB" - maximum_prebuild_cache_size_bytes: "300MiB" + bytes_per_second: 10MB + maximum_prebuild_cache_size_bytes: 300MB + mount_point: /smp-shared blackhole: - http: diff --git a/test/regression/cases/file_to_blackhole_300ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml b/test/regression/cases/file_to_blackhole_300ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml index 2737fc2dd4f7c..ec51a59de1c46 100644 --- a/test/regression/cases/file_to_blackhole_300ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml +++ b/test/regression/cases/file_to_blackhole_300ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml @@ -1,5 +1,5 @@ logs: - type: file - path: "/tmp/smp/*.log" + path: "/smp-shared/*.log" service: "my-service" source: "my-client-app" diff --git a/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml index f8f94fbbebe2f..dc84682becb8a 100644 --- a/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml +++ b/test/regression/cases/file_to_blackhole_300ms_latency/lading/lading.yaml @@ -1,16 +1,16 @@ generator: - 
file_gen: - logrotate: + logrotate_fs: seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] - root: "/tmp/smp" concurrent_logs: 8 - maximum_bytes_per_log: "500MiB" + maximum_bytes_per_log: 500MB total_rotations: 5 - max_depth: 1 # flat, all logs are /tmp/smp/12345.log + max_depth: 0 variant: "ascii" - bytes_per_second: "10MiB" - maximum_prebuild_cache_size_bytes: "300MiB" + bytes_per_second: 10MB + maximum_prebuild_cache_size_bytes: 300MB + mount_point: /smp-shared blackhole: - http: diff --git a/test/regression/cases/file_to_blackhole_500ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml b/test/regression/cases/file_to_blackhole_500ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml index 2737fc2dd4f7c..ec51a59de1c46 100644 --- a/test/regression/cases/file_to_blackhole_500ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml +++ b/test/regression/cases/file_to_blackhole_500ms_latency/datadog-agent/conf.d/disk-listener.d/conf.yaml @@ -1,5 +1,5 @@ logs: - type: file - path: "/tmp/smp/*.log" + path: "/smp-shared/*.log" service: "my-service" source: "my-client-app" diff --git a/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml b/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml index d88c5c4679fcf..90920efb161d4 100644 --- a/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml +++ b/test/regression/cases/file_to_blackhole_500ms_latency/lading/lading.yaml @@ -1,16 +1,16 @@ generator: - file_gen: - logrotate: + logrotate_fs: seed: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131] - root: "/tmp/smp" concurrent_logs: 8 - maximum_bytes_per_log: "500MiB" + maximum_bytes_per_log: 500MB total_rotations: 5 - max_depth: 1 # flat, all logs are /tmp/smp/12345.log + max_depth: 0 variant: "ascii" - bytes_per_second: "10MiB" - maximum_prebuild_cache_size_bytes: "300MiB" + bytes_per_second: 10MB + maximum_prebuild_cache_size_bytes: 300MB + mount_point: /smp-shared blackhole: - http:
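Finally, a consolidated sketch (assembled from the repeated hunks above) of how each file_to_blackhole regression case lines up after this change: lading's logrotate_fs generator serves its synthetic logs through a mounted filesystem at mount_point, the Agent's disk-listener tails that same path instead of /tmp/smp, and the new lost_bytes check in each experiment.yaml bounds unread bytes at 0KB.

```yaml
# lading/lading.yaml (abridged; seed list omitted, see the full files above)
generator:
  - logrotate_fs:
      concurrent_logs: 8
      maximum_bytes_per_log: 500MB
      total_rotations: 5
      max_depth: 0
      variant: "ascii"
      bytes_per_second: 10MB
      maximum_prebuild_cache_size_bytes: 300MB
      mount_point: /smp-shared        # replaces the old root: "/tmp/smp"

# datadog-agent/conf.d/disk-listener.d/conf.yaml
logs:
  - type: file
    path: "/smp-shared/*.log"         # must match the generator's mount_point
    service: "my-service"
    source: "my-client-app"

# experiment.yaml (new check)
checks:
  - name: lost_bytes
    description: "Allowable bytes not polled by log Agent"
    bounds:
      series: lost_bytes
      upper_bound: 0KB
```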