From 15e3e862ec19d965294d8daebe5d16431c7e9255 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Thu, 30 Nov 2023 09:30:50 -0500 Subject: [PATCH] [release-v0.38] Prepare for v0.38.1 release (#5893) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Allow x-faro-session-id header for faro receiver (#5835) (cherry picked from commit cc7cb3716ed4106326bb064882ee235694cbf9b2) * misc: follow up on #5835 (#5837) * Add missing CHANGELOG entry * Mirror fix to static mode (cherry picked from commit 7da5726b2794dcc10d5330ce976dd53a21313a12) * Agent Management: Introduce support for template variables (#5788) * Agent Management: Introduce support for template variables - This change allows managing template variables for remote configuration on a per-agent basis. - Both base configurations and snippets can be interpreted as templates and evaluated at load time with the provided template variables. - Templates must follow Go's `text/template` syntax. - This greatly increases the flexibility and reusability of snippets. - Template evaluation has been tested in different scenarios and is robust: if the variables defined in the template cannot be resolved (even nested ones), an empty string is rendered instead. - Note: templates are only evaluated when the `template_variables` field within the `agent_metadata` remote config field is non-empty. - Note: this feature only applies to static mode. * Improve naming * Check error for template execution * Add tests - Tests different scenarios, including: - Referencing non-existent nested objects - Conditionals - Ranges - Character escaping * Update CHANGELOG * Always evaluate templates - This is required because certain agents might start before their labels are synced. If some of the snippets assigned to them contain template variables, loading the config would fail. * Add test for template inside a template - Templates inside templates must be escaped using backticks to prevent them from being evaluated during snippet template execution * Move feature to the next release in CHANGELOG * Document templating functionality * Fix doc (cherry picked from commit d388f94a6bce953895dc5a61e6882f092fb198b8) * pyroscope.scrape: change error log level to not swallow errors (#5840) (cherry picked from commit 2242e4a40d5b8a742dfe6835b97e0e85d9a53624) * Update windows defaults to use upstream defaults except for enabled collectors. (#5832) Fix #5831 and use the upstream defaults. (cherry picked from commit 15d3d9f43c86c8583c9f94ec43f5f7585a92aea8) * Bump otelgrpc to fix CVE-2023-47108 (#5806) * Fix CVE-2023-47108 by updating `otelgrpc` from v0.45.0 to v0.46.0. * Stop using the deprecated trace.NewNoopTracerProvider and use noop.NewTracerProvider from "go.opentelemetry.io/otel/trace/noop" instead.
* Reorder changelog Comply with the ordering in: https://github.com/grafana/agent/blob/main/docs/developer/contributing.md#updating-the-changelog --------- Signed-off-by: hainenber Co-authored-by: Paulin Todev (cherry picked from commit a2348a0f4b928a32720b279a425f6223835d5038) * fix(otelcol/fanoutconsumer): nil check during fanout consumer creation (#5854) Signed-off-by: hainenber (cherry picked from commit 84344fb1a273ed1c25423e46a654f9af273636f8) * prometheus.operator.* - Fix issue with missing targets when one monitor's name is a prefix of another (#5862) Co-authored-by: Paul Bormans <1399350+Paul424@users.noreply.github.com> (cherry picked from commit f232fb476c84cee7c2c04a2f3b7cc28b5907b429) * max_cache_size was being set to 0 (#5869) * max_cache_size was being set to 0, due to issue where it doesnt exist in static but the default wasnt carrying over to river syntax. In truth we should never write it. * Clean up from PR (cherry picked from commit 356c50cbdf384dd9e09a1dd9f26ecf1b0ee41034) * Add Deploy Mode to usage stats. (#5880) Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> (cherry picked from commit 7bf82eaa0be9ff65236963191e98f484e2c512c7) * Fix promtail converter: docker_sd_configs (#5881) * Fix promtail converter: docker_sd * changelog and lint * typo (cherry picked from commit c5dc968097401a665a3881448804c52bda26d32f) * prepare for 0.38.1 release (#5891) (cherry picked from commit fc2fd5b46938374a5aac75cdc8f421f8f5c81740) * fix misplaced CHANGELOG entry for #5869 --------- Co-authored-by: Cedric Ziel Co-authored-by: Jorge Creixell Co-authored-by: Tolya Korniltsev Co-authored-by: mattdurham Co-authored-by: Đỗ Trọng Hải <41283691+hainenber@users.noreply.github.com> Co-authored-by: Craig Peterson <192540+captncraig@users.noreply.github.com> Co-authored-by: Piotr <17101802+thampiotr@users.noreply.github.com> --- CHANGELOG.md | 34 +++- component/faro/receiver/handler.go | 2 +- .../otelcol/internal/fanoutconsumer/logs.go | 14 +- .../otelcol/receiver/prometheus/prometheus.go | 8 +- .../prometheus/exporter/windows/config.go | 71 -------- .../windows/config_default_windows_test.go | 46 +++-- .../exporter/windows/config_windows.go | 75 ++++++++ .../prometheus/operator/common/crdmanager.go | 44 +++-- .../operator/common/crdmanager_test.go | 168 ++++++++++++++++++ .../prometheus/operator/common/interfaces.go | 23 +++ component/pyroscope/scrape/scrape_loop.go | 2 +- .../internal/build/docker_sd.go | 95 ++++++++++ .../internal/build/scrape_builder.go | 5 + .../internal/build/service_discovery.go | 4 - .../promtailconvert/promtailconvert.go | 3 + .../testdata/cloudflare_relabel.river | 1 - .../promtailconvert/testdata/docker.river | 53 +++++- .../testdata/docker_relabel.river | 63 +++++++ .../testdata/docker_relabel.yaml | 37 ++++ .../testdata/mixed_pipeline.river | 75 ++++++++ .../testdata/mixed_pipeline.yaml | 48 +++++ .../testdata/windowsevents_relabel.river | 1 - .../internal/build/windows_exporter.go | 4 +- .../testdata/promtail_scrape.river | 1 - .../staticconvert/testdata/sanitize.river | 2 - docs/sources/_index.md | 2 +- docs/sources/data-collection.md | 1 + .../static/configuration/agent-management.md | 7 +- go.mod | 16 +- go.sum | 27 +-- internal/useragent/useragent.go | 11 +- internal/useragent/useragent_test.go | 13 ++ .../agent_management_remote_config_test.go | 77 ++++++++ pkg/config/agentmanagement_remote_config.go | 39 +++- pkg/flow/componenttest/componenttest.go | 4 +- pkg/flow/internal/controller/loader_test.go | 6 +- 
pkg/flow/tracing/tracing.go | 1 + pkg/flow/tracing/wrap_tracer.go | 22 +-- .../v2/app_agent_receiver/handler.go | 2 +- pkg/integrations/windows_exporter/config.go | 73 -------- .../windows_exporter/config_windows.go | 74 +++++++- pkg/operator/defaults.go | 2 +- pkg/traces/instance.go | 4 +- pkg/traces/traceutils/server.go | 4 +- pkg/usagestats/stats.go | 3 + service/cluster/cluster.go | 3 +- service/http/http.go | 3 +- service/http/http_test.go | 4 +- tools/gen-versioned-files/agent-version.txt | 2 +- 49 files changed, 1008 insertions(+), 271 deletions(-) create mode 100644 component/prometheus/exporter/windows/config_windows.go create mode 100644 component/prometheus/operator/common/crdmanager_test.go create mode 100644 component/prometheus/operator/common/interfaces.go create mode 100644 converter/internal/promtailconvert/internal/build/docker_sd.go create mode 100644 converter/internal/promtailconvert/testdata/docker_relabel.river create mode 100644 converter/internal/promtailconvert/testdata/docker_relabel.yaml create mode 100644 converter/internal/promtailconvert/testdata/mixed_pipeline.river create mode 100644 converter/internal/promtailconvert/testdata/mixed_pipeline.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 78652c614c94..a10846d29200 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,8 +7,38 @@ This document contains a historical list of changes between releases. Only changes that impact end-user behavior are listed; changes to documentation or internal API changes are not present. -Main (unreleased) ------------------ +v0.38.1 (2023-11-30) +-------------------- + +### Security fixes + +- Fix CVE-2023-47108 by updating `otelgrpc` from v0.45.0 to v0.46.0. (@hainenber) + +### Features + +- Agent Management: Introduce support for templated configuration. (@jcreixell) + +### Bugfixes + +- Permit `X-Faro-Session-ID` header in CORS requests for the `faro.receiver` + component (flow mode) and the `app_agent_receiver` integration (static mode). + (@cedricziel) + +- Fix issue with windows_exporter defaults not being set correctly. (@mattdurham) + +- Fix agent crash when processing nil OTel fan-out consumers. (@hainenber) + +- Fix issue in `prometheus.operator.*` where targets would be dropped if two CRDs share a common prefix in their names. (@Paul424, @captncraig) + +- Fix issue where the `convert` command would generate incorrect Flow Mode config + when provided a `promtail` configuration that uses `docker_sd_configs`. (@thampiotr) + +- Fix converter issue with `loki.relabel` and `max_cache_size` being set to 0 + instead of the default (10_000). (@mattdurham) + +### Other changes + +- Add Agent Deploy Mode to usage report. (@captncraig) v0.38.0 (2023-11-21) -------------------- diff --git a/component/faro/receiver/handler.go b/component/faro/receiver/handler.go index fb8511e0bbde..636f00859e2b 100644 --- a/component/faro/receiver/handler.go +++ b/component/faro/receiver/handler.go @@ -69,7 +69,7 @@ func (h *handler) Update(args ServerArguments) { if len(args.CORSAllowedOrigins) > 0 { h.cors = cors.New(cors.Options{ AllowedOrigins: args.CORSAllowedOrigins, - AllowedHeaders: []string{apiKeyHeader, "content-type"}, + AllowedHeaders: []string{apiKeyHeader, "content-type", "x-faro-session-id"}, }) } else { h.cors = nil // Disable cors.
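The `faro.receiver` change above adds `x-faro-session-id` to the CORS allow list. One way to check the behaviour after upgrading is to replay the preflight request a browser would send. The sketch below is illustrative only; the listen address and `/collect` path are assumptions and depend on how the receiver is actually configured.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Assumed endpoint; adjust host, port, and path to your faro.receiver settings.
	req, err := http.NewRequest(http.MethodOptions, "http://localhost:12347/collect", nil)
	if err != nil {
		panic(err)
	}

	// Simulate the preflight a browser running the Faro Web SDK would send.
	req.Header.Set("Origin", "https://my-app.example.com")
	req.Header.Set("Access-Control-Request-Method", "POST")
	req.Header.Set("Access-Control-Request-Headers", "content-type, x-faro-session-id")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// With the fix in place, the echoed allow list should include x-faro-session-id.
	fmt.Println("Access-Control-Allow-Headers:", resp.Header.Get("Access-Control-Allow-Headers"))
}
```

If `Access-Control-Allow-Headers` in the response echoes the requested headers, the browser will let the Faro Web SDK attach its session ID header to subsequent requests.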
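For the templated-configuration feature listed above, the commit message states that unresolved template variables render as empty strings. The following is a minimal, self-contained sketch of the `text/template` mechanics this builds on, not the agent's actual implementation; the YAML body and variable names are invented, and the agent's handling of nested lookups may differ.

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// A snippet-like body that references template variables using Go's
	// text/template syntax. The field names are invented for illustration.
	snippet := `
server:
  log_level: {{.log_level}}
  graceful_shutdown_timeout: {{.shutdown_timeout}}
`

	// Variables that would arrive via the agent_metadata.template_variables
	// field of the remote configuration.
	vars := map[string]string{
		"log_level": "debug",
		// "shutdown_timeout" is deliberately omitted.
	}

	// missingkey=zero makes unresolved keys render as the map element's zero
	// value, which for map[string]string is an empty string.
	tmpl, err := template.New("snippet").Option("missingkey=zero").Parse(snippet)
	if err != nil {
		panic(err)
	}

	var out bytes.Buffer
	if err := tmpl.Execute(&out, vars); err != nil {
		panic(err)
	}
	fmt.Print(out.String())
}
```

Escaping an inner template with backticks, as documented later in this patch in `agent-management.md`, keeps the inner `{{ ... }}` action literal during this evaluation pass.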
diff --git a/component/otelcol/internal/fanoutconsumer/logs.go b/component/otelcol/internal/fanoutconsumer/logs.go index a01202686e01..a8ee4df45b7f 100644 --- a/component/otelcol/internal/fanoutconsumer/logs.go +++ b/component/otelcol/internal/fanoutconsumer/logs.go @@ -29,6 +29,10 @@ func Logs(in []otelcol.Consumer) otelconsumer.Logs { for i := 0; i < len(in)-1; i++ { consumer := in[i] + if consumer == nil { + continue + } + if consumer.Capabilities().MutatesData { clone = append(clone, consumer) } else { @@ -40,10 +44,12 @@ func Logs(in []otelcol.Consumer) otelconsumer.Logs { // The final consumer can be given to the passthrough list regardless of // whether it mutates as long as there's no other read-only consumers. - if len(passthrough) == 0 || !last.Capabilities().MutatesData { - passthrough = append(passthrough, last) - } else { - clone = append(clone, last) + if last != nil { + if len(passthrough) == 0 || !last.Capabilities().MutatesData { + passthrough = append(passthrough, last) + } else { + clone = append(clone, last) + } } return &logsFanout{ diff --git a/component/otelcol/receiver/prometheus/prometheus.go b/component/otelcol/receiver/prometheus/prometheus.go index 7928f504287b..96cda03ac929 100644 --- a/component/otelcol/receiver/prometheus/prometheus.go +++ b/component/otelcol/receiver/prometheus/prometheus.go @@ -20,8 +20,8 @@ import ( "github.com/prometheus/prometheus/storage" otelcomponent "go.opentelemetry.io/collector/component" otelreceiver "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/trace" + metricNoop "go.opentelemetry.io/otel/metric/noop" + traceNoop "go.opentelemetry.io/otel/trace/noop" ) func init() { @@ -109,8 +109,8 @@ func (c *Component) Update(newConfig component.Arguments) error { Logger: zapadapter.New(c.opts.Logger), // TODO(tpaschalis): expose tracing and logging statistics. - TracerProvider: trace.NewNoopTracerProvider(), - MeterProvider: noop.NewMeterProvider(), + TracerProvider: traceNoop.NewTracerProvider(), + MeterProvider: metricNoop.NewMeterProvider(), ReportComponentStatus: func(*otelcomponent.StatusEvent) error { return nil diff --git a/component/prometheus/exporter/windows/config.go b/component/prometheus/exporter/windows/config.go index 674d355011c5..cc4cb20e4b17 100644 --- a/component/prometheus/exporter/windows/config.go +++ b/component/prometheus/exporter/windows/config.go @@ -6,72 +6,6 @@ import ( windows_integration "github.com/grafana/agent/pkg/integrations/windows_exporter" ) -// DefaultArguments holds non-zero default options for Arguments when it is -// unmarshaled from YAML. -// -// Some defaults are populated from init functions in the github.com/grafana/agent/pkg/integrations/windows_exporter package. 
- -var DefaultArguments = Arguments{ - EnabledCollectors: strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), - Dfsr: DfsrConfig{ - SourcesEnabled: strings.Split(windows_integration.DefaultConfig.Dfsr.SourcesEnabled, ","), - }, - Exchange: ExchangeConfig{ - EnabledList: strings.Split(windows_integration.DefaultConfig.Exchange.EnabledList, ","), - }, - IIS: IISConfig{ - AppBlackList: windows_integration.DefaultConfig.IIS.AppBlackList, - AppWhiteList: windows_integration.DefaultConfig.IIS.AppWhiteList, - SiteBlackList: windows_integration.DefaultConfig.IIS.SiteBlackList, - SiteWhiteList: windows_integration.DefaultConfig.IIS.SiteWhiteList, - AppInclude: windows_integration.DefaultConfig.IIS.AppInclude, - AppExclude: windows_integration.DefaultConfig.IIS.AppExclude, - SiteInclude: windows_integration.DefaultConfig.IIS.SiteInclude, - SiteExclude: windows_integration.DefaultConfig.IIS.SiteExclude, - }, - LogicalDisk: LogicalDiskConfig{ - BlackList: windows_integration.DefaultConfig.LogicalDisk.BlackList, - WhiteList: windows_integration.DefaultConfig.LogicalDisk.WhiteList, - Include: windows_integration.DefaultConfig.LogicalDisk.Include, - Exclude: windows_integration.DefaultConfig.LogicalDisk.Exclude, - }, - MSMQ: MSMQConfig{ - Where: windows_integration.DefaultConfig.MSMQ.Where, - }, - MSSQL: MSSQLConfig{ - EnabledClasses: strings.Split(windows_integration.DefaultConfig.MSSQL.EnabledClasses, ","), - }, - Network: NetworkConfig{ - BlackList: windows_integration.DefaultConfig.Network.BlackList, - WhiteList: windows_integration.DefaultConfig.Network.WhiteList, - Include: windows_integration.DefaultConfig.Network.Include, - Exclude: windows_integration.DefaultConfig.Network.Exclude, - }, - Process: ProcessConfig{ - BlackList: windows_integration.DefaultConfig.Process.BlackList, - WhiteList: windows_integration.DefaultConfig.Process.WhiteList, - Include: windows_integration.DefaultConfig.Process.Include, - Exclude: windows_integration.DefaultConfig.Process.Exclude, - }, - ScheduledTask: ScheduledTaskConfig{ - Include: windows_integration.DefaultConfig.ScheduledTask.Include, - Exclude: windows_integration.DefaultConfig.ScheduledTask.Exclude, - }, - Service: ServiceConfig{ - UseApi: windows_integration.DefaultConfig.Service.UseApi, - Where: windows_integration.DefaultConfig.Service.Where, - }, - SMTP: SMTPConfig{ - BlackList: windows_integration.DefaultConfig.SMTP.BlackList, - WhiteList: windows_integration.DefaultConfig.SMTP.WhiteList, - Include: windows_integration.DefaultConfig.SMTP.Include, - Exclude: windows_integration.DefaultConfig.SMTP.Exclude, - }, - TextFile: TextFileConfig{ - TextFileDirectory: windows_integration.DefaultConfig.TextFile.TextFileDirectory, - }, -} - // Arguments is used for controlling for this exporter. type Arguments struct { // Collectors to mark as enabled @@ -92,11 +26,6 @@ type Arguments struct { TextFile TextFileConfig `river:"text_file,block,optional"` } -// SetToDefault implements river.Defaulter. -func (a *Arguments) SetToDefault() { - *a = DefaultArguments -} - // Convert converts the component's Arguments to the integration's Config. 
func (a *Arguments) Convert() *windows_integration.Config { return &windows_integration.Config{ diff --git a/component/prometheus/exporter/windows/config_default_windows_test.go b/component/prometheus/exporter/windows/config_default_windows_test.go index c17f6e33fa60..9fddd1d635eb 100644 --- a/component/prometheus/exporter/windows/config_default_windows_test.go +++ b/component/prometheus/exporter/windows/config_default_windows_test.go @@ -1,10 +1,8 @@ package windows import ( - "strings" "testing" - windows_integration "github.com/grafana/agent/pkg/integrations/windows_exporter" "github.com/grafana/river" "github.com/stretchr/testify/require" ) @@ -14,26 +12,26 @@ func TestRiverUnmarshalWithDefaultConfig(t *testing.T) { err := river.Unmarshal([]byte(""), &args) require.NoError(t, err) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), args.EnabledCollectors) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.Dfsr.SourcesEnabled, ","), args.Dfsr.SourcesEnabled) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.Exchange.EnabledList, ","), args.Exchange.EnabledList) - require.Equal(t, windows_integration.DefaultConfig.IIS.AppExclude, args.IIS.AppExclude) - require.Equal(t, windows_integration.DefaultConfig.IIS.AppInclude, args.IIS.AppInclude) - require.Equal(t, windows_integration.DefaultConfig.IIS.SiteExclude, args.IIS.SiteExclude) - require.Equal(t, windows_integration.DefaultConfig.IIS.SiteInclude, args.IIS.SiteInclude) - require.Equal(t, windows_integration.DefaultConfig.LogicalDisk.Exclude, args.LogicalDisk.Exclude) - require.Equal(t, windows_integration.DefaultConfig.LogicalDisk.Include, args.LogicalDisk.Include) - require.Equal(t, windows_integration.DefaultConfig.MSMQ.Where, args.MSMQ.Where) - require.Equal(t, strings.Split(windows_integration.DefaultConfig.MSSQL.EnabledClasses, ","), args.MSSQL.EnabledClasses) - require.Equal(t, windows_integration.DefaultConfig.Network.Exclude, args.Network.Exclude) - require.Equal(t, windows_integration.DefaultConfig.Network.Include, args.Network.Include) - require.Equal(t, windows_integration.DefaultConfig.Process.Exclude, args.Process.Exclude) - require.Equal(t, windows_integration.DefaultConfig.Process.Include, args.Process.Include) - require.Equal(t, windows_integration.DefaultConfig.ScheduledTask.Exclude, args.ScheduledTask.Exclude) - require.Equal(t, windows_integration.DefaultConfig.ScheduledTask.Include, args.ScheduledTask.Include) - require.Equal(t, windows_integration.DefaultConfig.Service.UseApi, args.Service.UseApi) - require.Equal(t, windows_integration.DefaultConfig.Service.Where, args.Service.Where) - require.Equal(t, windows_integration.DefaultConfig.SMTP.Exclude, args.SMTP.Exclude) - require.Equal(t, windows_integration.DefaultConfig.SMTP.Include, args.SMTP.Include) - require.Equal(t, windows_integration.DefaultConfig.TextFile.TextFileDirectory, args.TextFile.TextFileDirectory) + require.Equal(t, DefaultArguments.EnabledCollectors, args.EnabledCollectors) + require.Equal(t, DefaultArguments.Dfsr.SourcesEnabled, args.Dfsr.SourcesEnabled) + require.Equal(t, DefaultArguments.Exchange.EnabledList, args.Exchange.EnabledList) + require.Equal(t, DefaultArguments.IIS.AppExclude, args.IIS.AppExclude) + require.Equal(t, DefaultArguments.IIS.AppInclude, args.IIS.AppInclude) + require.Equal(t, DefaultArguments.IIS.SiteExclude, args.IIS.SiteExclude) + require.Equal(t, DefaultArguments.IIS.SiteInclude, args.IIS.SiteInclude) + require.Equal(t, 
DefaultArguments.LogicalDisk.Exclude, args.LogicalDisk.Exclude) + require.Equal(t, DefaultArguments.LogicalDisk.Include, args.LogicalDisk.Include) + require.Equal(t, DefaultArguments.MSMQ.Where, args.MSMQ.Where) + require.Equal(t, DefaultArguments.MSSQL.EnabledClasses, args.MSSQL.EnabledClasses) + require.Equal(t, DefaultArguments.Network.Exclude, args.Network.Exclude) + require.Equal(t, DefaultArguments.Network.Include, args.Network.Include) + require.Equal(t, DefaultArguments.Process.Exclude, args.Process.Exclude) + require.Equal(t, DefaultArguments.Process.Include, args.Process.Include) + require.Equal(t, DefaultArguments.ScheduledTask.Exclude, args.ScheduledTask.Exclude) + require.Equal(t, DefaultArguments.ScheduledTask.Include, args.ScheduledTask.Include) + require.Equal(t, DefaultArguments.Service.UseApi, args.Service.UseApi) + require.Equal(t, DefaultArguments.Service.Where, args.Service.Where) + require.Equal(t, DefaultArguments.SMTP.Exclude, args.SMTP.Exclude) + require.Equal(t, DefaultArguments.SMTP.Include, args.SMTP.Include) + require.Equal(t, DefaultArguments.TextFile.TextFileDirectory, args.TextFile.TextFileDirectory) } diff --git a/component/prometheus/exporter/windows/config_windows.go b/component/prometheus/exporter/windows/config_windows.go new file mode 100644 index 000000000000..b634788eda8c --- /dev/null +++ b/component/prometheus/exporter/windows/config_windows.go @@ -0,0 +1,75 @@ +package windows + +import ( + windows_integration "github.com/grafana/agent/pkg/integrations/windows_exporter" + col "github.com/prometheus-community/windows_exporter/pkg/collector" + "strings" +) + +// DefaultArguments holds non-zero default options for Arguments when it is +// unmarshaled from YAML. +var DefaultArguments = Arguments{ + EnabledCollectors: strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), + Dfsr: DfsrConfig{ + SourcesEnabled: strings.Split(col.ConfigDefaults.Dfsr.DfsrEnabledCollectors, ","), + }, + Exchange: ExchangeConfig{ + EnabledList: strings.Split(col.ConfigDefaults.Exchange.CollectorsEnabled, ","), + }, + IIS: IISConfig{ + AppBlackList: col.ConfigDefaults.Iis.AppExclude, + AppWhiteList: col.ConfigDefaults.Iis.AppInclude, + SiteBlackList: col.ConfigDefaults.Iis.SiteExclude, + SiteWhiteList: col.ConfigDefaults.Iis.SiteInclude, + AppInclude: col.ConfigDefaults.Iis.AppInclude, + AppExclude: col.ConfigDefaults.Iis.AppExclude, + SiteInclude: col.ConfigDefaults.Iis.SiteInclude, + SiteExclude: col.ConfigDefaults.Iis.SiteExclude, + }, + LogicalDisk: LogicalDiskConfig{ + BlackList: col.ConfigDefaults.LogicalDisk.VolumeExclude, + WhiteList: col.ConfigDefaults.LogicalDisk.VolumeInclude, + Include: col.ConfigDefaults.LogicalDisk.VolumeInclude, + Exclude: col.ConfigDefaults.LogicalDisk.VolumeExclude, + }, + MSMQ: MSMQConfig{ + Where: col.ConfigDefaults.Msmq.QueryWhereClause, + }, + MSSQL: MSSQLConfig{ + EnabledClasses: strings.Split(col.ConfigDefaults.Mssql.EnabledCollectors, ","), + }, + Network: NetworkConfig{ + BlackList: col.ConfigDefaults.Net.NicExclude, + WhiteList: col.ConfigDefaults.Net.NicInclude, + Include: col.ConfigDefaults.Net.NicInclude, + Exclude: col.ConfigDefaults.Net.NicExclude, + }, + Process: ProcessConfig{ + BlackList: col.ConfigDefaults.Process.ProcessExclude, + WhiteList: col.ConfigDefaults.Process.ProcessInclude, + Include: col.ConfigDefaults.Process.ProcessInclude, + Exclude: col.ConfigDefaults.Process.ProcessExclude, + }, + ScheduledTask: ScheduledTaskConfig{ + Include: col.ConfigDefaults.ScheduledTask.TaskInclude, + 
Exclude: col.ConfigDefaults.ScheduledTask.TaskExclude, + }, + Service: ServiceConfig{ + UseApi: "false", + Where: col.ConfigDefaults.Service.ServiceWhereClause, + }, + SMTP: SMTPConfig{ + BlackList: col.ConfigDefaults.Smtp.ServerExclude, + WhiteList: col.ConfigDefaults.Smtp.ServerInclude, + Include: col.ConfigDefaults.Smtp.ServerInclude, + Exclude: col.ConfigDefaults.Smtp.ServerExclude, + }, + TextFile: TextFileConfig{ + TextFileDirectory: col.ConfigDefaults.Textfile.TextFileDirectories, + }, +} + +// SetToDefault implements river.Defaulter. +func (a *Arguments) SetToDefault() { + *a = DefaultArguments +} diff --git a/component/prometheus/operator/common/crdmanager.go b/component/prometheus/operator/common/crdmanager.go index 9f8bd55f79f6..85f13719e970 100644 --- a/component/prometheus/operator/common/crdmanager.go +++ b/component/prometheus/operator/common/crdmanager.go @@ -42,12 +42,19 @@ const informerSyncTimeout = 10 * time.Second // crdManager is all of the fields required to run a crd based component. // on update, this entire thing should be recreated and restarted type crdManager struct { - mut sync.Mutex - discoveryConfigs map[string]discovery.Configs - scrapeConfigs map[string]*config.ScrapeConfig - debugInfo map[string]*operator.DiscoveredResource - discoveryManager *discovery.Manager - scrapeManager *scrape.Manager + mut sync.Mutex + + // these maps are keyed by job name + discoveryConfigs map[string]discovery.Configs + scrapeConfigs map[string]*config.ScrapeConfig + + // list of keys to the above maps for a given resource by `ns/name` + crdsToMapKeys map[string][]string + // debug info by `kind/ns/name` + debugInfo map[string]*operator.DiscoveredResource + + discoveryManager discoveryManager + scrapeManager scrapeManager clusteringUpdated chan struct{} ls labelstore.LabelStore @@ -80,6 +87,7 @@ func newCrdManager(opts component.Options, cluster cluster.Cluster, logger log.L cluster: cluster, discoveryConfigs: map[string]discovery.Configs{}, scrapeConfigs: map[string]*config.ScrapeConfig{}, + crdsToMapKeys: map[string][]string{}, debugInfo: map[string]*operator.DiscoveredResource{}, kind: kind, clusteringUpdated: make(chan struct{}, 1), @@ -392,6 +400,7 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { AdditionalRelabelConfigs: c.args.RelabelConfigs, ScrapeOptions: c.args.Scrape, } + mapKeys := []string{} for i, ep := range pm.Spec.PodMetricsEndpoints { var scrapeConfig *config.ScrapeConfig scrapeConfig, err = gen.GeneratePodMonitorConfig(pm, ep, i) @@ -400,6 +409,7 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { level.Error(c.logger).Log("name", pm.Name, "err", err, "msg", "error generating scrapeconfig from podmonitor") break } + mapKeys = append(mapKeys, scrapeConfig.JobName) c.mut.Lock() c.discoveryConfigs[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs c.scrapeConfigs[scrapeConfig.JobName] = scrapeConfig @@ -409,6 +419,9 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { c.addDebugInfo(pm.Namespace, pm.Name, err) return } + c.mut.Lock() + c.crdsToMapKeys[fmt.Sprintf("%s/%s", pm.Namespace, pm.Name)] = mapKeys + c.mut.Unlock() if err = c.apply(); err != nil { level.Error(c.logger).Log("name", pm.Name, "err", err, "msg", "error applying scrape configs from "+c.kind) } @@ -442,6 +455,8 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { AdditionalRelabelConfigs: c.args.RelabelConfigs, ScrapeOptions: c.args.Scrape, } + + mapKeys := []string{} for i, ep := range sm.Spec.Endpoints { var scrapeConfig 
*config.ScrapeConfig scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i) @@ -450,6 +465,7 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error generating scrapeconfig from serviceMonitor") break } + mapKeys = append(mapKeys, scrapeConfig.JobName) c.mut.Lock() c.discoveryConfigs[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs c.scrapeConfigs[scrapeConfig.JobName] = scrapeConfig @@ -459,6 +475,9 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { c.addDebugInfo(sm.Namespace, sm.Name, err) return } + c.mut.Lock() + c.crdsToMapKeys[fmt.Sprintf("%s/%s", sm.Namespace, sm.Name)] = mapKeys + c.mut.Unlock() if err = c.apply(); err != nil { level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error applying scrape configs from "+c.kind) } @@ -503,6 +522,7 @@ func (c *crdManager) addProbe(p *promopv1.Probe) { c.mut.Lock() c.discoveryConfigs[pmc.JobName] = pmc.ServiceDiscoveryConfigs c.scrapeConfigs[pmc.JobName] = pmc + c.crdsToMapKeys[fmt.Sprintf("%s/%s", p.Namespace, p.Name)] = []string{pmc.JobName} c.mut.Unlock() if err = c.apply(); err != nil { @@ -533,12 +553,10 @@ func (c *crdManager) onDeleteProbe(obj interface{}) { func (c *crdManager) clearConfigs(ns, name string) { c.mut.Lock() defer c.mut.Unlock() - prefix := fmt.Sprintf("%s/%s/%s", c.kind, ns, name) - for k := range c.discoveryConfigs { - if strings.HasPrefix(k, prefix) { - delete(c.discoveryConfigs, k) - delete(c.scrapeConfigs, k) - } + + for _, k := range c.crdsToMapKeys[fmt.Sprintf("%s/%s", ns, name)] { + delete(c.discoveryConfigs, k) + delete(c.scrapeConfigs, k) } - delete(c.debugInfo, prefix) + delete(c.debugInfo, fmt.Sprintf("%s/%s/%s", c.kind, ns, name)) } diff --git a/component/prometheus/operator/common/crdmanager_test.go b/component/prometheus/operator/common/crdmanager_test.go new file mode 100644 index 000000000000..7e3cd75fbd37 --- /dev/null +++ b/component/prometheus/operator/common/crdmanager_test.go @@ -0,0 +1,168 @@ +package common + +import ( + "testing" + + "golang.org/x/exp/maps" + + "github.com/go-kit/log" + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/prometheus/operator" + "github.com/grafana/agent/service/cluster" + "github.com/grafana/agent/service/labelstore" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/scrape" + "k8s.io/apimachinery/pkg/util/intstr" + + promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stretchr/testify/require" +) + +func TestClearConfigsSameNsSamePrefix(t *testing.T) { + logger := log.NewNopLogger() + m := newCrdManager( + component.Options{ + Logger: logger, + GetServiceData: func(name string) (interface{}, error) { return nil, nil }, + }, + cluster.Mock(), + logger, + &operator.DefaultArguments, + KindServiceMonitor, + labelstore.New(logger), + ) + + m.discoveryManager = newMockDiscoveryManager() + m.scrapeManager = newMockScrapeManager() + + targetPort := intstr.FromInt(9090) + m.onAddServiceMonitor(&promopv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "svcmonitor", + }, + Spec: promopv1.ServiceMonitorSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: 
map[string]string{ + "group": "my-group", + }, + }, + Endpoints: []promopv1.Endpoint{ + { + TargetPort: &targetPort, + ScrapeTimeout: "5s", + Interval: "10s", + }, + }, + }, + }) + m.onAddServiceMonitor(&promopv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "svcmonitor-another", + }, + Spec: promopv1.ServiceMonitorSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "group": "my-group", + }, + }, + Endpoints: []promopv1.Endpoint{ + { + TargetPort: &targetPort, + ScrapeTimeout: "5s", + Interval: "10s", + }, + }, + }}) + + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another/0", "serviceMonitor/monitoring/svcmonitor/0"}, maps.Keys(m.discoveryConfigs)) + m.clearConfigs("monitoring", "svcmonitor") + require.ElementsMatch(t, []string{"monitoring/svcmonitor", "monitoring/svcmonitor-another"}, maps.Keys(m.crdsToMapKeys)) + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another/0"}, maps.Keys(m.discoveryConfigs)) + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another"}, maps.Keys(m.debugInfo)) +} + +func TestClearConfigsProbe(t *testing.T) { + logger := log.NewNopLogger() + m := newCrdManager( + component.Options{ + Logger: logger, + GetServiceData: func(name string) (interface{}, error) { return nil, nil }, + }, + cluster.Mock(), + logger, + &operator.DefaultArguments, + KindProbe, + labelstore.New(logger), + ) + + m.discoveryManager = newMockDiscoveryManager() + m.scrapeManager = newMockScrapeManager() + + m.onAddProbe(&promopv1.Probe{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "probe", + }, + Spec: promopv1.ProbeSpec{}, + }) + m.onAddProbe(&promopv1.Probe{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "probe-another", + }, + Spec: promopv1.ProbeSpec{}}) + + require.ElementsMatch(t, []string{"probe/monitoring/probe-another", "probe/monitoring/probe"}, maps.Keys(m.discoveryConfigs)) + m.clearConfigs("monitoring", "probe") + require.ElementsMatch(t, []string{"monitoring/probe", "monitoring/probe-another"}, maps.Keys(m.crdsToMapKeys)) + require.ElementsMatch(t, []string{"probe/monitoring/probe-another"}, maps.Keys(m.discoveryConfigs)) + require.ElementsMatch(t, []string{"probe/monitoring/probe-another"}, maps.Keys(m.debugInfo)) +} + +type mockDiscoveryManager struct { +} + +func newMockDiscoveryManager() *mockDiscoveryManager { + return &mockDiscoveryManager{} +} + +func (m *mockDiscoveryManager) Run() error { + return nil +} + +func (m *mockDiscoveryManager) SyncCh() <-chan map[string][]*targetgroup.Group { + return nil +} + +func (m *mockDiscoveryManager) ApplyConfig(cfg map[string]discovery.Configs) error { + return nil +} + +type mockScrapeManager struct { +} + +func newMockScrapeManager() *mockScrapeManager { + return &mockScrapeManager{} +} + +func (m *mockScrapeManager) Run(tsets <-chan map[string][]*targetgroup.Group) error { + return nil +} + +func (m *mockScrapeManager) Stop() { + +} + +func (m *mockScrapeManager) TargetsActive() map[string][]*scrape.Target { + return nil +} + +func (m *mockScrapeManager) ApplyConfig(cfg *config.Config) error { + return nil +} diff --git a/component/prometheus/operator/common/interfaces.go b/component/prometheus/operator/common/interfaces.go new file mode 100644 index 000000000000..4652154f6dc6 --- /dev/null +++ b/component/prometheus/operator/common/interfaces.go @@ -0,0 +1,23 @@ +package common + +import ( + "github.com/prometheus/prometheus/config" + 
"github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/scrape" +) + +// discoveryManager is an interface around discovery.Manager +type discoveryManager interface { + Run() error + SyncCh() <-chan map[string][]*targetgroup.Group + ApplyConfig(cfg map[string]discovery.Configs) error +} + +// scrapeManager is an interface around scrape.Manager +type scrapeManager interface { + Run(tsets <-chan map[string][]*targetgroup.Group) error + Stop() + TargetsActive() map[string][]*scrape.Target + ApplyConfig(cfg *config.Config) error +} diff --git a/component/pyroscope/scrape/scrape_loop.go b/component/pyroscope/scrape/scrape_loop.go index 2b74930ed191..a1f7d2a6c1b7 100644 --- a/component/pyroscope/scrape/scrape_loop.go +++ b/component/pyroscope/scrape/scrape_loop.go @@ -228,7 +228,7 @@ func (t *scrapeLoop) scrape() { } } if err := t.fetchProfile(scrapeCtx, profileType, buf); err != nil { - level.Debug(t.logger).Log("msg", "fetch profile failed", "target", t.Labels().String(), "err", err) + level.Error(t.logger).Log("msg", "fetch profile failed", "target", t.Labels().String(), "err", err) t.updateTargetStatus(start, err) return } diff --git a/converter/internal/promtailconvert/internal/build/docker_sd.go b/converter/internal/promtailconvert/internal/build/docker_sd.go new file mode 100644 index 000000000000..5fcc953881f8 --- /dev/null +++ b/converter/internal/promtailconvert/internal/build/docker_sd.go @@ -0,0 +1,95 @@ +package build + +import ( + "time" + + "github.com/grafana/agent/component/common/loki" + flow_relabel "github.com/grafana/agent/component/common/relabel" + "github.com/grafana/agent/component/discovery" + "github.com/grafana/agent/component/discovery/docker" + loki_docker "github.com/grafana/agent/component/loki/source/docker" + "github.com/grafana/agent/converter/internal/common" + "github.com/prometheus/prometheus/discovery/moby" +) + +func (s *ScrapeConfigBuilder) AppendDockerPipeline() { + if len(s.cfg.DockerSDConfigs) == 0 { + return + } + + for i, sd := range s.cfg.DockerSDConfigs { + compLabel := common.LabelWithIndex(i, s.globalCtx.LabelPrefix, s.cfg.JobName) + + // Add discovery.docker + s.f.Body().AppendBlock(common.NewBlockWithOverride( + []string{"discovery", "docker"}, + compLabel, + toDiscoveryDocker(sd), + )) + + // The targets output from above component + targets := "discovery.docker." 
+ compLabel + ".targets" + + // Add loki.source.docker + overrideHook := func(val interface{}) interface{} { + switch val.(type) { + case []discovery.Target: // override targets expression to our string + return common.CustomTokenizer{Expr: targets} + case flow_relabel.Rules: // use the relabel rules defined for this pipeline + return common.CustomTokenizer{Expr: s.getOrNewDiscoveryRelabelRules()} + } + return val + } + + forwardTo := s.getOrNewProcessStageReceivers() // forward to process stage, which forwards to writers + s.f.Body().AppendBlock(common.NewBlockWithOverrideFn( + []string{"loki", "source", "docker"}, + compLabel, + toLokiSourceDocker(sd, forwardTo), + overrideHook, + )) + } +} + +func toLokiSourceDocker(sd *moby.DockerSDConfig, forwardTo []loki.LogsReceiver) *loki_docker.Arguments { + return &loki_docker.Arguments{ + Host: sd.Host, + Targets: nil, + ForwardTo: forwardTo, + Labels: nil, + RelabelRules: flow_relabel.Rules{}, + HTTPClientConfig: common.ToHttpClientConfig(&sd.HTTPClientConfig), + RefreshInterval: time.Duration(sd.RefreshInterval), + } +} + +func toDiscoveryDocker(sdConfig *moby.DockerSDConfig) *docker.Arguments { + if sdConfig == nil { + return nil + } + + return &docker.Arguments{ + Host: sdConfig.Host, + Port: sdConfig.Port, + HostNetworkingHost: sdConfig.HostNetworkingHost, + RefreshInterval: time.Duration(sdConfig.RefreshInterval), + Filters: toFlowDockerSDFilters(sdConfig.Filters), + HTTPClientConfig: *common.ToHttpClientConfig(&sdConfig.HTTPClientConfig), + } +} + +func toFlowDockerSDFilters(filters []moby.Filter) []docker.Filter { + if len(filters) == 0 { + return nil + } + + flowFilters := make([]docker.Filter, len(filters)) + for i, filter := range filters { + flowFilters[i] = docker.Filter{ + Name: filter.Name, + Values: filter.Values, + } + } + + return flowFilters +} diff --git a/converter/internal/promtailconvert/internal/build/scrape_builder.go b/converter/internal/promtailconvert/internal/build/scrape_builder.go index fc26d29cc832..c7288be0fc01 100644 --- a/converter/internal/promtailconvert/internal/build/scrape_builder.go +++ b/converter/internal/promtailconvert/internal/build/scrape_builder.go @@ -103,6 +103,11 @@ func (s *ScrapeConfigBuilder) getOrNewLokiRelabel() string { args := lokirelabel.Arguments{ ForwardTo: s.getOrNewProcessStageReceivers(), RelabelConfigs: component.ToFlowRelabelConfigs(s.cfg.RelabelConfigs), + // max_cache_size doesnt exist in static, and we need to manually set it to default. + // Since the default is 10_000 if we didnt set the value, it would compare the default 10k to 0 and emit 0. + // We actually dont want to emit anything since this setting doesnt exist in static, setting to 10k matches the default + // and ensures it doesnt get emitted. 
+ MaxCacheSize: lokirelabel.DefaultArguments.MaxCacheSize, } compLabel := common.LabelForParts(s.globalCtx.LabelPrefix, s.cfg.JobName) s.f.Body().AppendBlock(common.NewBlockWithOverride([]string{"loki", "relabel"}, compLabel, args)) diff --git a/converter/internal/promtailconvert/internal/build/service_discovery.go b/converter/internal/promtailconvert/internal/build/service_discovery.go index 6219bc2b121d..3405e0966a34 100644 --- a/converter/internal/promtailconvert/internal/build/service_discovery.go +++ b/converter/internal/promtailconvert/internal/build/service_discovery.go @@ -69,10 +69,6 @@ func toDiscoveryConfig(cfg *scrapeconfig.Config) prom_discover.Configs { sdConfigs = append(sdConfigs, sd) } - for _, sd := range cfg.DockerSDConfigs { - sdConfigs = append(sdConfigs, sd) - } - for _, sd := range cfg.ServiceDiscoveryConfig.DNSSDConfigs { sdConfigs = append(sdConfigs, sd) } diff --git a/converter/internal/promtailconvert/promtailconvert.go b/converter/internal/promtailconvert/promtailconvert.go index 21e1be683217..4983631c04be 100644 --- a/converter/internal/promtailconvert/promtailconvert.go +++ b/converter/internal/promtailconvert/promtailconvert.go @@ -166,4 +166,7 @@ func appendScrapeConfig( b.AppendAzureEventHubs() b.AppendGelfConfig() b.AppendHerokuDrainConfig() + + // Docker has a special treatment in Promtail, we replicate it here. + b.AppendDockerPipeline() } diff --git a/converter/internal/promtailconvert/testdata/cloudflare_relabel.river b/converter/internal/promtailconvert/testdata/cloudflare_relabel.river index 201ce8f30356..014d812eab61 100644 --- a/converter/internal/promtailconvert/testdata/cloudflare_relabel.river +++ b/converter/internal/promtailconvert/testdata/cloudflare_relabel.river @@ -5,7 +5,6 @@ loki.relabel "fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.cloudflare "fun" { diff --git a/converter/internal/promtailconvert/testdata/docker.river b/converter/internal/promtailconvert/testdata/docker.river index c55f4f5bba4d..944a06360a19 100644 --- a/converter/internal/promtailconvert/testdata/docker.river +++ b/converter/internal/promtailconvert/testdata/docker.river @@ -28,6 +28,30 @@ discovery.docker "fun" { } } +loki.source.docker "fun" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.fun.targets + forward_to = [] + relabel_rules = null + + http_client_config { + basic_auth { + username = "robin" + password_file = "/home/robin/.password" + } + proxy_url = "http://proxy.example.com" + + tls_config { + ca_file = "/home/robin/.ca" + cert_file = "/home/robin/.cert" + key_file = "/home/robin/.key" + server_name = "example.local" + insecure_skip_verify = true + } + } + refresh_interval = "10s" +} + discovery.docker "fun_2" { host = "unix:///var/run/docker.sock" port = 54321 @@ -52,14 +76,25 @@ discovery.docker "fun_2" { } } -local.file_match "fun" { - path_targets = concat( - discovery.docker.fun.targets, - discovery.docker.fun_2.targets, - ) -} +loki.source.docker "fun_2" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.fun_2.targets + forward_to = [] + relabel_rules = null + + http_client_config { + oauth2 { + client_id = "client_id" + client_secret_file = "foo/bar" + scopes = ["scope1", "scope2"] + token_url = "https://example/oauth2/token" + endpoint_params = { + host = "example", + path = "/oauth2/token", + } -loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + tls_config { } + } + } + refresh_interval = "10s" } diff --git 
a/converter/internal/promtailconvert/testdata/docker_relabel.river b/converter/internal/promtailconvert/testdata/docker_relabel.river new file mode 100644 index 000000000000..0c06ffa48124 --- /dev/null +++ b/converter/internal/promtailconvert/testdata/docker_relabel.river @@ -0,0 +1,63 @@ +discovery.docker "flog_scrape" { + host = "unix:///var/run/docker.sock" + refresh_interval = "5s" +} + +discovery.relabel "flog_scrape" { + targets = [] + + rule { + source_labels = ["__meta_docker_container_name"] + regex = "/(.*)" + target_label = "container" + } +} + +loki.source.docker "flog_scrape" { + host = "unix:///var/run/docker.sock" + targets = discovery.docker.flog_scrape.targets + forward_to = [loki.write.default.receiver] + relabel_rules = discovery.relabel.flog_scrape.rules + refresh_interval = "5s" +} + +discovery.docker "scrape_two" { + host = "unix:///var/run/second_docker_why_not.sock" +} + +loki.process "scrape_two" { + forward_to = [loki.write.default.receiver] + + stage.json { + expressions = { + face = "smiley", + hand = "thumbs-up", + } + source = "video" + drop_malformed = true + } +} + +discovery.relabel "scrape_two" { + targets = [] + + rule { + source_labels = ["__trail__"] + target_label = "__path__" + } +} + +loki.source.docker "scrape_two" { + host = "unix:///var/run/second_docker_why_not.sock" + targets = discovery.docker.scrape_two.targets + forward_to = [loki.process.scrape_two.receiver] + relabel_rules = discovery.relabel.scrape_two.rules +} + +loki.write "default" { + endpoint { + url = "http://gateway:3100/loki/api/v1/push" + tenant_id = "tenant1" + } + external_labels = {} +} diff --git a/converter/internal/promtailconvert/testdata/docker_relabel.yaml b/converter/internal/promtailconvert/testdata/docker_relabel.yaml new file mode 100644 index 000000000000..a9b090183979 --- /dev/null +++ b/converter/internal/promtailconvert/testdata/docker_relabel.yaml @@ -0,0 +1,37 @@ +tracing: { enabled: false } +server: + http_listen_port: 9080 + grpc_listen_port: 0 + register_instrumentation: false + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://gateway:3100/loki/api/v1/push + tenant_id: tenant1 + +scrape_configs: + - job_name: flog_scrape + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + relabel_configs: + - source_labels: [ '__meta_docker_container_name' ] + regex: '/(.*)' + target_label: 'container' + - job_name: scrape_two + docker_sd_configs: + - host: unix:///var/run/second_docker_why_not.sock + refresh_interval: 1m + pipeline_stages: + - json: + expressions: + face: smiley + hand: thumbs-up + source: video + drop_malformed: true + relabel_configs: + - source_labels: + - __trail__ + target_label: __path__ diff --git a/converter/internal/promtailconvert/testdata/mixed_pipeline.river b/converter/internal/promtailconvert/testdata/mixed_pipeline.river new file mode 100644 index 000000000000..24fe5221cfc5 --- /dev/null +++ b/converter/internal/promtailconvert/testdata/mixed_pipeline.river @@ -0,0 +1,75 @@ +discovery.kubernetes "uber_pipeline" { + role = "pod" + kubeconfig_file = "/home/toby/.kube/config" +} + +discovery.consulagent "uber_pipeline" { + datacenter = "bigdata" +} + +discovery.relabel "uber_pipeline" { + targets = concat( + discovery.kubernetes.uber_pipeline.targets, + discovery.consulagent.uber_pipeline.targets, + ) + + rule { + source_labels = ["__trail__"] + target_label = "__path__" + } +} + +local.file_match "uber_pipeline" { + path_targets = discovery.relabel.uber_pipeline.output +} + +loki.process 
"uber_pipeline" { + forward_to = [loki.write.default.receiver] + + stage.json { + expressions = { + face = "smiley", + hand = "thumbs-up", + } + source = "video" + drop_malformed = true + } +} + +loki.source.file "uber_pipeline" { + targets = local.file_match.uber_pipeline.targets + forward_to = [loki.process.uber_pipeline.receiver] +} + +loki.source.api "uber_pipeline" { + http { } + + grpc { } + graceful_shutdown_timeout = "0s" + forward_to = [loki.process.uber_pipeline.receiver] + labels = { + identity = "unidentified", + object_type = "flying", + } + relabel_rules = discovery.relabel.uber_pipeline.rules + use_incoming_timestamp = true +} + +discovery.docker "uber_pipeline" { + host = "unix:///var/run/second_docker_why_not.sock" +} + +loki.source.docker "uber_pipeline" { + host = "unix:///var/run/second_docker_why_not.sock" + targets = discovery.docker.uber_pipeline.targets + forward_to = [loki.process.uber_pipeline.receiver] + relabel_rules = discovery.relabel.uber_pipeline.rules +} + +loki.write "default" { + endpoint { + url = "http://gateway:3100/loki/api/v1/push" + tenant_id = "tenant1" + } + external_labels = {} +} diff --git a/converter/internal/promtailconvert/testdata/mixed_pipeline.yaml b/converter/internal/promtailconvert/testdata/mixed_pipeline.yaml new file mode 100644 index 000000000000..74f356e55972 --- /dev/null +++ b/converter/internal/promtailconvert/testdata/mixed_pipeline.yaml @@ -0,0 +1,48 @@ +tracing: { enabled: false } +server: + http_listen_port: 9080 + grpc_listen_port: 0 + register_instrumentation: false + +positions: + filename: /tmp/positions.yaml + +clients: + - url: http://gateway:3100/loki/api/v1/push + tenant_id: tenant1 + +scrape_configs: + # Trying to combine all the special cases in one scrape config + - job_name: uber_pipeline + # one typical SD config + kubernetes_sd_configs: + - role: pod + kubeconfig_file: /home/toby/.kube/config + + # one typical logs producing config + loki_push_api: + use_incoming_timestamp: true + labels: + identity: unidentified + object_type: flying + + # this one is handled in a special way + consulagent_sd_configs: + - server: 'localhost:8500' + datacenter: bigdata + + # this one is also handled in a special way + docker_sd_configs: + - host: unix:///var/run/second_docker_why_not.sock + refresh_interval: 1m + pipeline_stages: + - json: + expressions: + face: smiley + hand: thumbs-up + source: video + drop_malformed: true + relabel_configs: + - source_labels: + - __trail__ + target_label: __path__ diff --git a/converter/internal/promtailconvert/testdata/windowsevents_relabel.river b/converter/internal/promtailconvert/testdata/windowsevents_relabel.river index 6fa1b693dc1a..39d28dea7a67 100644 --- a/converter/internal/promtailconvert/testdata/windowsevents_relabel.river +++ b/converter/internal/promtailconvert/testdata/windowsevents_relabel.river @@ -5,7 +5,6 @@ loki.relabel "fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.windowsevent "fun" { diff --git a/converter/internal/staticconvert/internal/build/windows_exporter.go b/converter/internal/staticconvert/internal/build/windows_exporter.go index 27e9679887b8..73aa706e8235 100644 --- a/converter/internal/staticconvert/internal/build/windows_exporter.go +++ b/converter/internal/staticconvert/internal/build/windows_exporter.go @@ -47,8 +47,8 @@ func toWindowsExporter(config *windows_exporter.Config) *windows.Arguments { Network: windows.NetworkConfig{ BlackList: config.Network.BlackList, WhiteList: config.Network.WhiteList, - 
Exclude: config.Network.Include, - Include: config.Network.Exclude, + Exclude: config.Network.Exclude, + Include: config.Network.Include, }, Process: windows.ProcessConfig{ BlackList: config.Process.BlackList, diff --git a/converter/internal/staticconvert/testdata/promtail_scrape.river b/converter/internal/staticconvert/testdata/promtail_scrape.river index 46efa90bcc9f..22ee8576ed96 100644 --- a/converter/internal/staticconvert/testdata/promtail_scrape.river +++ b/converter/internal/staticconvert/testdata/promtail_scrape.river @@ -5,7 +5,6 @@ loki.relabel "logs_log_config_fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.cloudflare "logs_log_config_fun" { diff --git a/converter/internal/staticconvert/testdata/sanitize.river b/converter/internal/staticconvert/testdata/sanitize.river index 38dabad4a85c..1bf214eda874 100644 --- a/converter/internal/staticconvert/testdata/sanitize.river +++ b/converter/internal/staticconvert/testdata/sanitize.river @@ -37,7 +37,6 @@ loki.relabel "logs_integrations_integrations_windows_exporter_application" { source_labels = ["computer"] target_label = "agent_hostname" } - max_cache_size = 0 } loki.source.windowsevent "logs_integrations_integrations_windows_exporter_application" { @@ -75,7 +74,6 @@ loki.relabel "logs_integrations_integrations_windows_exporter_system" { source_labels = ["computer"] target_label = "agent_hostname" } - max_cache_size = 0 } loki.source.windowsevent "logs_integrations_integrations_windows_exporter_system" { diff --git a/docs/sources/_index.md b/docs/sources/_index.md index f744311514c5..1561d91650b8 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -9,7 +9,7 @@ title: Grafana Agent description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector weight: 350 cascade: - AGENT_RELEASE: v0.38.0 + AGENT_RELEASE: v0.38.1 OTEL_VERSION: v0.87.0 --- diff --git a/docs/sources/data-collection.md b/docs/sources/data-collection.md index a464f5b892b5..da008ce32059 100644 --- a/docs/sources/data-collection.md +++ b/docs/sources/data-collection.md @@ -30,6 +30,7 @@ The usage information includes the following details: * List of enabled feature flags ([Static] mode only). * List of enabled integrations ([Static] mode only). * List of enabled [components][] ([Flow] mode only). +* Method used to deploy Grafana Agent, for example Docker, Helm, RPM, or Operator. This list may change over time. All newly reported data is documented in the CHANGELOG. diff --git a/docs/sources/static/configuration/agent-management.md b/docs/sources/static/configuration/agent-management.md index 0feb5c78def1..af327bb17b6e 100644 --- a/docs/sources/static/configuration/agent-management.md +++ b/docs/sources/static/configuration/agent-management.md @@ -123,7 +123,6 @@ selector: > **Note:** Snippet selection is currently done in the API server. This behaviour is subject to change in the future. - ### Example response body ```yaml @@ -164,3 +163,9 @@ snippets: os: linux app: app1 ``` + +> **Note:** Base configurations and snippets can contain Go's [text/template](https://pkg.go.dev/text/template) actions. If you need to preserve the literal value of a template action, you can escape it using backticks.
For example: + +``` +{{ `{{ .template_var }}` }} +``` diff --git a/go.mod b/go.mod index 891de83235af..6ad50cf1fc35 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/go-git/go-git/v5 v5.4.2 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 - github.com/go-logr/logr v1.2.4 + github.com/go-logr/logr v1.3.0 github.com/go-sourcemap/sourcemap v2.1.3+incompatible github.com/go-sql-driver/mysql v1.7.1 github.com/gogo/protobuf v1.3.2 @@ -45,7 +45,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/cadvisor v0.47.0 github.com/google/dnsmasq_exporter v0.2.1-0.20230620100026-44b14480804a - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/google/go-jsonnet v0.18.0 github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 github.com/google/renameio/v2 v2.0.0 @@ -201,13 +201,13 @@ require ( go.opentelemetry.io/collector/semconv v0.87.0 go.opentelemetry.io/collector/service v0.87.0 go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.45.0 - go.opentelemetry.io/otel v1.19.0 + go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 go.opentelemetry.io/otel/exporters/prometheus v0.42.0 - go.opentelemetry.io/otel/metric v1.19.0 - go.opentelemetry.io/otel/sdk v1.19.0 + go.opentelemetry.io/otel/metric v1.21.0 + go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.19.0 - go.opentelemetry.io/otel/trace v1.19.0 + go.opentelemetry.io/otel/trace v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.2.1 @@ -581,8 +581,8 @@ require ( go.mongodb.org/mongo-driver v1.12.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/config/internal v0.87.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.19.0 // indirect go.opentelemetry.io/otel/bridge/opencensus v0.42.0 // indirect go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect diff --git a/go.sum b/go.sum index 0384745ab682..616b7f09f18f 100644 --- a/go.sum +++ b/go.sum @@ -732,8 +732,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.4 
h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= @@ -949,8 +949,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-jsonnet v0.18.0 h1:/6pTy6g+Jh1a1I2UMoAODkqELFiVIdOxbNwv0DDzoOg= @@ -2378,16 +2379,16 @@ go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXn go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.45.0 h1:CaagQrotQLgtDlHU6u9pE/Mf4mAwiLD8wrReIVt06lY= go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.45.0/go.mod h1:LOjFy00/ZMyMYfKFPta6kZe2cDUc1sNo/qtv1pSORWA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 h1:RsQi0qJ2imFfCvZabqzM9cNXBG8k6gXMv1A0cXRmH6A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0/go.mod h1:vsh3ySueQCiKPxFLvjWC4Z135gIa34TQ/NSqkDTZYUM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= go.opentelemetry.io/contrib/propagators/b3 v1.19.0 h1:ulz44cpm6V5oAeg5Aw9HyqGFMS6XM7untlMEhD7YzzA= go.opentelemetry.io/contrib/propagators/b3 v1.19.0/go.mod h1:OzCmE2IVS+asTI+odXQstRGVfXQ4bXv9nMBRK0nNyqQ= go.opentelemetry.io/contrib/zpages v0.45.0 h1:jIwHHGoWzJoZdbIUtWdErjL85Gni6BignnAFqDtMRL4= go.opentelemetry.io/contrib/zpages v0.45.0/go.mod h1:4mIdA5hqH6hEx9sZgV50qKfQO8aIYolUZboHmz+G7vw= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel/bridge/opencensus v0.42.0 h1:QvC+bcZkWMphWPiVqRQygMj6M0/3TOuJEO+erRA7kI8= go.opentelemetry.io/otel/bridge/opencensus v0.42.0/go.mod h1:XJojP7g5DqYdiyArix/H9i1XzPPlIUc9dGLKtF9copI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU= @@ -2408,14 +2409,14 @@ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0 h1:4jJuoeOo9W6hZn 
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0/go.mod h1:/MtYTE1SfC2QIcE0bDot6fIX+h+WvXjgTqgn9P0LNPE= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 h1:Nw7Dv4lwvGrI68+wULbcq7su9K2cebeCUrDjVrUJHxM= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0/go.mod h1:1MsF6Y7gTqosgoZvHlzcaaM8DIMNZgJh87ykokoNH7Y= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= diff --git a/internal/useragent/useragent.go b/internal/useragent/useragent.go index bb6043f97aa3..8150d5d8ba95 100644 --- a/internal/useragent/useragent.go +++ b/internal/useragent/useragent.go @@ -19,6 +19,7 @@ const ( // settable by tests var goos = runtime.GOOS +var executable = os.Executable func Get() string { parenthesis := "" @@ -27,7 +28,7 @@ func Get() string { metadata = append(metadata, mode) } metadata = append(metadata, goos) - if op := getDeployMode(); op != "" { + if op := GetDeployMode(); op != "" { metadata = append(metadata, op) } if len(metadata) > 0 { @@ -49,12 +50,18 @@ func getRunMode() string { } } -func getDeployMode() string { +// GetDeployMode returns our best-effort guess at the way Grafana Agent was deployed. +func GetDeployMode() string { op := os.Getenv(deployModeEnv) // only return known modes. Use "binary" as a default catch-all. 
switch op { case "operator", "helm", "docker", "deb", "rpm", "brew": return op } + // try to detect if executable is in homebrew directory + if path, err := executable(); err == nil && goos == "darwin" && strings.Contains(path, "brew") { + return "brew" + } + // fallback to binary return "binary" } diff --git a/internal/useragent/useragent_test.go b/internal/useragent/useragent_test.go index abaf0b80d192..b242a17e4263 100644 --- a/internal/useragent/useragent_test.go +++ b/internal/useragent/useragent_test.go @@ -15,6 +15,7 @@ func TestUserAgent(t *testing.T) { Expected string DeployMode string GOOS string + Exe string }{ { Name: "basic", @@ -76,9 +77,21 @@ func TestUserAgent(t *testing.T) { Expected: "GrafanaAgent/v1.2.3 (flow; linux; helm)", GOOS: "linux", }, + { + Name: "brew", + Mode: "flow", + Expected: "GrafanaAgent/v1.2.3 (flow; darwin; brew)", + GOOS: "darwin", + Exe: "/opt/homebrew/bin/agent", + }, } for _, tst := range tests { t.Run(tst.Name, func(t *testing.T) { + if tst.Exe != "" { + executable = func() (string, error) { return tst.Exe, nil } + } else { + executable = func() (string, error) { return "/agent", nil } + } goos = tst.GOOS t.Setenv(deployModeEnv, tst.DeployMode) t.Setenv(modeEnv, tst.Mode) diff --git a/pkg/config/agent_management_remote_config_test.go b/pkg/config/agent_management_remote_config_test.go index af97bd70190a..834375bda3ce 100644 --- a/pkg/config/agent_management_remote_config_test.go +++ b/pkg/config/agent_management_remote_config_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + process_exporter "github.com/grafana/agent/pkg/integrations/process_exporter" "github.com/grafana/agent/pkg/metrics/instance" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -182,6 +183,82 @@ integration_configs: require.Equal(t, 5*time.Second, c.Integrations.ConfigV1.IntegrationRestartBackoff) }) + t.Run("template variables provided", func(t *testing.T) { + baseConfig := ` +server: + log_level: {{.log_level}} +` + templateInsideTemplate := "`{{ .template_inside_template }}`" + snippet := Snippet{ + Config: ` +integration_configs: + process_exporter: + enabled: true + process_names: + - name: "grafana-agent" + cmdline: + - 'grafana-agent' + - name: "{{.nonexistent.foo.bar.baz.bat}}" + cmdline: + - "{{ ` + templateInsideTemplate + ` }}" + # Custom process monitors + {{- range $key, $value := .process_exporter_processes }} + - name: "{{ $value.name }}" + cmdline: + - "{{ $value.cmdline }}" + {{if $value.exe}} + exe: + - "{{ $value.exe }}" + {{end}} + {{- end }} +`, + } + + rc := RemoteConfig{ + BaseConfig: BaseConfigContent(baseConfig), + Snippets: []Snippet{snippet}, + AgentMetadata: AgentMetadata{ + TemplateVariables: map[string]any{ + "log_level": "debug", + "process_exporter_processes": []map[string]string{ + { + "name": "java_processes", + "cmdline": ".*/java", + }, + { + "name": "{{.ExeFull}}:{{.Matches.Cfgfile}}", + "cmdline": `-config.path\\s+(?P\\S+)`, + "exe": "/usr/local/bin/process-exporter", + }, + }, + }, + }, + } + + c, err := rc.BuildAgentConfig() + require.NoError(t, err) + require.Equal(t, 1, len(c.Integrations.ConfigV1.Integrations)) + processExporterConfig := c.Integrations.ConfigV1.Integrations[0].Config.(*process_exporter.Config) + + require.Equal(t, 4, len(processExporterConfig.ProcessExporter)) + + require.Equal(t, "grafana-agent", processExporterConfig.ProcessExporter[0].Name) + require.Equal(t, "grafana-agent", processExporterConfig.ProcessExporter[0].CmdlineRules[0]) + 
require.Equal(t, 0, len(processExporterConfig.ProcessExporter[0].ExeRules)) + + require.Equal(t, "", processExporterConfig.ProcessExporter[1].Name) + require.Equal(t, "{{ .template_inside_template }}", processExporterConfig.ProcessExporter[1].CmdlineRules[0]) + require.Equal(t, 0, len(processExporterConfig.ProcessExporter[1].ExeRules)) + + require.Equal(t, "java_processes", processExporterConfig.ProcessExporter[2].Name) + require.Equal(t, ".*/java", processExporterConfig.ProcessExporter[2].CmdlineRules[0]) + require.Equal(t, 0, len(processExporterConfig.ProcessExporter[2].ExeRules)) + + require.Equal(t, "{{.ExeFull}}:{{.Matches.Cfgfile}}", processExporterConfig.ProcessExporter[3].Name) + require.Equal(t, `-config.path\s+(?P\S+)`, processExporterConfig.ProcessExporter[3].CmdlineRules[0]) + require.Equal(t, "/usr/local/bin/process-exporter", processExporterConfig.ProcessExporter[3].ExeRules[0]) + }) + t.Run("no external labels provided", func(t *testing.T) { rc := RemoteConfig{ BaseConfig: BaseConfigContent(baseConfig), diff --git a/pkg/config/agentmanagement_remote_config.go b/pkg/config/agentmanagement_remote_config.go index c1f87615930d..8b5093861381 100644 --- a/pkg/config/agentmanagement_remote_config.go +++ b/pkg/config/agentmanagement_remote_config.go @@ -1,6 +1,9 @@ package config import ( + "bytes" + "text/template" + "github.com/grafana/agent/pkg/integrations" "github.com/grafana/agent/pkg/logs" "github.com/grafana/agent/pkg/metrics/instance" @@ -28,7 +31,8 @@ type ( } AgentMetadata struct { - ExternalLabels map[string]string `json:"external_labels,omitempty" yaml:"external_labels,omitempty"` + ExternalLabels map[string]string `json:"external_labels,omitempty" yaml:"external_labels,omitempty"` + TemplateVariables map[string]any `json:"template_variables,omitempty" yaml:"template_variables,omitempty"` } // SnippetContent defines the internal structure of a snippet configuration. 
@@ -55,8 +59,13 @@ func NewRemoteConfig(buf []byte) (*RemoteConfig, error) { // BuildAgentConfig builds an agent configuration from a base config and a list of snippets func (rc *RemoteConfig) BuildAgentConfig() (*Config, error) { + baseConfig, err := evaluateTemplate(string(rc.BaseConfig), rc.AgentMetadata.TemplateVariables) + if err != nil { + return nil, err + } + c := DefaultConfig() - err := yaml.Unmarshal([]byte(rc.BaseConfig), &c) + err = yaml.Unmarshal([]byte(baseConfig), &c) if err != nil { return nil, err } @@ -66,7 +75,7 @@ func (rc *RemoteConfig) BuildAgentConfig() (*Config, error) { return nil, err } - err = appendSnippets(&c, rc.Snippets) + err = appendSnippets(&c, rc.Snippets, rc.AgentMetadata.TemplateVariables) if err != nil { return nil, err } @@ -74,7 +83,7 @@ func (rc *RemoteConfig) BuildAgentConfig() (*Config, error) { return &c, nil } -func appendSnippets(c *Config, snippets []Snippet) error { +func appendSnippets(c *Config, snippets []Snippet, templateVars map[string]any) error { metricsConfigs := instance.DefaultConfig metricsConfigs.Name = "snippets" logsConfigs := logs.InstanceConfig{ @@ -91,8 +100,13 @@ func appendSnippets(c *Config, snippets []Snippet) error { } for _, snippet := range snippets { + snippetConfig, err := evaluateTemplate(snippet.Config, templateVars) + if err != nil { + return err + } + var snippetContent SnippetContent - err := yaml.Unmarshal([]byte(snippet.Config), &snippetContent) + err = yaml.Unmarshal([]byte(snippetConfig), &snippetContent) if err != nil { return err } @@ -148,3 +162,18 @@ func appendExternalLabels(c *Config, externalLabels map[string]string) { c.Logs.Global.ClientConfigs[i].ExternalLabels.LabelSet = logsExternalLabels.Merge(cc.ExternalLabels.LabelSet) } } + +func evaluateTemplate(config string, templateVariables map[string]any) (string, error) { + tpl, err := template.New("config").Parse(config) + if err != nil { + return "", err + } + + var buf bytes.Buffer + err = tpl.Execute(&buf, templateVariables) + if err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/pkg/flow/componenttest/componenttest.go b/pkg/flow/componenttest/componenttest.go index b545db4bf3f4..f8af382a70df 100644 --- a/pkg/flow/componenttest/componenttest.go +++ b/pkg/flow/componenttest/componenttest.go @@ -11,12 +11,12 @@ import ( "github.com/grafana/agent/service/labelstore" "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/trace" "go.uber.org/atomic" "github.com/go-kit/log" "github.com/grafana/agent/component" "github.com/grafana/agent/pkg/flow/logging" + "go.opentelemetry.io/otel/trace/noop" ) // A Controller is a testing controller which controls a single component. 
@@ -155,7 +155,7 @@ func (c *Controller) buildComponent(dataPath string, args component.Arguments) ( opts := component.Options{ ID: c.reg.Name + ".test", Logger: l, - Tracer: trace.NewNoopTracerProvider(), + Tracer: noop.NewTracerProvider(), DataPath: dataPath, OnStateChange: c.onStateChange, Registerer: prometheus.NewRegistry(), diff --git a/pkg/flow/internal/controller/loader_test.go b/pkg/flow/internal/controller/loader_test.go index c6b491f4c899..72e09c0b1d8b 100644 --- a/pkg/flow/internal/controller/loader_test.go +++ b/pkg/flow/internal/controller/loader_test.go @@ -15,7 +15,7 @@ import ( "github.com/grafana/river/parser" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" _ "github.com/grafana/agent/pkg/flow/internal/testcomponents" // Include test components ) @@ -71,7 +71,7 @@ func TestLoader(t *testing.T) { return controller.LoaderOptions{ ComponentGlobals: controller.ComponentGlobals{ Logger: l, - TraceProvider: trace.NewNoopTracerProvider(), + TraceProvider: noop.NewTracerProvider(), DataPath: t.TempDir(), OnComponentUpdate: func(cn *controller.ComponentNode) { /* no-op */ }, Registerer: prometheus.NewRegistry(), @@ -205,7 +205,7 @@ func TestScopeWithFailingComponent(t *testing.T) { return controller.LoaderOptions{ ComponentGlobals: controller.ComponentGlobals{ Logger: l, - TraceProvider: trace.NewNoopTracerProvider(), + TraceProvider: noop.NewTracerProvider(), DataPath: t.TempDir(), OnComponentUpdate: func(cn *controller.ComponentNode) { /* no-op */ }, Registerer: prometheus.NewRegistry(), diff --git a/pkg/flow/tracing/tracing.go b/pkg/flow/tracing/tracing.go index 9cf5466dab29..12247d477a40 100644 --- a/pkg/flow/tracing/tracing.go +++ b/pkg/flow/tracing/tracing.go @@ -76,6 +76,7 @@ func (opts *JaegerRemoteSamplerOptions) SetToDefault() { // [trace.TracerProvider] and can be used to forward internally generated // traces to a OpenTelemetry Collector-compatible Flow component. type Tracer struct { + trace.TracerProvider sampler *lazySampler client *client exp *otlptrace.Exporter diff --git a/pkg/flow/tracing/wrap_tracer.go b/pkg/flow/tracing/wrap_tracer.go index 197e7ce3200b..5f166d874a9c 100644 --- a/pkg/flow/tracing/wrap_tracer.go +++ b/pkg/flow/tracing/wrap_tracer.go @@ -18,9 +18,9 @@ var ( // componentID as an attribute to each span. func WrapTracer(inner trace.TracerProvider, componentID string) trace.TracerProvider { return &wrappedProvider{ - inner: inner, - id: componentID, - spanName: componentIDAttributeKey, + TracerProvider: inner, + id: componentID, + spanName: componentIDAttributeKey, } } @@ -28,14 +28,14 @@ func WrapTracer(inner trace.TracerProvider, componentID string) trace.TracerProv // controller id as an attribute to each span. 
func WrapTracerForLoader(inner trace.TracerProvider, componentID string) trace.TracerProvider { return &wrappedProvider{ - inner: inner, - id: componentID, - spanName: controllerIDAttributeKey, + TracerProvider: inner, + id: componentID, + spanName: controllerIDAttributeKey, } } type wrappedProvider struct { - inner trace.TracerProvider + trace.TracerProvider id string spanName string } @@ -49,16 +49,16 @@ func (wp *wrappedProvider) Tracer(name string, options ...trace.TracerOption) tr otelComponentName := strings.TrimSuffix(wp.id, filepath.Ext(wp.id)) options = append(options, trace.WithInstrumentationAttributes(attribute.String(wp.spanName, otelComponentName))) } - innerTracer := wp.inner.Tracer(name, options...) + innerTracer := wp.TracerProvider.Tracer(name, options...) return &wrappedTracer{ - inner: innerTracer, + Tracer: innerTracer, id: wp.id, spanName: wp.spanName, } } type wrappedTracer struct { - inner trace.Tracer + trace.Tracer id string spanName string } @@ -66,7 +66,7 @@ type wrappedTracer struct { var _ trace.Tracer = (*wrappedTracer)(nil) func (tp *wrappedTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - ctx, span := tp.inner.Start(ctx, spanName, opts...) + ctx, span := tp.Tracer.Start(ctx, spanName, opts...) if tp.id != "" { span.SetAttributes( attribute.String(tp.spanName, tp.id), diff --git a/pkg/integrations/v2/app_agent_receiver/handler.go b/pkg/integrations/v2/app_agent_receiver/handler.go index 6831885cb51b..c430e9099312 100644 --- a/pkg/integrations/v2/app_agent_receiver/handler.go +++ b/pkg/integrations/v2/app_agent_receiver/handler.go @@ -117,7 +117,7 @@ func (ar *AppAgentReceiverHandler) HTTPHandler(logger log.Logger) http.Handler { if len(ar.config.Server.CORSAllowedOrigins) > 0 { c := cors.New(cors.Options{ AllowedOrigins: ar.config.Server.CORSAllowedOrigins, - AllowedHeaders: []string{apiKeyHeader, "content-type"}, + AllowedHeaders: []string{apiKeyHeader, "content-type", "x-faro-session-id"}, }) handler = c.Handler(handler) } diff --git a/pkg/integrations/windows_exporter/config.go b/pkg/integrations/windows_exporter/config.go index 37f4e249a8fa..006bc5426d72 100644 --- a/pkg/integrations/windows_exporter/config.go +++ b/pkg/integrations/windows_exporter/config.go @@ -7,71 +7,6 @@ import ( "github.com/grafana/agent/pkg/integrations/v2/metricsutils" ) -// DefaultConfig holds the default settings for the windows_exporter integration. -var DefaultConfig = Config{ - // NOTE(rfratto): there is an init function in config_windows.go that - // populates defaults for collectors based on the exporter defaults. 
- EnabledCollectors: "cpu,cs,logical_disk,net,os,service,system", - - Dfsr: DfsrConfig{ - SourcesEnabled: "", - }, - Exchange: ExchangeConfig{ - EnabledList: "", - }, - IIS: IISConfig{ - AppBlackList: "", - AppWhiteList: "", - SiteBlackList: "", - SiteWhiteList: "", - AppInclude: "", - AppExclude: "", - SiteInclude: "", - SiteExclude: "", - }, - LogicalDisk: LogicalDiskConfig{ - BlackList: "", - WhiteList: "", - Include: "", - Exclude: "", - }, - MSMQ: MSMQConfig{ - Where: "", - }, - MSSQL: MSSQLConfig{ - EnabledClasses: "", - }, - Network: NetworkConfig{ - BlackList: "", - WhiteList: "", - Include: "", - Exclude: "", - }, - Process: ProcessConfig{ - BlackList: "", - WhiteList: "", - Include: "", - Exclude: "", - }, - ScheduledTask: ScheduledTaskConfig{ - Include: "", - Exclude: "", - }, - Service: ServiceConfig{ - UseApi: "", - Where: "", - }, - SMTP: SMTPConfig{ - BlackList: "", - WhiteList: "", - Include: "", - Exclude: "", - }, - TextFile: TextFileConfig{ - TextFileDirectory: "", - }, -} - func init() { integrations.RegisterIntegration(&Config{}) integrations_v2.RegisterLegacy(&Config{}, integrations_v2.TypeSingleton, metricsutils.NewNamedShim("windows")) @@ -96,14 +31,6 @@ type Config struct { ScheduledTask ScheduledTaskConfig `yaml:"scheduled_task,omitempty"` } -// UnmarshalYAML implements yaml.Unmarshaler for Config. -func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultConfig - - type plain Config - return unmarshal((*plain)(c)) -} - // Name returns the name used, "windows_explorer" func (c *Config) Name() string { return "windows_exporter" diff --git a/pkg/integrations/windows_exporter/config_windows.go b/pkg/integrations/windows_exporter/config_windows.go index cb556363d084..17fd03d4f80c 100644 --- a/pkg/integrations/windows_exporter/config_windows.go +++ b/pkg/integrations/windows_exporter/config_windows.go @@ -1,6 +1,8 @@ package windows_exporter -import "github.com/prometheus-community/windows_exporter/pkg/collector" +import ( + "github.com/prometheus-community/windows_exporter/pkg/collector" +) func (c *Config) ToWindowsExporterConfig() collector.Config { cfg := collector.ConfigDefaults @@ -47,3 +49,73 @@ func coalesceString(v ...string) string { } return "" } + +// DefaultConfig holds the default settings for the windows_exporter integration. 
+var DefaultConfig = Config{ + EnabledCollectors: "cpu,cs,logical_disk,net,os,service,system", + Dfsr: DfsrConfig{ + SourcesEnabled: collector.ConfigDefaults.Dfsr.DfsrEnabledCollectors, + }, + Exchange: ExchangeConfig{ + EnabledList: collector.ConfigDefaults.Exchange.CollectorsEnabled, + }, + IIS: IISConfig{ + AppBlackList: collector.ConfigDefaults.Iis.AppExclude, + AppWhiteList: collector.ConfigDefaults.Iis.AppInclude, + SiteBlackList: collector.ConfigDefaults.Iis.SiteExclude, + SiteWhiteList: collector.ConfigDefaults.Iis.SiteInclude, + AppInclude: collector.ConfigDefaults.Iis.AppInclude, + AppExclude: collector.ConfigDefaults.Iis.AppExclude, + SiteInclude: collector.ConfigDefaults.Iis.SiteInclude, + SiteExclude: collector.ConfigDefaults.Iis.SiteExclude, + }, + LogicalDisk: LogicalDiskConfig{ + BlackList: collector.ConfigDefaults.LogicalDisk.VolumeExclude, + WhiteList: collector.ConfigDefaults.LogicalDisk.VolumeInclude, + Include: collector.ConfigDefaults.LogicalDisk.VolumeInclude, + Exclude: collector.ConfigDefaults.LogicalDisk.VolumeExclude, + }, + MSMQ: MSMQConfig{ + Where: collector.ConfigDefaults.Msmq.QueryWhereClause, + }, + MSSQL: MSSQLConfig{ + EnabledClasses: collector.ConfigDefaults.Mssql.EnabledCollectors, + }, + Network: NetworkConfig{ + BlackList: collector.ConfigDefaults.Net.NicExclude, + WhiteList: collector.ConfigDefaults.Net.NicInclude, + Include: collector.ConfigDefaults.Net.NicInclude, + Exclude: collector.ConfigDefaults.Net.NicExclude, + }, + Process: ProcessConfig{ + BlackList: collector.ConfigDefaults.Process.ProcessExclude, + WhiteList: collector.ConfigDefaults.Process.ProcessInclude, + Include: collector.ConfigDefaults.Process.ProcessInclude, + Exclude: collector.ConfigDefaults.Process.ProcessExclude, + }, + ScheduledTask: ScheduledTaskConfig{ + Include: collector.ConfigDefaults.ScheduledTask.TaskInclude, + Exclude: collector.ConfigDefaults.ScheduledTask.TaskExclude, + }, + Service: ServiceConfig{ + UseApi: "false", + Where: collector.ConfigDefaults.Service.ServiceWhereClause, + }, + SMTP: SMTPConfig{ + BlackList: collector.ConfigDefaults.Smtp.ServerExclude, + WhiteList: collector.ConfigDefaults.Smtp.ServerInclude, + Include: collector.ConfigDefaults.Smtp.ServerInclude, + Exclude: collector.ConfigDefaults.Smtp.ServerExclude, + }, + TextFile: TextFileConfig{ + TextFileDirectory: collector.ConfigDefaults.Textfile.TextFileDirectories, + }, +} + +// UnmarshalYAML implements yaml.Unmarshaler for Config. +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultConfig + + type plain Config + return unmarshal((*plain)(c)) +} diff --git a/pkg/operator/defaults.go b/pkg/operator/defaults.go index 72aa6b12d6a3..b66cfb52289b 100644 --- a/pkg/operator/defaults.go +++ b/pkg/operator/defaults.go @@ -2,7 +2,7 @@ package operator // Supported versions of the Grafana Agent. 
var ( - DefaultAgentVersion = "v0.38.0" + DefaultAgentVersion = "v0.38.1" DefaultAgentBaseImage = "grafana/agent" DefaultAgentImage = DefaultAgentBaseImage + ":" + DefaultAgentVersion ) diff --git a/pkg/traces/instance.go b/pkg/traces/instance.go index 855b5b81a133..bf35e31ccb14 100644 --- a/pkg/traces/instance.go +++ b/pkg/traces/instance.go @@ -26,7 +26,7 @@ import ( "github.com/grafana/agent/pkg/traces/traceutils" "github.com/grafana/agent/pkg/util" prom_client "github.com/prometheus/client_golang/prometheus" - "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" ) // Instance wraps the OpenTelemetry collector to enable tracing pipelines @@ -153,7 +153,7 @@ func (i *Instance) buildAndStartPipeline(ctx context.Context, cfg InstanceConfig OtelMetricReader: promExporter, DisableProcessMetrics: true, UseExternalMetricsServer: true, - TracerProvider: trace.NewNoopTracerProvider(), + TracerProvider: noop.NewTracerProvider(), //TODO: Plug in an AsyncErrorChannel to shut down the Agent in case of a fatal event LoggingOptions: []zap.Option{ zap.WrapCore(func(zapcore.Core) zapcore.Core { diff --git a/pkg/traces/traceutils/server.go b/pkg/traces/traceutils/server.go index 4889e873df42..2c484e3c086b 100644 --- a/pkg/traces/traceutils/server.go +++ b/pkg/traces/traceutils/server.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver" "go.opentelemetry.io/collector/service" - "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "gopkg.in/yaml.v3" ) @@ -153,7 +153,7 @@ func newServer(addr string, callback func(ptrace.Traces)) (*server, error) { Connectors: connector.NewBuilder(otelCfg.Connectors, factories.Connectors), Extensions: extension.NewBuilder(otelCfg.Extensions, factories.Extensions), UseExternalMetricsServer: false, - TracerProvider: trace.NewNoopTracerProvider(), + TracerProvider: noop.NewTracerProvider(), }, otelCfg.Service) if err != nil { return nil, fmt.Errorf("failed to create Otel service: %w", err) diff --git a/pkg/usagestats/stats.go b/pkg/usagestats/stats.go index 7256701b5003..d3418a5c4bde 100644 --- a/pkg/usagestats/stats.go +++ b/pkg/usagestats/stats.go @@ -10,6 +10,7 @@ import ( "runtime" "time" + "github.com/grafana/agent/internal/useragent" "github.com/prometheus/common/version" ) @@ -27,6 +28,7 @@ type Report struct { Metrics map[string]interface{} `json:"metrics"` Os string `json:"os"` Arch string `json:"arch"` + DeployMode string `json:"deployMode"` } func sendReport(ctx context.Context, seed *AgentSeed, interval time.Time, metrics map[string]interface{}) error { @@ -38,6 +40,7 @@ func sendReport(ctx context.Context, seed *AgentSeed, interval time.Time, metric Arch: runtime.GOARCH, Interval: interval, Metrics: metrics, + DeployMode: useragent.GetDeployMode(), } out, err := json.MarshalIndent(report, "", " ") if err != nil { diff --git a/service/cluster/cluster.go b/service/cluster/cluster.go index 398a0db92979..dc4d66e8d418 100644 --- a/service/cluster/cluster.go +++ b/service/cluster/cluster.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "golang.org/x/net/http2" ) @@ -94,7 +95,7 @@ func New(opts Options) (*Service, error) { l = log.NewNopLogger() } if t == nil { - t = trace.NewNoopTracerProvider() + t = noop.NewTracerProvider() } ckitConfig := ckit.Config{ diff --git 
a/service/http/http.go b/service/http/http.go index cf703f942474..a8608f4dfcb5 100644 --- a/service/http/http.go +++ b/service/http/http.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" ) @@ -93,7 +94,7 @@ func New(opts Options) *Service { l = log.NewNopLogger() } if t == nil { - t = trace.NewNoopTracerProvider() + t = noop.NewTracerProvider() } if r == nil { r = prometheus.NewRegistry() diff --git a/service/http/http_test.go b/service/http/http_test.go index b341a677f129..660e0deee43a 100644 --- a/service/http/http_test.go +++ b/service/http/http_test.go @@ -16,7 +16,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" ) func TestHTTP(t *testing.T) { @@ -168,7 +168,7 @@ func newTestEnvironment(t *testing.T) (*testEnvironment, error) { svc := New(Options{ Logger: util.TestLogger(t), - Tracer: trace.NewNoopTracerProvider(), + Tracer: noop.NewTracerProvider(), Gatherer: prometheus.NewRegistry(), ReadyFunc: func() bool { return true }, diff --git a/tools/gen-versioned-files/agent-version.txt b/tools/gen-versioned-files/agent-version.txt index 765098dc40e9..b4a466a81031 100644 --- a/tools/gen-versioned-files/agent-version.txt +++ b/tools/gen-versioned-files/agent-version.txt @@ -1 +1 @@ -v0.38.0 +v0.38.1
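
The core of the template-variables feature in this patch is the `evaluateTemplate` helper added to `pkg/config/agentmanagement_remote_config.go`, together with the backtick escaping shown in the agent-management docs. The sketch below mirrors that helper to illustrate how a snippet is evaluated; the snippet content and variable names here are illustrative examples, not taken from the patch.

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// evaluateTemplate mirrors the helper added in
// pkg/config/agentmanagement_remote_config.go: the base config or snippet is
// parsed as a Go text/template and executed with the provided variables.
func evaluateTemplate(config string, vars map[string]any) (string, error) {
	tpl, err := template.New("config").Parse(config)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := tpl.Execute(&buf, vars); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	// Illustrative snippet: {{ .log_level }} is substituted from the variables,
	// while the backtick-quoted inner template is emitted literally, matching
	// the escaping pattern documented for templates inside templates.
	snippet := "log_level: {{ .log_level }}\nname: {{ `{{ .keep_me }}` }}\n"

	out, err := evaluateTemplate(snippet, map[string]any{"log_level": "debug"})
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
	// Prints:
	// log_level: debug
	// name: {{ .keep_me }}
}
```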