Merge branch 'main' into exporter-target-block-name
hainenber authored Dec 11, 2023
2 parents: c4991ee + a2badce
Commit: 822a045
Showing 6 changed files with 13 additions and 11 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -58,6 +58,8 @@ Main (unreleased)

- Fixes a `loki.source.docker` behavior that synced an incomplete list of targets to the tailer manager. (@FerdinandvHagen)

- Fixes the default value of the `otelcol.connector.servicegraph` store `ttl`, changing it from 2ms to 2s. (@rlankfo)

### Other changes

- Bump github.com/IBM/sarama from v1.41.2 to v1.42.1
2 changes: 1 addition & 1 deletion component/otelcol/connector/servicegraph/servicegraph.go
@@ -91,7 +91,7 @@ var DefaultArguments = Arguments{
Dimensions: []string{},
Store: StoreConfig{
MaxItems: 1000,
- TTL: 2 * time.Millisecond,
+ TTL: 2 * time.Second,
},
CacheLoop: 1 * time.Minute,
StoreExpirationLoop: 2 * time.Second,
@@ -44,7 +44,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) {
Dimensions: []string{},
Store: servicegraphprocessor.StoreConfig{
MaxItems: 1000,
- TTL: 2 * time.Millisecond,
+ TTL: 2 * time.Second,
},
CacheLoop: 1 * time.Minute,
StoreExpirationLoop: 2 * time.Second,
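The corrected default is a thousand times larger than the old one, as both the component default and its unit test above now reflect. A quick standalone Go check (not part of the commit) makes the magnitude of the change explicit:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	oldTTL := 2 * time.Millisecond // previous default: store entries expired almost immediately
	newTTL := 2 * time.Second      // corrected default
	// Dividing one Duration by another yields the dimensionless ratio between them.
	fmt.Printf("old=%v new=%v ratio=%d\n", oldTTL, newTTL, newTTL/oldTTL) // old=2ms new=2s ratio=1000
}
```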
@@ -135,7 +135,7 @@ The `store` block configures the in-memory store for spans.
Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
`max_items` | `number` | Maximum number of items to keep in the store. | `1000` | no
- `ttl` | `duration` | The time to live for spans in the store. | `"2ms"` | no
+ `ttl` | `duration` | The time to live for spans in the store. | `"2s"` | no

### output block

@@ -238,4 +238,4 @@ connection work correctly. Refer to the linked documentation for more details.
{{% /admonition %}}
<!-- END GENERATED COMPATIBLE COMPONENTS -->
2 changes: 1 addition & 1 deletion pkg/flow/flow.go
@@ -240,7 +240,7 @@ func (f *Flow) Run(ctx context.Context) {
// throughput - it prevents the situation where two components have the same dependency, and the first time
// it's picked up by the worker pool and the second time it's enqueued again, resulting in more evaluations.
all := f.updateQueue.DequeueAll()
- f.loader.EvaluateDependencies(ctx, all)
+ f.loader.EvaluateDependants(ctx, all)
case <-f.loadFinished:
level.Info(f.log).Log("msg", "scheduling loaded components and services")

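The batching described in the comment above relies on the queue deduplicating entries, so a component that is updated twice in quick succession is evaluated only once per batch. A simplified sketch of that idea follows; the names `component` and `dedupQueue` are illustrative, and this is not the controller's actual `updateQueue` implementation:

```go
package main

import "sync"

// component is a hypothetical stand-in for the controller's ComponentNode.
type component struct{ id string }

// dedupQueue coalesces repeated enqueues of the same component, so one
// DequeueAll call drains each pending component exactly once.
type dedupQueue struct {
	mu      sync.Mutex
	pending map[*component]struct{}
}

func newDedupQueue() *dedupQueue {
	return &dedupQueue{pending: make(map[*component]struct{})}
}

func (q *dedupQueue) Enqueue(c *component) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.pending[c] = struct{}{}
}

// DequeueAll returns every pending component in one batch and resets the
// queue, so the whole batch can be submitted before evaluation starts.
func (q *dedupQueue) DequeueAll() []*component {
	q.mu.Lock()
	defer q.mu.Unlock()
	all := make([]*component, 0, len(q.pending))
	for c := range q.pending {
		all = append(all, c)
	}
	q.pending = make(map[*component]struct{})
	return all
}

func main() {
	q := newDedupQueue()
	a := &component{id: "a"}
	q.Enqueue(a)
	q.Enqueue(a)                 // duplicate update is coalesced
	println(len(q.DequeueAll())) // 1
}
```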
12 changes: 6 additions & 6 deletions pkg/flow/internal/controller/loader.go
@@ -32,7 +32,7 @@ type Loader struct {
componentReg ComponentRegistry
workerPool worker.Pool
// backoffConfig is used to backoff when an updated component's dependencies cannot be submitted to worker
- // pool for evaluation in EvaluateDependencies, because the queue is full. This is an unlikely scenario, but when
+ // pool for evaluation in EvaluateDependants, because the queue is full. This is an unlikely scenario, but when
// it happens we should avoid retrying too often to give other goroutines a chance to progress. Having a backoff
// also prevents log spamming with errors.
backoffConfig backoff.Config
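The retry behaviour this field backs might be sketched as follows, assuming the `backoff` package here is `github.com/grafana/dskit/backoff` and using a hypothetical `submit` callback that fails while the worker pool's queue is full:

```go
package main

import (
	"context"
	"errors"
	"time"

	"github.com/grafana/dskit/backoff"
)

// submitWithBackoff retries submit until it succeeds, retries are exhausted,
// or ctx is cancelled. Waiting between attempts gives other goroutines a
// chance to drain the queue and avoids spamming the log with errors.
func submitWithBackoff(ctx context.Context, cfg backoff.Config, submit func() error) error {
	b := backoff.New(ctx, cfg)
	var lastErr error
	for b.Ongoing() {
		if lastErr = submit(); lastErr == nil {
			return nil
		}
		b.Wait()
	}
	return errors.Join(b.Err(), lastErr)
}

func main() {
	cfg := backoff.Config{
		MinBackoff: 10 * time.Millisecond,
		MaxBackoff: 1 * time.Second,
		MaxRetries: 5,
	}
	_ = submitWithBackoff(context.Background(), cfg, func() error {
		return errors.New("queue full") // simulated full worker-pool queue
	})
}
```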
@@ -543,13 +543,13 @@ func (l *Loader) OriginalGraph() *dag.Graph {
return l.originalGraph.Clone()
}

- // EvaluateDependencies sends components which depend directly on components in updatedNodes for evaluation to the
+ // EvaluateDependants sends components which depend directly on components in updatedNodes for evaluation to the
// workerPool. It should be called whenever components update their exports.
- // It is beneficial to call EvaluateDependencies with a batch of components, as it will enqueue the entire batch before
+ // It is beneficial to call EvaluateDependants with a batch of components, as it will enqueue the entire batch before
// the worker pool starts to evaluate them, resulting in smaller number of total evaluations when
- // node updates are frequent. If the worker pool's queue is full, EvaluateDependencies will retry with a backoff until
+ // node updates are frequent. If the worker pool's queue is full, EvaluateDependants will retry with a backoff until
// it succeeds or until the ctx is cancelled.
- func (l *Loader) EvaluateDependencies(ctx context.Context, updatedNodes []*ComponentNode) {
+ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*ComponentNode) {
if len(updatedNodes) == 0 {
return
}
@@ -578,7 +578,7 @@ func (l *Loader) EvaluateDependencies(ctx context.Context, updatedNodes []*Compo

// Submit all dependencies for asynchronous evaluation.
// During evaluation, if a node's exports change, Flow will add it to updated nodes queue (controller.Queue) and
- // the Flow controller will call EvaluateDependencies on it again. This results in a concurrent breadth-first
+ // the Flow controller will call EvaluateDependants on it again. This results in a concurrent breadth-first
// traversal of the nodes that need to be evaluated.
for n, parent := range dependenciesToParentsMap {
dependantCtx, span := tracer.Start(spanCtx, "SubmitForEvaluation", trace.WithSpanKind(trace.SpanKindInternal))
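Taken together, the renamed method and the comments above describe a wave-by-wave traversal: each batch of updated nodes fans out to its direct dependants, and any dependant whose exports change seeds the next wave. A sequential toy version of that loop follows; the `node` type is illustrative, and the real controller evaluates concurrently through the worker pool and only re-enqueues nodes whose exports actually changed:

```go
package main

import "fmt"

// node is a hypothetical stand-in for a component in the DAG; dependants
// lists the nodes that consume this node's exports.
type node struct {
	name       string
	dependants []*node
}

// evaluateBreadthFirst walks the DAG level by level. Shared dependants are
// deduplicated within each wave, so a node is evaluated at most once per wave.
func evaluateBreadthFirst(updated []*node, evaluate func(*node)) {
	for len(updated) > 0 {
		next := map[*node]struct{}{}
		for _, u := range updated {
			for _, d := range u.dependants {
				next[d] = struct{}{}
			}
		}
		updated = nil
		for d := range next {
			evaluate(d)
			updated = append(updated, d) // seeds the next wave
		}
	}
}

func main() {
	c := &node{name: "c"}
	a := &node{name: "a", dependants: []*node{c}}
	b := &node{name: "b", dependants: []*node{c}}
	// c depends on both a and b, yet is evaluated only once in the first wave.
	evaluateBreadthFirst([]*node{a, b}, func(n *node) { fmt.Println("evaluate", n.name) })
}
```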
