Merge branch 'main' into scrape_config_probe_selector
dominicqi authored Nov 25, 2024
2 parents 51f12aa + bf7cdd1 commit 1d92baf
Showing 47 changed files with 1,266 additions and 62 deletions.
16 changes: 16 additions & 0 deletions .chloggen/3427.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: collector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Create RBAC rules for the k8s_cluster receiver automatically.

# One or more tracking issues related to the change
issues: [3427]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
16 changes: 16 additions & 0 deletions .chloggen/3432.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: collector

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Add a warning message when a created collector needs extra RBAC permissions and its service account doesn't have them.

# One or more tracking issues related to the change
issues: [3432]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
21 changes: 21 additions & 0 deletions .chloggen/add_fallback_strategy_for_per_node_strategy.yaml
@@ -0,0 +1,21 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: 'enhancement'

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: target allocator

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Added the allocation_fallback_strategy option as a fallback for the per-node allocation strategy; it can be enabled with the operator.targetallocator.fallbackstrategy feature flag

# One or more tracking issues related to the change
issues: [3477]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext: |
With the per-node allocation strategy, targets that are not attached to a node will not
be allocated. Because the per-node strategy is required when running as a daemonset, some
targets cannot be assigned under a daemonset deployment.
The operator.targetallocator.fallbackstrategy feature flag addresses this; at this time it
enables consistent-hashing as the fallback allocation strategy for "per-node" only.
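
For orientation, a minimal sketch (not part of the diff) of how the new option is wired up, mirroring the New/WithFallbackStrategy call exercised in per_node_test.go further down this commit; the logger and error handling here are illustrative, and the strategy-name strings follow the constants used in that test:

// Sketch only: build a per-node allocator that delegates node-less
// targets to consistent hashing. Assumes this repo's allocation package.
s, err := allocation.New("per-node", logf.Log.WithName("example"),
	allocation.WithFallbackStrategy("consistent-hashing"))
if err != nil {
	return err // unknown strategy name
}
s.SetCollectors(collectors) // the fallback strategy receives the same collectors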
16 changes: 16 additions & 0 deletions .chloggen/bump-base-instrumentation-mem-limit.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: bug_fix

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: auto-instrumentation

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Bump base memory requirements for Python and Go

# One or more tracking issues related to the change
issues: [3479]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
16 changes: 16 additions & 0 deletions .chloggen/chore_change-kube-rbac-proxy-image-registry.yaml
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. collector, target allocator, auto-instrumentation, opamp, github action)
component: operator

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Replace references to gcr.io/kubebuilder/kube-rbac-proxy with quay.io/brancz/kube-rbac-proxy

# One or more tracking issues related to the change
issues: [3485]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
2 changes: 1 addition & 1 deletion .github/workflows/publish-autoinstrumentation-nodejs.yaml
@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4

- name: Read version
- run: echo VERSION=$(cat autoinstrumentation/nodejs/package.json | jq -r '.dependencies."@opentelemetry/sdk-node"') >> $GITHUB_ENV
+ run: echo VERSION=$(cat autoinstrumentation/nodejs/package.json | jq -r '.dependencies."@opentelemetry/auto-instrumentations-node"') >> $GITHUB_ENV

- name: Docker meta
id: meta
1 change: 1 addition & 0 deletions Makefile
@@ -206,6 +206,7 @@ add-rbac-permissions-to-operator: manifests kustomize
# This folder is ignored by .gitignore
mkdir -p config/rbac/extra-permissions-operator
cp -r tests/e2e-automatic-rbac/extra-permissions-operator/* config/rbac/extra-permissions-operator
+ cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/clusterresourcequotas.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/cronjobs.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/daemonsets.yaml
cd config/rbac && $(KUSTOMIZE) edit add patch --kind ClusterRole --name manager-role --path extra-permissions-operator/events.yaml
8 changes: 4 additions & 4 deletions apis/v1alpha1/instrumentation_webhook.go
@@ -128,13 +128,13 @@ func (w InstrumentationWebhook) defaulter(r *Instrumentation) error {
if r.Spec.Python.Resources.Limits == nil {
r.Spec.Python.Resources.Limits = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("32Mi"),
+ corev1.ResourceMemory: resource.MustParse("64Mi"),
}
}
if r.Spec.Python.Resources.Requests == nil {
r.Spec.Python.Resources.Requests = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("50m"),
- corev1.ResourceMemory: resource.MustParse("32Mi"),
+ corev1.ResourceMemory: resource.MustParse("64Mi"),
}
}
if r.Spec.DotNet.Image == "" {
@@ -158,13 +158,13 @@ func (w InstrumentationWebhook) defaulter(r *Instrumentation) error {
if r.Spec.Go.Resources.Limits == nil {
r.Spec.Go.Resources.Limits = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("500m"),
- corev1.ResourceMemory: resource.MustParse("32Mi"),
+ corev1.ResourceMemory: resource.MustParse("64Mi"),
}
}
if r.Spec.Go.Resources.Requests == nil {
r.Spec.Go.Resources.Requests = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("50m"),
- corev1.ResourceMemory: resource.MustParse("32Mi"),
+ corev1.ResourceMemory: resource.MustParse("64Mi"),
}
}
if r.Spec.ApacheHttpd.Image == "" {
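
The bump only changes webhook-applied defaults: each block above is guarded by a nil check, so Instrumentation resources that set limits or requests explicitly are untouched. A standalone sketch of that guard pattern, using the real k8s.io API types (the function name is illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// defaultPythonLimits mirrors the nil-guarded defaulting above: values
// are filled in only when the user left the field unset.
func defaultPythonLimits(limits corev1.ResourceList) corev1.ResourceList {
	if limits == nil {
		limits = corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("500m"),
			corev1.ResourceMemory: resource.MustParse("64Mi"), // was 32Mi before this change
		}
	}
	return limits
}

func main() {
	mem := defaultPythonLimits(nil)[corev1.ResourceMemory]
	fmt.Println(mem.String()) // 64Mi

	// Explicit user values win: nothing is overwritten.
	user := corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("128Mi")}
	kept := defaultPythonLimits(user)[corev1.ResourceMemory]
	fmt.Println(kept.String()) // 128Mi
}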
13 changes: 2 additions & 11 deletions autoinstrumentation/nodejs/package.json
@@ -14,17 +14,8 @@
"typescript": "^5.6.3"
},
"dependencies": {
"@opentelemetry/api": "1.9.0",
"@opentelemetry/auto-instrumentations-node": "0.52.0",
"@opentelemetry/auto-instrumentations-node": "0.52.1",
"@opentelemetry/exporter-metrics-otlp-grpc": "0.54.0",
"@opentelemetry/exporter-prometheus": "0.54.0",
"@opentelemetry/exporter-trace-otlp-grpc": "0.54.0",
"@opentelemetry/resource-detector-alibaba-cloud": "0.29.4",
"@opentelemetry/resource-detector-aws": "1.7.0",
"@opentelemetry/resource-detector-container": "0.5.0",
"@opentelemetry/resource-detector-gcp": "0.29.13",
"@opentelemetry/resources": "1.27.0",
"@opentelemetry/sdk-metrics": "1.27.0",
"@opentelemetry/sdk-node": "0.54.0"
"@opentelemetry/exporter-prometheus": "0.54.0"
}
}
@@ -514,7 +514,7 @@ spec:
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
- image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+ image: quay.io/brancz/kube-rbac-proxy:v0.13.1
name: kube-rbac-proxy
ports:
- containerPort: 8443
@@ -522,7 +522,7 @@ spec:
- --tls-private-key-file=/var/run/tls/server/tls.key
- --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA256
- --tls-min-version=VersionTLS12
- image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+ image: quay.io/brancz/kube-rbac-proxy:v0.13.1
name: kube-rbac-proxy
ports:
- containerPort: 8443
5 changes: 5 additions & 0 deletions cmd/otel-allocator/allocation/allocator.go
@@ -76,6 +76,11 @@ func (a *allocator) SetFilter(filter Filter) {
a.filter = filter
}

// SetFallbackStrategy sets the fallback strategy to use.
func (a *allocator) SetFallbackStrategy(strategy Strategy) {
a.strategy.SetFallbackStrategy(strategy)
}

// SetTargets accepts a list of targets that will be used to make
// load balancing decisions. This method should be called when there are
// new targets discovered or existing targets are shutdown.
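
Taken together with the per-strategy changes below, this diff implies the allocation Strategy interface now carries a SetFallbackStrategy method, which strategies that cannot delegate implement as a no-op. A sketch of the implied interface, reconstructed from the methods visible in this commit rather than copied from the source:

type Strategy interface {
	GetName() string
	GetCollectorForTarget(collectors map[string]*Collector, item *target.Item) (*Collector, error)
	SetCollectors(collectors map[string]*Collector)
	SetFallbackStrategy(fallbackStrategy Strategy)
}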
2 changes: 2 additions & 0 deletions cmd/otel-allocator/allocation/consistent_hashing.go
@@ -83,3 +83,5 @@ func (s *consistentHashingStrategy) SetCollectors(collectors map[string]*Collect
s.consistentHasher = consistent.New(members, s.config)

}

func (s *consistentHashingStrategy) SetFallbackStrategy(fallbackStrategy Strategy) {}
2 changes: 2 additions & 0 deletions cmd/otel-allocator/allocation/least_weighted.go
@@ -54,3 +54,5 @@ func (s *leastWeightedStrategy) GetCollectorForTarget(collectors map[string]*Col
}

func (s *leastWeightedStrategy) SetCollectors(_ map[string]*Collector) {}

func (s *leastWeightedStrategy) SetFallbackStrategy(fallbackStrategy Strategy) {}
18 changes: 16 additions & 2 deletions cmd/otel-allocator/allocation/per_node.go
@@ -25,21 +25,31 @@ const perNodeStrategyName = "per-node"
var _ Strategy = &perNodeStrategy{}

type perNodeStrategy struct {
- collectorByNode map[string]*Collector
+ collectorByNode map[string]*Collector
+ fallbackStrategy Strategy
}

func newPerNodeStrategy() Strategy {
return &perNodeStrategy{
- collectorByNode: make(map[string]*Collector),
+ collectorByNode: make(map[string]*Collector),
+ fallbackStrategy: nil,
}
}

func (s *perNodeStrategy) SetFallbackStrategy(fallbackStrategy Strategy) {
s.fallbackStrategy = fallbackStrategy
}

func (s *perNodeStrategy) GetName() string {
return perNodeStrategyName
}

func (s *perNodeStrategy) GetCollectorForTarget(collectors map[string]*Collector, item *target.Item) (*Collector, error) {
targetNodeName := item.GetNodeName()
if targetNodeName == "" && s.fallbackStrategy != nil {
return s.fallbackStrategy.GetCollectorForTarget(collectors, item)
}

collector, ok := s.collectorByNode[targetNodeName]
if !ok {
return nil, fmt.Errorf("could not find collector for node %s", targetNodeName)
@@ -54,4 +64,8 @@ func (s *perNodeStrategy) SetCollectors(collectors map[string]*Collector) {
s.collectorByNode[collector.NodeName] = collector
}
}

if s.fallbackStrategy != nil {
s.fallbackStrategy.SetCollectors(collectors)
}
}
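
To make the control flow above concrete, a self-contained sketch of the fallback path with simplified types; the real code uses the repository's Collector and target.Item, and all names here are illustrative:

package main

import "fmt"

type collector struct{ name string }

// pick mirrors perNodeStrategy.GetCollectorForTarget: targets without a
// node name are delegated to the fallback when one is configured.
func pick(byNode map[string]*collector, nodeName string, fallback func() *collector) (*collector, error) {
	if nodeName == "" && fallback != nil {
		return fallback(), nil
	}
	c, ok := byNode[nodeName]
	if !ok {
		return nil, fmt.Errorf("could not find collector for node %s", nodeName)
	}
	return c, nil
}

func main() {
	byNode := map[string]*collector{"node-0": {name: "col-0"}}
	fallback := func() *collector { return byNode["node-0"] } // e.g. consistent hashing

	c, _ := pick(byNode, "node-0", fallback)
	fmt.Println(c.name) // col-0, matched by node name

	c, _ = pick(byNode, "", fallback)
	fmt.Println(c.name) // col-0 via fallback, since the target has no node
}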
83 changes: 82 additions & 1 deletion cmd/otel-allocator/allocation/per_node_test.go
@@ -26,7 +26,7 @@ import (

var loggerPerNode = logf.Log.WithName("unit-tests")

- // Tests that two targets with the same target url and job name but different label set are both added.
func GetTargetsWithNodeName(targets []*target.Item) (targetsWithNodeName []*target.Item) {
for _, item := range targets {
if item.GetNodeName() != "" {
targetsWithNodeName = append(targetsWithNodeName, item)
}
}
return targetsWithNodeName
}

// Tests that four targets, with one of them lacking node labels, are assigned except for the
// target that lacks node labels.
func TestAllocationPerNode(t *testing.T) {
// prepare allocator with initial targets and collectors
s, _ := New("per-node", loggerPerNode)
@@ -93,6 +103,77 @@
}
}

// Tests that four targets, with one of them missing node labels, are all assigned.
func TestAllocationPerNodeUsingFallback(t *testing.T) {
// prepare allocator with initial targets and collectors
s, _ := New("per-node", loggerPerNode, WithFallbackStrategy(consistentHashingStrategyName))

cols := MakeNCollectors(4, 0)
s.SetCollectors(cols)
firstLabels := labels.Labels{
{Name: "test", Value: "test1"},
{Name: "__meta_kubernetes_pod_node_name", Value: "node-0"},
}
secondLabels := labels.Labels{
{Name: "test", Value: "test2"},
{Name: "__meta_kubernetes_node_name", Value: "node-1"},
}
// no label, should be allocated by the fallback strategy
thirdLabels := labels.Labels{
{Name: "test", Value: "test3"},
}
// endpointslice target kind and name
fourthLabels := labels.Labels{
{Name: "test", Value: "test4"},
{Name: "__meta_kubernetes_endpointslice_address_target_kind", Value: "Node"},
{Name: "__meta_kubernetes_endpointslice_address_target_name", Value: "node-3"},
}

firstTarget := target.NewItem("sample-name", "0.0.0.0:8000", firstLabels, "")
secondTarget := target.NewItem("sample-name", "0.0.0.0:8000", secondLabels, "")
thirdTarget := target.NewItem("sample-name", "0.0.0.0:8000", thirdLabels, "")
fourthTarget := target.NewItem("sample-name", "0.0.0.0:8000", fourthLabels, "")

targetList := map[string]*target.Item{
firstTarget.Hash(): firstTarget,
secondTarget.Hash(): secondTarget,
thirdTarget.Hash(): thirdTarget,
fourthTarget.Hash(): fourthTarget,
}

// test that targets and collectors are added properly
s.SetTargets(targetList)

// verify length
actualItems := s.TargetItems()

// all targets should be allocated
expectedTargetLen := len(targetList)
assert.Len(t, actualItems, expectedTargetLen)

// verify allocation to nodes
for targetHash, item := range targetList {
actualItem, found := actualItems[targetHash]

assert.True(t, found, "target with hash %s not found", item.Hash())

itemsForCollector := s.GetTargetsForCollectorAndJob(actualItem.CollectorName, actualItem.JobName)

// first two should be assigned one to each collector; if third target, it should be assigned
// according to the fallback strategy which may assign it to the otherwise empty collector or
// one of the others, depending on the strategy and collector loop order
if targetHash == thirdTarget.Hash() {
assert.Empty(t, item.GetNodeName())
assert.NotZero(t, len(itemsForCollector))
continue
}

// Only check targets that have been assigned using the per-node (not fallback) strategy here
assert.Len(t, GetTargetsWithNodeName(itemsForCollector), 1)
assert.Equal(t, actualItem, GetTargetsWithNodeName(itemsForCollector)[0])
}
}

func TestTargetsWithNoCollectorsPerNode(t *testing.T) {
// prepare allocator with initial targets and collectors
c, _ := New("per-node", loggerPerNode)