From b79a9650fb92a28989e7b1986373c43c2290fad5 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 15 Jul 2024 06:56:44 -0600 Subject: [PATCH 1/8] Point pylint to the root directory of every package (#4048) * Point pylint to the root directory of every package Fixes #3814 * Fix lint for api * Fix sdk lint * Fix sdk lint * Fix lint for b3 * Fix tox ini and lint * Fix zipkin --- .../test_benchmark_trace_exporter.py | 2 + .../tests/attributes/test_attributes.py | 4 +- .../tests/logs/test_logger_provider.py | 14 +++-- .../tests/metrics/test_instruments.py | 2 + opentelemetry-api/tests/metrics/test_meter.py | 18 +++--- .../tests/metrics/test_meter_provider.py | 7 ++- .../metrics/test_subclass_instantiation.py | 2 + .../propagators/test_global_httptextformat.py | 6 +- .../propagators/test_w3cbaggagepropagator.py | 4 +- .../test_tracecontexthttptextformat.py | 6 +- .../tests/util/test__providers.py | 4 +- .../metrics/test_benchmark_metrics.py | 2 + ...py => test_benchmark_metrics_histogram.py} | 12 ++-- .../benchmarks/trace/test_benchmark_trace.py | 5 +- .../test_exponent_mapping.py | 5 +- ...xponential_bucket_histogram_aggregation.py | 7 ++- .../test_logarithm_mapping.py | 5 +- ...t_explicit_bucket_histogram_aggregation.py | 2 + .../tests/metrics/test_aggregation.py | 2 + .../tests/metrics/test_backward_compat.py | 4 +- .../tests/metrics/test_import.py | 2 +- .../metrics/test_in_memory_metric_reader.py | 2 + .../tests/metrics/test_instrument.py | 3 +- .../metrics/test_measurement_consumer.py | 2 + .../tests/metrics/test_metric_reader.py | 5 +- .../metrics/test_metric_reader_storage.py | 2 + .../tests/metrics/test_metrics.py | 23 ++++---- .../test_periodic_exporting_metric_reader.py | 9 ++- opentelemetry-sdk/tests/metrics/test_point.py | 6 -- opentelemetry-sdk/tests/metrics/test_view.py | 2 + .../metrics/test_view_instrument_match.py | 4 +- .../propagation/test_benchmark_b3_format.py | 4 +- tox.ini | 59 +++++++------------ 33 files changed, 135 insertions(+), 101 deletions(-) rename opentelemetry-sdk/benchmarks/metrics/{test_benchmark_metrics_histogram,.py => test_benchmark_metrics_histogram.py} (92%) diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks/test_benchmark_trace_exporter.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks/test_benchmark_trace_exporter.py index 2b39a8feb33..9051dbeed0c 100644 --- a/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks/test_benchmark_trace_exporter.py +++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks/test_benchmark_trace_exporter.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=invalid-name + from unittest.mock import patch from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( diff --git a/opentelemetry-api/tests/attributes/test_attributes.py b/opentelemetry-api/tests/attributes/test_attributes.py index 61388de722b..cf6aecb41fa 100644 --- a/opentelemetry-api/tests/attributes/test_attributes.py +++ b/opentelemetry-api/tests/attributes/test_attributes.py @@ -21,6 +21,7 @@ class TestAttributes(unittest.TestCase): + # pylint: disable=invalid-name def assertValid(self, value, key="k"): expected = value if isinstance(value, MutableSequence): @@ -89,6 +90,7 @@ def test_sequence_attr_decode(self): class TestBoundedAttributes(unittest.TestCase): + # pylint: disable=consider-using-dict-items base = { "name": "Firulais", "age": 7, @@ -188,7 +190,7 @@ def test_locking(self): """ bdict = BoundedAttributes(immutable=False) - with bdict._lock: + with bdict._lock: # pylint: disable=protected-access for num in range(100): bdict[str(num)] = num diff --git a/opentelemetry-api/tests/logs/test_logger_provider.py b/opentelemetry-api/tests/logs/test_logger_provider.py index 84f3f13e9c3..71f403c695f 100644 --- a/opentelemetry-api/tests/logs/test_logger_provider.py +++ b/opentelemetry-api/tests/logs/test_logger_provider.py @@ -34,16 +34,16 @@ def tearDown(self): def test_set_logger_provider(self): lp_mock = Mock() # pylint: disable=protected-access - assert logs_internal._LOGGER_PROVIDER is None + self.assertIsNone(logs_internal._LOGGER_PROVIDER) set_logger_provider(lp_mock) - assert logs_internal._LOGGER_PROVIDER is lp_mock - assert get_logger_provider() is lp_mock + self.assertIs(logs_internal._LOGGER_PROVIDER, lp_mock) + self.assertIs(get_logger_provider(), lp_mock) def test_get_logger_provider(self): # pylint: disable=protected-access - assert logs_internal._LOGGER_PROVIDER is None + self.assertIsNone(logs_internal._LOGGER_PROVIDER) - assert isinstance( + self.assertIsInstance( get_logger_provider(), logs_internal.ProxyLoggerProvider ) @@ -59,4 +59,6 @@ def test_get_logger_provider(self): "opentelemetry._logs._internal.cast", Mock(**{"return_value": "test_logger_provider"}), ): - assert get_logger_provider() == "test_logger_provider" + self.assertEqual( + get_logger_provider(), "test_logger_provider" + ) diff --git a/opentelemetry-api/tests/metrics/test_instruments.py b/opentelemetry-api/tests/metrics/test_instruments.py index 12267433af0..840fbf6e132 100644 --- a/opentelemetry-api/tests/metrics/test_instruments.py +++ b/opentelemetry-api/tests/metrics/test_instruments.py @@ -36,6 +36,7 @@ class ChildInstrument(Instrument): + # pylint: disable=useless-parent-delegation def __init__(self, name, *args, unit="", description="", **kwargs): super().__init__( name, *args, unit=unit, description=description, **kwargs @@ -500,6 +501,7 @@ def test_up_down_counter_add_method(self): class TestObservableUpDownCounter(TestCase): + # pylint: disable=protected-access def test_create_observable_up_down_counter(self): """ Test that the ObservableUpDownCounter can be created with create_observable_up_down_counter. 
diff --git a/opentelemetry-api/tests/metrics/test_meter.py b/opentelemetry-api/tests/metrics/test_meter.py index 2226965521e..984690bdbbf 100644 --- a/opentelemetry-api/tests/metrics/test_meter.py +++ b/opentelemetry-api/tests/metrics/test_meter.py @@ -23,6 +23,7 @@ class ChildMeter(Meter): + # pylint: disable=signature-differs def create_counter(self, name, unit="", description=""): super().create_counter(name, unit=unit, description=description) @@ -32,10 +33,10 @@ def create_up_down_counter(self, name, unit="", description=""): ) def create_observable_counter( - self, name, callback, unit="", description="" + self, name, callbacks, unit="", description="" ): super().create_observable_counter( - name, callback, unit=unit, description=description + name, callbacks, unit=unit, description=description ) def create_histogram(self, name, unit="", description=""): @@ -44,20 +45,23 @@ def create_histogram(self, name, unit="", description=""): def create_gauge(self, name, unit="", description=""): super().create_gauge(name, unit=unit, description=description) - def create_observable_gauge(self, name, callback, unit="", description=""): + def create_observable_gauge( + self, name, callbacks, unit="", description="" + ): super().create_observable_gauge( - name, callback, unit=unit, description=description + name, callbacks, unit=unit, description=description ) def create_observable_up_down_counter( - self, name, callback, unit="", description="" + self, name, callbacks, unit="", description="" ): super().create_observable_up_down_counter( - name, callback, unit=unit, description=description + name, callbacks, unit=unit, description=description ) class TestMeter(TestCase): + # pylint: disable=no-member def test_repeated_instrument_names(self): try: @@ -72,7 +76,7 @@ def test_repeated_instrument_names(self): test_meter.create_observable_up_down_counter( "observable_up_down_counter", Mock() ) - except Exception as error: + except Exception as error: # pylint: disable=broad-exception-caught self.fail(f"Unexpected exception raised {error}") for instrument_name in [ diff --git a/opentelemetry-api/tests/metrics/test_meter_provider.py b/opentelemetry-api/tests/metrics/test_meter_provider.py index 559b56205ec..bce530d6caf 100644 --- a/opentelemetry-api/tests/metrics/test_meter_provider.py +++ b/opentelemetry-api/tests/metrics/test_meter_provider.py @@ -13,6 +13,8 @@ # limitations under the License. 
# type: ignore +# pylint: disable=protected-access + from unittest import TestCase from unittest.mock import Mock, patch @@ -54,6 +56,7 @@ def reset_meter_provider(): reset_metrics_globals() +# pylint: disable=redefined-outer-name def test_set_meter_provider(reset_meter_provider): """ Test that the API provides a way to set a global default MeterProvider @@ -113,7 +116,7 @@ def test_get_meter_parameters(self): NoOpMeterProvider().get_meter( "name", version="version", schema_url="schema_url" ) - except Exception as error: + except Exception as error: # pylint: disable=broad-exception-caught self.fail(f"Unexpected exception raised: {error}") def test_invalid_name(self): @@ -176,7 +179,7 @@ def test_proxy_provider(self): self.assertIsInstance(meter2, Mock) mock_real_mp.get_meter.assert_called_with(another_name, None, None) - # pylint: disable=too-many-locals + # pylint: disable=too-many-locals,too-many-statements def test_proxy_meter(self): meter_name = "foo" proxy_meter: _ProxyMeter = _ProxyMeterProvider().get_meter(meter_name) diff --git a/opentelemetry-api/tests/metrics/test_subclass_instantiation.py b/opentelemetry-api/tests/metrics/test_subclass_instantiation.py index a5b68d1c063..67001e8206b 100644 --- a/opentelemetry-api/tests/metrics/test_subclass_instantiation.py +++ b/opentelemetry-api/tests/metrics/test_subclass_instantiation.py @@ -17,6 +17,8 @@ # Any tests that fail here indicate that the public API has changed in a way that is not backwards compatible. # Either bump the major version of the API, or make the necessary changes to the API to remain semver compatible. +# pylint: disable=useless-parent-delegation,arguments-differ + from typing import Optional from opentelemetry.metrics import ( diff --git a/opentelemetry-api/tests/propagators/test_global_httptextformat.py b/opentelemetry-api/tests/propagators/test_global_httptextformat.py index 466ce6895f8..c383ec6030b 100644 --- a/opentelemetry-api/tests/propagators/test_global_httptextformat.py +++ b/opentelemetry-api/tests/propagators/test_global_httptextformat.py @@ -29,9 +29,9 @@ class TestDefaultGlobalPropagator(unittest.TestCase): SPAN_ID = int("1234567890123456", 16) # type:int def test_propagation(self): - traceparent_value = "00-{trace_id}-{span_id}-00".format( - trace_id=format_trace_id(self.TRACE_ID), - span_id=format_span_id(self.SPAN_ID), + traceparent_value = ( + f"00-{format_trace_id(self.TRACE_ID)}-" + f"{format_span_id(self.SPAN_ID)}-00" ) tracestate_value = "foo=1,bar=2,baz=3" headers = { diff --git a/opentelemetry-api/tests/propagators/test_w3cbaggagepropagator.py b/opentelemetry-api/tests/propagators/test_w3cbaggagepropagator.py index 99e37652ab0..2200fc6f384 100644 --- a/opentelemetry-api/tests/propagators/test_w3cbaggagepropagator.py +++ b/opentelemetry-api/tests/propagators/test_w3cbaggagepropagator.py @@ -27,6 +27,8 @@ class TestW3CBaggagePropagator(TestCase): + # pylint: disable=protected-access + # pylint: disable=too-many-public-methods def setUp(self): self.propagator = W3CBaggagePropagator() @@ -38,7 +40,7 @@ def _extract(self, header_value): def _inject(self, values): """Test helper""" ctx = get_current() - for k, v in values.items(): + for k, v in values.items(): # pylint: disable=invalid-name ctx = set_baggage(k, v, context=ctx) output = {} self.propagator.inject(output, context=ctx) diff --git a/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py b/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py index 7fefd8dea67..9db07add257 100644 --- 
a/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py +++ b/opentelemetry-api/tests/trace/propagation/test_tracecontexthttptextformat.py @@ -47,9 +47,9 @@ def test_headers_with_tracestate(self): """When there is a traceparent and tracestate header, data from both should be added to the SpanContext. """ - traceparent_value = "00-{trace_id}-{span_id}-00".format( - trace_id=format(self.TRACE_ID, "032x"), - span_id=format(self.SPAN_ID, "016x"), + traceparent_value = ( + f"00-{format(self.TRACE_ID, '032x')}-" + f"{format(self.SPAN_ID, '016x')}-00" ) tracestate_value = "foo=1,bar=2,baz=3" span_context = trace.get_current_span( diff --git a/opentelemetry-api/tests/util/test__providers.py b/opentelemetry-api/tests/util/test__providers.py index f7b21ebacf1..940303f1375 100644 --- a/opentelemetry-api/tests/util/test__providers.py +++ b/opentelemetry-api/tests/util/test__providers.py @@ -20,7 +20,7 @@ from opentelemetry.util import _providers -class Test_Providers(TestCase): +class Test_Providers(TestCase): # pylint: disable=invalid-name @patch.dict( environ, { # type: ignore @@ -49,7 +49,7 @@ def test__providers(self, mock_entry_points): ) self.assertEqual( - _providers._load_provider( + _providers._load_provider( # pylint: disable=protected-access "provider_environment_variable", "provider" ), "a", diff --git a/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics.py b/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics.py index 81fb0b6e1d8..7b062ce2c26 100644 --- a/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics.py +++ b/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics.py @@ -53,6 +53,7 @@ ) def test_counter_add(benchmark, num_labels, temporality): labels = {} + # pylint: disable=invalid-name for i in range(num_labels): labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)} @@ -68,6 +69,7 @@ def benchmark_counter_add(): @pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 10]) def test_up_down_counter_add(benchmark, num_labels): labels = {} + # pylint: disable=invalid-name for i in range(num_labels): labels = {f"Key{i}": f"Value{i}" for i in range(num_labels)} diff --git a/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics_histogram,.py b/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics_histogram.py similarity index 92% rename from opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics_histogram,.py rename to opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics_histogram.py index 2f9c4405418..1c7cdf2cb5a 100644 --- a/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics_histogram,.py +++ b/opentelemetry-sdk/benchmarks/metrics/test_benchmark_metrics_histogram.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+
+# pylint: disable=invalid-name
 import random
 
 import pytest
@@ -70,7 +72,7 @@ def _generate_bounds(bound_count):
 def test_histogram_record(benchmark, num_labels):
     labels = {}
     for i in range(num_labels):
-        labels["Key{}".format(i)] = "Value{}".format(i)
+        labels[f"Key{i}"] = f"Value{i}"
 
     def benchmark_histogram_record():
         hist.record(random.random() * MAX_BOUND_VALUE)
@@ -82,7 +84,7 @@ def benchmark_histogram_record():
 def test_histogram_record_10(benchmark, num_labels):
     labels = {}
     for i in range(num_labels):
-        labels["Key{}".format(i)] = "Value{}".format(i)
+        labels[f"Key{i}"] = f"Value{i}"
 
     def benchmark_histogram_record_10():
         hist10.record(random.random() * MAX_BOUND_VALUE)
@@ -94,7 +96,7 @@ def benchmark_histogram_record_10():
 def test_histogram_record_49(benchmark, num_labels):
     labels = {}
     for i in range(num_labels):
-        labels["Key{}".format(i)] = "Value{}".format(i)
+        labels[f"Key{i}"] = f"Value{i}"
 
     def benchmark_histogram_record_49():
         hist49.record(random.random() * MAX_BOUND_VALUE)
@@ -106,7 +108,7 @@ def benchmark_histogram_record_49():
 def test_histogram_record_50(benchmark, num_labels):
     labels = {}
     for i in range(num_labels):
-        labels["Key{}".format(i)] = "Value{}".format(i)
+        labels[f"Key{i}"] = f"Value{i}"
 
     def benchmark_histogram_record_50():
         hist50.record(random.random() * MAX_BOUND_VALUE)
@@ -118,7 +120,7 @@ def benchmark_histogram_record_50():
 def test_histogram_record_1000(benchmark, num_labels):
     labels = {}
     for i in range(num_labels):
-        labels["Key{}".format(i)] = "Value{}".format(i)
+        labels[f"Key{i}"] = f"Value{i}"
 
     def benchmark_histogram_record_1000():
         hist1000.record(random.random() * MAX_BOUND_VALUE)
diff --git a/opentelemetry-sdk/benchmarks/trace/test_benchmark_trace.py b/opentelemetry-sdk/benchmarks/trace/test_benchmark_trace.py
index a407a341f45..20a9b909427 100644
--- a/opentelemetry-sdk/benchmarks/trace/test_benchmark_trace.py
+++ b/opentelemetry-sdk/benchmarks/trace/test_benchmark_trace.py
@@ -12,11 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import opentelemetry.sdk.trace as trace
 from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import sampling
+from opentelemetry.sdk.trace import TracerProvider, sampling
 
-tracer = trace.TracerProvider(
+tracer = TracerProvider(
     sampler=sampling.DEFAULT_ON,
     resource=Resource(
         {
diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponent_mapping.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponent_mapping.py
index 96ba3991819..cfd33ef4a15 100644
--- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponent_mapping.py
+++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponent_mapping.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# pylint: disable=protected-access
+
 from math import inf
 from sys import float_info, version_info
 from unittest.mock import patch
@@ -60,7 +62,7 @@ def test_singleton(self):
 
         "opentelemetry.sdk.metrics._internal.exponential_histogram.mapping."
"exponent_mapping.ExponentMapping._init" ) - def test_init_called_once(self, mock_init): + def test_init_called_once(self, mock_init): # pylint: disable=no-self-use ExponentMapping(-3) ExponentMapping(-3) @@ -171,6 +173,7 @@ def test_exponent_mapping_neg_one(self): self.assertEqual(exponent_mapping.map_to_index(0.06), -3) def test_exponent_mapping_neg_four(self): + # pylint: disable=too-many-statements exponent_mapping = ExponentMapping(-4) self.assertEqual(exponent_mapping.map_to_index(float(0x1)), -1) self.assertEqual(exponent_mapping.map_to_index(float(0x10)), 0) diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index bae0aca20bf..e243e09643d 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=protected-access,too-many-lines,invalid-name +# pylint: disable=consider-using-enumerate,no-self-use,too-many-public-methods + import random as insecure_random from itertools import permutations from logging import WARNING @@ -386,7 +389,7 @@ def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket """ - + # pylint: disable=cell-var-from-loop self._counts[bucket_index] += increment exponential_histogram_aggregation = ( @@ -658,6 +661,7 @@ def test_aggregator_copy_swap(self): exponential_histogram_aggregation_1, ) + # pylint: disable=unnecessary-dunder-call exponential_histogram_aggregation_2._positive.__init__() exponential_histogram_aggregation_2._negative.__init__() exponential_histogram_aggregation_2._sum = 0 @@ -962,6 +966,7 @@ def collect_and_validate() -> None: upper_bound = 2 ** ((index + 1) / (2**scale)) matches = 0 for value in values: + # pylint: disable=chained-comparison if value > lower_bound and value <= upper_bound: matches += 1 assert ( diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_logarithm_mapping.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_logarithm_mapping.py index 1fd18845bb6..43820d677b0 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_logarithm_mapping.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_logarithm_mapping.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=protected-access + from math import sqrt from unittest import TestCase from unittest.mock import patch @@ -53,6 +55,7 @@ def left_boundary(scale: int, index: int) -> float: class TestLogarithmMapping(TestCase): + # pylint: disable=invalid-name def assertInEpsilon(self, first, second, epsilon): self.assertLessEqual(first, (second * (1 + epsilon))) self.assertGreaterEqual(first, (second * (1 - epsilon))) @@ -66,7 +69,7 @@ def assertInEpsilon(self, first, second, epsilon): "opentelemetry.sdk.metrics._internal.exponential_histogram.mapping." 
"logarithm_mapping.LogarithmMapping._init" ) - def test_init_called_once(self, mock_init): + def test_init_called_once(self, mock_init): # pylint: disable=no-self-use LogarithmMapping(3) LogarithmMapping(3) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_explicit_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/integration_test/test_explicit_bucket_histogram_aggregation.py index b705be33569..fee13525a32 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_explicit_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_explicit_bucket_histogram_aggregation.py @@ -104,6 +104,7 @@ def test_synchronous_delta_temporality(self): previous_time_unix_nano = metric_data.time_unix_nano self.assertEqual( metric_data.bucket_counts, + # pylint: disable=consider-using-generator tuple( [ 1 if internal_index == index + 2 else 0 @@ -224,6 +225,7 @@ def test_synchronous_cumulative_temporality(self): ) self.assertEqual( metric_data.bucket_counts, + # pylint: disable=consider-using-generator tuple( [ ( diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index fb87d177e8d..9d61da72a04 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=protected-access + from math import inf from time import sleep from typing import Union diff --git a/opentelemetry-sdk/tests/metrics/test_backward_compat.py b/opentelemetry-sdk/tests/metrics/test_backward_compat.py index 46008554fe6..e29ca71469f 100644 --- a/opentelemetry-sdk/tests/metrics/test_backward_compat.py +++ b/opentelemetry-sdk/tests/metrics/test_backward_compat.py @@ -44,7 +44,7 @@ class OrigMetricExporter(MetricExporter): def export( self, - metrics: Sequence[Metric], + metrics_data: Sequence[Metric], timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: @@ -60,7 +60,7 @@ def force_flush(self, timeout_millis: float = 10_000) -> bool: class OrigMetricReader(MetricReader): def _receive_metrics( self, - metrics: Iterable[Metric], + metrics_data: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: diff --git a/opentelemetry-sdk/tests/metrics/test_import.py b/opentelemetry-sdk/tests/metrics/test_import.py index 73b9e1ece9c..5d656acce69 100644 --- a/opentelemetry-sdk/tests/metrics/test_import.py +++ b/opentelemetry-sdk/tests/metrics/test_import.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# pylint: disable=unused-import +# pylint: disable=unused-import,import-outside-toplevel,too-many-locals from opentelemetry.test import TestCase diff --git a/opentelemetry-sdk/tests/metrics/test_in_memory_metric_reader.py b/opentelemetry-sdk/tests/metrics/test_in_memory_metric_reader.py index 68c81e8b7ef..40a0f3a3042 100644 --- a/opentelemetry-sdk/tests/metrics/test_in_memory_metric_reader.py +++ b/opentelemetry-sdk/tests/metrics/test_in_memory_metric_reader.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=protected-access + from time import sleep from unittest import TestCase from unittest.mock import Mock diff --git a/opentelemetry-sdk/tests/metrics/test_instrument.py b/opentelemetry-sdk/tests/metrics/test_instrument.py index d103050994e..d4a2ddf5094 100644 --- a/opentelemetry-sdk/tests/metrics/test_instrument.py +++ b/opentelemetry-sdk/tests/metrics/test_instrument.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=no-self-use + from logging import WARNING from unittest import TestCase from unittest.mock import Mock @@ -198,7 +200,6 @@ def test_generator_callback_0(self): ) def test_generator_multiple_generator_callback(self): - self.maxDiff = None observable_gauge = _ObservableGauge( "name", Mock(), diff --git a/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py b/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py index 9d3b9691d61..19c514c13e8 100644 --- a/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py +++ b/opentelemetry-sdk/tests/metrics/test_measurement_consumer.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=invalid-name,no-self-use + from time import sleep from unittest import TestCase from unittest.mock import MagicMock, Mock, patch diff --git a/opentelemetry-sdk/tests/metrics/test_metric_reader.py b/opentelemetry-sdk/tests/metrics/test_metric_reader.py index fff645e36d9..5a09112bd0e 100644 --- a/opentelemetry-sdk/tests/metrics/test_metric_reader.py +++ b/opentelemetry-sdk/tests/metrics/test_metric_reader.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=protected-access + from typing import Dict, Iterable from unittest import TestCase from unittest.mock import patch @@ -62,7 +64,7 @@ def __init__( def _receive_metrics( self, - metrics: Iterable[Metric], + metrics_data: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: @@ -146,6 +148,7 @@ def test_configure_aggregation(self): LastValueAggregation, ) + # pylint: disable=no-self-use def test_force_flush(self): with patch.object(DummyMetricReader, "collect") as mock_collect: diff --git a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py index 5bcf07f6b68..2aac9874659 100644 --- a/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py +++ b/opentelemetry-sdk/tests/metrics/test_metric_reader_storage.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=protected-access,invalid-name + from logging import WARNING from unittest.mock import MagicMock, Mock, patch diff --git a/opentelemetry-sdk/tests/metrics/test_metrics.py b/opentelemetry-sdk/tests/metrics/test_metrics.py index d55262274b6..f899b30808a 100644 --- a/opentelemetry-sdk/tests/metrics/test_metrics.py +++ b/opentelemetry-sdk/tests/metrics/test_metrics.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=protected-access,no-self-use + from logging import WARNING from time import sleep @@ -52,7 +54,7 @@ def __init__(self): def _receive_metrics( self, - metrics: Iterable[Metric], + metrics_data: Iterable[Metric], timeout_millis: float = 10_000, **kwargs, ) -> None: @@ -316,7 +318,7 @@ def test_shutdown_race(self, mock_logger): self.assertEqual(mock_logger.warning.call_count, num_threads - 1) @patch( - "opentelemetry.sdk.metrics._internal." "SynchronousMeasurementConsumer" + "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_measurement_collect_callback( self, mock_sync_measurement_consumer @@ -339,7 +341,7 @@ def test_measurement_collect_callback( ) @patch( - "opentelemetry.sdk.metrics." "_internal.SynchronousMeasurementConsumer" + "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_creates_sync_measurement_consumer( self, mock_sync_measurement_consumer @@ -348,7 +350,7 @@ def test_creates_sync_measurement_consumer( mock_sync_measurement_consumer.assert_called() @patch( - "opentelemetry.sdk.metrics." "_internal.SynchronousMeasurementConsumer" + "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_register_asynchronous_instrument( self, mock_sync_measurement_consumer @@ -356,6 +358,7 @@ def test_register_asynchronous_instrument( meter_provider = MeterProvider() + # pylint: disable=no-member meter_provider._measurement_consumer.register_asynchronous_instrument.assert_called_with( meter_provider.get_meter("name").create_observable_counter( "name0", callbacks=[Mock()] @@ -373,7 +376,7 @@ def test_register_asynchronous_instrument( ) @patch( - "opentelemetry.sdk.metrics._internal." "SynchronousMeasurementConsumer" + "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_counter(self, mock_sync_measurement_consumer): sync_consumer_instance = mock_sync_measurement_consumer() @@ -385,7 +388,7 @@ def test_consume_measurement_counter(self, mock_sync_measurement_consumer): sync_consumer_instance.consume_measurement.assert_called() @patch( - "opentelemetry.sdk.metrics." "_internal.SynchronousMeasurementConsumer" + "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_up_down_counter( self, mock_sync_measurement_consumer @@ -401,7 +404,7 @@ def test_consume_measurement_up_down_counter( sync_consumer_instance.consume_measurement.assert_called() @patch( - "opentelemetry.sdk.metrics._internal." "SynchronousMeasurementConsumer" + "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_histogram( self, mock_sync_measurement_consumer @@ -415,7 +418,7 @@ def test_consume_measurement_histogram( sync_consumer_instance.consume_measurement.assert_called() @patch( - "opentelemetry.sdk.metrics._internal." 
"SynchronousMeasurementConsumer" + "opentelemetry.sdk.metrics._internal.SynchronousMeasurementConsumer" ) def test_consume_measurement_gauge(self, mock_sync_measurement_consumer): sync_consumer_instance = mock_sync_measurement_consumer() @@ -544,11 +547,11 @@ def __init__(self): def export( self, - metrics: Sequence[Metric], + metrics_data: Sequence[Metric], timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: - self.metrics[self._counter] = metrics + self.metrics[self._counter] = metrics_data self._counter += 1 return MetricExportResult.SUCCESS diff --git a/opentelemetry-sdk/tests/metrics/test_periodic_exporting_metric_reader.py b/opentelemetry-sdk/tests/metrics/test_periodic_exporting_metric_reader.py index 98f59526ef6..962b4fdd643 100644 --- a/opentelemetry-sdk/tests/metrics/test_periodic_exporting_metric_reader.py +++ b/opentelemetry-sdk/tests/metrics/test_periodic_exporting_metric_reader.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=protected-access,invalid-name,no-self-use + import math from logging import WARNING from time import sleep, time_ns @@ -53,12 +55,12 @@ def __init__( def export( self, - metrics: Sequence[Metric], + metrics_data: Sequence[Metric], timeout_millis: float = 10_000, **kwargs, ) -> MetricExportResult: sleep(self.wait) - self.metrics.extend(metrics) + self.metrics.extend(metrics_data) return True def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: @@ -83,6 +85,7 @@ def __init__( ) self._collect_exception = exception + # pylint: disable=overridden-final-method def collect(self, timeout_millis: float = 10_000) -> None: raise self._collect_exception @@ -214,7 +217,7 @@ def test_shutdown_multiple_times(self): pmr = self._create_periodic_reader([], FakeMetricsExporter()) with self.assertLogs(level="WARNING") as w: self.run_with_many_threads(pmr.shutdown) - self.assertTrue("Can't shutdown multiple times", w.output[0]) + self.assertTrue("Can't shutdown multiple times" in w.output[0]) with self.assertLogs(level="WARNING") as w: pmr.shutdown() diff --git a/opentelemetry-sdk/tests/metrics/test_point.py b/opentelemetry-sdk/tests/metrics/test_point.py index cff07ff6aea..846f2c2fc9f 100644 --- a/opentelemetry-sdk/tests/metrics/test_point.py +++ b/opentelemetry-sdk/tests/metrics/test_point.py @@ -238,8 +238,6 @@ def test_histogram_data_point(self): def test_exp_histogram_data_point(self): - self.maxDiff = None - self.assertEqual( self.exp_histogram_data_point_0.to_json(indent=None), self.exp_histogram_data_point_0_str, @@ -251,8 +249,6 @@ def test_sum(self): def test_gauge(self): - self.maxDiff = None - self.assertEqual(self.gauge_0.to_json(indent=None), self.gauge_0_str) def test_histogram(self): @@ -263,8 +259,6 @@ def test_histogram(self): def test_exp_histogram(self): - self.maxDiff = None - self.assertEqual( self.exp_histogram_0.to_json(indent=None), self.exp_histogram_0_str ) diff --git a/opentelemetry-sdk/tests/metrics/test_view.py b/opentelemetry-sdk/tests/metrics/test_view.py index 00376a0068b..96d433677cf 100644 --- a/opentelemetry-sdk/tests/metrics/test_view.py +++ b/opentelemetry-sdk/tests/metrics/test_view.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# pylint: disable=protected-access + from unittest import TestCase from unittest.mock import Mock diff --git a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py index c22c2d7a96b..f4d2d02351a 100644 --- a/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py +++ b/opentelemetry-sdk/tests/metrics/test_view_instrument_match.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# pylint: disable=protected-access + from unittest import TestCase from unittest.mock import MagicMock, Mock @@ -36,7 +38,7 @@ ) -class Test_ViewInstrumentMatch(TestCase): +class Test_ViewInstrumentMatch(TestCase): # pylint: disable=invalid-name @classmethod def setUpClass(cls): diff --git a/propagator/opentelemetry-propagator-b3/benchmarks/trace/propagation/test_benchmark_b3_format.py b/propagator/opentelemetry-propagator-b3/benchmarks/trace/propagation/test_benchmark_b3_format.py index 23cbf773eda..26fdb650f27 100644 --- a/propagator/opentelemetry-propagator-b3/benchmarks/trace/propagation/test_benchmark_b3_format.py +++ b/propagator/opentelemetry-propagator-b3/benchmarks/trace/propagation/test_benchmark_b3_format.py @@ -13,7 +13,7 @@ # limitations under the License. import opentelemetry.propagators.b3 as b3_format -import opentelemetry.sdk.trace as trace +from opentelemetry.sdk.trace import TracerProvider FORMAT = b3_format.B3Format() @@ -28,7 +28,7 @@ def test_extract_single_header(benchmark): def test_inject_empty_context(benchmark): - tracer = trace.TracerProvider().get_tracer("sdk_tracer_provider") + tracer = TracerProvider().get_tracer("sdk_tracer_provider") with tracer.start_as_current_span("Root Span"): with tracer.start_as_current_span("Child Span"): benchmark( diff --git a/tox.ini b/tox.ini index dfa3edd0289..fe9906e15ad 100644 --- a/tox.ini +++ b/tox.ini @@ -18,8 +18,8 @@ envlist = pypy3-test-opentelemetry-proto-{0,1} lint-opentelemetry-proto - py3{8,9,10,11,12}-opentelemetry-sdk - pypy3-opentelemetry-sdk + py3{8,9,10,11,12}-test-opentelemetry-sdk + pypy3-test-opentelemetry-sdk lint-opentelemetry-sdk benchmark-opentelemetry-sdk @@ -119,6 +119,7 @@ deps = proto3: protobuf~=3.19.0 proto4: protobuf~=4.0 +allowlist_externals = sh setenv = ; override CONTRIB_REPO_SHA via env variable when testing other branches/commits than main @@ -198,30 +199,26 @@ commands = lint-opentelemetry-api: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/opentelemetry-api lint-opentelemetry-api: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/opentelemetry-api lint-opentelemetry-api: flake8 --config {toxinidir}/.flake8 {toxinidir}/opentelemetry-api - lint-opentelemetry-api: pylint {toxinidir}/opentelemetry-api/src/opentelemetry - lint-opentelemetry-api: pylint {toxinidir}/opentelemetry-api/tests + lint-opentelemetry-api: pylint {toxinidir}/opentelemetry-api test-opentelemetry-sdk: pytest {toxinidir}/opentelemetry-sdk/tests {posargs} lint-opentelemetry-sdk: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/opentelemetry-sdk lint-opentelemetry-sdk: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/opentelemetry-sdk lint-opentelemetry-sdk: flake8 --config {toxinidir}/.flake8 {toxinidir}/opentelemetry-sdk - lint-opentelemetry-sdk: pylint {toxinidir}/opentelemetry-sdk/src/opentelemetry - lint-opentelemetry-sdk: pylint {toxinidir}/opentelemetry-sdk/tests + lint-opentelemetry-sdk: pylint 
{toxinidir}/opentelemetry-sdk benchmark-opentelemetry-sdk: pytest {toxinidir}/opentelemetry-sdk/benchmarks {posargs} --benchmark-json=sdk-benchmark.json test-opentelemetry-proto: pytest {toxinidir}/opentelemetry-proto/tests {posargs} lint-opentelemetry-proto: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/opentelemetry-proto lint-opentelemetry-proto: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/opentelemetry-proto lint-opentelemetry-proto: flake8 --config {toxinidir}/.flake8 {toxinidir}/opentelemetry-proto - lint-opentelemetry-proto: pylint {toxinidir}/opentelemetry-proto/src/opentelemetry - lint-opentelemetry-proto: pylint {toxinidir}/opentelemetry-proto/tests + lint-opentelemetry-proto: pylint {toxinidir}/opentelemetry-proto test-opentelemetry-semantic-conventions: pytest {toxinidir}/opentelemetry-semantic-conventions/tests {posargs} lint-opentelemetry-semantic-conventions: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/opentelemetry-semantic-conventions lint-opentelemetry-semantic-conventions: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/opentelemetry-semantic-conventions lint-opentelemetry-semantic-conventions: flake8 --config {toxinidir}/.flake8 {toxinidir}/opentelemetry-semantic-conventions - lint-opentelemetry-semantic-conventions: pylint {toxinidir}/opentelemetry-semantic-conventions/src/opentelemetry - lint-opentelemetry-semantic-conventions: pylint {toxinidir}/opentelemetry-semantic-conventions/tests + lint-opentelemetry-semantic-conventions: pylint {toxinidir}/opentelemetry-semantic-conventions test-opentelemetry-getting-started: pytest {toxinidir}/docs/getting_started/tests {posargs} lint-opentelemetry-getting-started: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/docs/getting_started @@ -233,101 +230,87 @@ commands = lint-opentelemetry-opentracing-shim: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/shim/opentelemetry-opentracing-shim lint-opentelemetry-opentracing-shim: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/shim/opentelemetry-opentracing-shim lint-opentelemetry-opentracing-shim: flake8 --config {toxinidir}/.flake8 {toxinidir}/shim/opentelemetry-opentracing-shim - lint-opentelemetry-opentracing-shim: pylint {toxinidir}/shim/opentelemetry-opentracing-shim/src/opentelemetry - lint-opentelemetry-opentracing-shim: pylint {toxinidir}/shim/opentelemetry-opentracing-shim/tests + lint-opentelemetry-opentracing-shim: sh -c "cd shim && pylint --rcfile ../.pylintrc {toxinidir}/shim/opentelemetry-opentracing-shim" test-opentelemetry-opencensus-shim: pytest {toxinidir}/shim/opentelemetry-opencensus-shim/tests {posargs} lint-opentelemetry-opencensus-shim: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/shim/opentelemetry-opencensus-shim lint-opentelemetry-opencensus-shim: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/shim/opentelemetry-opencensus-shim lint-opentelemetry-opencensus-shim: flake8 --config {toxinidir}/.flake8 {toxinidir}/shim/opentelemetry-opencensus-shim - lint-opentelemetry-opencensus-shim: pylint {toxinidir}/shim/opentelemetry-opencensus-shim/src/opentelemetry - lint-opentelemetry-opencensus-shim: pylint {toxinidir}/shim/opentelemetry-opencensus-shim/tests + lint-opentelemetry-opencensus-shim: sh -c "cd shim && pylint --rcfile ../.pylintrc {toxinidir}/shim/opentelemetry-opencensus-shim" test-opentelemetry-exporter-opencensus: pytest 
{toxinidir}/exporter/opentelemetry-exporter-opencensus/tests {posargs} lint-opentelemetry-exporter-opencensus: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-opencensus lint-opentelemetry-exporter-opencensus: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-opencensus lint-opentelemetry-exporter-opencensus: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-opencensus - lint-opentelemetry-exporter-opencensus: pylint {toxinidir}/exporter/opentelemetry-exporter-opencensus/src/opentelemetry - lint-opentelemetry-exporter-opencensus: pylint {toxinidir}/exporter/opentelemetry-exporter-opencensus/tests + lint-opentelemetry-exporter-opencensus: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-opencensus" test-opentelemetry-exporter-otlp-proto-common: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/tests {posargs} lint-opentelemetry-exporter-otlp-proto-common: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common lint-opentelemetry-exporter-otlp-proto-common: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common lint-opentelemetry-exporter-otlp-proto-common: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common - lint-opentelemetry-exporter-otlp-proto-common: pylint {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/src/opentelemetry - lint-opentelemetry-exporter-otlp-proto-common: pylint {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/tests + lint-opentelemetry-exporter-otlp-proto-common: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common" test-opentelemetry-exporter-otlp-combined: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp/tests {posargs} lint-opentelemetry-exporter-otlp-combined: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-otlp lint-opentelemetry-exporter-otlp-combined: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-otlp lint-opentelemetry-exporter-otlp-combined: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-otlp - lint-opentelemetry-exporter-otlp-combined: pylint {toxinidir}/exporter/opentelemetry-exporter-otlp/src/opentelemetry - lint-opentelemetry-exporter-otlp-combined: pylint {toxinidir}/exporter/opentelemetry-exporter-otlp/tests + lint-opentelemetry-exporter-otlp-combined: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp" test-opentelemetry-exporter-otlp-proto-grpc: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/tests {posargs} lint-opentelemetry-exporter-otlp-proto-grpc: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc lint-opentelemetry-exporter-otlp-proto-grpc: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc lint-opentelemetry-exporter-otlp-proto-grpc: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc - lint-opentelemetry-exporter-otlp-proto-grpc: pylint 
{toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry - lint-opentelemetry-exporter-otlp-proto-grpc: pylint {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/tests + lint-opentelemetry-exporter-otlp-proto-grpc: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc" benchmark-opentelemetry-exporter-otlp-proto-grpc: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks {posargs} --benchmark-json=exporter-otlp-proto-grpc-benchmark.json test-opentelemetry-exporter-otlp-proto-http: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/tests {posargs} lint-opentelemetry-exporter-otlp-proto-http: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http lint-opentelemetry-exporter-otlp-proto-http: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http lint-opentelemetry-exporter-otlp-proto-http: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http - lint-opentelemetry-exporter-otlp-proto-http: pylint {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry - lint-opentelemetry-exporter-otlp-proto-http: pylint {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/tests + lint-opentelemetry-exporter-otlp-proto-http: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http" test-opentelemetry-exporter-prometheus: pytest {toxinidir}/exporter/opentelemetry-exporter-prometheus/tests {posargs} lint-opentelemetry-exporter-prometheus: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-prometheus lint-opentelemetry-exporter-prometheus: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-prometheus lint-opentelemetry-exporter-prometheus: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-prometheus - lint-opentelemetry-exporter-prometheus: pylint {toxinidir}/exporter/opentelemetry-exporter-prometheus/src/opentelemetry - lint-opentelemetry-exporter-prometheus: pylint {toxinidir}/exporter/opentelemetry-exporter-prometheus/tests + lint-opentelemetry-exporter-prometheus: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-prometheus" test-opentelemetry-exporter-zipkin-combined: pytest {toxinidir}/exporter/opentelemetry-exporter-zipkin/tests {posargs} lint-opentelemetry-exporter-zipkin-combined: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-zipkin lint-opentelemetry-exporter-zipkin-combined: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-zipkin lint-opentelemetry-exporter-zipkin-combined: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-zipkin - lint-opentelemetry-exporter-zipkin-combined: pylint {toxinidir}/exporter/opentelemetry-exporter-zipkin/src/opentelemetry - lint-opentelemetry-exporter-zipkin-combined: pylint {toxinidir}/exporter/opentelemetry-exporter-zipkin/tests + lint-opentelemetry-exporter-zipkin-combined: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-zipkin" test-opentelemetry-exporter-zipkin-proto-http: pytest 
{toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http/tests {posargs} lint-opentelemetry-exporter-zipkin-proto-http: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http lint-opentelemetry-exporter-zipkin-proto-http: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http lint-opentelemetry-exporter-zipkin-proto-http: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http - lint-opentelemetry-exporter-zipkin-proto-http: pylint {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http/src/opentelemetry - lint-opentelemetry-exporter-zipkin-proto-http: pylint {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http/tests + lint-opentelemetry-exporter-zipkin-proto-http: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http" test-opentelemetry-exporter-zipkin-json: pytest {toxinidir}/exporter/opentelemetry-exporter-zipkin-json/tests {posargs} lint-opentelemetry-exporter-zipkin-json: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/exporter/opentelemetry-exporter-zipkin-json lint-opentelemetry-exporter-zipkin-json: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-zipkin-json lint-opentelemetry-exporter-zipkin-json: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-zipkin-json - lint-opentelemetry-exporter-zipkin-json: pylint {toxinidir}/exporter/opentelemetry-exporter-zipkin-json/src/opentelemetry - lint-opentelemetry-exporter-zipkin-json: pylint {toxinidir}/exporter/opentelemetry-exporter-zipkin-json/tests + lint-opentelemetry-exporter-zipkin-json: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-zipkin-json" test-opentelemetry-propagator-b3: pytest {toxinidir}/propagator/opentelemetry-propagator-b3/tests {posargs} lint-opentelemetry-propagator-b3: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/propagator/opentelemetry-propagator-b3 lint-opentelemetry-propagator-b3: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/propagator/opentelemetry-propagator-b3 lint-opentelemetry-propagator-b3: flake8 --config {toxinidir}/.flake8 {toxinidir}/propagator/opentelemetry-propagator-b3 - lint-opentelemetry-propagator-b3: pylint {toxinidir}/propagator/opentelemetry-propagator-b3/src/opentelemetry - lint-opentelemetry-propagator-b3: pylint {toxinidir}/propagator/opentelemetry-propagator-b3/tests + lint-opentelemetry-propagator-b3: sh -c "cd propagator && pylint --rcfile ../.pylintrc {toxinidir}/propagator/opentelemetry-propagator-b3" benchmark-opentelemetry-propagator-b3: pytest {toxinidir}/propagator/opentelemetry-propagator-b3/benchmarks {posargs} --benchmark-json=propagator-b3-benchmark.json test-opentelemetry-propagator-jaeger: pytest {toxinidir}/propagator/opentelemetry-propagator-jaeger/tests {posargs} lint-opentelemetry-propagator-jaeger: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/propagator/opentelemetry-propagator-jaeger lint-opentelemetry-propagator-jaeger: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/propagator/opentelemetry-propagator-jaeger lint-opentelemetry-propagator-jaeger: flake8 --config {toxinidir}/.flake8 {toxinidir}/propagator/opentelemetry-propagator-jaeger - 
lint-opentelemetry-propagator-jaeger: pylint {toxinidir}/propagator/opentelemetry-propagator-jaeger/src/opentelemetry - lint-opentelemetry-propagator-jaeger: pylint {toxinidir}/propagator/opentelemetry-propagator-jaeger/tests + lint-opentelemetry-propagator-jaeger: sh -c "cd propagator && pylint --rcfile ../.pylintrc {toxinidir}/propagator/opentelemetry-propagator-jaeger" test-opentelemetry-test-utils: pytest {toxinidir}/tests/opentelemetry-test-utils/tests {posargs} lint-opentelemetry-test-utils: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/tests/opentelemetry-test-utils lint-opentelemetry-test-utils: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/tests/opentelemetry-test-utils lint-opentelemetry-test-utils: flake8 --config {toxinidir}/.flake8 {toxinidir}/tests/opentelemetry-test-utils - lint-opentelemetry-test-utils: pylint {toxinidir}/tests/opentelemetry-test-utils/src/opentelemetry - lint-opentelemetry-test-utils: pylint {toxinidir}/tests/opentelemetry-test-utils/tests + lint-opentelemetry-test-utils: sh -c "cd tests && pylint --rcfile ../.pylintrc {toxinidir}/tests/opentelemetry-test-utils" coverage: {toxinidir}/scripts/coverage.sh From 58f34f8eeba9757cc3c2ef69feffba42e3a35af0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Em=C3=ADdio=20Neto?= <9735060+emdneto@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:23:17 -0300 Subject: [PATCH 2/8] Benchmarks job is failing on CI (#4056) --- .github/workflows/benchmarks.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 965b701f312..be9b44d8e2a 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -6,7 +6,6 @@ on: jobs: sdk-benchmarks: - env: runs-on: self-hosted steps: - name: Checkout Core Repo @ SHA - ${{ github.sha }} From 5dbb385a3397cde6bbf6bc579e15e23a6620d4c9 Mon Sep 17 00:00:00 2001 From: Igor Udot <47724762+horw@users.noreply.github.com> Date: Mon, 15 Jul 2024 10:13:13 -0700 Subject: [PATCH 3/8] fix: grpc endpoint (#4051) --- docs/examples/logs/README.rst | 15 ++++++++++----- docs/examples/logs/otel-collector-config.yaml | 1 + docs/examples/metrics/instruments/README.rst | 1 + .../instruments/otel-collector-config.yaml | 1 + 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/docs/examples/logs/README.rst b/docs/examples/logs/README.rst index 608a904f1c2..5032b8816f2 100644 --- a/docs/examples/logs/README.rst +++ b/docs/examples/logs/README.rst @@ -15,13 +15,14 @@ Start the Collector locally to see data being exported. Write the following file otlp: protocols: grpc: - - processors: - batch: + endpoint: 0.0.0.0:4317 exporters: logging: - verbosity: detailed + loglevel: debug + + processors: + batch: service: pipelines: @@ -29,7 +30,11 @@ Start the Collector locally to see data being exported. Write the following file receivers: [otlp] processors: [batch] exporters: [logging] - + traces: + receivers: [otlp] + processors: [batch] + exporters: [logging] + Then start the Docker container: .. 
code-block:: sh diff --git a/docs/examples/logs/otel-collector-config.yaml b/docs/examples/logs/otel-collector-config.yaml index e08cbc18738..dcc17bb3628 100644 --- a/docs/examples/logs/otel-collector-config.yaml +++ b/docs/examples/logs/otel-collector-config.yaml @@ -2,6 +2,7 @@ receivers: otlp: protocols: grpc: + endpoint: 0.0.0.0:4317 exporters: logging: diff --git a/docs/examples/metrics/instruments/README.rst b/docs/examples/metrics/instruments/README.rst index 50e80a945e1..465f2b0e338 100644 --- a/docs/examples/metrics/instruments/README.rst +++ b/docs/examples/metrics/instruments/README.rst @@ -10,6 +10,7 @@ Start the Collector locally to see data being exported. Write the following file otlp: protocols: grpc: + endpoint: 0.0.0.0:4317 exporters: logging: diff --git a/docs/examples/metrics/instruments/otel-collector-config.yaml b/docs/examples/metrics/instruments/otel-collector-config.yaml index 3ae12695e6d..1cd24e8a2e0 100644 --- a/docs/examples/metrics/instruments/otel-collector-config.yaml +++ b/docs/examples/metrics/instruments/otel-collector-config.yaml @@ -2,6 +2,7 @@ receivers: otlp: protocols: grpc: + endpoint: 0.0.0.0:4317 exporters: logging: From b600499136b25ed83be9581e11f84a653c53c940 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Em=C3=ADdio=20Neto?= <9735060+emdneto@users.noreply.github.com> Date: Mon, 15 Jul 2024 17:38:23 -0300 Subject: [PATCH 4/8] fix benchmark sdk ci empty output (#4059) --- tox.ini | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index fe9906e15ad..5024ff31d80 100644 --- a/tox.ini +++ b/tox.ini @@ -206,7 +206,7 @@ commands = lint-opentelemetry-sdk: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/opentelemetry-sdk lint-opentelemetry-sdk: flake8 --config {toxinidir}/.flake8 {toxinidir}/opentelemetry-sdk lint-opentelemetry-sdk: pylint {toxinidir}/opentelemetry-sdk - benchmark-opentelemetry-sdk: pytest {toxinidir}/opentelemetry-sdk/benchmarks {posargs} --benchmark-json=sdk-benchmark.json + benchmark-opentelemetry-sdk: pytest {toxinidir}/opentelemetry-sdk/benchmarks --benchmark-json={toxinidir}/opentelemetry-sdk/sdk-benchmark.json {posargs} test-opentelemetry-proto: pytest {toxinidir}/opentelemetry-proto/tests {posargs} lint-opentelemetry-proto: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/opentelemetry-proto @@ -261,7 +261,7 @@ commands = lint-opentelemetry-exporter-otlp-proto-grpc: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc lint-opentelemetry-exporter-otlp-proto-grpc: flake8 --config {toxinidir}/.flake8 {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc lint-opentelemetry-exporter-otlp-proto-grpc: sh -c "cd exporter && pylint --rcfile ../.pylintrc {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc" - benchmark-opentelemetry-exporter-otlp-proto-grpc: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks {posargs} --benchmark-json=exporter-otlp-proto-grpc-benchmark.json + benchmark-opentelemetry-exporter-otlp-proto-grpc: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/benchmarks --benchmark-json=exporter-otlp-proto-grpc-benchmark.json {posargs} test-opentelemetry-exporter-otlp-proto-http: pytest {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/tests {posargs} lint-opentelemetry-exporter-otlp-proto-http: black --diff --check --config {toxinidir}/pyproject.toml 
{toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http @@ -298,7 +298,7 @@ commands = lint-opentelemetry-propagator-b3: isort --diff --check-only --settings-path {toxinidir}/.isort.cfg {toxinidir}/propagator/opentelemetry-propagator-b3 lint-opentelemetry-propagator-b3: flake8 --config {toxinidir}/.flake8 {toxinidir}/propagator/opentelemetry-propagator-b3 lint-opentelemetry-propagator-b3: sh -c "cd propagator && pylint --rcfile ../.pylintrc {toxinidir}/propagator/opentelemetry-propagator-b3" - benchmark-opentelemetry-propagator-b3: pytest {toxinidir}/propagator/opentelemetry-propagator-b3/benchmarks {posargs} --benchmark-json=propagator-b3-benchmark.json + benchmark-opentelemetry-propagator-b3: pytest {toxinidir}/propagator/opentelemetry-propagator-b3/benchmarks --benchmark-json=propagator-b3-benchmark.json {posargs} test-opentelemetry-propagator-jaeger: pytest {toxinidir}/propagator/opentelemetry-propagator-jaeger/tests {posargs} lint-opentelemetry-propagator-jaeger: black --diff --check --config {toxinidir}/pyproject.toml {toxinidir}/propagator/opentelemetry-propagator-jaeger From 2447868e76100bee5ad01faf78f84bf71cdf6cf5 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 16 Jul 2024 14:43:10 -0600 Subject: [PATCH 5/8] Remove sklearn from contrib tests (#4064) Fixes #4063 --- .github/workflows/test.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 179233c9254..e02495418e8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -10,7 +10,7 @@ env: # Otherwise, set variable to the commit of your branch on # opentelemetry-python-contrib which is compatible with these Core repo # changes. - CONTRIB_REPO_SHA: 7d4573da89c7aef748614e6f1511be3eddf5b230 + CONTRIB_REPO_SHA: main # This is needed because we do not clone the core repo in contrib builds anymore. 
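The benchmark commands changed above now pass `--benchmark-json` before `{posargs}` (and, for the SDK, pin the output to a path under `{toxinidir}`), so caller-supplied pytest arguments land last instead of displacing the output flag. For orientation, here is a minimal sketch of the style of pytest-benchmark test these tox environments run; the benchmarked span operation is illustrative, not taken from the repository:

.. code-block:: python

    from opentelemetry.sdk.trace import TracerProvider

    tracer = TracerProvider().get_tracer("benchmark")


    def test_start_span(benchmark):
        def start_and_end_span():
            with tracer.start_as_current_span("span"):
                pass

        # pytest-benchmark times repeated calls to the function; running
        # pytest with --benchmark-json=<path> dumps the timings as JSON,
        # which is the file the CI jobs above consume.
        benchmark(start_and_end_span)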
# When running contrib builds as part of core builds, we use actions/checkout@v4 which @@ -174,7 +174,6 @@ jobs: - "redis" - "remoulade" - "requests" - - "sklearn" - "sqlalchemy" - "sqlite3" - "starlette" From b1e99c1555721f818e578d7457587693e767e182 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Em=C3=ADdio=20Neto?= <9735060+emdneto@users.noreply.github.com> Date: Tue, 16 Jul 2024 18:46:42 -0300 Subject: [PATCH 6/8] Add otlp exporters missing documentation (#4055) --- docs-requirements.txt | 4 ++++ docs/conf.py | 23 +++++++++++++++++++++++ docs/exporter/otlp/otlp.rst | 24 +++++++++++++++++++++++- 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/docs-requirements.txt b/docs-requirements.txt index 983fcddd8a4..799a10cb8e8 100644 --- a/docs-requirements.txt +++ b/docs-requirements.txt @@ -9,8 +9,12 @@ sphinx-jekyll-builder==0.3.0 ./opentelemetry-api ./opentelemetry-semantic-conventions ./opentelemetry-sdk +./opentelemetry-proto ./shim/opentelemetry-opencensus-shim ./shim/opentelemetry-opentracing-shim +./exporter/opentelemetry-exporter-otlp-proto-common +./exporter/opentelemetry-exporter-otlp-proto-http +./exporter/opentelemetry-exporter-otlp-proto-grpc # Required by instrumentation and exporter packages grpcio~=1.27 diff --git a/docs/conf.py b/docs/conf.py index 352cf927cd4..3aa7e022e3a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -115,6 +115,29 @@ "py:class", "opentelemetry.trace._LinkBase", ), + ( + "py:class", + "opentelemetry.exporter.otlp.proto.grpc.exporter.OTLPExporterMixin", + ), + ( + "py:class", + "opentelemetry.proto.collector.trace.v1.trace_service_pb2.ExportTraceServiceRequest", + ), + ( + "py:class", + "opentelemetry.exporter.otlp.proto.common._internal.metrics_encoder.OTLPMetricExporterMixin", + ), + ("py:class", "opentelemetry.proto.resource.v1.resource_pb2.Resource"), + ( + "py:class", + "opentelemetry.proto.collector.metrics.v1.metrics_service_pb2.ExportMetricsServiceRequest", + ), + ("py:class", "opentelemetry.sdk._logs._internal.export.LogExporter"), + ("py:class", "opentelemetry.sdk._logs._internal.export.LogExportResult"), + ( + "py:class", + "opentelemetry.proto.collector.logs.v1.logs_service_pb2.ExportLogsServiceRequest", + ), ] # Add any paths that contain templates here, relative to this directory. diff --git a/docs/exporter/otlp/otlp.rst b/docs/exporter/otlp/otlp.rst index 471f2935fb7..18b8b157340 100644 --- a/docs/exporter/otlp/otlp.rst +++ b/docs/exporter/otlp/otlp.rst @@ -1,12 +1,34 @@ OpenTelemetry OTLP Exporters ============================ - .. automodule:: opentelemetry.exporter.otlp :members: :undoc-members: :show-inheritance: +opentelemetry.exporter.otlp.proto.http +--------------------------------------- + +.. automodule:: opentelemetry.exporter.otlp.proto.http + :members: + :undoc-members: + :show-inheritance: + +.. automodule:: opentelemetry.exporter.otlp.proto.http.trace_exporter + +.. automodule:: opentelemetry.exporter.otlp.proto.http.metric_exporter + +.. automodule:: opentelemetry.exporter.otlp.proto.http._log_exporter + +opentelemetry.exporter.otlp.proto.grpc +--------------------------------------- + .. automodule:: opentelemetry.exporter.otlp.proto.grpc :members: :undoc-members: :show-inheritance: + +.. automodule:: opentelemetry.exporter.otlp.proto.grpc.trace_exporter + +.. automodule:: opentelemetry.exporter.otlp.proto.grpc.metric_exporter + +.. 
automodule:: opentelemetry.exporter.otlp.proto.grpc._log_exporter From e78675ed78ea3292838afe7c2667bd0d5dbedf55 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Thu, 18 Jul 2024 13:10:15 -0600 Subject: [PATCH 7/8] Fix `ExponentialBucketHistogramAggregation` (#3978) * Refactor __init__ * Refactor aggregate * Handle case for DELTA DELTA * Checkpoint before removing old code * Remove old code * Fix some test cases * Relocate method * Debugging test_aggregate_collect * Fix empty previous bucket handling * Fix max and min setting * Fix explicit bucket aggregation to make it consistent * Rearrange __init__s * Set scale in aggregate * Use right values in exponential point * Set scale right * Start scale as None * Make test_collect_results_cumulative pass I am not sure these changes are right, I just wanted to find what would be the value that would make this test case pass. * Actually use random values * Add integration test for exponential histogram * Handle all cases for current and previous buckets and scale * Rename test module * Use random values * Fix bucket setting * WIP integration test * WIP * Finish integration tests * Rename variables * Explain analogy with ExplicitBucket * Add changelog and fix lint * Fix equality tests * Fix location of returns * Fix another issue and add test case * Added comments to integration test case * Fix lint * Add documentation for test case --- CHANGELOG.md | 2 + .../sdk/metrics/_internal/aggregation.py | 708 ++++++++++-------- .../exponential_histogram/buckets.py | 14 + ...xponential_bucket_histogram_aggregation.py | 267 ++++--- .../test_exponential_bucket_histogram.py | 357 +++++++++ .../tests/metrics/test_aggregation.py | 24 +- 6 files changed, 933 insertions(+), 439 deletions(-) create mode 100644 opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 44938228ca3..439a9eb9640 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3956](https://github.com/open-telemetry/opentelemetry-python/pull/3956)) - When encountering an error encoding metric attributes in the OTLP exporter, log the key that had an error. 
([#3838](https://github.com/open-telemetry/opentelemetry-python/pull/3838)) +- Fix `ExponentialHistogramAggregation` + ([#3978](https://github.com/open-telemetry/opentelemetry-python/pull/3978)) - Log a warning when a `LogRecord` in `sdk/log` has dropped attributes due to reaching limits ([#3946](https://github.com/open-telemetry/opentelemetry-python/pull/3946)) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 3a09cdcfea1..62ac967bbec 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -126,17 +126,17 @@ def __init__( ) self._instrument_is_monotonic = instrument_is_monotonic - self._current_value = None + self._value = None self._previous_collection_start_nano = self._start_time_unix_nano - self._previous_cumulative_value = 0 + self._previous_value = 0 def aggregate(self, measurement: Measurement) -> None: with self._lock: - if self._current_value is None: - self._current_value = 0 + if self._value is None: + self._value = 0 - self._current_value = self._current_value + measurement.value + self._value = self._value + measurement.value def collect( self, @@ -204,15 +204,15 @@ def collect( When the collection aggregation temporality does not match the instrument aggregation temporality, then a conversion is made. For this purpose, this aggregation keeps a private attribute, - self._previous_cumulative. + self._previous_value. When the instrument is synchronous: - self._previous_cumulative_value is the sum of every previously + self._previous_value is the sum of every previously collected (delta) value. In this case, the returned (cumulative) value will be: - self._previous_cumulative_value + current_value + self._previous_value + value synchronous_instrument.add(2) collect(CUMULATIVE) -> 2 @@ -225,10 +225,10 @@ def collect( time -> - self._previous_cumulative_value + self._previous_value |-------------| - current_value (delta) + value (delta) |----| returned value (cumulative) @@ -236,11 +236,11 @@ def collect( When the instrument is asynchronous: - self._previous_cumulative_value is the value of the previously + self._previous_value is the value of the previously collected (cumulative) value. 
In this case, the returned (delta) value will be: - current_value - self._previous_cumulative_value + value - self._previous_value callback() -> 1352 collect(DELTA) -> 1352 @@ -253,10 +253,10 @@ def collect( time -> - self._previous_cumulative_value + self._previous_value |-------------| - current_value (cumulative) + value (cumulative) |------------------| returned value (delta) @@ -264,8 +264,8 @@ def collect( """ with self._lock: - current_value = self._current_value - self._current_value = None + value = self._value + self._value = None if ( self._instrument_aggregation_temporality @@ -285,34 +285,32 @@ def collect( collection_start_nano ) - if current_value is None: + if value is None: return None return NumberDataPoint( attributes=self._attributes, start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, - value=current_value, + value=value, ) - if current_value is None: - current_value = 0 + if value is None: + value = 0 - self._previous_cumulative_value = ( - current_value + self._previous_cumulative_value - ) + self._previous_value = value + self._previous_value return NumberDataPoint( attributes=self._attributes, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, - value=self._previous_cumulative_value, + value=self._previous_value, ) # This happens when the corresponding instrument for this # aggregation is asynchronous. - if current_value is None: + if value is None: # This happens when the corresponding instrument callback # does not produce measurements. return None @@ -321,9 +319,9 @@ def collect( collection_aggregation_temporality is AggregationTemporality.DELTA ): - result_value = current_value - self._previous_cumulative_value + result_value = value - self._previous_value - self._previous_cumulative_value = current_value + self._previous_value = value previous_collection_start_nano = ( self._previous_collection_start_nano @@ -341,7 +339,7 @@ def collect( attributes=self._attributes, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, - value=current_value, + value=value, ) @@ -403,42 +401,43 @@ def __init__( ): super().__init__(attributes) + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) + self._start_time_unix_nano = start_time_unix_nano self._boundaries = tuple(boundaries) self._record_min_max = record_min_max + + self._value = None self._min = inf self._max = -inf self._sum = 0 - self._start_time_unix_nano = start_time_unix_nano - self._instrument_aggregation_temporality = ( - instrument_aggregation_temporality - ) - - self._current_value = None - - self._previous_collection_start_nano = self._start_time_unix_nano - self._previous_cumulative_value = self._get_empty_bucket_counts() + self._previous_value = None self._previous_min = inf self._previous_max = -inf self._previous_sum = 0 + self._previous_collection_start_nano = self._start_time_unix_nano + def _get_empty_bucket_counts(self) -> List[int]: return [0] * (len(self._boundaries) + 1) def aggregate(self, measurement: Measurement) -> None: + with self._lock: - if self._current_value is None: - self._current_value = self._get_empty_bucket_counts() + if self._value is None: + self._value = self._get_empty_bucket_counts() - value = measurement.value + measurement_value = measurement.value - self._sum += value + self._sum += measurement_value if self._record_min_max: - self._min = min(self._min, value) - self._max = max(self._max, value) + self._min = min(self._min, 
measurement_value)
+                self._max = max(self._max, measurement_value)
 
-            self._current_value[bisect_left(self._boundaries, value)] += 1
+            self._value[bisect_left(self._boundaries, measurement_value)] += 1
 
     def collect(
         self,
@@ -450,12 +449,12 @@ def collect(
         """
         with self._lock:
-            current_value = self._current_value
+            value = self._value
             sum_ = self._sum
             min_ = self._min
             max_ = self._max
 
-            self._current_value = None
+            self._value = None
             self._sum = 0
             self._min = inf
             self._max = -inf
@@ -478,30 +477,33 @@ def collect(
                     collection_start_nano
                 )
 
-                if current_value is None:
+                if value is None:
                     return None
 
                 return HistogramDataPoint(
                     attributes=self._attributes,
                     start_time_unix_nano=previous_collection_start_nano,
                     time_unix_nano=collection_start_nano,
-                    count=sum(current_value),
+                    count=sum(value),
                     sum=sum_,
-                    bucket_counts=tuple(current_value),
+                    bucket_counts=tuple(value),
                     explicit_bounds=self._boundaries,
                     min=min_,
                     max=max_,
                 )
 
-            if current_value is None:
-                current_value = self._get_empty_bucket_counts()
+            if value is None:
+                value = self._get_empty_bucket_counts()
+
+            if self._previous_value is None:
+                self._previous_value = self._get_empty_bucket_counts()
 
-            self._previous_cumulative_value = [
-                current_value_element + previous_cumulative_value_element
+            self._previous_value = [
+                value_element + previous_value_element
                 for (
-                    current_value_element,
-                    previous_cumulative_value_element,
-                ) in zip(current_value, self._previous_cumulative_value)
+                    value_element,
+                    previous_value_element,
+                ) in zip(value, self._previous_value)
             ]
             self._previous_min = min(min_, self._previous_min)
             self._previous_max = max(max_, self._previous_max)
@@ -511,9 +513,9 @@ def collect(
                 attributes=self._attributes,
                 start_time_unix_nano=self._start_time_unix_nano,
                 time_unix_nano=collection_start_nano,
-                count=sum(self._previous_cumulative_value),
+                count=sum(self._previous_value),
                 sum=self._previous_sum,
-                bucket_counts=tuple(self._previous_cumulative_value),
+                bucket_counts=tuple(self._previous_value),
                 explicit_bounds=self._boundaries,
                 min=self._previous_min,
                 max=self._previous_max,
@@ -522,12 +524,6 @@ def collect(
         return None
 
 
-def _new_exponential_mapping(scale: int) -> Mapping:
-    if scale <= 0:
-        return ExponentMapping(scale)
-    return LogarithmMapping(scale)
-
-
 # pylint: disable=protected-access
 class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]):
     # _min_max_size and _max_max_size are the smallest and largest values
     def __init__(
         self,
         attributes: Attributes,
+        instrument_aggregation_temporality: AggregationTemporality,
         start_time_unix_nano: int,
         # This is the default maximum number of buckets per positive or
         # negative number range. The value 160 is specified by OpenTelemetry.
@@ -552,9 +549,16 @@ def __init__(
         max_size: int = 160,
         max_scale: int = 20,
     ):
-        super().__init__(attributes)
         # max_size is the maximum capacity of the positive and negative
         # buckets.
+        # _sum is the sum of all the values aggregated by this aggregator.
+        # _count is the count of all calls to aggregate.
+        # _zero_count is the count of all the calls to aggregate when the value
+        # to be aggregated is exactly 0.
+        # _min is the smallest value aggregated by this aggregator.
+        # _max is the largest value aggregated by this aggregator.
+        # _positive holds the positive values.
+        # _negative holds the negative values by their absolute value.
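For intuition about the buckets described in the comments above: exponential histogram bucket boundaries are successive powers of base = 2**(2**-scale), so, setting aside values that fall exactly on a power-of-two boundary, the bucket index of a value is roughly floor(log2(value) * 2**scale). A rough sketch, not the SDK's mapping classes:

.. code-block:: python

    # Rough sketch of exponential bucket indexing; the real ExponentMapping
    # and LogarithmMapping classes handle exact powers of 2 and extreme
    # values more carefully.
    from math import floor, log2

    def map_to_index(value: float, scale: int) -> int:
        return floor(log2(value) * 2**scale)

    # At scale 0 the buckets are (1, 2], (2, 4], (4, 8], ...
    assert map_to_index(3.0, scale=0) == 1  # 3 falls in (2, 4]
    # Each additional unit of scale doubles the resolution:
    assert map_to_index(3.0, scale=1) == 3  # 3 falls in (2**1.5, 2**2]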
if max_size < self._min_max_size: raise ValueError( f"Buckets max size {max_size} is smaller than " @@ -566,167 +570,159 @@ def __init__( f"Buckets max size {max_size} is larger than " "maximum max size {self._max_max_size}" ) + if max_scale > 20: + _logger.warning( + "max_scale is set to %s which is " + "larger than the recommended value of 20", + max_scale, + ) + + # This aggregation is analogous to _ExplicitBucketHistogramAggregation, + # the only difference is that with every call to aggregate, the size + # and amount of buckets can change (in + # _ExplicitBucketHistogramAggregation both size and amount of buckets + # remain constant once it is instantiated). + + super().__init__(attributes) + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) + self._start_time_unix_nano = start_time_unix_nano self._max_size = max_size self._max_scale = max_scale - # _sum is the sum of all the values aggregated by this aggregator. + self._value_positive = None + self._value_negative = None + self._min = inf + self._max = -inf self._sum = 0 - - # _count is the count of all calls to aggregate. self._count = 0 - - # _zero_count is the count of all the calls to aggregate when the value - # to be aggregated is exactly 0. self._zero_count = 0 + self._scale = None - # _min is the smallest value aggregated by this aggregator. - self._min = inf - - # _max is the smallest value aggregated by this aggregator. - self._max = -inf - - # _positive holds the positive values. - self._positive = Buckets() - - # _negative holds the negative values by their absolute value. - self._negative = Buckets() - - # _mapping corresponds to the current scale, is shared by both the - # positive and negative buckets. - - if self._max_scale > 20: - _logger.warning( - "max_scale is set to %s which is " - "larger than the recommended value of 20", - self._max_scale, - ) - self._mapping = _new_exponential_mapping(self._max_scale) + self._previous_value_positive = None + self._previous_value_negative = None + self._previous_min = inf + self._previous_max = -inf + self._previous_sum = 0 + self._previous_count = 0 + self._previous_zero_count = 0 + self._previous_scale = None - self._instrument_aggregation_temporality = AggregationTemporality.DELTA - self._start_time_unix_nano = start_time_unix_nano + self._previous_collection_start_nano = self._start_time_unix_nano - self._previous_scale = None - self._previous_start_time_unix_nano = None - self._previous_zero_count = None - self._previous_count = None - self._previous_sum = None - self._previous_max = None - self._previous_min = None - self._previous_positive = None - self._previous_negative = None + self._mapping = self._new_mapping(self._max_scale) def aggregate(self, measurement: Measurement) -> None: # pylint: disable=too-many-branches,too-many-statements, too-many-locals with self._lock: + if self._value_positive is None: + self._value_positive = Buckets() + if self._value_negative is None: + self._value_negative = Buckets() - value = measurement.value + measurement_value = measurement.value - # 0. 
Set the following attributes: - # _min - # _max - # _count - # _zero_count - # _sum - if value < self._min: - self._min = value + self._sum += measurement_value - if value > self._max: - self._max = value + self._min = min(self._min, measurement_value) + self._max = max(self._max, measurement_value) self._count += 1 - if value == 0: + if measurement_value == 0: self._zero_count += 1 - # No need to do anything else if value is zero, just increment the - # zero count. - return - self._sum += value + if self._count == self._zero_count: + self._scale = 0 - # 1. Use the positive buckets for positive values and the negative - # buckets for negative values. - if value > 0: - buckets = self._positive + return - else: - # Both exponential and logarithm mappings use only positive values - # so the absolute value is used here. - value = -value - buckets = self._negative + if measurement_value > 0: + value = self._value_positive - # 2. Compute the index for the value at the current scale. - index = self._mapping.map_to_index(value) + else: + measurement_value = -measurement_value + value = self._value_negative - # IncrementIndexBy starts here + # The following code finds out if it is necessary to change the + # buckets to hold the incoming measurement_value, changes them if + # necessary. This process does not exist in + # _ExplicitBucketHistogram aggregation because the buckets there + # are constant in size and amount. + index = self._mapping.map_to_index(measurement_value) - # 3. Determine if a change of scale is needed. is_rescaling_needed = False low, high = 0, 0 - if len(buckets) == 0: - buckets.index_start = index - buckets.index_end = index - buckets.index_base = index + if len(value) == 0: + value.index_start = index + value.index_end = index + value.index_base = index elif ( - index < buckets.index_start - and (buckets.index_end - index) >= self._max_size + index < value.index_start + and (value.index_end - index) >= self._max_size ): is_rescaling_needed = True low = index - high = buckets.index_end + high = value.index_end elif ( - index > buckets.index_end - and (index - buckets.index_start) >= self._max_size + index > value.index_end + and (index - value.index_start) >= self._max_size ): is_rescaling_needed = True - low = buckets.index_start + low = value.index_start high = index - # 4. Rescale the mapping if needed. if is_rescaling_needed: scale_change = self._get_scale_change(low, high) self._downscale( scale_change, - self._positive, - self._negative, + self._value_positive, + self._value_negative, + ) + self._mapping = self._new_mapping( + self._mapping.scale - scale_change ) - new_scale = self._mapping.scale - scale_change - self._mapping = _new_exponential_mapping(new_scale) - index = self._mapping.map_to_index(value) + index = self._mapping.map_to_index(measurement_value) + + self._scale = self._mapping.scale - # 5. If the index is outside - # [buckets.index_start, buckets.index_end] readjust the buckets - # boundaries or add more buckets. 
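The rescaling performed by `_downscale` and `_get_scale_change` above has a simple core: lowering the scale by `shift` merges every 2**shift adjacent buckets into one, and an old bucket index maps to `old_index >> shift` (Python's right shift floors toward negative infinity, which also works for the negative indices these buckets can have). A toy illustration, not the SDK's `Buckets` class:

.. code-block:: python

    # Toy downscale: counts keyed by bucket index collapse in groups of
    # 2**shift when the scale drops by shift.
    def downscale_counts(counts_by_index: dict, shift: int) -> dict:
        merged = {}
        for index, count in counts_by_index.items():
            merged[index >> shift] = merged.get(index >> shift, 0) + count
        return merged

    print(downscale_counts({0: 1, 1: 2, 6: 1, 7: 3}, shift=1))
    # {0: 3, 3: 4}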
- if index < buckets.index_start: - span = buckets.index_end - index + if index < value.index_start: + span = value.index_end - index - if span >= len(buckets.counts): - buckets.grow(span + 1, self._max_size) + if span >= len(value.counts): + value.grow(span + 1, self._max_size) - buckets.index_start = index + value.index_start = index - elif index > buckets.index_end: - span = index - buckets.index_start + elif index > value.index_end: + span = index - value.index_start - if span >= len(buckets.counts): - buckets.grow(span + 1, self._max_size) + if span >= len(value.counts): + value.grow(span + 1, self._max_size) - buckets.index_end = index + value.index_end = index - # 6. Compute the index of the bucket to be incremented. - bucket_index = index - buckets.index_base + bucket_index = index - value.index_base if bucket_index < 0: - bucket_index += len(buckets.counts) + bucket_index += len(value.counts) - # 7. Increment the bucket. - buckets.increment_bucket(bucket_index) + # Now the buckets have been changed if needed and bucket_index will + # be used to increment the counter of the bucket that needs to be + # incremented. + + # This is analogous to + # self._value[bisect_left(self._boundaries, measurement_value)] += 1 + # in _ExplicitBucketHistogramAggregation.aggregate + value.increment_bucket(bucket_index) def collect( self, @@ -736,200 +732,238 @@ def collect( """ Atomically return a point for the current value of the metric. """ - # pylint: disable=too-many-statements, too-many-locals + # pylint: disable=too-many-statements, too-many-locals with self._lock: - if self._count == 0: - return None - - current_negative = self._negative - current_positive = self._positive - current_zero_count = self._zero_count - current_count = self._count - current_start_time_unix_nano = self._start_time_unix_nano - current_sum = self._sum - current_max = self._max - if current_max == -inf: - current_max = None - current_min = self._min - if current_min == inf: - current_min = None - - if self._count == self._zero_count: - current_scale = 0 - - else: - current_scale = self._mapping.scale + value_positive = self._value_positive + value_negative = self._value_negative + sum_ = self._sum + min_ = self._min + max_ = self._max + count = self._count + zero_count = self._zero_count + scale = self._scale - self._negative = Buckets() - self._positive = Buckets() - self._start_time_unix_nano = collection_start_nano + self._value_positive = None + self._value_negative = None self._sum = 0 - self._count = 0 - self._zero_count = 0 self._min = inf self._max = -inf + self._count = 0 + self._zero_count = 0 + self._scale = None - if self._previous_scale is None or ( + if ( self._instrument_aggregation_temporality - is collection_aggregation_temporality + is AggregationTemporality.DELTA ): - self._previous_scale = current_scale - self._previous_start_time_unix_nano = ( - current_start_time_unix_nano - ) - self._previous_max = current_max - self._previous_min = current_min - self._previous_sum = current_sum - self._previous_count = current_count - self._previous_zero_count = current_zero_count - self._previous_positive = current_positive - self._previous_negative = current_negative - - current_point = ExponentialHistogramDataPoint( - attributes=self._attributes, - start_time_unix_nano=current_start_time_unix_nano, - time_unix_nano=collection_start_nano, - count=current_count, - sum=current_sum, - scale=current_scale, - zero_count=current_zero_count, - positive=BucketsPoint( - offset=current_positive.offset, - 
bucket_counts=current_positive.get_offset_counts(),
-                ),
-                negative=BucketsPoint(
-                    offset=current_negative.offset,
-                    bucket_counts=current_negative.get_offset_counts(),
-                ),
-                # FIXME: Find the right value for flags
-                flags=0,
-                min=current_min,
-                max=current_max,
-            )
-
-            return current_point
+            # This happens when the corresponding instrument for this
+            # aggregation is synchronous.
+            if (
+                collection_aggregation_temporality
+                is AggregationTemporality.DELTA
+            ):
 
-            min_scale = min(self._previous_scale, current_scale)
+                previous_collection_start_nano = (
+                    self._previous_collection_start_nano
+                )
+                self._previous_collection_start_nano = (
+                    collection_start_nano
+                )
 
-            low_positive, high_positive = self._get_low_high_previous_current(
-                self._previous_positive,
-                current_positive,
-                current_scale,
-                min_scale,
-            )
-            low_negative, high_negative = self._get_low_high_previous_current(
-                self._previous_negative,
-                current_negative,
-                current_scale,
-                min_scale,
-            )
+                if value_positive is None and value_negative is None:
+                    return None
 
-            min_scale = min(
-                min_scale
-                - self._get_scale_change(low_positive, high_positive),
-                min_scale
-                - self._get_scale_change(low_negative, high_negative),
-            )
+                return ExponentialHistogramDataPoint(
+                    attributes=self._attributes,
+                    start_time_unix_nano=previous_collection_start_nano,
+                    time_unix_nano=collection_start_nano,
+                    count=count,
+                    sum=sum_,
+                    scale=scale,
+                    zero_count=zero_count,
+                    positive=BucketsPoint(
+                        offset=value_positive.offset,
+                        bucket_counts=(value_positive.get_offset_counts()),
+                    ),
+                    negative=BucketsPoint(
+                        offset=value_negative.offset,
+                        bucket_counts=(value_negative.get_offset_counts()),
+                    ),
+                    # FIXME: Find the right value for flags
+                    flags=0,
+                    min=min_,
+                    max=max_,
+                )
 
-            # FIXME Go implementation checks if the histogram (not the mapping
-            # but the histogram) has a count larger than zero, if not, scale
-            # (the histogram scale) would be zero. See exponential.go 191
-            self._downscale(
-                self._previous_scale - min_scale,
-                self._previous_positive,
-                self._previous_negative,
-            )
-            self._previous_scale = min_scale
+            # Here collection_temporality is CUMULATIVE.
+            # instrument_temporality is always DELTA for the time being.
+            # Here we need to handle the cases where:
+            # collect is called after at least one other call to collect
+            # (there is data in previous buckets, a call to merge is needed
+            # to handle possible differences in bucket sizes).
+            # collect is called without a previous call to collect having
+            # been made (there are no previous buckets; previous, empty
+            # buckets that have the same scale as the current buckets need
+            # to be made so that they can be cumulatively aggregated to the
+            # current buckets).
 
             if (
-                collection_aggregation_temporality
-                is AggregationTemporality.CUMULATIVE
-            ):
+                value_positive is None
+                and self._previous_value_positive is None
+            ):
+                # This happens if collect is called for the first time
+                # and aggregate has not yet been called.
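The first-collection handling above relies on a small invariant: merging the current buckets with an all-zero copy of themselves leaves them unchanged, so the very first cumulative collect can go through the same merge path as every later one (this is what the `copy_empty` helper added to `buckets.py` further below provides). A list-based analogy of that identity:

.. code-block:: python

    current = [1, 0, 2, 1]
    previous = [0] * len(current)  # analogue of Buckets.copy_empty()

    merged = [c + p for c, p in zip(current, previous)]
    assert merged == current  # merging with empty buckets is the identity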
+ value_positive = Buckets() + self._previous_value_positive = value_positive.copy_empty() + if ( + value_negative is None + and self._previous_value_negative is None + ): + value_negative = Buckets() + self._previous_value_negative = value_negative.copy_empty() + if scale is None and self._previous_scale is None: + scale = self._mapping.scale + self._previous_scale = scale - start_time_unix_nano = self._previous_start_time_unix_nano - sum_ = current_sum + self._previous_sum - zero_count = current_zero_count + self._previous_zero_count - count = current_count + self._previous_count - # Only update min/max on delta -> cumulative - max_ = max(current_max, self._previous_max) - min_ = min(current_min, self._previous_min) + if ( + value_positive is not None + and self._previous_value_positive is None + ): + # This happens when collect is called the very first time + # and aggregate has been called before. + + # We need previous buckets to add them to the current ones. + # When collect is called for the first time, there are no + # previous buckets, so we need to create empty buckets to + # add them to the current ones. The addition of empty + # buckets to the current ones will result in the current + # ones unchanged. + + # The way the previous buckets are generated here is + # different from the explicit bucket histogram where + # the size and amount of the buckets does not change once + # they are instantiated. Here, the size and amount of the + # buckets can change with every call to aggregate. In order + # to get empty buckets that can be added to the current + # ones resulting in the current ones unchanged we need to + # generate empty buckets that have the same size and amount + # as the current ones, this is what copy_empty does. + self._previous_value_positive = value_positive.copy_empty() + if ( + value_negative is not None + and self._previous_value_negative is None + ): + self._previous_value_negative = value_negative.copy_empty() + if scale is not None and self._previous_scale is None: + self._previous_scale = scale - self._merge( - self._previous_positive, - current_positive, - current_scale, - min_scale, - collection_aggregation_temporality, + if ( + value_positive is None + and self._previous_value_positive is not None + ): + value_positive = self._previous_value_positive.copy_empty() + if ( + value_negative is None + and self._previous_value_negative is not None + ): + value_negative = self._previous_value_negative.copy_empty() + if scale is None and self._previous_scale is not None: + scale = self._previous_scale + + min_scale = min(self._previous_scale, scale) + + low_positive, high_positive = ( + self._get_low_high_previous_current( + self._previous_value_positive, + value_positive, + scale, + min_scale, + ) ) - self._merge( - self._previous_negative, - current_negative, - current_scale, - min_scale, - collection_aggregation_temporality, + low_negative, high_negative = ( + self._get_low_high_previous_current( + self._previous_value_negative, + value_negative, + scale, + min_scale, + ) ) - current_scale = min_scale - current_positive = self._previous_positive - current_negative = self._previous_negative + min_scale = min( + min_scale + - self._get_scale_change(low_positive, high_positive), + min_scale + - self._get_scale_change(low_negative, high_negative), + ) - else: - start_time_unix_nano = self._previous_start_time_unix_nano - sum_ = current_sum - self._previous_sum - zero_count = current_zero_count - count = current_count - max_ = current_max - min_ = current_min + 
self._downscale( + self._previous_scale - min_scale, + self._previous_value_positive, + self._previous_value_negative, + ) + # self._merge adds the values from value to + # self._previous_value, this is analogous to + # self._previous_value = [ + # value_element + previous_value_element + # for ( + # value_element, + # previous_value_element, + # ) in zip(value, self._previous_value) + # ] + # in _ExplicitBucketHistogramAggregation.collect. self._merge( - self._previous_positive, - current_positive, - current_scale, + self._previous_value_positive, + value_positive, + scale, min_scale, collection_aggregation_temporality, ) self._merge( - self._previous_negative, - current_negative, - current_scale, + self._previous_value_negative, + value_negative, + scale, min_scale, collection_aggregation_temporality, ) - current_point = ExponentialHistogramDataPoint( - attributes=self._attributes, - start_time_unix_nano=start_time_unix_nano, - time_unix_nano=collection_start_nano, - count=count, - sum=sum_, - scale=current_scale, - zero_count=zero_count, - positive=BucketsPoint( - offset=current_positive.offset, - bucket_counts=current_positive.get_offset_counts(), - ), - negative=BucketsPoint( - offset=current_negative.offset, - bucket_counts=current_negative.get_offset_counts(), - ), - # FIXME: Find the right value for flags - flags=0, - min=min_, - max=max_, - ) + self._previous_min = min(min_, self._previous_min) + self._previous_max = max(max_, self._previous_max) + self._previous_sum = sum_ + self._previous_sum + self._previous_count = count + self._previous_count + self._previous_zero_count = ( + zero_count + self._previous_zero_count + ) + self._previous_scale = min_scale - self._previous_scale = current_scale - self._previous_positive = current_positive - self._previous_negative = current_negative - self._previous_start_time_unix_nano = current_start_time_unix_nano - self._previous_sum = sum_ - self._previous_count = count - self._previous_max = max_ - self._previous_min = min_ - self._previous_zero_count = zero_count + return ExponentialHistogramDataPoint( + attributes=self._attributes, + start_time_unix_nano=self._start_time_unix_nano, + time_unix_nano=collection_start_nano, + count=self._previous_count, + sum=self._previous_sum, + scale=self._previous_scale, + zero_count=self._previous_zero_count, + positive=BucketsPoint( + offset=self._previous_value_positive.offset, + bucket_counts=( + self._previous_value_positive.get_offset_counts() + ), + ), + negative=BucketsPoint( + offset=self._previous_value_negative.offset, + bucket_counts=( + self._previous_value_negative.get_offset_counts() + ), + ), + # FIXME: Find the right value for flags + flags=0, + min=self._previous_min, + max=self._previous_max, + ) - return current_point + return None def _get_low_high_previous_current( self, @@ -969,6 +1003,12 @@ def _get_low_high(buckets, scale, min_scale): return buckets.index_start >> shift, buckets.index_end >> shift + @staticmethod + def _new_mapping(scale: int) -> Mapping: + if scale <= 0: + return ExponentMapping(scale) + return LogarithmMapping(scale) + def _get_scale_change(self, low, high): change = 0 @@ -1174,8 +1214,18 @@ def _create_aggregation( attributes: Attributes, start_time_unix_nano: int, ) -> _Aggregation: + + instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED + if isinstance(instrument, Synchronous): + instrument_aggregation_temporality = AggregationTemporality.DELTA + elif isinstance(instrument, Asynchronous): + instrument_aggregation_temporality = ( + 
AggregationTemporality.CUMULATIVE + ) + return _ExponentialBucketHistogramAggregation( attributes, + instrument_aggregation_temporality, start_time_unix_nano, max_size=self._max_size, max_scale=self._max_scale, diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py index 4dbe8f385e8..8877985c234 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py @@ -177,3 +177,17 @@ def downscale(self, amount: int) -> None: def increment_bucket(self, bucket_index: int, increment: int = 1) -> None: self._counts[bucket_index] += increment + + def copy_empty(self) -> "Buckets": + copy = Buckets() + + # pylint: disable=no-member + # pylint: disable=protected-access + # pylint: disable=attribute-defined-outside-init + # pylint: disable=invalid-name + copy._Buckets__index_base = self._Buckets__index_base + copy._Buckets__index_start = self._Buckets__index_start + copy._Buckets__index_end = self._Buckets__index_end + copy._counts = [0 for _ in self._counts] + + return copy diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index e243e09643d..85c28070c15 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -15,11 +15,12 @@ # pylint: disable=protected-access,too-many-lines,invalid-name # pylint: disable=consider-using-enumerate,no-self-use,too-many-public-methods -import random as insecure_random +from inspect import currentframe from itertools import permutations from logging import WARNING from math import ldexp -from sys import float_info +from random import Random, randrange +from sys import float_info, maxsize from types import MethodType from unittest.mock import Mock, patch @@ -73,8 +74,8 @@ def swap( ): for attribute in [ - "_positive", - "_negative", + "_value_positive", + "_value_negative", "_sum", "_count", "_zero_count", @@ -137,14 +138,18 @@ def require_equal(self, a, b): self.assertEqual(a._mapping.scale, b._mapping.scale) - self.assertEqual(len(a._positive), len(b._positive)) - self.assertEqual(len(a._negative), len(b._negative)) + self.assertEqual(len(a._value_positive), len(b._value_positive)) + self.assertEqual(len(a._value_negative), len(b._value_negative)) - for index in range(len(a._positive)): - self.assertEqual(a._positive[index], b._positive[index]) + for index in range(len(a._value_positive)): + self.assertEqual( + a._value_positive[index], b._value_positive[index] + ) - for index in range(len(a._negative)): - self.assertEqual(a._negative[index], b._negative[index]) + for index in range(len(a._value_negative)): + self.assertEqual( + a._value_negative[index], b._value_negative[index] + ) def test_alternating_growth_0(self): """ @@ -161,7 +166,9 @@ def test_alternating_growth_0(self): # agg is an instance of github.com/lightstep/otel-launcher-go/lightstep/sdk/metric/aggregator/histogram/structure.Histogram[float64] exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4) + 
_ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + ) ) exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) @@ -169,11 +176,12 @@ def test_alternating_growth_0(self): exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) self.assertEqual( - exponential_histogram_aggregation._positive.offset, -1 + exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - get_counts(exponential_histogram_aggregation._positive), [1, 1, 1] + get_counts(exponential_histogram_aggregation._value_positive), + [1, 1, 1], ) def test_alternating_growth_1(self): @@ -185,7 +193,9 @@ def test_alternating_growth_1(self): """ exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + ) ) exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) @@ -196,11 +206,12 @@ def test_alternating_growth_1(self): exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock())) self.assertEqual( - exponential_histogram_aggregation._positive.offset, -1 + exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, -1) self.assertEqual( - get_counts(exponential_histogram_aggregation._positive), [2, 3, 1] + get_counts(exponential_histogram_aggregation._value_positive), + [2, 3, 1], ) def test_permutations(self): @@ -246,7 +257,10 @@ def test_permutations(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=2 + Mock(), + AggregationTemporality.DELTA, + Mock(), + max_size=2, ) ) @@ -261,19 +275,19 @@ def test_permutations(self): expected["scale"], ) self.assertEqual( - exponential_histogram_aggregation._positive.offset, + exponential_histogram_aggregation._value_positive.offset, expected["offset"], ) self.assertEqual( - len(exponential_histogram_aggregation._positive), + len(exponential_histogram_aggregation._value_positive), expected["len"], ) self.assertEqual( - exponential_histogram_aggregation._positive[0], + exponential_histogram_aggregation._value_positive[0], expected["at_0"], ) self.assertEqual( - exponential_histogram_aggregation._positive[1], + exponential_histogram_aggregation._value_positive[1], expected["at_1"], ) @@ -292,7 +306,10 @@ def ascending_sequence_test( exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=max_size + Mock(), + AggregationTemporality.DELTA, + Mock(), + max_size=max_size, ) ) @@ -317,7 +334,8 @@ def ascending_sequence_test( init_scale, exponential_histogram_aggregation._mapping._scale ) self.assertEqual( - offset, exponential_histogram_aggregation._positive.offset + offset, + exponential_histogram_aggregation._value_positive.offset, ) exponential_histogram_aggregation.aggregate( @@ -326,7 +344,7 @@ def ascending_sequence_test( sum_ += max_val self.assertNotEqual( - 0, exponential_histogram_aggregation._positive[0] + 0, exponential_histogram_aggregation._value_positive[0] ) # The maximum-index filled bucket is at or @@ -337,12 +355,15 @@ def ascending_sequence_test( total_count = 0 for index in range( - len(exponential_histogram_aggregation._positive) + len(exponential_histogram_aggregation._value_positive) ): - total_count += exponential_histogram_aggregation._positive[ - index - ] - if 
exponential_histogram_aggregation._positive[index] != 0: + total_count += ( + exponential_histogram_aggregation._value_positive[index] + ) + if ( + exponential_histogram_aggregation._value_positive[index] + != 0 + ): max_fill = index # FIXME the corresponding Go code is @@ -369,15 +390,15 @@ def ascending_sequence_test( index = mapping.map_to_index(min_val) self.assertEqual( - index, exponential_histogram_aggregation._positive.offset + index, exponential_histogram_aggregation._value_positive.offset ) index = mapping.map_to_index(max_val) self.assertEqual( index, - exponential_histogram_aggregation._positive.offset - + len(exponential_histogram_aggregation._positive) + exponential_histogram_aggregation._value_positive.offset + + len(exponential_histogram_aggregation._value_positive) - 1, ) @@ -394,7 +415,7 @@ def mock_increment(self, bucket_index: int) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=256 + Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 ) ) @@ -405,15 +426,16 @@ def mock_increment(self, bucket_index: int) -> None: self.assertEqual(0, exponential_histogram_aggregation._sum) expect = 0 + exponential_histogram_aggregation._value_positive = Buckets() + for value in range(2, 257): expect += value * increment with patch.object( - exponential_histogram_aggregation._positive, + exponential_histogram_aggregation._value_positive, "increment_bucket", - # new=positive_mock MethodType( mock_increment, - exponential_histogram_aggregation._positive, + exponential_histogram_aggregation._value_positive, ), ): exponential_histogram_aggregation.aggregate( @@ -434,16 +456,16 @@ def mock_increment(self, bucket_index: int) -> None: self.assertEqual( 256 - ((1 << scale) - 1), - len(exponential_histogram_aggregation._positive), + len(exponential_histogram_aggregation._value_positive), ) self.assertEqual( (1 << scale) - 1, - exponential_histogram_aggregation._positive.offset, + exponential_histogram_aggregation._value_positive.offset, ) for index in range(0, 256): self.assertLessEqual( - exponential_histogram_aggregation._positive[index], + exponential_histogram_aggregation._value_positive[index], 6 * increment, ) @@ -451,12 +473,12 @@ def test_move_into(self): exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=256 + Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 ) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=256 + Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 ) ) @@ -489,36 +511,38 @@ def test_move_into(self): self.assertEqual( 256 - ((1 << scale) - 1), - len(exponential_histogram_aggregation_1._positive), + len(exponential_histogram_aggregation_1._value_positive), ) self.assertEqual( (1 << scale) - 1, - exponential_histogram_aggregation_1._positive.offset, + exponential_histogram_aggregation_1._value_positive.offset, ) for index in range(0, 256): self.assertLessEqual( - exponential_histogram_aggregation_1._positive[index], 6 + exponential_histogram_aggregation_1._value_positive[index], 6 ) def test_very_large_numbers(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=2) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=2 + ) ) def expect_balanced(count: int): self.assertEqual( - 2, len(exponential_histogram_aggregation._positive) + 2, 
len(exponential_histogram_aggregation._value_positive) ) self.assertEqual( - -1, exponential_histogram_aggregation._positive.offset + -1, exponential_histogram_aggregation._value_positive.offset ) self.assertEqual( - count, exponential_histogram_aggregation._positive[0] + count, exponential_histogram_aggregation._value_positive[0] ) self.assertEqual( - count, exponential_histogram_aggregation._positive[1] + count, exponential_histogram_aggregation._value_positive[1] ) exponential_histogram_aggregation.aggregate( @@ -580,7 +604,9 @@ def expect_balanced(count: int): def test_full_range(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=2) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=2 + ) ) exponential_histogram_aggregation.aggregate( @@ -602,18 +628,24 @@ def test_full_range(self): self.assertEqual( _ExponentialBucketHistogramAggregation._min_max_size, - len(exponential_histogram_aggregation._positive), + len(exponential_histogram_aggregation._value_positive), ) self.assertEqual( - -1, exponential_histogram_aggregation._positive.offset + -1, exponential_histogram_aggregation._value_positive.offset + ) + self.assertLessEqual( + exponential_histogram_aggregation._value_positive[0], 2 + ) + self.assertLessEqual( + exponential_histogram_aggregation._value_positive[1], 1 ) - self.assertLessEqual(exponential_histogram_aggregation._positive[0], 2) - self.assertLessEqual(exponential_histogram_aggregation._positive[1], 1) def test_aggregator_min_max(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [1, 3, 5, 7, 9]: @@ -625,7 +657,9 @@ def test_aggregator_min_max(self): self.assertEqual(9, exponential_histogram_aggregation._max) exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [-1, -3, -5, -7, -9]: @@ -639,21 +673,27 @@ def test_aggregator_min_max(self): def test_aggregator_copy_swap(self): exponential_histogram_aggregation_0 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [1, 3, 5, 7, 9, -1, -3, -5]: exponential_histogram_aggregation_0.aggregate( Measurement(value, Mock()) ) exponential_histogram_aggregation_1 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [5, 4, 3, 2]: exponential_histogram_aggregation_1.aggregate( Measurement(value, Mock()) ) exponential_histogram_aggregation_2 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) swap( @@ -662,8 +702,8 @@ def test_aggregator_copy_swap(self): ) # pylint: disable=unnecessary-dunder-call - exponential_histogram_aggregation_2._positive.__init__() - exponential_histogram_aggregation_2._negative.__init__() + exponential_histogram_aggregation_2._value_positive.__init__() + exponential_histogram_aggregation_2._value_negative.__init__() exponential_histogram_aggregation_2._sum = 0 exponential_histogram_aggregation_2._count = 0 exponential_histogram_aggregation_2._zero_count = 0 @@ -674,8 +714,8 @@ 
def test_aggregator_copy_swap(self): ) for attribute in [ - "_positive", - "_negative", + "_value_positive", + "_value_negative", "_sum", "_count", "_zero_count", @@ -697,7 +737,9 @@ def test_aggregator_copy_swap(self): def test_zero_count_by_increment(self): exponential_histogram_aggregation_0 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) increment = 10 @@ -707,10 +749,11 @@ def test_zero_count_by_increment(self): Measurement(0, Mock()) ) exponential_histogram_aggregation_1 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) - # positive_mock = Mock(wraps=exponential_histogram_aggregation_1._positive) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket @@ -718,12 +761,14 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment + exponential_histogram_aggregation_1._value_positive = Buckets() + with patch.object( - exponential_histogram_aggregation_1._positive, + exponential_histogram_aggregation_1._value_positive, "increment_bucket", - # new=positive_mock MethodType( - mock_increment, exponential_histogram_aggregation_1._positive + mock_increment, + exponential_histogram_aggregation_1._value_positive, ), ): exponential_histogram_aggregation_1.aggregate( @@ -740,7 +785,9 @@ def mock_increment(self, bucket_index: int) -> None: def test_one_count_by_increment(self): exponential_histogram_aggregation_0 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) increment = 10 @@ -750,10 +797,11 @@ def test_one_count_by_increment(self): Measurement(1, Mock()) ) exponential_histogram_aggregation_1 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) - # positive_mock = Mock(wraps=exponential_histogram_aggregation_1._positive) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket @@ -761,12 +809,14 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment + exponential_histogram_aggregation_1._value_positive = Buckets() + with patch.object( - exponential_histogram_aggregation_1._positive, + exponential_histogram_aggregation_1._value_positive, "increment_bucket", - # new=positive_mock MethodType( - mock_increment, exponential_histogram_aggregation_1._positive + mock_increment, + exponential_histogram_aggregation_1._value_positive, ), ): exponential_histogram_aggregation_1.aggregate( @@ -820,6 +870,7 @@ def test_min_max_size(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + AggregationTemporality.DELTA, Mock(), max_size=_ExponentialBucketHistogramAggregation._min_max_size, ) @@ -833,7 +884,7 @@ def test_min_max_size(self): # This means the smallest max_scale is enough for the full range of the # normal floating point values. 
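A back-of-the-envelope check of the comment above: at scale s <= 0 the exponent mapping reduces to shifting the value's base-2 exponent right by -s, and normal doubles have base-2 exponents roughly in [-1022, 1023], so at the mapping's lowest scale (-10 in the OTLP data model) the whole normal range collapses into two bucket indices, matching `_min_max_size = 2`. A sketch, where `index_at_scale` is a simplification of `ExponentMapping.map_to_index`:

.. code-block:: python

    from math import frexp
    from sys import float_info

    def index_at_scale(value: float, scale: int) -> int:
        assert scale <= 0
        # frexp gives value = m * 2**e with 0.5 <= m < 1, so e - 1 is the
        # exponent in the 2**(e-1) <= value < 2**e sense.
        _, exponent = frexp(value)
        return (exponent - 1) >> -scale

    print(index_at_scale(float_info.min, -10))  # -1
    print(index_at_scale(float_info.max, -10))  # 0 -> two buckets suffice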
self.assertEqual( - len(exponential_histogram_aggregation._positive._counts), + len(exponential_histogram_aggregation._value_positive._counts), exponential_histogram_aggregation._min_max_size, ) @@ -844,6 +895,7 @@ def test_aggregate_collect(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + AggregationTemporality.DELTA, Mock(), ) ) @@ -865,6 +917,7 @@ def test_collect_results_cumulative(self) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + AggregationTemporality.DELTA, Mock(), ) ) @@ -949,9 +1002,13 @@ def test_collect_results_cumulative(self) -> None: self.assertEqual(collection_1.max, 8) def test_cumulative_aggregation_with_random_data(self) -> None: - histogram = _ExponentialBucketHistogramAggregation(Mock(), Mock()) + histogram = _ExponentialBucketHistogramAggregation( + Mock(), + AggregationTemporality.DELTA, + Mock(), + ) - def collect_and_validate() -> None: + def collect_and_validate(values, histogram) -> None: result: ExponentialHistogramDataPoint = histogram.collect( AggregationTemporality.CUMULATIVE, 0 ) @@ -981,20 +1038,31 @@ def collect_and_validate() -> None: assert result.zero_count == len([v for v in values if v == 0]) assert scale >= 3 - random = insecure_random.Random("opentelemetry2") + seed = randrange(maxsize) + # This test case is executed with random values every time. In order to + # run this test case with the same values used in a previous execution, + # check the value printed by that previous execution of this test case + # and use the same value for the seed variable in the line below. + # seed = 4539544373807492135 + + random_generator = Random(seed) + print(f"seed for {currentframe().f_code.co_name} is {seed}") + values = [] for i in range(2000): - value = random.randint(0, 1000) + value = random_generator.randint(0, 1000) values.append(value) histogram.aggregate(Measurement(value, Mock())) if i % 20 == 0: - collect_and_validate() + collect_and_validate(values, histogram) - collect_and_validate() + collect_and_validate(values, histogram) def test_merge_collect_cumulative(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + ) ) for value in [2, 4, 8, 16]: @@ -1003,16 +1071,21 @@ def test_merge_collect_cumulative(self): ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) - self.assertEqual(exponential_histogram_aggregation._positive.offset, 0) self.assertEqual( - exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1] + exponential_histogram_aggregation._value_positive.offset, 0 + ) + self.assertEqual( + exponential_histogram_aggregation._value_positive.counts, + [1, 1, 1, 1], ) - result = exponential_histogram_aggregation.collect( + result_0 = exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0, ) + self.assertEqual(result_0.scale, 0) + for value in [1, 2, 4, 8]: exponential_histogram_aggregation.aggregate( Measurement(1 / value, Mock()) @@ -1020,10 +1093,11 @@ def test_merge_collect_cumulative(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._positive.offset, -4 + exponential_histogram_aggregation._value_positive.offset, -4 ) self.assertEqual( - exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1] + exponential_histogram_aggregation._value_positive.counts, + 
[1, 1, 1, 1],
 )
 
 result_1 = exponential_histogram_aggregation.collect(
@@ -1031,12 +1105,13 @@ def test_merge_collect_cumulative(self):
 0,
 )
 
- self.assertEqual(result.scale, 0)
 self.assertEqual(result_1.scale, -1)
 
 def test_merge_collect_delta(self):
 exponential_histogram_aggregation = (
- _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4)
+ _ExponentialBucketHistogramAggregation(
+ Mock(), AggregationTemporality.DELTA, Mock(), max_size=4
+ )
 )
 
 for value in [2, 4, 8, 16]:
 exponential_histogram_aggregation.aggregate(
 Measurement(value, Mock())
 )
 
 self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0)
 
- self.assertEqual(exponential_histogram_aggregation._positive.offset, 0)
 self.assertEqual(
- exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1]
+ exponential_histogram_aggregation._value_positive.offset, 0
+ )
+ self.assertEqual(
+ exponential_histogram_aggregation._value_positive.counts,
+ [1, 1, 1, 1],
 )
 
 result = exponential_histogram_aggregation.collect(
@@ -1062,10 +1140,11 @@ def test_merge_collect_delta(self):
 
 self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0)
 
 self.assertEqual(
- exponential_histogram_aggregation._positive.offset, -4
+ exponential_histogram_aggregation._value_positive.offset, -4
 )
 self.assertEqual(
- exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1]
+ exponential_histogram_aggregation._value_positive.counts,
+ [1, 1, 1, 1],
 )
 
 result_1 = exponential_histogram_aggregation.collect(
diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
new file mode 100644
index 00000000000..6574bf43357
--- /dev/null
+++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
@@ -0,0 +1,357 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from platform import system
+from time import sleep
+from unittest import TestCase
+
+from pytest import mark
+
+from opentelemetry.sdk.metrics import Histogram, MeterProvider
+from opentelemetry.sdk.metrics.export import (
+ AggregationTemporality,
+ InMemoryMetricReader,
+)
+from opentelemetry.sdk.metrics.view import (
+ ExponentialBucketHistogramAggregation,
+)
+
+
+class TestExponentialBucketHistogramAggregation(TestCase):
+
+ test_values = [2, 4, 1, 1, 8, 0.5, 0.1, 0.045]
+
+ @mark.skipif(
+ system() == "Windows",
+ reason=(
+ "Tests fail because Windows time_ns resolution is too low so "
+ "two different time measurements may end up having the exact "
+ "same value."
+ ),
+ )
+ def test_synchronous_delta_temporality(self):
+ """
+ This test case instantiates an exponential histogram aggregation and
+ then uses it to record measurements and get metrics. The order in which
+ these actions are taken is relevant to the testing that happens here. 
+ For this reason, the aggregation is only instantiated once, since
+ reinstantiating it would defeat the purpose of this test case.
+ """
+
+ aggregation = ExponentialBucketHistogramAggregation()
+
+ reader = InMemoryMetricReader(
+ preferred_aggregation={Histogram: aggregation},
+ preferred_temporality={Histogram: AggregationTemporality.DELTA},
+ )
+
+ provider = MeterProvider(metric_readers=[reader])
+ meter = provider.get_meter("name", "version")
+
+ histogram = meter.create_histogram("histogram")
+
+ # The test scenario here is calling collect without ever having called
+ # aggregate before.
+ results = []
+
+ for _ in range(10):
+
+ results.append(reader.get_metrics_data())
+
+ for metrics_data in results:
+ self.assertIsNone(metrics_data)
+
+ # The test scenario here is calling aggregate then collect repeatedly.
+ results = []
+
+ for test_value in self.test_values:
+ histogram.record(test_value)
+ results.append(reader.get_metrics_data())
+
+ metric_data = (
+ results[0]
+ .resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+
+ previous_time_unix_nano = metric_data.time_unix_nano
+
+ self.assertEqual(metric_data.positive.bucket_counts, [1])
+ self.assertEqual(metric_data.negative.bucket_counts, [0])
+
+ self.assertLess(
+ metric_data.start_time_unix_nano,
+ previous_time_unix_nano,
+ )
+ self.assertEqual(metric_data.min, self.test_values[0])
+ self.assertEqual(metric_data.max, self.test_values[0])
+ self.assertEqual(metric_data.sum, self.test_values[0])
+
+ for index, metrics_data in enumerate(results[1:]):
+ metric_data = (
+ metrics_data.resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+
+ self.assertEqual(
+ previous_time_unix_nano, metric_data.start_time_unix_nano
+ )
+ previous_time_unix_nano = metric_data.time_unix_nano
+ self.assertEqual(metric_data.positive.bucket_counts, [1])
+ self.assertEqual(metric_data.negative.bucket_counts, [0])
+ self.assertLess(
+ metric_data.start_time_unix_nano, metric_data.time_unix_nano
+ )
+ self.assertEqual(metric_data.min, self.test_values[index + 1])
+ self.assertEqual(metric_data.max, self.test_values[index + 1])
+ # Using assertAlmostEqual here because in Python 3.12 resolution can
+ # cause these checks to fail.
+ self.assertAlmostEqual(
+ metric_data.sum, self.test_values[index + 1]
+ )
+
+ # The test scenario here is calling collect without calling aggregate
+ # immediately before, although aggregate has been called at some
+ # earlier point.
+ results = []
+
+ for _ in range(10):
+
+ results.append(reader.get_metrics_data())
+
+ provider.shutdown()
+
+ for metrics_data in results:
+ self.assertIsNone(metrics_data)
+
+ # The test scenario here is calling aggregate and collect, waiting for
+ # a certain amount of time, calling collect, then calling aggregate and
+ # collect again. 
+ results = []
+
+ histogram.record(1)
+ results.append(reader.get_metrics_data())
+
+ sleep(0.1)
+ results.append(reader.get_metrics_data())
+
+ histogram.record(2)
+ results.append(reader.get_metrics_data())
+
+ metric_data_0 = (
+ results[0]
+ .resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+ metric_data_2 = (
+ results[2]
+ .resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+
+ self.assertIsNone(results[1])
+
+ self.assertGreater(
+ metric_data_2.start_time_unix_nano, metric_data_0.time_unix_nano
+ )
+
+ provider.shutdown()
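+
+ # A condensed, illustrative sketch of the DELTA semantics exercised
+ # above (an editorial summary, not code executed by this test suite):
+ #
+ # histogram.record(1)
+ # reader.get_metrics_data() # data point covering only the 1
+ # reader.get_metrics_data() # None: nothing recorded since the
+ # # previous collection
+ #
+ # Each DELTA data point spans from the previous collection to the
+ # current one, while the CUMULATIVE test below expects every data
+ # point to span from the first recorded measurement to the current
+ # collection, with min, max, sum and bucket counts accumulating.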
+
+ @mark.skipif(
+ system() == "Windows",
+ reason=(
+ "Tests fail because Windows time_ns resolution is too low so "
+ "two different time measurements may end up having the exact "
+ "same value."
+ ),
+ )
+ def test_synchronous_cumulative_temporality(self):
+
+ aggregation = ExponentialBucketHistogramAggregation()
+
+ reader = InMemoryMetricReader(
+ preferred_aggregation={Histogram: aggregation},
+ preferred_temporality={
+ Histogram: AggregationTemporality.CUMULATIVE
+ },
+ )
+
+ provider = MeterProvider(metric_readers=[reader])
+ meter = provider.get_meter("name", "version")
+
+ histogram = meter.create_histogram("histogram")
+
+ results = []
+
+ for _ in range(10):
+ results.append(reader.get_metrics_data())
+
+ for metrics_data in results:
+ self.assertIsNone(metrics_data)
+
+ results = []
+
+ for test_value in self.test_values:
+ histogram.record(test_value)
+ results.append(reader.get_metrics_data())
+
+ metric_data = (
+ results[0]
+ .resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+
+ start_time_unix_nano = metric_data.start_time_unix_nano
+
+ self.assertLess(
+ metric_data.start_time_unix_nano,
+ metric_data.time_unix_nano,
+ )
+ self.assertEqual(metric_data.min, self.test_values[0])
+ self.assertEqual(metric_data.max, self.test_values[0])
+ self.assertEqual(metric_data.sum, self.test_values[0])
+
+ previous_time_unix_nano = metric_data.time_unix_nano
+
+ for index, metrics_data in enumerate(results[1:]):
+ metric_data = (
+ metrics_data.resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+
+ self.assertEqual(
+ start_time_unix_nano, metric_data.start_time_unix_nano
+ )
+ self.assertLess(
+ metric_data.start_time_unix_nano,
+ metric_data.time_unix_nano,
+ )
+ self.assertEqual(
+ metric_data.min, min(self.test_values[: index + 2])
+ )
+ self.assertEqual(
+ metric_data.max, max(self.test_values[: index + 2])
+ )
+ self.assertAlmostEqual(
+ metric_data.sum, sum(self.test_values[: index + 2])
+ )
+
+ self.assertGreater(
+ metric_data.time_unix_nano, previous_time_unix_nano
+ )
+
+ previous_time_unix_nano = metric_data.time_unix_nano
+
+ self.assertEqual(
+ metric_data.positive.bucket_counts,
+ [
+ 1,
+ *[0] * 17,
+ 1,
+ *[0] * 36,
+ 1,
+ *[0] * 15,
+ 2,
+ *[0] * 15,
+ 1,
+ *[0] * 15,
+ 1,
+ *[0] * 15,
+ 1,
+ *[0] * 40,
+ ],
+ )
+ self.assertEqual(metric_data.negative.bucket_counts, [0])
+
+ results = []
+
+ for _ in range(10):
+ results.append(reader.get_metrics_data())
+
+ provider.shutdown()
+
+ metric_data = (
+ results[0]
+ .resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+
+ start_time_unix_nano = metric_data.start_time_unix_nano
+
+ self.assertLess(
+ metric_data.start_time_unix_nano,
+ metric_data.time_unix_nano,
+ )
+ self.assertEqual(metric_data.min, min(self.test_values))
+ self.assertEqual(metric_data.max, max(self.test_values))
+ self.assertAlmostEqual(metric_data.sum, sum(self.test_values))
+
+ previous_metric_data = metric_data
+
+ for index, metrics_data in enumerate(results[1:]):
+ metric_data = (
+ metrics_data.resource_metrics[0]
+ .scope_metrics[0]
+ .metrics[0]
+ .data.data_points[0]
+ )
+
+ self.assertEqual(
+ previous_metric_data.start_time_unix_nano,
+ metric_data.start_time_unix_nano,
+ )
+ self.assertEqual(previous_metric_data.min, metric_data.min)
+ self.assertEqual(previous_metric_data.max, metric_data.max)
+ self.assertAlmostEqual(previous_metric_data.sum, metric_data.sum)
+
+ self.assertEqual(
+ metric_data.positive.bucket_counts,
+ [
+ 1,
+ *[0] * 17,
+ 1,
+ *[0] * 36,
+ 1,
+ *[0] * 15,
+ 2,
+ *[0] * 15,
+ 1,
+ *[0] * 15,
+ 1,
+ *[0] * 15,
+ 1,
+ *[0] * 40,
+ ],
+ )
+ self.assertEqual(metric_data.negative.bucket_counts, [0])
+
+ self.assertLess(
+ previous_metric_data.time_unix_nano,
+ metric_data.time_unix_nano,
+ )
diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py
index 9d61da72a04..7ea463ec8a8 100644
--- a/opentelemetry-sdk/tests/metrics/test_aggregation.py
+++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py
@@ -68,7 +68,7 @@ def test_aggregate_delta(self):
 synchronous_sum_aggregation.aggregate(measurement(2))
 synchronous_sum_aggregation.aggregate(measurement(3))
 
- self.assertEqual(synchronous_sum_aggregation._current_value, 6)
+ self.assertEqual(synchronous_sum_aggregation._value, 6)
 
 synchronous_sum_aggregation = _SumAggregation(
 Mock(), True, AggregationTemporality.DELTA, 0
@@ -78,7 +78,7 @@ def test_aggregate_delta(self):
 synchronous_sum_aggregation.aggregate(measurement(-2))
 synchronous_sum_aggregation.aggregate(measurement(3))
 
- self.assertEqual(synchronous_sum_aggregation._current_value, 2)
+ self.assertEqual(synchronous_sum_aggregation._value, 2)
 
 def test_aggregate_cumulative(self):
 """
@@ -93,7 +93,7 @@ def test_aggregate_cumulative(self):
 synchronous_sum_aggregation.aggregate(measurement(2))
 synchronous_sum_aggregation.aggregate(measurement(3))
 
- self.assertEqual(synchronous_sum_aggregation._current_value, 6)
+ self.assertEqual(synchronous_sum_aggregation._value, 6)
 
 synchronous_sum_aggregation = _SumAggregation(
 Mock(), True, AggregationTemporality.CUMULATIVE, 0
@@ -103,7 +103,7 @@ def test_aggregate_cumulative(self):
 synchronous_sum_aggregation.aggregate(measurement(-2))
 synchronous_sum_aggregation.aggregate(measurement(3))
 
- self.assertEqual(synchronous_sum_aggregation._current_value, 2)
+ self.assertEqual(synchronous_sum_aggregation._value, 2)
 
 def test_collect_delta(self):
 """
@@ -292,24 +292,16 @@ def test_aggregate(self):
 explicit_bucket_histogram_aggregation.aggregate(measurement(5))
 
 # The first bucket keeps count of values between (-inf, 0] (-1 and 0)
- self.assertEqual(
- explicit_bucket_histogram_aggregation._current_value[0], 2
- )
+ self.assertEqual(explicit_bucket_histogram_aggregation._value[0], 2)
 
 # The second bucket keeps count of values between (0, 2] (1 and 2)
- self.assertEqual(
- explicit_bucket_histogram_aggregation._current_value[1], 2
- )
+ self.assertEqual(explicit_bucket_histogram_aggregation._value[1], 2)
 
 # The third bucket keeps count of values between (2, 4] (3 and 4)
- self.assertEqual(
- explicit_bucket_histogram_aggregation._current_value[2], 2
- )
+ self.assertEqual(explicit_bucket_histogram_aggregation._value[2], 2)
 
 # The fourth bucket keeps count of values between (4, inf) (5)
- self.assertEqual(
- explicit_bucket_histogram_aggregation._current_value[3], 1
- )
+ 
self.assertEqual(explicit_bucket_histogram_aggregation._value[3], 1) histo = explicit_bucket_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 1 From d4e13bdf95190314b0d21a9357f850fa2e6a4cd3 Mon Sep 17 00:00:00 2001 From: soumyadeepm04 <84105194+soumyadeepm04@users.noreply.github.com> Date: Thu, 18 Jul 2024 19:51:51 -0400 Subject: [PATCH 8/8] optional scope attribute for tracer creation (#4028) --- CHANGELOG.md | 2 ++ .../src/opentelemetry/trace/__init__.py | 15 +++++++++++- opentelemetry-api/tests/trace/test_globals.py | 6 +++-- opentelemetry-api/tests/trace/test_proxy.py | 2 ++ .../src/opentelemetry/sdk/trace/__init__.py | 2 ++ opentelemetry-sdk/tests/trace/test_trace.py | 24 +++++++++++++++++++ 6 files changed, 48 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 439a9eb9640..3d0a605580d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## Unreleased +- optional scope attribute for tracer creation + ([#4028](https://github.com/open-telemetry/opentelemetry-python/pull/4028)) - OTLP exporter is encoding invalid span/trace IDs in the logs fix ([#4006](https://github.com/open-telemetry/opentelemetry-python/pull/4006)) - Update sdk process resource detector `process.command_args` attribute to also include the executable itself diff --git a/opentelemetry-api/src/opentelemetry/trace/__init__.py b/opentelemetry-api/src/opentelemetry/trace/__init__.py index 28300f408c3..5de5a240d6f 100644 --- a/opentelemetry-api/src/opentelemetry/trace/__init__.py +++ b/opentelemetry-api/src/opentelemetry/trace/__init__.py @@ -189,6 +189,7 @@ def get_tracer( instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": """Returns a `Tracer` for use by the given instrumentation library. @@ -216,6 +217,7 @@ def get_tracer( ``importlib.metadata.version(instrumenting_library_name)``. schema_url: Optional. Specifies the Schema URL of the emitted telemetry. + attributes: Optional. Specifies the attributes of the emitted telemetry. 
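+
+ Example (illustrative only; the module name, version, schema URL and
+ attribute key/value below are placeholders, not required values)::
+
+ tracer = tracer_provider.get_tracer(
+ "instrumenting.module.name",
+ instrumenting_library_version="1.0.0",
+ schema_url="https://opentelemetry.io/schemas/1.11.0",
+ attributes={"tenant.id": "example-tenant"},
+ )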
""" @@ -230,6 +232,7 @@ def get_tracer( instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": # pylint:disable=no-self-use,unused-argument return NoOpTracer() @@ -249,17 +252,20 @@ def get_tracer( instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": if _TRACER_PROVIDER: return _TRACER_PROVIDER.get_tracer( instrumenting_module_name, instrumenting_library_version, schema_url, + attributes, ) return ProxyTracer( instrumenting_module_name, instrumenting_library_version, schema_url, + attributes, ) @@ -407,10 +413,12 @@ def __init__( instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, ): self._instrumenting_module_name = instrumenting_module_name self._instrumenting_library_version = instrumenting_library_version self._schema_url = schema_url + self._attributes = attributes self._real_tracer: Optional[Tracer] = None self._noop_tracer = NoOpTracer() @@ -424,6 +432,7 @@ def _tracer(self) -> Tracer: self._instrumenting_module_name, self._instrumenting_library_version, self._schema_url, + self._attributes, ) return self._real_tracer return self._noop_tracer @@ -492,6 +501,7 @@ def get_tracer( instrumenting_library_version: typing.Optional[str] = None, tracer_provider: Optional[TracerProvider] = None, schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, ) -> "Tracer": """Returns a `Tracer` for use by the given instrumentation library. 
@@ -503,7 +513,10 @@ def get_tracer( if tracer_provider is None: tracer_provider = get_tracer_provider() return tracer_provider.get_tracer( - instrumenting_module_name, instrumenting_library_version, schema_url + instrumenting_module_name, + instrumenting_library_version, + schema_url, + attributes, ) diff --git a/opentelemetry-api/tests/trace/test_globals.py b/opentelemetry-api/tests/trace/test_globals.py index fdb213bae93..6860f98e9e4 100644 --- a/opentelemetry-api/tests/trace/test_globals.py +++ b/opentelemetry-api/tests/trace/test_globals.py @@ -33,10 +33,12 @@ class TestGlobals(TraceGlobalsTest, unittest.TestCase): def test_get_tracer(mock_tracer_provider): # type: ignore """trace.get_tracer should proxy to the global tracer provider.""" trace.get_tracer("foo", "var") - mock_tracer_provider.get_tracer.assert_called_with("foo", "var", None) + mock_tracer_provider.get_tracer.assert_called_with( + "foo", "var", None, None + ) mock_provider = Mock() trace.get_tracer("foo", "var", mock_provider) - mock_provider.get_tracer.assert_called_with("foo", "var", None) + mock_provider.get_tracer.assert_called_with("foo", "var", None, None) class TestGlobalsConcurrency(TraceGlobalsTest, ConcurrencyTestBase): diff --git a/opentelemetry-api/tests/trace/test_proxy.py b/opentelemetry-api/tests/trace/test_proxy.py index 8c20d054913..caf847777cf 100644 --- a/opentelemetry-api/tests/trace/test_proxy.py +++ b/opentelemetry-api/tests/trace/test_proxy.py @@ -24,6 +24,7 @@ Span, ) from opentelemetry.util._decorator import _agnosticcontextmanager +from opentelemetry.util.types import Attributes class TestProvider(trace.NoOpTracerProvider): @@ -32,6 +33,7 @@ def get_tracer( instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, + attributes: typing.Optional[Attributes] = None, ) -> trace.Tracer: return TestTracer() diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py index a7094b547c9..58cbf01e08b 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py @@ -1230,6 +1230,7 @@ def get_tracer( instrumenting_module_name: str, instrumenting_library_version: typing.Optional[str] = None, schema_url: typing.Optional[str] = None, + attributes: typing.Optional[types.Attributes] = None, ) -> "trace_api.Tracer": if self._disabled: logger.warning("SDK is disabled.") @@ -1267,6 +1268,7 @@ def get_tracer( instrumenting_module_name, instrumenting_library_version, schema_url, + attributes, ), ) diff --git a/opentelemetry-sdk/tests/trace/test_trace.py b/opentelemetry-sdk/tests/trace/test_trace.py index 9f375643092..d039df51ae4 100644 --- a/opentelemetry-sdk/tests/trace/test_trace.py +++ b/opentelemetry-sdk/tests/trace/test_trace.py @@ -165,6 +165,30 @@ def test_tracer_provider_accepts_concurrent_multi_span_processor(self): span_processor, tracer_provider._active_span_processor ) + def test_get_tracer_sdk(self): + tracer_provider = trace.TracerProvider() + tracer = tracer_provider.get_tracer( + "module_name", + "library_version", + "schema_url", + {"key1": "value1", "key2": 6}, + ) + # pylint: disable=protected-access + self.assertEqual(tracer._instrumentation_scope._name, "module_name") + # pylint: disable=protected-access + self.assertEqual( + tracer._instrumentation_scope._version, "library_version" + ) + # pylint: disable=protected-access + self.assertEqual( + 
tracer._instrumentation_scope._schema_url, "schema_url" + ) + # pylint: disable=protected-access + self.assertEqual( + tracer._instrumentation_scope._attributes, + {"key1": "value1", "key2": 6}, + ) + @mock.patch.dict("os.environ", {OTEL_SDK_DISABLED: "true"}) def test_get_tracer_with_sdk_disabled(self): tracer_provider = trace.TracerProvider()