From 306286e382aee3b178da4a783dd1d742a68e5813 Mon Sep 17 00:00:00 2001 From: Zhongyang Wu Date: Fri, 5 Jan 2024 08:56:35 -0800 Subject: [PATCH 01/13] fix(proto): use camelCase for json (de)serialization. (#1462) Part of the effort of #1327 as we need json formats for assertation in integration tests. ## Changes - add configuration to serde to deserialize the json in camelCase field name - add custom (de)serialization for traceId, spanId as they are case-insensitive hex encoded string(see [here](https://opentelemetry.io/docs/specs/otlp/#json-protobuf-encoding)) - add custom (de)serialization for `KeyValue` - add tests for above, and a test using example json files ## Merge requirement checklist * [ x] [CONTRIBUTING](https://github.com/open-telemetry/opentelemetry-rust/blob/main/CONTRIBUTING.md) guidelines followed * [x] Unit tests added/updated (if applicable) * [x] Appropriate `CHANGELOG.md` files updated for non-trivial, user-facing changes * [] Changes in public API reviewed (if applicable) --- opentelemetry-proto/Cargo.toml | 9 +- opentelemetry-proto/src/proto.rs | 74 +++++++++ .../opentelemetry.proto.collector.logs.v1.rs | 3 + ...pentelemetry.proto.collector.metrics.v1.rs | 3 + .../opentelemetry.proto.collector.trace.v1.rs | 3 + .../tonic/opentelemetry.proto.common.v1.rs | 14 ++ .../tonic/opentelemetry.proto.logs.v1.rs | 6 + .../tonic/opentelemetry.proto.metrics.v1.rs | 21 +++ .../tonic/opentelemetry.proto.resource.v1.rs | 2 + .../tonic/opentelemetry.proto.trace.v1.rs | 34 +++++ .../tonic/opentelemetry.proto.tracez.v1.rs | 4 + opentelemetry-proto/tests/grpc_build.rs | 42 +++++- opentelemetry-proto/tests/json_deserialize.rs | 142 ++++++++++++++++++ 13 files changed, 355 insertions(+), 2 deletions(-) create mode 100644 opentelemetry-proto/tests/json_deserialize.rs diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index d9192982ce0..e3c11e81dcc 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ 
-23,6 +23,11 @@ doctest = false name = "grpc_build" path = "tests/grpc_build.rs" +[[test]] +name = "json_deserialize" +path = "tests/json_deserialize.rs" + + [features] default = [] @@ -42,7 +47,7 @@ zpages = ["trace"] # add ons with-schemars = ["schemars"] -with-serde = ["serde"] +with-serde = ["serde", "hex"] [dependencies] grpcio = { workspace = true, optional = true, features = ["prost-codec"] } @@ -52,9 +57,11 @@ opentelemetry = { version = "0.21", default-features = false, path = "../opentel opentelemetry_sdk = { version = "0.21", default-features = false, path = "../opentelemetry-sdk" } schemars = { version = "0.8", optional = true } serde = { workspace = true, optional = true, features = ["serde_derive"] } +hex = { version = "0.4.3", optional = true } [dev-dependencies] grpcio-compiler = { version = "0.12.1", default-features = false, features = ["prost-codec"] } tonic-build = { version = "0.9.0" } prost-build = { version = "0.11.1" } tempfile = "3.3.0" +serde_json = "1.0" \ No newline at end of file diff --git a/opentelemetry-proto/src/proto.rs b/opentelemetry-proto/src/proto.rs index 419302a01f4..ea5396dba3f 100644 --- a/opentelemetry-proto/src/proto.rs +++ b/opentelemetry-proto/src/proto.rs @@ -1,3 +1,77 @@ +/// provide serde support for proto traceIds and spanIds. +/// Those are hex encoded strings in the jsons but they are byte arrays in the proto. 
+/// See https://opentelemetry.io/docs/specs/otlp/#json-protobuf-encoding for more details +#[cfg(all(feature = "with-serde", feature = "gen-tonic-messages"))] +pub(crate) mod serializers { + use crate::tonic::common::v1::any_value::Value; + use crate::tonic::common::v1::AnyValue; + use serde::de::{self, MapAccess, Visitor}; + use serde::ser::SerializeStruct; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use std::fmt; + + // hex string <-> bytes conversion + + pub fn serialize_to_hex_string(bytes: &[u8], serializer: S) -> Result + where + S: Serializer, + { + let hex_string = hex::encode(bytes); + serializer.serialize_str(&hex_string) + } + + pub fn deserialize_from_hex_string<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + struct BytesVisitor; + + impl<'de> Visitor<'de> for BytesVisitor { + type Value = Vec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string representing hex-encoded bytes") + } + + fn visit_str(self, value: &str) -> Result, E> + where + E: de::Error, + { + hex::decode(value).map_err(E::custom) + } + } + + deserializer.deserialize_str(BytesVisitor) + } + + // AnyValue <-> KeyValue conversion + pub fn serialize_to_value(value: &Option, serializer: S) -> Result + where + S: Serializer, + { + // Serialize any_value::Value using its own implementation + // If value is None, it will be serialized as such + match value { + Some(value) => value.value.serialize(serializer), + None => serializer.serialize_none(), + } + + } + + pub fn deserialize_from_value<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + // Deserialize any_value::Value using its own implementation + let value = Option::::deserialize(deserializer)?; + + // Wrap the deserialized value in AnyValue + Ok(Some(AnyValue { value })) + } + + +} + #[cfg(feature = "gen-tonic-messages")] #[path = "proto/tonic"] /// Generated files using 
[`tonic`](https://docs.rs/crate/tonic) and [`prost`](https://docs.rs/crate/prost) diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs index bd8e8e0c426..464656dd64b 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.logs.v1.rs @@ -1,5 +1,6 @@ #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportLogsServiceRequest { @@ -15,6 +16,7 @@ pub struct ExportLogsServiceRequest { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportLogsServiceResponse { @@ -38,6 +40,7 @@ pub struct ExportLogsServiceResponse { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportLogsPartialSuccess { diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs index c230e2fc966..af2bd67b3d6 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs +++ 
b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.metrics.v1.rs @@ -1,5 +1,6 @@ #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportMetricsServiceRequest { @@ -15,6 +16,7 @@ pub struct ExportMetricsServiceRequest { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportMetricsServiceResponse { @@ -38,6 +40,7 @@ pub struct ExportMetricsServiceResponse { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportMetricsPartialSuccess { diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs index 118cf7d8c67..93bb8f238d4 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.collector.trace.v1.rs @@ -1,5 +1,6 @@ #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub 
struct ExportTraceServiceRequest { @@ -15,6 +16,7 @@ pub struct ExportTraceServiceRequest { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportTraceServiceResponse { @@ -38,6 +40,7 @@ pub struct ExportTraceServiceResponse { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportTracePartialSuccess { diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs index 95ee152f135..097f2b055d4 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.common.v1.rs @@ -3,6 +3,7 @@ /// object containing arrays, key-value lists and primitives. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnyValue { @@ -17,6 +18,7 @@ pub mod any_value { /// in which case this AnyValue is considered to be "empty". 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { @@ -40,6 +42,7 @@ pub mod any_value { /// since oneof in AnyValue does not allow repeated fields. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ArrayValue { @@ -54,6 +57,7 @@ pub struct ArrayValue { /// are semantically equivalent. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct KeyValueList { @@ -68,18 +72,28 @@ pub struct KeyValueList { /// attributes, etc. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct KeyValue { #[prost(string, tag = "1")] pub key: ::prost::alloc::string::String, #[prost(message, optional, tag = "2")] + #[cfg_attr( + feature = "with-serde", + serde( + serialize_with = "crate::proto::serializers::serialize_to_value", + deserialize_with = "crate::proto::serializers::deserialize_from_value" + ) + )] pub value: ::core::option::Option, } /// InstrumentationScope is a message representing the instrumentation scope information /// such as the fully qualified name and version. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "with-serde", serde(default))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct InstrumentationScope { diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs index 8edf2be3f96..f35572d1643 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs @@ -10,6 +10,7 @@ /// as well. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LogsData { @@ -24,6 +25,7 @@ pub struct LogsData { /// A collection of ScopeLogs from a Resource. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ResourceLogs { @@ -42,6 +44,7 @@ pub struct ResourceLogs { /// A collection of Logs produced by a Scope. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScopeLogs { @@ -61,6 +64,7 @@ pub struct ScopeLogs { /// #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LogRecord { @@ -144,6 +148,7 @@ pub struct LogRecord { /// Possible values for LogRecord.SeverityNumber. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum SeverityNumber { @@ -248,6 +253,7 @@ impl SeverityNumber { /// #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum LogRecordFlags { diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs index 6322b594b01..8f3675479bb 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs @@ -10,6 +10,7 @@ /// as well. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct MetricsData { @@ -24,6 +25,7 @@ pub struct MetricsData { /// A collection of ScopeMetrics from a Resource. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ResourceMetrics { @@ -42,6 +44,7 @@ pub struct ResourceMetrics { /// A collection of Metrics produced by an Scope. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScopeMetrics { @@ -144,6 +147,7 @@ pub struct ScopeMetrics { /// strongly encouraged. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Metric { @@ -170,6 +174,7 @@ pub mod metric { /// the time interval over which they are reported. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Data { @@ -196,6 +201,7 @@ pub mod metric { /// "StartTimeUnixNano" is ignored for all data points. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Gauge { @@ -206,6 +212,7 @@ pub struct Gauge { /// reported measurements over a time interval. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Sum { @@ -223,6 +230,7 @@ pub struct Sum { /// as a Histogram of all reported measurements over a time interval. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Histogram { @@ -237,6 +245,7 @@ pub struct Histogram { /// as a ExponentialHistogram of all reported double measurements over a time interval. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExponentialHistogram { @@ -255,6 +264,7 @@ pub struct ExponentialHistogram { /// recommended for new applications. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Summary { @@ -265,6 +275,7 @@ pub struct Summary { /// time-varying scalar value of a metric. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct NumberDataPoint { @@ -306,6 +317,7 @@ pub mod number_data_point { /// value fields is not present inside this oneof. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { @@ -327,6 +339,7 @@ pub mod number_data_point { /// "sum" are known. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct HistogramDataPoint { @@ -410,6 +423,7 @@ pub struct HistogramDataPoint { /// #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExponentialHistogramDataPoint { @@ -509,6 +523,7 @@ pub mod exponential_histogram_data_point { /// of counts. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Buckets { @@ -534,6 +549,7 @@ pub mod exponential_histogram_data_point { /// time-varying values of a Summary metric. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct SummaryDataPoint { @@ -590,6 +606,7 @@ pub mod summary_data_point { /// #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ValueAtQuantile { @@ -610,6 +627,7 @@ pub mod summary_data_point { /// exemplar was recorded. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Exemplar { @@ -649,6 +667,7 @@ pub mod exemplar { /// inside this oneof. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { @@ -663,6 +682,7 @@ pub mod exemplar { /// which they are aggregated. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum AggregationTemporality { @@ -761,6 +781,7 @@ impl AggregationTemporality { /// #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum DataPointFlags { diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs index 6ce1b1ed250..2623aeee996 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.resource.v1.rs @@ -1,6 +1,8 @@ /// Resource information. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "with-serde", serde(default))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Resource { diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs index 67314a02118..c119fc031c5 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs @@ -10,6 +10,7 @@ /// as well. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TracesData { @@ -24,6 +25,8 @@ pub struct TracesData { /// A collection of ScopeSpans from a Resource. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "with-serde", serde(default))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ResourceSpans { @@ -42,6 +45,8 @@ pub struct ResourceSpans { /// A collection of Spans produced by an InstrumentationScope. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "with-serde", serde(default))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ScopeSpans { @@ -62,6 +67,8 @@ pub struct ScopeSpans { /// The next available field id is 17. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] +#[cfg_attr(feature = "with-serde", serde(default))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Span { @@ -72,6 +79,13 @@ pub struct Span { /// /// This field is required. #[prost(bytes = "vec", tag = "1")] + #[cfg_attr( + feature = "with-serde", + serde( + serialize_with = "crate::proto::serializers::serialize_to_hex_string", + deserialize_with = "crate::proto::serializers::deserialize_from_hex_string" + ) + )] pub trace_id: ::prost::alloc::vec::Vec, /// A unique identifier for a span within a trace, assigned when the span /// is created. The ID is an 8-byte array. An ID with all zeroes OR of length @@ -80,6 +94,13 @@ pub struct Span { /// /// This field is required. #[prost(bytes = "vec", tag = "2")] + #[cfg_attr( + feature = "with-serde", + serde( + serialize_with = "crate::proto::serializers::serialize_to_hex_string", + deserialize_with = "crate::proto::serializers::deserialize_from_hex_string" + ) + )] pub span_id: ::prost::alloc::vec::Vec, /// trace_state conveys information about request position in multiple distributed tracing graphs. /// It is a trace_state in w3c-trace-context format: @@ -89,6 +110,13 @@ pub struct Span { /// The `span_id` of this span's parent span. 
If this is a root span, then this /// field must be empty. The ID is an 8-byte array. #[prost(bytes = "vec", tag = "4")] + #[cfg_attr( + feature = "with-serde", + serde( + serialize_with = "crate::proto::serializers::serialize_to_hex_string", + deserialize_with = "crate::proto::serializers::deserialize_from_hex_string" + ) + )] pub parent_span_id: ::prost::alloc::vec::Vec, /// A description of the span's operation. /// @@ -169,6 +197,7 @@ pub mod span { /// text description and key-value pairs. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Event { @@ -197,6 +226,8 @@ pub mod span { /// traces or when the handler receives a request from a different project. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] + #[cfg_attr(feature = "with-serde", serde(default))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Link { @@ -226,6 +257,7 @@ pub mod span { /// in addition to a parent/child relationship. #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[derive( Clone, Copy, @@ -293,6 +325,7 @@ pub mod span { /// programming environments, including REST APIs and RPC APIs. 
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Status { @@ -309,6 +342,7 @@ pub mod status { /// #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[derive( Clone, Copy, diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs index 88989fcde28..0c07f0b9252 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.tracez.v1.rs @@ -1,5 +1,6 @@ #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TracezCounts { @@ -15,6 +16,7 @@ pub struct TracezCounts { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LatencyData { @@ -40,6 +42,7 @@ pub struct LatencyData { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct RunningData { @@ -63,6 +66,7 @@ pub struct RunningData { } #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ErrorData { diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index 4ba0a3c64d4..e1ba5fcb0c1 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -54,7 +54,7 @@ fn build_tonic() { let out_dir = TempDir::new().expect("failed to create temp dir to store the generated files"); // build the generated files into OUT_DIR for now so we don't have to touch the src unless we have to - tonic_build::configure() + let mut builder = tonic_build::configure() .build_server(true) .build_client(true) .server_mod_attribute(".", "#[cfg(feature = \"gen-tonic\")]") @@ -67,6 +67,46 @@ fn build_tonic() { ".", "#[cfg_attr(feature = \"with-serde\", derive(serde::Serialize, serde::Deserialize))]", ) + .type_attribute( + ".", + "#[cfg_attr(feature = \"with-serde\", serde(rename_all = \"camelCase\"))]", + ); + + // optional numeric and String field need to default it to 0 otherwise JSON files without those field + // cannot deserialize + // we cannot add serde(default) to all generated types because enums cannot be annotated with serde(default) + for path in [ + "trace.v1.Span", + "trace.v1.Span.Link", + "trace.v1.ScopeSpans", + "trace.v1.ResourceSpans", + "common.v1.InstrumentationScope", + "resource.v1.Resource", + ] { + builder = builder.type_attribute( + path, + "#[cfg_attr(feature = \"with-serde\", serde(default))]", + ) + } + + // special serializer and deserializer for traceId and spanId + // OTLP/JSON format uses 
hex string for traceId and spanId + // the proto file uses bytes for traceId and spanId + // Thus, special serializer and deserializer are needed + for path in [ + "trace.v1.Span.trace_id", + "trace.v1.Span.span_id", + "trace.v1.Span.parent_span_id", + ] { + builder = builder + .field_attribute(path, "#[cfg_attr(feature = \"with-serde\", serde(serialize_with = \"crate::proto::serializers::serialize_to_hex_string\", deserialize_with = \"crate::proto::serializers::deserialize_from_hex_string\"))]") + } + + // add custom serializer and deserializer for AnyValue + builder = builder + .field_attribute("common.v1.KeyValue.value", "#[cfg_attr(feature =\"with-serde\", serde(serialize_with = \"crate::proto::serializers::serialize_to_value\", deserialize_with = \"crate::proto::serializers::deserialize_from_value\"))]"); + + builder .out_dir(out_dir.path()) .compile(TONIC_PROTO_FILES, TONIC_INCLUDES) .expect("cannot compile protobuf using tonic"); diff --git a/opentelemetry-proto/tests/json_deserialize.rs b/opentelemetry-proto/tests/json_deserialize.rs new file mode 100644 index 00000000000..8a463fb4291 --- /dev/null +++ b/opentelemetry-proto/tests/json_deserialize.rs @@ -0,0 +1,142 @@ +#[cfg(all(feature = "with-serde", feature = "gen-tonic-messages"))] +mod json_deserialize { + use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; + use opentelemetry_proto::tonic::common::v1::any_value::Value; + use opentelemetry_proto::tonic::common::v1::KeyValue; + + // copied from example json file + // see https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/examples/trace.json + const TRACES_JSON: &str = r#" + { + "resourceSpans": [ + { + "resource": { + "attributes": [ + { + "key": "service.name", + "value": { + "stringValue": "my.service" + } + } + ] + }, + "scopeSpans": [ + { + "scope": { + "name": "my.library", + "version": "1.0.0", + "attributes": [ + { + "key": "my.scope.attribute", + "value": { + "stringValue": "some scope 
attribute" + } + } + ] + }, + "spans": [ + { + "traceId": "5B8EFFF798038103D269B633813FC60C", + "spanId": "EEE19B7EC3C1B174", + "parentSpanId": "EEE19B7EC3C1B173", + "name": "I'm a server span", + "startTimeUnixNano": 1544712660000000000, + "endTimeUnixNano": 1544712661000000000, + "kind": 2, + "attributes": [ + { + "key": "my.span.attr", + "value": { + "stringValue": "some value" + } + } + ] + } + ] + } + ] + } + ] +} + "#; + + const KEY_VALUES_JSON: &str = r#" + { + "key": "service.name", + "value": { + "stringValue": "my.service" + } + } + "#; + + #[test] + fn test_deserialize_traces() { + let request: ExportTraceServiceRequest = serde_json::from_str(TRACES_JSON).unwrap(); + assert_eq!( + request.resource_spans[0].scope_spans[0].spans[0].trace_id, + hex::decode("5B8EFFF798038103D269B633813FC60C").unwrap() + ) + } + + #[test] + fn test_deserialize_values() { + // strings + { + let value: Value = serde_json::from_str( + r#" + { + "stringValue": "my.service" + } + "#, + ) + .unwrap(); + assert_eq!(value, Value::StringValue("my.service".to_string())); + } + // bools + { + let value: Value = serde_json::from_str( + r#" + { + "boolValue": true + } + "#, + ) + .unwrap(); + assert_eq!(value, Value::BoolValue(true)); + } + // ints + { + let value: Value = serde_json::from_str( + r#" + { + "intValue": 123 + }"#, + ) + .unwrap(); + assert_eq!(value, Value::IntValue(123)); + } + // doubles + { + let value: Value = serde_json::from_str( + r#" + { + "doubleValue": 123.456 + }"#, + ) + .unwrap(); + assert_eq!(value, Value::DoubleValue(123.456)); + } + // todo(zhongyang): add tests for arrays and objects(need an example from other language) + } + + #[test] + fn test_deserialize_key_values() { + let keyvalue: KeyValue = serde_json::from_str(KEY_VALUES_JSON).unwrap(); + + assert_eq!(keyvalue.key, "service.name".to_string()); + assert_eq!( + keyvalue.value.unwrap().value.unwrap(), + Value::StringValue("my.service".to_string()) + ); + } +} From 
27d338dd954ef7681242ead47a8a61c964dcc428 Mon Sep 17 00:00:00 2001 From: Olivier Soucy Date: Sun, 7 Jan 2024 23:43:42 -0500 Subject: [PATCH 02/13] Add new benches for logs (#1450) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement new bench ideas from this [PR](https://github.com/open-telemetry/opentelemetry-rust/pull/1431)’s comments. Please provide a brief description of the changes here. ## Merge requirement checklist * [ ] [CONTRIBUTING](https://github.com/open-telemetry/opentelemetry-rust/blob/main/CONTRIBUTING.md) guidelines followed * [ ] Unit tests added/updated (if applicable) * [ ] Appropriate `CHANGELOG.md` files updated for non-trivial, user-facing changes * [ ] Changes in public API reviewed (if applicable) Co-authored-by: Lalit Kumar Bhasin --- opentelemetry-sdk/benches/log.rs | 168 ++++++++++++++++++++++++++++++- 1 file changed, 163 insertions(+), 5 deletions(-) diff --git a/opentelemetry-sdk/benches/log.rs b/opentelemetry-sdk/benches/log.rs index 0bdeb01dedc..9b66ad0447c 100644 --- a/opentelemetry-sdk/benches/log.rs +++ b/opentelemetry-sdk/benches/log.rs @@ -1,11 +1,13 @@ +use std::collections::HashMap; use std::time::SystemTime; use async_trait::async_trait; use criterion::{criterion_group, criterion_main, Criterion}; -use opentelemetry::logs::{LogRecord, LogResult, Logger, LoggerProvider as _, Severity}; +use opentelemetry::logs::{AnyValue, LogRecord, LogResult, Logger, LoggerProvider as _, Severity}; use opentelemetry::trace::Tracer; use opentelemetry::trace::TracerProvider as _; +use opentelemetry::Key; use opentelemetry_sdk::export::logs::{LogData, LogExporter}; use opentelemetry_sdk::logs::LoggerProvider; use opentelemetry_sdk::trace::{config, Sampler, TracerProvider}; @@ -60,6 +62,129 @@ fn criterion_benchmark(c: &mut Criterion) { logger.emit(LogRecord::builder().with_body("simple log").build()) }); + log_benchmark_group(c, "simple-log-with-int", |logger| { + logger.emit( + 
LogRecord::builder() + .with_body("simple log") + .with_attribute("testint", 2) + .build(), + ) + }); + + log_benchmark_group(c, "simple-log-with-double", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testdouble", 2.2) + .build(), + ) + }); + + log_benchmark_group(c, "simple-log-with-string", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("teststring", "test") + .build(), + ) + }); + + log_benchmark_group(c, "simple-log-with-bool", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testbool", AnyValue::Boolean(true)) + .build(), + ) + }); + + let bytes = AnyValue::Bytes(vec![25u8, 30u8, 40u8]); + log_benchmark_group(c, "simple-log-with-bytes", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testbytes", bytes.clone()) + .build(), + ) + }); + + let bytes = AnyValue::Bytes(vec![ + 25u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, + 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, + 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, + 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, + 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, + 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, 30u8, 40u8, + ]); + log_benchmark_group(c, "simple-log-with-a-lot-of-bytes", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testbytes", bytes.clone()) + .build(), + ) + }); + + let vec_any_values = AnyValue::ListAny(vec![AnyValue::Int(25), "test".into(), true.into()]); + log_benchmark_group(c, "simple-log-with-vec-any-value", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testvec", vec_any_values.clone()) + 
.build(), + ) + }); + + let vec_any_values = AnyValue::ListAny(vec![AnyValue::Int(25), "test".into(), true.into()]); + let vec_any_values = AnyValue::ListAny(vec![ + AnyValue::Int(25), + "test".into(), + true.into(), + vec_any_values, + ]); + log_benchmark_group(c, "simple-log-with-inner-vec-any-value", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testvec", vec_any_values.clone()) + .build(), + ) + }); + + let map_any_values = AnyValue::Map(HashMap::from([ + ("testint".into(), 2.into()), + ("testdouble".into(), 2.2.into()), + ("teststring".into(), "test".into()), + ])); + log_benchmark_group(c, "simple-log-with-map-any-value", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testmap", map_any_values.clone()) + .build(), + ) + }); + + let map_any_values = AnyValue::Map(HashMap::from([ + ("testint".into(), 2.into()), + ("testdouble".into(), 2.2.into()), + ("teststring".into(), "test".into()), + ])); + let map_any_values = AnyValue::Map(HashMap::from([ + ("testint".into(), 2.into()), + ("testdouble".into(), 2.2.into()), + ("teststring".into(), "test".into()), + ("testmap".into(), map_any_values), + ])); + log_benchmark_group(c, "simple-log-with-inner-map-any-value", |logger| { + logger.emit( + LogRecord::builder() + .with_body("simple log") + .with_attribute("testmap", map_any_values.clone()) + .build(), + ) + }); + log_benchmark_group(c, "long-log", |logger| { logger.emit(LogRecord::builder().with_body("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Gravida in fermentum et sollicitudin ac orci phasellus. Ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at augue. Magna etiam tempor orci eu. 
Sed tempus urna et pharetra pharetra massa.").build()) }); @@ -105,14 +230,47 @@ fn criterion_benchmark(c: &mut Criterion) { .with_attribute("event.id", 20) .with_attribute("user.name", "otel") .with_attribute("user.email", "otel@opentelemetry.io") - .with_attribute("log.source.file.name", "log.rs") - .with_attribute("log.source.file.path", "opentelemetry_sdk/benches/log.rs") - .with_attribute("log.source.file.line", 96) - .with_attribute("log.module.path", "opentelemetry_sdk::benches::log") + .with_attribute("code.filename", "log.rs") + .with_attribute("code.filepath", "opentelemetry_sdk/benches/log.rs") + .with_attribute("code.lineno", 96) + .with_attribute("code.namespace", "opentelemetry_sdk::benches::log") .with_attribute("log.target", "opentelemetry_sdk::benches::log") .build(), ) }); + + let attributes: Vec<(Key, AnyValue)> = vec![ + ("name".into(), "my-event-name".into()), + ("event-id".into(), 20.into()), + ("user.name".into(), "otel".into()), + ("user.email".into(), "otel@opentelemetry.io".into()), + ("code.filename".into(), "log.rs".into()), + ( + "code.filepath".into(), + "opentelemetry_sdk/benches/log.rs".into(), + ), + ("code.lineno".into(), 96.into()), + ( + "code.namespace".into(), + "opentelemetry_sdk::benches::log".into(), + ), + ( + "log.target".into(), + "opentelemetry_sdk::benches::log".into(), + ), + ]; + log_benchmark_group(c, "full-log-with-attributes", |logger| { + logger.emit( + LogRecord::builder() + .with_body("full log") + .with_timestamp(now) + .with_observed_timestamp(now) + .with_severity_number(Severity::Warn) + .with_severity_text(Severity::Warn.name()) + .with_attributes(attributes.clone()) + .build(), + ) + }); } criterion_group!(benches, criterion_benchmark); From 67e6a71d8ca5ee487ccd374ed7dafe480df9547d Mon Sep 17 00:00:00 2001 From: Cosmin Lazar Date: Mon, 22 Jan 2024 18:12:32 +0100 Subject: [PATCH 03/13] Expose log batchconfig (#1471) --- opentelemetry-otlp/src/logs.rs | 21 +- opentelemetry-sdk/CHANGELOG.md | 1 + 
opentelemetry-sdk/src/logs/log_processor.rs | 308 +++++++++++++++++++- 3 files changed, 321 insertions(+), 9 deletions(-) diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs index 21f8fbb0ed8..bb69b062382 100644 --- a/opentelemetry-otlp/src/logs.rs +++ b/opentelemetry-otlp/src/logs.rs @@ -42,6 +42,7 @@ impl OtlpPipeline { OtlpLogPipeline { log_config: None, exporter_builder: NoExporterConfig(()), + batch_config: None, } } } @@ -124,6 +125,7 @@ impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { pub struct OtlpLogPipeline { exporter_builder: EB, log_config: Option, + batch_config: Option, } impl OtlpLogPipeline { @@ -132,6 +134,12 @@ impl OtlpLogPipeline { self.log_config = Some(log_config); self } + + /// Set the batch log processor configuration, and it will override the env vars. + pub fn with_batch_config(mut self, batch_config: opentelemetry_sdk::logs::BatchConfig) -> Self { + self.batch_config = Some(batch_config); + self + } } impl OtlpLogPipeline { @@ -143,6 +151,7 @@ impl OtlpLogPipeline { OtlpLogPipeline { exporter_builder: pipeline.into(), log_config: self.log_config, + batch_config: self.batch_config, } } } @@ -160,7 +169,7 @@ impl OtlpLogPipeline { )) } - /// Install the configured log exporter and a batch span processor using the + /// Install the configured log exporter and a batch log processor using the /// specified runtime. /// /// Returns a [`Logger`] with the name `opentelemetry-otlp` and the current crate version. 
@@ -174,6 +183,7 @@ impl OtlpLogPipeline { self.exporter_builder.build_log_exporter()?, self.log_config, runtime, + self.batch_config, )) } } @@ -202,9 +212,14 @@ fn build_batch_with_exporter( exporter: LogExporter, log_config: Option, runtime: R, + batch_config: Option, ) -> opentelemetry_sdk::logs::Logger { - let mut provider_builder = - opentelemetry_sdk::logs::LoggerProvider::builder().with_batch_exporter(exporter, runtime); + let mut provider_builder = opentelemetry_sdk::logs::LoggerProvider::builder(); + let batch_processor = opentelemetry_sdk::logs::BatchLogProcessor::builder(exporter, runtime) + .with_batch_config(batch_config.unwrap_or_default()) + .build(); + provider_builder = provider_builder.with_log_processor(batch_processor); + if let Some(config) = log_config { provider_builder = provider_builder.with_config(config); } diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 363e71e5d76..a68a812d9c1 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -5,6 +5,7 @@ ### Added - [#1410](https://github.com/open-telemetry/opentelemetry-rust/pull/1410) Add experimental synchronous gauge +- [#1471](https://github.com/open-telemetry/opentelemetry-rust/pull/1471) Configure batch log record processor via [`OTEL_BLRP_*`](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#batch-logrecord-processor) environment variables and via `OtlpLogPipeline::with_batch_config` ### Changed diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index cc3ffd5e13a..f62e68413b4 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -13,12 +13,30 @@ use opentelemetry::{ global, logs::{LogError, LogResult}, }; -use std::sync::Mutex; +use std::{env, sync::Mutex}; use std::{ fmt::{self, Debug, Formatter}, + str::FromStr, 
time::Duration, }; +/// Delay interval between two consecutive exports. +const OTEL_BLRP_SCHEDULE_DELAY: &str = "OTEL_BLRP_SCHEDULE_DELAY"; +/// Default delay interval between two consecutive exports. +const OTEL_BLRP_SCHEDULE_DELAY_DEFAULT: u64 = 1_000; +/// Maximum allowed time to export data. +const OTEL_BLRP_EXPORT_TIMEOUT: &str = "OTEL_BLRP_EXPORT_TIMEOUT"; +/// Default maximum allowed time to export data. +const OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT: u64 = 30_000; +/// Maximum queue size. +const OTEL_BLRP_MAX_QUEUE_SIZE: &str = "OTEL_BLRP_MAX_QUEUE_SIZE"; +/// Default maximum queue size. +const OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT: usize = 2_048; +/// Maximum batch size, must be less than or equal to OTEL_BLRP_MAX_QUEUE_SIZE. +const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE: &str = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"; +/// Default maximum batch size. +const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT: usize = 512; + /// The interface for plugging into a [`Logger`]. /// /// [`Logger`]: crate::logs::Logger @@ -281,12 +299,87 @@ pub struct BatchConfig { impl Default for BatchConfig { fn default() -> Self { - BatchConfig { - max_queue_size: 2_048, - scheduled_delay: Duration::from_millis(1_000), - max_export_batch_size: 512, - max_export_timeout: Duration::from_millis(30_000), + let mut config = BatchConfig { + max_queue_size: OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, + scheduled_delay: Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT), + max_export_batch_size: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, + max_export_timeout: Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT), + }; + + if let Some(max_queue_size) = env::var(OTEL_BLRP_MAX_QUEUE_SIZE) + .ok() + .and_then(|queue_size| usize::from_str(&queue_size).ok()) + { + config.max_queue_size = max_queue_size; + } + + if let Some(max_export_batch_size) = env::var(OTEL_BLRP_MAX_EXPORT_BATCH_SIZE) + .ok() + .and_then(|batch_size| usize::from_str(&batch_size).ok()) + { + config.max_export_batch_size = max_export_batch_size; + } + + // max 
export batch size must be less or equal to max queue size. + // we set max export batch size to max queue size if it's larger than max queue size. + if config.max_export_batch_size > config.max_queue_size { + config.max_export_batch_size = config.max_queue_size; } + + if let Some(scheduled_delay) = env::var(OTEL_BLRP_SCHEDULE_DELAY) + .ok() + .or_else(|| env::var("OTEL_BLRP_SCHEDULE_DELAY_MILLIS").ok()) + .and_then(|delay| u64::from_str(&delay).ok()) + { + config.scheduled_delay = Duration::from_millis(scheduled_delay); + } + + if let Some(max_export_timeout) = env::var(OTEL_BLRP_EXPORT_TIMEOUT) + .ok() + .or_else(|| env::var("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS").ok()) + .and_then(|s| u64::from_str(&s).ok()) + { + config.max_export_timeout = Duration::from_millis(max_export_timeout); + } + + config + } +} + +impl BatchConfig { + /// Set max_queue_size for [`BatchConfig`]. + /// It's the maximum queue size to buffer logs for delayed processing. + /// If the queue gets full it will drop the logs. + /// The default value of is 2048. + pub fn with_max_queue_size(mut self, max_queue_size: usize) -> Self { + self.max_queue_size = max_queue_size; + self + } + + /// Set scheduled_delay for [`BatchConfig`]. + /// It's the delay interval in milliseconds between two consecutive processing of batches. + /// The default value is 1000 milliseconds. + pub fn with_scheduled_delay(mut self, scheduled_delay: Duration) -> Self { + self.scheduled_delay = scheduled_delay; + self + } + + /// Set max_export_timeout for [`BatchConfig`]. + /// It's the maximum duration to export a batch of data. + /// The default value is 30000 milliseconds. + pub fn with_max_export_timeout(mut self, max_export_timeout: Duration) -> Self { + self.max_export_timeout = max_export_timeout; + self + } + + /// Set max_export_batch_size for [`BatchConfig`]. + /// It's the maximum number of logs to process in a single batch. 
If there are + /// more than one batch worth of logs then it processes multiple batches + /// of logs one batch after the other without any delay. + /// The default value is 512. + pub fn with_max_export_batch_size(mut self, max_export_batch_size: usize) -> Self { + self.max_export_batch_size = max_export_batch_size; + self } } @@ -342,6 +435,11 @@ where BatchLogProcessorBuilder { config, ..self } } + /// Set the BatchConfig for [`BatchLogProcessorBuilder`] + pub fn with_batch_config(self, config: BatchConfig) -> Self { + BatchLogProcessorBuilder { config, ..self } + } + /// Build a batch processor pub fn build(self) -> BatchLogProcessor { BatchLogProcessor::new(Box::new(self.exporter), self.config, self.runtime) @@ -360,3 +458,201 @@ enum BatchMessage { /// Shut down the worker thread, push all logs in buffer to the backend. Shutdown(oneshot::Sender), } + +#[cfg(all(test, feature = "testing", feature = "logs"))] +mod tests { + use super::{ + BatchLogProcessor, OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, + OTEL_BLRP_MAX_QUEUE_SIZE, OTEL_BLRP_SCHEDULE_DELAY, + }; + use crate::{ + logs::{ + log_processor::{ + OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, + OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, + }, + BatchConfig, + }, + runtime, + testing::logs::InMemoryLogsExporter, + }; + use std::time::Duration; + + #[test] + fn test_default_const_values() { + assert_eq!(OTEL_BLRP_SCHEDULE_DELAY, "OTEL_BLRP_SCHEDULE_DELAY"); + assert_eq!(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, 1_000); + assert_eq!(OTEL_BLRP_EXPORT_TIMEOUT, "OTEL_BLRP_EXPORT_TIMEOUT"); + assert_eq!(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, 30_000); + assert_eq!(OTEL_BLRP_MAX_QUEUE_SIZE, "OTEL_BLRP_MAX_QUEUE_SIZE"); + assert_eq!(OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, 2_048); + assert_eq!( + OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, + "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE" + ); + assert_eq!(OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, 512); + } + + #[test] + fn 
test_default_batch_config_adheres_to_specification() { + let config = BatchConfig::default(); + + assert_eq!( + config.scheduled_delay, + Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT) + ); + assert_eq!( + config.max_export_timeout, + Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT) + ); + assert_eq!(config.max_queue_size, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT); + assert_eq!( + config.max_export_batch_size, + OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT + ); + } + + #[test] + fn test_batch_config_configurable_by_env_vars() { + let env_vars = vec![ + (OTEL_BLRP_SCHEDULE_DELAY, Some("2000")), + (OTEL_BLRP_EXPORT_TIMEOUT, Some("60000")), + (OTEL_BLRP_MAX_QUEUE_SIZE, Some("4096")), + (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("1024")), + ]; + + let config = temp_env::with_vars(env_vars, BatchConfig::default); + + assert_eq!(config.scheduled_delay, Duration::from_millis(2000)); + assert_eq!(config.max_export_timeout, Duration::from_millis(60000)); + assert_eq!(config.max_queue_size, 4096); + assert_eq!(config.max_export_batch_size, 1024); + } + + #[test] + fn test_batch_config_configurable_by_env_vars_millis() { + let env_vars = vec![ + ("OTEL_BLRP_SCHEDULE_DELAY_MILLIS", Some("3000")), + ("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS", Some("70000")), + ]; + + let config = temp_env::with_vars(env_vars, BatchConfig::default); + + assert_eq!(config.scheduled_delay, Duration::from_millis(3000)); + assert_eq!(config.max_export_timeout, Duration::from_millis(70000)); + assert_eq!(config.max_queue_size, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT); + assert_eq!( + config.max_export_batch_size, + OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT + ); + } + + #[test] + fn test_batch_config_configurable_by_env_vars_precedence() { + let env_vars = vec![ + (OTEL_BLRP_SCHEDULE_DELAY, Some("2000")), + ("OTEL_BLRP_SCHEDULE_DELAY_MILLIS", Some("3000")), + (OTEL_BLRP_EXPORT_TIMEOUT, Some("60000")), + ("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS", Some("70000")), + ]; + + let config = temp_env::with_vars(env_vars, 
BatchConfig::default); + + assert_eq!(config.scheduled_delay, Duration::from_millis(2000)); + assert_eq!(config.max_export_timeout, Duration::from_millis(60000)); + assert_eq!(config.max_queue_size, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT); + assert_eq!( + config.max_export_batch_size, + OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT + ); + } + + #[test] + fn test_batch_config_max_export_batch_size_validation() { + let env_vars = vec![ + (OTEL_BLRP_MAX_QUEUE_SIZE, Some("256")), + (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("1024")), + ]; + + let config = temp_env::with_vars(env_vars, BatchConfig::default); + + assert_eq!(config.max_queue_size, 256); + assert_eq!(config.max_export_batch_size, 256); + assert_eq!( + config.scheduled_delay, + Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT) + ); + assert_eq!( + config.max_export_timeout, + Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT) + ); + } + + #[test] + fn test_batch_config_with_fields() { + let batch = BatchConfig::default() + .with_max_export_batch_size(1) + .with_scheduled_delay(Duration::from_millis(2)) + .with_max_export_timeout(Duration::from_millis(3)) + .with_max_queue_size(4); + + assert_eq!(batch.max_export_batch_size, 1); + assert_eq!(batch.scheduled_delay, Duration::from_millis(2)); + assert_eq!(batch.max_export_timeout, Duration::from_millis(3)); + assert_eq!(batch.max_queue_size, 4); + } + + #[test] + fn test_build_batch_log_processor_builder() { + let mut env_vars = vec![ + (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("500")), + (OTEL_BLRP_SCHEDULE_DELAY, Some("I am not number")), + (OTEL_BLRP_EXPORT_TIMEOUT, Some("2046")), + ]; + temp_env::with_vars(env_vars.clone(), || { + let builder = + BatchLogProcessor::builder(InMemoryLogsExporter::default(), runtime::Tokio); + + assert_eq!(builder.config.max_export_batch_size, 500); + assert_eq!( + builder.config.scheduled_delay, + Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT) + ); + assert_eq!( + builder.config.max_queue_size, + 
OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT + ); + assert_eq!( + builder.config.max_export_timeout, + Duration::from_millis(2046) + ); + }); + + env_vars.push((OTEL_BLRP_MAX_QUEUE_SIZE, Some("120"))); + + temp_env::with_vars(env_vars, || { + let builder = + BatchLogProcessor::builder(InMemoryLogsExporter::default(), runtime::Tokio); + assert_eq!(builder.config.max_export_batch_size, 120); + assert_eq!(builder.config.max_queue_size, 120); + }); + } + + #[test] + fn test_build_batch_log_processor_builder_with_custom_config() { + let expected = BatchConfig::default() + .with_max_export_batch_size(1) + .with_scheduled_delay(Duration::from_millis(2)) + .with_max_export_timeout(Duration::from_millis(3)) + .with_max_queue_size(4); + + let builder = BatchLogProcessor::builder(InMemoryLogsExporter::default(), runtime::Tokio) + .with_batch_config(expected); + + let actual = &builder.config; + assert_eq!(actual.max_export_batch_size, 1); + assert_eq!(actual.scheduled_delay, Duration::from_millis(2)); + assert_eq!(actual.max_export_timeout, Duration::from_millis(3)); + assert_eq!(actual.max_queue_size, 4); + } +} From 5b456ddfde4442a25d09fe78a4a7698581abca38 Mon Sep 17 00:00:00 2001 From: Zhongyang Wu Date: Mon, 22 Jan 2024 11:53:16 -0800 Subject: [PATCH 04/13] fix(proto): add `serde(default)` to `Events` and `Status` (#1485) This should allow all fields in event to be optional, which is needed to decode JSON strings ## Changes - add `serde(default)` to `Events` for tonic generated types - add `serde(default)` to `Status` for tonic generated types ## Merge requirement checklist * [x] [CONTRIBUTING](https://github.com/open-telemetry/opentelemetry-rust/blob/main/CONTRIBUTING.md) guidelines followed * [x] Unit tests added/updated (if applicable) * [ ] Appropriate `CHANGELOG.md` files updated for non-trivial, user-facing changes * [ ] Changes in public API reviewed (if applicable) --- .../proto/tonic/opentelemetry.proto.trace.v1.rs | 1 + opentelemetry-proto/tests/grpc_build.rs | 2 ++ 
opentelemetry-proto/tests/json_deserialize.rs | 15 +++++++++++++++ 3 files changed, 18 insertions(+) diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs index c119fc031c5..80e2067e9d2 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs @@ -198,6 +198,7 @@ pub mod span { #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] + #[cfg_attr(feature = "with-serde", serde(default))] #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Event { diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index e1ba5fcb0c1..dcc9b5062b5 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -82,6 +82,8 @@ fn build_tonic() { "trace.v1.ResourceSpans", "common.v1.InstrumentationScope", "resource.v1.Resource", + "trace.v1.Span.Event", + "trace.v1.Span.Status", ] { builder = builder.type_attribute( path, diff --git a/opentelemetry-proto/tests/json_deserialize.rs b/opentelemetry-proto/tests/json_deserialize.rs index 8a463fb4291..e8f757c70f1 100644 --- a/opentelemetry-proto/tests/json_deserialize.rs +++ b/opentelemetry-proto/tests/json_deserialize.rs @@ -3,6 +3,7 @@ mod json_deserialize { use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; use opentelemetry_proto::tonic::common::v1::any_value::Value; use opentelemetry_proto::tonic::common::v1::KeyValue; + use opentelemetry_proto::tonic::trace::v1::span::Event; // copied from example json file // see https://github.com/open-telemetry/opentelemetry-proto/blob/v1.0.0/examples/trace.json @@ -69,6 +70,13 @@ mod 
json_deserialize { } "#; + const EVENT_JSON: &str = r#" + { + "name": "my_event", + "time_unix_nano": 1234567890 + } + "#; + #[test] fn test_deserialize_traces() { let request: ExportTraceServiceRequest = serde_json::from_str(TRACES_JSON).unwrap(); @@ -139,4 +147,11 @@ mod json_deserialize { Value::StringValue("my.service".to_string()) ); } + + #[test] + fn test_event() { + let event_json: Event = serde_json::from_str(EVENT_JSON).unwrap(); + assert_eq!(event_json.name, "my_event".to_string()); + assert_eq!(event_json.attributes.len(), 0); + } } From 16fd1abb879c48fdb9c87e65e9aaabbe1afdf3bc Mon Sep 17 00:00:00 2001 From: Matthew Shapiro Date: Wed, 24 Jan 2024 15:10:30 -0500 Subject: [PATCH 05/13] Allow precreation of AttributeSets for metrics (#1421) --- examples/metrics-basic/src/main.rs | 16 +- .../examples/basic-otlp-http/src/main.rs | 9 +- .../examples/basic-otlp/src/main.rs | 11 +- opentelemetry-prometheus/examples/hyper.rs | 10 +- opentelemetry-prometheus/src/lib.rs | 9 +- .../tests/integration_test.rs | 131 +++--- opentelemetry-sdk/benches/attribute_set.rs | 6 +- opentelemetry-sdk/benches/metric.rs | 21 +- opentelemetry-sdk/benches/metric_counter.rs | 14 + opentelemetry-sdk/src/attributes/mod.rs | 3 - opentelemetry-sdk/src/attributes/set.rs | 181 -------- opentelemetry-sdk/src/lib.rs | 10 +- opentelemetry-sdk/src/metrics/data/mod.rs | 3 +- opentelemetry-sdk/src/metrics/instrument.rs | 23 +- .../src/metrics/internal/aggregate.rs | 49 +-- .../metrics/internal/exponential_histogram.rs | 11 +- .../src/metrics/internal/histogram.rs | 4 +- .../src/metrics/internal/last_value.rs | 4 +- opentelemetry-sdk/src/metrics/internal/sum.rs | 3 +- opentelemetry-sdk/src/metrics/meter.rs | 8 +- opentelemetry-sdk/src/metrics/mod.rs | 21 +- opentelemetry-sdk/src/resource/mod.rs | 31 ++ .../src/testing/metrics/in_memory_exporter.rs | 4 +- opentelemetry-stdout/src/common.rs | 4 +- opentelemetry-stdout/src/logs/transform.rs | 4 +- opentelemetry-stdout/src/trace/transform.rs | 4 +- 
opentelemetry/CHANGELOG.md | 10 + opentelemetry/Cargo.toml | 1 + opentelemetry/src/attributes/mod.rs | 5 + opentelemetry/src/attributes/set.rs | 406 ++++++++++++++++++ opentelemetry/src/global/mod.rs | 10 +- opentelemetry/src/lib.rs | 10 +- .../src/metrics/instruments/counter.rs | 16 +- .../src/metrics/instruments/gauge.rs | 12 +- .../src/metrics/instruments/histogram.rs | 12 +- opentelemetry/src/metrics/instruments/mod.rs | 4 +- .../metrics/instruments/up_down_counter.rs | 16 +- opentelemetry/src/metrics/meter.rs | 133 ++---- opentelemetry/src/metrics/noop.rs | 11 +- stress/Cargo.toml | 5 + stress/src/metrics_cached_attrs.rs | 53 +++ 41 files changed, 795 insertions(+), 503 deletions(-) delete mode 100644 opentelemetry-sdk/src/attributes/mod.rs delete mode 100644 opentelemetry-sdk/src/attributes/set.rs create mode 100644 opentelemetry/src/attributes/mod.rs create mode 100644 opentelemetry/src/attributes/set.rs create mode 100644 stress/src/metrics_cached_attrs.rs diff --git a/examples/metrics-basic/src/main.rs b/examples/metrics-basic/src/main.rs index 78dda47cad9..a9232342737 100644 --- a/examples/metrics-basic/src/main.rs +++ b/examples/metrics-basic/src/main.rs @@ -1,4 +1,5 @@ use opentelemetry::metrics::Unit; +use opentelemetry::AttributeSet; use opentelemetry::{metrics::MeterProvider as _, KeyValue}; use opentelemetry_sdk::metrics::{PeriodicReader, SdkMeterProvider}; use opentelemetry_sdk::{runtime, Resource}; @@ -52,11 +53,10 @@ async fn main() -> Result<(), Box> { observer.observe_u64( &observable_counter, 100, - [ + AttributeSet::from(&[ KeyValue::new("mykey1", "myvalue1"), KeyValue::new("mykey2", "myvalue2"), - ] - .as_ref(), + ]), ) })?; @@ -84,11 +84,10 @@ async fn main() -> Result<(), Box> { observer.observe_i64( &observable_up_down_counter, 100, - [ + AttributeSet::from(&[ KeyValue::new("mykey1", "myvalue1"), KeyValue::new("mykey2", "myvalue2"), - ] - .as_ref(), + ]), ) })?; @@ -142,11 +141,10 @@ async fn main() -> Result<(), Box> { 
observer.observe_f64( &observable_gauge, 1.0, - [ + AttributeSet::from(&[ KeyValue::new("mykey1", "myvalue1"), KeyValue::new("mykey2", "myvalue2"), - ] - .as_ref(), + ]), ) })?; diff --git a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs index 0f320dcb8d3..aa028b920b6 100644 --- a/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp-http/src/main.rs @@ -11,6 +11,7 @@ use opentelemetry_sdk::metrics as sdkmetrics; use opentelemetry_sdk::resource; use opentelemetry_sdk::trace as sdktrace; +use opentelemetry::AttributeSet; use std::error::Error; use tracing::info; use tracing_subscriber::prelude::*; @@ -62,13 +63,13 @@ fn init_metrics() -> metrics::Result { const LEMONS_KEY: Key = Key::from_static_str("ex.com/lemons"); const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another"); -static COMMON_ATTRIBUTES: Lazy<[KeyValue; 4]> = Lazy::new(|| { - [ +static COMMON_ATTRIBUTES: Lazy = Lazy::new(|| { + AttributeSet::from(&[ LEMONS_KEY.i64(10), KeyValue::new("A", "1"), KeyValue::new("B", "2"), KeyValue::new("C", "3"), - ] + ]) }); #[tokio::main] @@ -104,7 +105,7 @@ async fn main() -> Result<(), Box> { info!(target: "my-target", "hello from {}. 
My price is {}", "apple", 1.99); let histogram = meter.f64_histogram("ex.com.two").init(); - histogram.record(5.5, COMMON_ATTRIBUTES.as_ref()); + histogram.record(5.5, COMMON_ATTRIBUTES.clone()); global::shutdown_tracer_provider(); global::shutdown_logger_provider(); diff --git a/opentelemetry-otlp/examples/basic-otlp/src/main.rs b/opentelemetry-otlp/examples/basic-otlp/src/main.rs index 1bf5f7e4216..1c252906f9e 100644 --- a/opentelemetry-otlp/examples/basic-otlp/src/main.rs +++ b/opentelemetry-otlp/examples/basic-otlp/src/main.rs @@ -4,6 +4,7 @@ use opentelemetry::global; use opentelemetry::global::{logger_provider, shutdown_logger_provider, shutdown_tracer_provider}; use opentelemetry::logs::LogError; use opentelemetry::trace::TraceError; +use opentelemetry::AttributeSet; use opentelemetry::{ metrics, trace::{TraceContextExt, Tracer}, @@ -72,13 +73,13 @@ fn init_logs() -> Result { const LEMONS_KEY: Key = Key::from_static_str("lemons"); const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another"); -static COMMON_ATTRIBUTES: Lazy<[KeyValue; 4]> = Lazy::new(|| { - [ +static COMMON_ATTRIBUTES: Lazy = Lazy::new(|| { + AttributeSet::from(&[ LEMONS_KEY.i64(10), KeyValue::new("A", "1"), KeyValue::new("B", "2"), KeyValue::new("C", "3"), - ] + ]) }); #[tokio::main] @@ -109,11 +110,11 @@ async fn main() -> Result<(), Box> { .init(); meter.register_callback(&[gauge.as_any()], move |observer| { - observer.observe_f64(&gauge, 1.0, COMMON_ATTRIBUTES.as_ref()) + observer.observe_f64(&gauge, 1.0, COMMON_ATTRIBUTES.clone()) })?; let histogram = meter.f64_histogram("ex.com.two").init(); - histogram.record(5.5, COMMON_ATTRIBUTES.as_ref()); + histogram.record(5.5, COMMON_ATTRIBUTES.clone()); tracer.in_span("operation", |cx| { let span = cx.span(); diff --git a/opentelemetry-prometheus/examples/hyper.rs b/opentelemetry-prometheus/examples/hyper.rs index 943ba617b64..a97bb4320ee 100644 --- a/opentelemetry-prometheus/examples/hyper.rs +++ b/opentelemetry-prometheus/examples/hyper.rs 
@@ -4,6 +4,7 @@ use hyper::{ Body, Method, Request, Response, Server, }; use once_cell::sync::Lazy; +use opentelemetry::AttributeSet; use opentelemetry::{ metrics::{Counter, Histogram, MeterProvider as _, Unit}, KeyValue, @@ -14,7 +15,8 @@ use std::convert::Infallible; use std::sync::Arc; use std::time::SystemTime; -static HANDLER_ALL: Lazy<[KeyValue; 1]> = Lazy::new(|| [KeyValue::new("handler", "all")]); +static HANDLER_ALL: Lazy = + Lazy::new(|| AttributeSet::from(&[KeyValue::new("handler", "all")])); async fn serve_req( req: Request, @@ -23,7 +25,7 @@ async fn serve_req( println!("Receiving request at path {}", req.uri()); let request_start = SystemTime::now(); - state.http_counter.add(1, HANDLER_ALL.as_ref()); + state.http_counter.add(1, HANDLER_ALL.clone()); let response = match (req.method(), req.uri().path()) { (&Method::GET, "/metrics") => { @@ -33,7 +35,7 @@ async fn serve_req( encoder.encode(&metric_families, &mut buffer).unwrap(); state .http_body_gauge - .record(buffer.len() as u64, HANDLER_ALL.as_ref()); + .record(buffer.len() as u64, HANDLER_ALL.clone()); Response::builder() .status(200) @@ -53,7 +55,7 @@ async fn serve_req( state.http_req_histogram.record( request_start.elapsed().map_or(0.0, |d| d.as_secs_f64()), - &[], + AttributeSet::default(), ); Ok(response) } diff --git a/opentelemetry-prometheus/src/lib.rs b/opentelemetry-prometheus/src/lib.rs index 6ff87d3ed05..a28061e5139 100644 --- a/opentelemetry-prometheus/src/lib.rs +++ b/opentelemetry-prometheus/src/lib.rs @@ -3,13 +3,14 @@ //! [Prometheus]: https://prometheus.io //! //! ``` -//! use opentelemetry::{metrics::MeterProvider, KeyValue}; +//! use opentelemetry::{AttributeSet, metrics::MeterProvider, KeyValue}; //! use opentelemetry_sdk::metrics::SdkMeterProvider; //! use prometheus::{Encoder, TextEncoder}; //! //! # fn main() -> Result<(), Box> { //! //! // create a new prometheus registry +//! use opentelemetry::AttributeSet; //! let registry = prometheus::Registry::new(); //! //! 
// configure OpenTelemetry to use this registry @@ -31,8 +32,10 @@ //! .with_description("Records values") //! .init(); //! -//! counter.add(100, &[KeyValue::new("key", "value")]); -//! histogram.record(100, &[KeyValue::new("key", "value")]); +//! let attributes = AttributeSet::from(&[KeyValue::new("key", "value")]); +//! +//! counter.add(100, attributes.clone()); +//! histogram.record(100, attributes); //! //! // Encode data as text or protobuf //! let encoder = TextEncoder::new(); diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs index 0786b1a4c09..b81e4ec2413 100644 --- a/opentelemetry-prometheus/tests/integration_test.rs +++ b/opentelemetry-prometheus/tests/integration_test.rs @@ -3,6 +3,7 @@ use std::path::Path; use std::time::Duration; use opentelemetry::metrics::{Meter, MeterProvider as _, Unit}; +use opentelemetry::AttributeSet; use opentelemetry::Key; use opentelemetry::KeyValue; use opentelemetry_prometheus::ExporterBuilder; @@ -44,27 +45,29 @@ fn prometheus_exporter_integration() { name: "counter", expected_file: "counter.txt", record_metrics: Box::new(|meter| { - let attrs = vec![ + let attrs = AttributeSet::from(&[ Key::new("A").string("B"), Key::new("C").string("D"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; + ]); + let counter = meter .f64_counter("foo") .with_description("a simple counter") .with_unit(Unit::new("ms")) .init(); - counter.add(5.0, &attrs); - counter.add(10.3, &attrs); - counter.add(9.0, &attrs); - let attrs2 = vec![ + counter.add(5.0, attrs.clone()); + counter.add(10.3, attrs.clone()); + counter.add(9.0, attrs); + + let attrs2 = AttributeSet::from(&[ Key::new("A").string("D"), Key::new("C").string("B"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; - counter.add(5.0, &attrs2); + ]); + counter.add(5.0, attrs2); }), ..Default::default() }, @@ -73,27 +76,28 @@ fn prometheus_exporter_integration() { expected_file: "counter_disabled_suffix.txt", builder: 
ExporterBuilder::default().without_counter_suffixes(), record_metrics: Box::new(|meter| { - let attrs = vec![ + let attrs = AttributeSet::from(&[ Key::new("A").string("B"), Key::new("C").string("D"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; + ]); + let counter = meter .f64_counter("foo") .with_description("a simple counter without a total suffix") .with_unit(Unit::new("ms")) .init(); - counter.add(5.0, &attrs); - counter.add(10.3, &attrs); - counter.add(9.0, &attrs); - let attrs2 = vec![ + counter.add(5.0, attrs.clone()); + counter.add(10.3, attrs.clone()); + counter.add(9.0, attrs); + let attrs2 = AttributeSet::from(&[ Key::new("A").string("D"), Key::new("C").string("B"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; - counter.add(5.0, &attrs2); + ]); + counter.add(5.0, attrs2); }), ..Default::default() }, @@ -101,14 +105,15 @@ fn prometheus_exporter_integration() { name: "gauge", expected_file: "gauge.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = + AttributeSet::from(&[Key::new("A").string("B"), Key::new("C").string("D")]); let gauge = meter .f64_up_down_counter("bar") .with_description("a fun little gauge") .with_unit(Unit::new("1")) .init(); - gauge.add(1.0, &attrs); - gauge.add(-0.25, &attrs); + gauge.add(1.0, attrs.clone()); + gauge.add(-0.25, attrs); }), ..Default::default() }, @@ -116,16 +121,17 @@ fn prometheus_exporter_integration() { name: "histogram", expected_file: "histogram.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = + AttributeSet::from(&[Key::new("A").string("B"), Key::new("C").string("D")]); let histogram = meter .f64_histogram("histogram_baz") .with_description("a very nice histogram") .with_unit(Unit::new("By")) .init(); - histogram.record(23.0, &attrs); - histogram.record(7.0, &attrs); - histogram.record(101.0, &attrs); - histogram.record(105.0, &attrs); + 
histogram.record(23.0, attrs.clone()); + histogram.record(7.0, attrs.clone()); + histogram.record(101.0, attrs.clone()); + histogram.record(105.0, attrs); }), ..Default::default() }, @@ -134,23 +140,23 @@ fn prometheus_exporter_integration() { expected_file: "sanitized_labels.txt", builder: ExporterBuilder::default().without_units(), record_metrics: Box::new(|meter| { - let attrs = vec![ + let attrs = AttributeSet::from(&[ // exact match, value should be overwritten Key::new("A.B").string("X"), Key::new("A.B").string("Q"), // unintended match due to sanitization, values should be concatenated Key::new("C.D").string("Y"), Key::new("C/D").string("Z"), - ]; + ]); let counter = meter .f64_counter("foo") .with_description("a sanitary counter") // This unit is not added to .with_unit(Unit::new("By")) .init(); - counter.add(5.0, &attrs); - counter.add(10.3, &attrs); - counter.add(9.0, &attrs); + counter.add(5.0, attrs.clone()); + counter.add(10.3, attrs.clone()); + counter.add(9.0, attrs); }), ..Default::default() }, @@ -158,33 +164,34 @@ fn prometheus_exporter_integration() { name: "invalid instruments are renamed", expected_file: "sanitized_names.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = + AttributeSet::from(&[Key::new("A").string("B"), Key::new("C").string("D")]); // Valid. let mut gauge = meter .f64_up_down_counter("bar") .with_description("a fun little gauge") .init(); - gauge.add(100., &attrs); - gauge.add(-25.0, &attrs); + gauge.add(100., attrs.clone()); + gauge.add(-25.0, attrs.clone()); // Invalid, will be renamed. 
gauge = meter .f64_up_down_counter("invalid.gauge.name") .with_description("a gauge with an invalid name") .init(); - gauge.add(100.0, &attrs); + gauge.add(100.0, attrs.clone()); let counter = meter .f64_counter("0invalid.counter.name") .with_description("a counter with an invalid name") .init(); - counter.add(100.0, &attrs); + counter.add(100.0, attrs.clone()); let histogram = meter .f64_histogram("invalid.hist.name") .with_description("a histogram with an invalid name") .init(); - histogram.record(23.0, &attrs); + histogram.record(23.0, attrs); }), ..Default::default() }, @@ -193,19 +200,19 @@ fn prometheus_exporter_integration() { empty_resource: true, expected_file: "empty_resource.txt", record_metrics: Box::new(|meter| { - let attrs = vec![ + let attrs = AttributeSet::from(&[ Key::new("A").string("B"), Key::new("C").string("D"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; + ]); let counter = meter .f64_counter("foo") .with_description("a simple counter") .init(); - counter.add(5.0, &attrs); - counter.add(10.3, &attrs); - counter.add(9.0, &attrs); + counter.add(5.0, attrs.clone()); + counter.add(10.3, attrs.clone()); + counter.add(9.0, attrs.clone()); }), ..Default::default() }, @@ -214,19 +221,19 @@ fn prometheus_exporter_integration() { custom_resource_attrs: vec![Key::new("A").string("B"), Key::new("C").string("D")], expected_file: "custom_resource.txt", record_metrics: Box::new(|meter| { - let attrs = vec![ + let attrs = AttributeSet::from(&[ Key::new("A").string("B"), Key::new("C").string("D"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; + ]); let counter = meter .f64_counter("foo") .with_description("a simple counter") .init(); - counter.add(5., &attrs); - counter.add(10.3, &attrs); - counter.add(9.0, &attrs); + counter.add(5., attrs.clone()); + counter.add(10.3, attrs.clone()); + counter.add(9.0, attrs); }), ..Default::default() }, @@ -235,19 +242,19 @@ fn prometheus_exporter_integration() { builder: 
ExporterBuilder::default().without_target_info(), expected_file: "without_target_info.txt", record_metrics: Box::new(|meter| { - let attrs = vec![ + let attrs = AttributeSet::from(&[ Key::new("A").string("B"), Key::new("C").string("D"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; + ]); let counter = meter .f64_counter("foo") .with_description("a simple counter") .init(); - counter.add(5.0, &attrs); - counter.add(10.3, &attrs); - counter.add(9.0, &attrs); + counter.add(5.0, attrs.clone()); + counter.add(10.3, attrs.clone()); + counter.add(9.0, attrs); }), ..Default::default() }, @@ -256,14 +263,15 @@ fn prometheus_exporter_integration() { builder: ExporterBuilder::default().without_scope_info(), expected_file: "without_scope_info.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = + AttributeSet::from(&[Key::new("A").string("B"), Key::new("C").string("D")]); let gauge = meter .i64_up_down_counter("bar") .with_description("a fun little gauge") .with_unit(Unit::new("1")) .init(); - gauge.add(2, &attrs); - gauge.add(-1, &attrs); + gauge.add(2, attrs.clone()); + gauge.add(-1, attrs); }), ..Default::default() }, @@ -274,14 +282,15 @@ fn prometheus_exporter_integration() { .without_target_info(), expected_file: "without_scope_and_target_info.txt", record_metrics: Box::new(|meter| { - let attrs = vec![Key::new("A").string("B"), Key::new("C").string("D")]; + let attrs = + AttributeSet::from(&[Key::new("A").string("B"), Key::new("C").string("D")]); let counter = meter .u64_counter("bar") .with_description("a fun little counter") .with_unit(Unit::new("By")) .init(); - counter.add(2, &attrs); - counter.add(1, &attrs); + counter.add(2, attrs.clone()); + counter.add(1, attrs); }), ..Default::default() }, @@ -290,20 +299,20 @@ fn prometheus_exporter_integration() { builder: ExporterBuilder::default().with_namespace("test"), expected_file: "with_namespace.txt", record_metrics: Box::new(|meter| { 
- let attrs = vec![ + let attrs = AttributeSet::from(&[ Key::new("A").string("B"), Key::new("C").string("D"), Key::new("E").bool(true), Key::new("F").i64(42), - ]; + ]); let counter = meter .f64_counter("foo") .with_description("a simple counter") .init(); - counter.add(5.0, &attrs); - counter.add(10.3, &attrs); - counter.add(9.0, &attrs); + counter.add(5.0, attrs.clone()); + counter.add(10.3, attrs.clone()); + counter.add(9.0, attrs); }), ..Default::default() }, diff --git a/opentelemetry-sdk/benches/attribute_set.rs b/opentelemetry-sdk/benches/attribute_set.rs index 6f3360b9cf2..1fb6dccc1ee 100644 --- a/opentelemetry-sdk/benches/attribute_set.rs +++ b/opentelemetry-sdk/benches/attribute_set.rs @@ -1,6 +1,6 @@ use criterion::{criterion_group, criterion_main, Criterion}; +use opentelemetry::AttributeSet; use opentelemetry::KeyValue; -use opentelemetry_sdk::AttributeSet; // Run this benchmark with: // cargo bench --bench metric_counter @@ -12,7 +12,7 @@ fn criterion_benchmark(c: &mut Criterion) { fn attribute_set(c: &mut Criterion) { c.bench_function("AttributeSet_without_duplicates", |b| { b.iter(|| { - let attributes: &[KeyValue] = &[ + let attributes = &[ KeyValue::new("attribute1", "value1"), KeyValue::new("attribute2", "value2"), KeyValue::new("attribute3", "value3"), @@ -24,7 +24,7 @@ fn attribute_set(c: &mut Criterion) { c.bench_function("AttributeSet_with_duplicates", |b| { b.iter(|| { - let attributes: &[KeyValue] = &[ + let attributes = &[ KeyValue::new("attribute1", "value1"), KeyValue::new("attribute3", "value3"), KeyValue::new("attribute3", "value3"), diff --git a/opentelemetry-sdk/benches/metric.rs b/opentelemetry-sdk/benches/metric.rs index d018634e04d..3c668d58a85 100644 --- a/opentelemetry-sdk/benches/metric.rs +++ b/opentelemetry-sdk/benches/metric.rs @@ -2,6 +2,7 @@ use rand::Rng; use std::sync::{Arc, Weak}; use criterion::{criterion_group, criterion_main, Bencher, Criterion}; +use opentelemetry::AttributeSet; use opentelemetry::{ 
metrics::{Counter, Histogram, MeterProvider as _, Result}, Key, KeyValue, @@ -166,8 +167,12 @@ fn counters(c: &mut Criterion) { let (_, cntr3) = bench_counter(None, "cumulative"); let mut group = c.benchmark_group("Counter"); - group.bench_function("AddNoAttrs", |b| b.iter(|| cntr.add(1, &[]))); - group.bench_function("AddNoAttrsDelta", |b| b.iter(|| cntr2.add(1, &[]))); + group.bench_function("AddNoAttrs", |b| { + b.iter(|| cntr.add(1, AttributeSet::default())) + }); + group.bench_function("AddNoAttrsDelta", |b| { + b.iter(|| cntr2.add(1, AttributeSet::default())) + }); group.bench_function("AddOneAttr", |b| { b.iter(|| cntr.add(1, &[KeyValue::new("K", "V")])) @@ -274,14 +279,16 @@ fn counters(c: &mut Criterion) { } group.bench_function("AddOneTillMaxAttr", |b| { - b.iter(|| cntr3.add(1, &max_attributes)) + b.iter(|| cntr3.add(1, max_attributes.as_slice())) }); for i in MAX_DATA_POINTS..MAX_DATA_POINTS * 2 { max_attributes.push(KeyValue::new(i.to_string(), i)) } - group.bench_function("AddMaxAttr", |b| b.iter(|| cntr3.add(1, &max_attributes))); + group.bench_function("AddMaxAttr", |b| { + b.iter(|| cntr3.add(1, max_attributes.as_slice())) + }); group.bench_function("AddInvalidAttr", |b| { b.iter(|| cntr.add(1, &[KeyValue::new("", "V"), KeyValue::new("K", "V")])) @@ -393,10 +400,12 @@ fn histograms(c: &mut Criterion) { format!("V,{},{},{}", bound_size, attr_size, i), )) } + + let attributes = AttributeSet::from(&attributes); let value: u64 = rng.gen_range(0..MAX_BOUND).try_into().unwrap(); group.bench_function( format!("Record{}Attrs{}bounds", attr_size, bound_size), - |b| b.iter(|| hist.record(value, &attributes)), + |b| b.iter(|| hist.record(value, attributes.clone())), ); } } @@ -415,7 +424,7 @@ fn benchmark_collect_histogram(b: &mut Bencher, n: usize) { for i in 0..n { let h = mtr.u64_histogram(format!("fake_data_{i}")).init(); - h.record(1, &[]); + h.record(1, AttributeSet::default()); } let mut rm = ResourceMetrics { diff --git 
a/opentelemetry-sdk/benches/metric_counter.rs b/opentelemetry-sdk/benches/metric_counter.rs index 4bb4c84e6a4..9e12243fdfa 100644 --- a/opentelemetry-sdk/benches/metric_counter.rs +++ b/opentelemetry-sdk/benches/metric_counter.rs @@ -1,4 +1,5 @@ use criterion::{criterion_group, criterion_main, Criterion}; +use opentelemetry::AttributeSet; use opentelemetry::{ metrics::{Counter, MeterProvider as _}, KeyValue, @@ -67,6 +68,19 @@ fn counter_add(c: &mut Criterion) { ); }); }); + + c.bench_function("Counter_Add_Cached_Attributes", |b| { + let attributes = AttributeSet::from(&[ + KeyValue::new("attribute2", attribute_values[0]), + KeyValue::new("attribute3", attribute_values[1]), + KeyValue::new("attribute1", attribute_values[2]), + KeyValue::new("attribute4", attribute_values[3]), + ]); + + b.iter(|| { + counter.add(1, attributes.clone()); + }); + }); } criterion_group!(benches, criterion_benchmark); diff --git a/opentelemetry-sdk/src/attributes/mod.rs b/opentelemetry-sdk/src/attributes/mod.rs deleted file mode 100644 index 1182e996fb2..00000000000 --- a/opentelemetry-sdk/src/attributes/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod set; - -pub use set::AttributeSet; diff --git a/opentelemetry-sdk/src/attributes/set.rs b/opentelemetry-sdk/src/attributes/set.rs deleted file mode 100644 index ae5d5a4a736..00000000000 --- a/opentelemetry-sdk/src/attributes/set.rs +++ /dev/null @@ -1,181 +0,0 @@ -use std::collections::hash_map::DefaultHasher; -use std::collections::HashSet; -use std::{ - cmp::Ordering, - hash::{Hash, Hasher}, -}; - -use opentelemetry::{Array, Key, KeyValue, Value}; -use ordered_float::OrderedFloat; - -use crate::Resource; - -#[derive(Clone, Debug)] -struct HashKeyValue(KeyValue); - -impl Hash for HashKeyValue { - fn hash(&self, state: &mut H) { - self.0.key.hash(state); - match &self.0.value { - Value::F64(f) => OrderedFloat(*f).hash(state), - Value::Array(a) => match a { - Array::Bool(b) => b.hash(state), - Array::I64(i) => i.hash(state), - Array::F64(f) => 
f.iter().for_each(|f| OrderedFloat(*f).hash(state)), - Array::String(s) => s.hash(state), - }, - Value::Bool(b) => b.hash(state), - Value::I64(i) => i.hash(state), - Value::String(s) => s.hash(state), - }; - } -} - -impl PartialOrd for HashKeyValue { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for HashKeyValue { - fn cmp(&self, other: &Self) -> Ordering { - match self.0.key.cmp(&other.0.key) { - Ordering::Equal => match type_order(&self.0.value).cmp(&type_order(&other.0.value)) { - Ordering::Equal => match (&self.0.value, &other.0.value) { - (Value::F64(f), Value::F64(of)) => OrderedFloat(*f).cmp(&OrderedFloat(*of)), - (Value::Array(Array::Bool(b)), Value::Array(Array::Bool(ob))) => b.cmp(ob), - (Value::Array(Array::I64(i)), Value::Array(Array::I64(oi))) => i.cmp(oi), - (Value::Array(Array::String(s)), Value::Array(Array::String(os))) => s.cmp(os), - (Value::Array(Array::F64(f)), Value::Array(Array::F64(of))) => { - match f.len().cmp(&of.len()) { - Ordering::Equal => f - .iter() - .map(|x| OrderedFloat(*x)) - .collect::>() - .cmp(&of.iter().map(|x| OrderedFloat(*x)).collect()), - other => other, - } - } - (Value::Bool(b), Value::Bool(ob)) => b.cmp(ob), - (Value::I64(i), Value::I64(oi)) => i.cmp(oi), - (Value::String(s), Value::String(os)) => s.cmp(os), - _ => Ordering::Equal, - }, - other => other, // 2nd order by value types - }, - other => other, // 1st order by key - } - } -} - -fn type_order(v: &Value) -> u8 { - match v { - Value::Bool(_) => 1, - Value::I64(_) => 2, - Value::F64(_) => 3, - Value::String(_) => 4, - Value::Array(a) => match a { - Array::Bool(_) => 5, - Array::I64(_) => 6, - Array::F64(_) => 7, - Array::String(_) => 8, - }, - } -} - -impl PartialEq for HashKeyValue { - fn eq(&self, other: &Self) -> bool { - self.0.key == other.0.key - && match (&self.0.value, &other.0.value) { - (Value::F64(f), Value::F64(of)) => OrderedFloat(*f).eq(&OrderedFloat(*of)), - (Value::Array(Array::F64(f)), 
Value::Array(Array::F64(of))) => { - f.len() == of.len() - && f.iter() - .zip(of.iter()) - .all(|(f, of)| OrderedFloat(*f).eq(&OrderedFloat(*of))) - } - (non_float, other_non_float) => non_float.eq(other_non_float), - } - } -} - -impl Eq for HashKeyValue {} - -/// A unique set of attributes that can be used as instrument identifiers. -/// -/// This must implement [Hash], [PartialEq], and [Eq] so it may be used as -/// HashMap keys and other de-duplication methods. -#[derive(Clone, Default, Debug, PartialEq, Eq)] -pub struct AttributeSet(Vec, u64); - -impl From<&[KeyValue]> for AttributeSet { - fn from(values: &[KeyValue]) -> Self { - let mut seen_keys = HashSet::with_capacity(values.len()); - let vec = values - .iter() - .rev() - .filter_map(|kv| { - if seen_keys.insert(kv.key.clone()) { - Some(HashKeyValue(kv.clone())) - } else { - None - } - }) - .collect::>(); - - AttributeSet::new(vec) - } -} - -impl From<&Resource> for AttributeSet { - fn from(values: &Resource) -> Self { - let vec = values - .iter() - .map(|(key, value)| HashKeyValue(KeyValue::new(key.clone(), value.clone()))) - .collect::>(); - - AttributeSet::new(vec) - } -} - -impl AttributeSet { - fn new(mut values: Vec) -> Self { - values.sort_unstable(); - let mut hasher = DefaultHasher::new(); - values.iter().fold(&mut hasher, |mut hasher, item| { - item.hash(&mut hasher); - hasher - }); - - AttributeSet(values, hasher.finish()) - } - - /// Returns the number of elements in the set. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Returns `true` if the set contains no elements. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Retains only the attributes specified by the predicate. 
- pub fn retain(&mut self, f: F) - where - F: Fn(&KeyValue) -> bool, - { - self.0.retain(|kv| f(&kv.0)) - } - - /// Iterate over key value pairs in the set - pub fn iter(&self) -> impl Iterator { - self.0.iter().map(|kv| (&kv.0.key, &kv.0.value)) - } -} - -impl Hash for AttributeSet { - fn hash(&self, state: &mut H) { - state.write_u64(self.1) - } -} diff --git a/opentelemetry-sdk/src/lib.rs b/opentelemetry-sdk/src/lib.rs index b021ea5bcd2..aefcd6f93c3 100644 --- a/opentelemetry-sdk/src/lib.rs +++ b/opentelemetry-sdk/src/lib.rs @@ -52,9 +52,10 @@ //! ### Creating instruments and recording measurements //! //! ``` -//! # #[cfg(feature = "metrics")] +//! # use opentelemetry::AttributeSet; +//! #[cfg(feature = "metrics")] //! # { -//! use opentelemetry::{global, KeyValue}; +//! use opentelemetry::{AttributeSet, global, KeyValue}; //! //! // get a meter from a provider //! let meter = global::meter("my_service"); @@ -63,7 +64,8 @@ //! let counter = meter.u64_counter("my_counter").init(); //! //! // record a measurement -//! counter.add(1, &[KeyValue::new("http.client_ip", "83.164.160.102")]); +//! let attributes = AttributeSet::from(&[KeyValue::new("http.client_ip", "83.164.160.102")]); +//! counter.add(1, attributes); //! # } //! ``` //! 
@@ -111,7 +113,6 @@ )] #![cfg_attr(test, deny(warnings))] -pub(crate) mod attributes; pub mod export; mod instrumentation; #[cfg(feature = "logs")] @@ -135,7 +136,6 @@ pub mod trace; #[doc(hidden)] pub mod util; -pub use attributes::*; pub use instrumentation::{InstrumentationLibrary, Scope}; #[doc(inline)] pub use resource::Resource; diff --git a/opentelemetry-sdk/src/metrics/data/mod.rs b/opentelemetry-sdk/src/metrics/data/mod.rs index e827006c956..ec429ba1f82 100644 --- a/opentelemetry-sdk/src/metrics/data/mod.rs +++ b/opentelemetry-sdk/src/metrics/data/mod.rs @@ -4,7 +4,8 @@ use std::{any, borrow::Cow, fmt, time::SystemTime}; use opentelemetry::{metrics::Unit, KeyValue}; -use crate::{attributes::AttributeSet, instrumentation::Scope, Resource}; +use crate::{instrumentation::Scope, Resource}; +use opentelemetry::AttributeSet; pub use self::temporality::Temporality; diff --git a/opentelemetry-sdk/src/metrics/instrument.rs b/opentelemetry-sdk/src/metrics/instrument.rs index 3ecae355b5e..5b342fd3ffd 100644 --- a/opentelemetry-sdk/src/metrics/instrument.rs +++ b/opentelemetry-sdk/src/metrics/instrument.rs @@ -5,11 +5,10 @@ use opentelemetry::{ AsyncInstrument, MetricsError, Result, SyncCounter, SyncGauge, SyncHistogram, SyncUpDownCounter, Unit, }, - Key, KeyValue, + AttributeSet, Key, }; use crate::{ - attributes::AttributeSet, instrumentation::Scope, metrics::{aggregation::Aggregation, internal::Measure}, }; @@ -259,33 +258,33 @@ pub(crate) struct ResolvedMeasures { } impl SyncCounter for ResolvedMeasures { - fn add(&self, val: T, attrs: &[KeyValue]) { + fn add(&self, val: T, attrs: AttributeSet) { for measure in &self.measures { - measure.call(val, AttributeSet::from(attrs)) + measure.call(val, attrs.clone()) } } } impl SyncUpDownCounter for ResolvedMeasures { - fn add(&self, val: T, attrs: &[KeyValue]) { + fn add(&self, val: T, attrs: AttributeSet) { for measure in &self.measures { - measure.call(val, AttributeSet::from(attrs)) + measure.call(val, attrs.clone()) } 
} } impl SyncGauge for ResolvedMeasures { - fn record(&self, val: T, attrs: &[KeyValue]) { + fn record(&self, val: T, attrs: AttributeSet) { for measure in &self.measures { - measure.call(val, AttributeSet::from(attrs)) + measure.call(val, attrs.clone()) } } } impl SyncHistogram for ResolvedMeasures { - fn record(&self, val: T, attrs: &[KeyValue]) { + fn record(&self, val: T, attrs: AttributeSet) { for measure in &self.measures { - measure.call(val, AttributeSet::from(attrs)) + measure.call(val, attrs.clone()) } } } @@ -377,9 +376,9 @@ impl Observable { } impl AsyncInstrument for Observable { - fn observe(&self, measurement: T, attrs: &[KeyValue]) { + fn observe(&self, measurement: T, attrs: AttributeSet) { for measure in &self.measures { - measure.call(measurement, AttributeSet::from(attrs)) + measure.call(measurement, attrs.clone()) } } diff --git a/opentelemetry-sdk/src/metrics/internal/aggregate.rs b/opentelemetry-sdk/src/metrics/internal/aggregate.rs index 08d6feec04a..84868927014 100644 --- a/opentelemetry-sdk/src/metrics/internal/aggregate.rs +++ b/opentelemetry-sdk/src/metrics/internal/aggregate.rs @@ -1,12 +1,9 @@ use std::{marker, sync::Arc}; use once_cell::sync::Lazy; -use opentelemetry::KeyValue; +use opentelemetry::{AttributeSet, KeyValue}; -use crate::{ - metrics::data::{Aggregation, Gauge, Temporality}, - AttributeSet, -}; +use crate::metrics::data::{Aggregation, Gauge, Temporality}; use super::{ exponential_histogram::ExpoHistogram, @@ -17,10 +14,8 @@ use super::{ }; const STREAM_CARDINALITY_LIMIT: u32 = 2000; -pub(crate) static STREAM_OVERFLOW_ATTRIBUTE_SET: Lazy = Lazy::new(|| { - let key_values: [KeyValue; 1] = [KeyValue::new("otel.metric.overflow", "true")]; - AttributeSet::from(&key_values[..]) -}); +pub(crate) static STREAM_OVERFLOW_ATTRIBUTE_SET: Lazy = + Lazy::new(|| AttributeSet::from(&[KeyValue::new("otel.metric.overflow", "true")])); /// Checks whether aggregator has hit cardinality limit for metric streams pub(crate) fn 
is_under_cardinality_limit(size: usize) -> bool { @@ -96,7 +91,7 @@ impl> AggregateBuilder { let filter = self.filter.as_ref().map(Arc::clone); move |n, mut attrs: AttributeSet| { if let Some(filter) = &filter { - attrs.retain(filter.as_ref()); + attrs = attrs.clone_with(filter.as_ref()); } f.call(n, attrs) } @@ -226,7 +221,7 @@ mod tests { let (measure, agg) = AggregateBuilder::::new(None, None).last_value(); let mut a = Gauge { data_points: vec![DataPoint { - attributes: AttributeSet::from(&[KeyValue::new("a", 1)][..]), + attributes: AttributeSet::from(&[KeyValue::new("a", 1)]), start_time: Some(SystemTime::now()), time: Some(SystemTime::now()), value: 1u64, @@ -234,7 +229,7 @@ mod tests { }], }; let new_attributes = [KeyValue::new("b", 2)]; - measure.call(2, AttributeSet::from(&new_attributes[..])); + measure.call(2, AttributeSet::from(&new_attributes)); let (count, new_agg) = agg.call(Some(&mut a)); @@ -243,7 +238,7 @@ mod tests { assert_eq!(a.data_points.len(), 1); assert_eq!( a.data_points[0].attributes, - AttributeSet::from(&new_attributes[..]) + AttributeSet::from(&new_attributes) ); assert_eq!(a.data_points[0].value, 2); } @@ -256,14 +251,14 @@ mod tests { let mut a = Sum { data_points: vec![ DataPoint { - attributes: AttributeSet::from(&[KeyValue::new("a1", 1)][..]), + attributes: AttributeSet::from(&[KeyValue::new("a1", 1)]), start_time: Some(SystemTime::now()), time: Some(SystemTime::now()), value: 1u64, exemplars: vec![], }, DataPoint { - attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)]), start_time: Some(SystemTime::now()), time: Some(SystemTime::now()), value: 2u64, @@ -278,7 +273,7 @@ mod tests { is_monotonic: false, }; let new_attributes = [KeyValue::new("b", 2)]; - measure.call(3, AttributeSet::from(&new_attributes[..])); + measure.call(3, AttributeSet::from(&new_attributes)); let (count, new_agg) = agg.call(Some(&mut a)); @@ -289,7 +284,7 @@ mod tests { 
assert_eq!(a.data_points.len(), 1); assert_eq!( a.data_points[0].attributes, - AttributeSet::from(&new_attributes[..]) + AttributeSet::from(&new_attributes) ); assert_eq!(a.data_points[0].value, 3); } @@ -302,14 +297,14 @@ mod tests { let mut a = Sum { data_points: vec![ DataPoint { - attributes: AttributeSet::from(&[KeyValue::new("a1", 1)][..]), + attributes: AttributeSet::from(&[KeyValue::new("a1", 1)]), start_time: Some(SystemTime::now()), time: Some(SystemTime::now()), value: 1u64, exemplars: vec![], }, DataPoint { - attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)]), start_time: Some(SystemTime::now()), time: Some(SystemTime::now()), value: 2u64, @@ -324,7 +319,7 @@ mod tests { is_monotonic: false, }; let new_attributes = [KeyValue::new("b", 2)]; - measure.call(3, AttributeSet::from(&new_attributes[..])); + measure.call(3, AttributeSet::from(&new_attributes)); let (count, new_agg) = agg.call(Some(&mut a)); @@ -335,7 +330,7 @@ mod tests { assert_eq!(a.data_points.len(), 1); assert_eq!( a.data_points[0].attributes, - AttributeSet::from(&new_attributes[..]) + AttributeSet::from(&new_attributes) ); assert_eq!(a.data_points[0].value, 3); } @@ -348,7 +343,7 @@ mod tests { .explicit_bucket_histogram(vec![1.0], true, true); let mut a = Histogram { data_points: vec![HistogramDataPoint { - attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)]), start_time: SystemTime::now(), time: SystemTime::now(), count: 2, @@ -366,7 +361,7 @@ mod tests { }, }; let new_attributes = [KeyValue::new("b", 2)]; - measure.call(3, AttributeSet::from(&new_attributes[..])); + measure.call(3, AttributeSet::from(&new_attributes)); let (count, new_agg) = agg.call(Some(&mut a)); @@ -376,7 +371,7 @@ mod tests { assert_eq!(a.data_points.len(), 1); assert_eq!( a.data_points[0].attributes, - AttributeSet::from(&new_attributes[..]) + 
AttributeSet::from(&new_attributes) ); assert_eq!(a.data_points[0].count, 1); assert_eq!(a.data_points[0].bounds, vec![1.0]); @@ -394,7 +389,7 @@ mod tests { .exponential_bucket_histogram(4, 20, true, true); let mut a = ExponentialHistogram { data_points: vec![ExponentialHistogramDataPoint { - attributes: AttributeSet::from(&[KeyValue::new("a2", 2)][..]), + attributes: AttributeSet::from(&[KeyValue::new("a2", 2)]), start_time: SystemTime::now(), time: SystemTime::now(), count: 2, @@ -421,7 +416,7 @@ mod tests { }, }; let new_attributes = [KeyValue::new("b", 2)]; - measure.call(3, AttributeSet::from(&new_attributes[..])); + measure.call(3, AttributeSet::from(&new_attributes.clone())); let (count, new_agg) = agg.call(Some(&mut a)); @@ -431,7 +426,7 @@ mod tests { assert_eq!(a.data_points.len(), 1); assert_eq!( a.data_points[0].attributes, - AttributeSet::from(&new_attributes[..]) + AttributeSet::from(&new_attributes) ); assert_eq!(a.data_points[0].count, 1); assert_eq!(a.data_points[0].min, Some(3)); diff --git a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs index 189b61c553e..5411637e057 100644 --- a/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs +++ b/opentelemetry-sdk/src/metrics/internal/exponential_histogram.rs @@ -1,12 +1,9 @@ use std::{collections::HashMap, f64::consts::LOG2_E, sync::Mutex, time::SystemTime}; use once_cell::sync::Lazy; -use opentelemetry::metrics::MetricsError; +use opentelemetry::{metrics::MetricsError, AttributeSet}; -use crate::{ - metrics::data::{self, Aggregation, Temporality}, - AttributeSet, -}; +use crate::metrics::data::{self, Aggregation, Temporality}; use super::Number; @@ -627,7 +624,7 @@ mod tests { } fn run_min_max_sum_f64() { - let alice = AttributeSet::from(&[KeyValue::new("user", "alice")][..]); + let alice = AttributeSet::from(&[KeyValue::new("user", "alice")]); struct Expected { min: f64, max: f64, @@ -688,7 +685,7 @@ mod 
tests { } fn run_min_max_sum + From>() { - let alice = AttributeSet::from(&[KeyValue::new("user", "alice")][..]); + let alice = AttributeSet::from(&[KeyValue::new("user", "alice")]); struct Expected { min: T, max: T, diff --git a/opentelemetry-sdk/src/metrics/internal/histogram.rs b/opentelemetry-sdk/src/metrics/internal/histogram.rs index 45ac569e2b6..75acfe3f9ef 100644 --- a/opentelemetry-sdk/src/metrics/internal/histogram.rs +++ b/opentelemetry-sdk/src/metrics/internal/histogram.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, sync::Mutex, time::SystemTime}; +use crate::metrics::data::HistogramDataPoint; use crate::metrics::data::{self, Aggregation, Temporality}; -use crate::{attributes::AttributeSet, metrics::data::HistogramDataPoint}; -use opentelemetry::{global, metrics::MetricsError}; +use opentelemetry::{global, metrics::MetricsError, AttributeSet}; use super::{ aggregate::{is_under_cardinality_limit, STREAM_OVERFLOW_ATTRIBUTE_SET}, diff --git a/opentelemetry-sdk/src/metrics/internal/last_value.rs b/opentelemetry-sdk/src/metrics/internal/last_value.rs index e5b2364b5bd..f5d9f8d9974 100644 --- a/opentelemetry-sdk/src/metrics/internal/last_value.rs +++ b/opentelemetry-sdk/src/metrics/internal/last_value.rs @@ -4,8 +4,8 @@ use std::{ time::SystemTime, }; -use crate::{attributes::AttributeSet, metrics::data::DataPoint}; -use opentelemetry::{global, metrics::MetricsError}; +use crate::metrics::data::DataPoint; +use opentelemetry::{global, metrics::MetricsError, AttributeSet}; use super::{ aggregate::{is_under_cardinality_limit, STREAM_OVERFLOW_ATTRIBUTE_SET}, diff --git a/opentelemetry-sdk/src/metrics/internal/sum.rs b/opentelemetry-sdk/src/metrics/internal/sum.rs index 3fac77c4595..b6d32332175 100644 --- a/opentelemetry-sdk/src/metrics/internal/sum.rs +++ b/opentelemetry-sdk/src/metrics/internal/sum.rs @@ -4,9 +4,8 @@ use std::{ time::SystemTime, }; -use crate::attributes::AttributeSet; use crate::metrics::data::{self, Aggregation, DataPoint, Temporality}; 
-use opentelemetry::{global, metrics::MetricsError}; +use opentelemetry::{global, metrics::MetricsError, AttributeSet}; use super::{ aggregate::{is_under_cardinality_limit, STREAM_OVERFLOW_ATTRIBUTE_SET}, diff --git a/opentelemetry-sdk/src/metrics/meter.rs b/opentelemetry-sdk/src/metrics/meter.rs index c801adcb0ac..26501f85d71 100644 --- a/opentelemetry-sdk/src/metrics/meter.rs +++ b/opentelemetry-sdk/src/metrics/meter.rs @@ -1,6 +1,7 @@ use core::fmt; use std::{any::Any, borrow::Cow, collections::HashSet, sync::Arc}; +use opentelemetry::AttributeSet; use opentelemetry::{ global, metrics::{ @@ -9,7 +10,6 @@ use opentelemetry::{ InstrumentProvider, MetricsError, ObservableCounter, ObservableGauge, ObservableUpDownCounter, Observer as ApiObserver, Result, Unit, UpDownCounter, }, - KeyValue, }; use crate::instrumentation::Scope; @@ -647,7 +647,7 @@ impl Observer { } impl ApiObserver for Observer { - fn observe_f64(&self, inst: &dyn AsyncInstrument, measurement: f64, attrs: &[KeyValue]) { + fn observe_f64(&self, inst: &dyn AsyncInstrument, measurement: f64, attrs: AttributeSet) { if let Some(f64_obs) = inst.as_any().downcast_ref::>() { if self.f64s.contains(&f64_obs.id) { f64_obs.observe(measurement, attrs) @@ -666,7 +666,7 @@ impl ApiObserver for Observer { } } - fn observe_u64(&self, inst: &dyn AsyncInstrument, measurement: u64, attrs: &[KeyValue]) { + fn observe_u64(&self, inst: &dyn AsyncInstrument, measurement: u64, attrs: AttributeSet) { if let Some(u64_obs) = inst.as_any().downcast_ref::>() { if self.u64s.contains(&u64_obs.id) { u64_obs.observe(measurement, attrs) @@ -685,7 +685,7 @@ impl ApiObserver for Observer { } } - fn observe_i64(&self, inst: &dyn AsyncInstrument, measurement: i64, attrs: &[KeyValue]) { + fn observe_i64(&self, inst: &dyn AsyncInstrument, measurement: i64, attrs: AttributeSet) { if let Some(i64_obs) = inst.as_any().downcast_ref::>() { if self.i64s.contains(&i64_obs.id) { i64_obs.observe(measurement, attrs) diff --git 
a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 021ee3d469f..622133aa70b 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -13,6 +13,7 @@ //! metrics::{MeterProvider, Unit}, //! KeyValue, //! }; +//! use opentelemetry::AttributeSet; //! use opentelemetry_sdk::{metrics::SdkMeterProvider, Resource}; //! //! // Generate SDK configuration, resource, views, etc @@ -31,7 +32,8 @@ //! .init(); //! //! // use instruments to record measurements -//! counter.add(10, &[KeyValue::new("rate", "standard")]); +//! let attributes = AttributeSet::from(&[KeyValue::new("rate", "standard")]); +//! counter.add(10, attributes); //! ``` //! //! [Resource]: crate::Resource @@ -62,6 +64,7 @@ pub use view::*; mod tests { use super::*; use crate::{runtime, testing::metrics::InMemoryMetricsExporter}; + use opentelemetry::AttributeSet; use opentelemetry::{ metrics::{MeterProvider as _, Unit}, KeyValue, @@ -180,9 +183,9 @@ mod tests { .with_description("my_description") .init(); - let attribute = vec![KeyValue::new("key1", "value1")]; - counter.add(10, &attribute); - counter_duplicated.add(5, &attribute); + let attribute = AttributeSet::from(&[KeyValue::new("key1", "value1")]); + counter.add(10, attribute.clone()); + counter_duplicated.add(5, attribute); meter_provider.force_flush().unwrap(); @@ -266,7 +269,6 @@ mod tests { // "multi_thread" tokio flavor must be used else flush won't // be able to make progress! 
#[tokio::test(flavor = "multi_thread", worker_threads = 1)] - #[ignore = "Spatial aggregation is not yet implemented."] async fn spatial_aggregation_when_view_drops_attributes_observable_counter() { // cargo test spatial_aggregation_when_view_drops_attributes_observable_counter --features=metrics,testing @@ -299,7 +301,8 @@ mod tests { KeyValue::new("statusCode", "200"), KeyValue::new("verb", "get"), ] - .as_ref(), + .as_slice() + .into(), ); observer.observe_u64( @@ -309,7 +312,8 @@ mod tests { KeyValue::new("statusCode", "200"), KeyValue::new("verb", "post"), ] - .as_ref(), + .as_slice() + .into(), ); observer.observe_u64( @@ -319,7 +323,8 @@ mod tests { KeyValue::new("statusCode", "500"), KeyValue::new("verb", "get"), ] - .as_ref(), + .as_slice() + .into(), ); }) .expect("Expected to register callback"); diff --git a/opentelemetry-sdk/src/resource/mod.rs b/opentelemetry-sdk/src/resource/mod.rs index 79ce0122ebb..cf6f18440a5 100644 --- a/opentelemetry-sdk/src/resource/mod.rs +++ b/opentelemetry-sdk/src/resource/mod.rs @@ -30,6 +30,7 @@ pub use os::OsResourceDetector; pub use process::ProcessResourceDetector; pub use telemetry::TelemetryResourceDetector; +use opentelemetry::AttributeSet; use opentelemetry::{Key, KeyValue, Value}; use std::borrow::Cow; use std::collections::{hash_map, HashMap}; @@ -190,6 +191,16 @@ impl Resource { pub fn get(&self, key: Key) -> Option { self.attrs.get(&key).cloned() } + + /// Creates a new attribute set from the resource's current attributes + pub fn to_attribute_set(&self) -> AttributeSet { + let key_values = self + .iter() + .map(|(key, value)| KeyValue::new(key.clone(), value.clone())) + .collect::>(); + + AttributeSet::from(&key_values) + } } /// An owned iterator over the entries of a `Resource`. 
@@ -365,4 +376,24 @@ mod tests { }, ) } + + #[test] + fn can_create_attribute_set_from_resource() { + let resource = Resource::new([KeyValue::new("key1", "value1"), KeyValue::new("key2", 3)]); + + let set = resource.to_attribute_set(); + let mut kvs = set.iter().collect::>(); + + assert_eq!(kvs.len(), 2, "Incorrect number of attributes"); + + kvs.sort_by(|kv1, kv2| kv1.0.cmp(kv2.0)); + assert_eq!(kvs[0].0, &Key::from("key1"), "Unexpected first key"); + assert_eq!( + kvs[0].1, + &Value::String("value1".into()), + "Unexpected first value" + ); + assert_eq!(kvs[1].0, &Key::from("key2"), "Unexpected second key"); + assert_eq!(kvs[1].1, &Value::I64(3), "Unexpected second value"); + } } diff --git a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs b/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs index d28cd4062f5..3edff3d2dd1 100644 --- a/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs +++ b/opentelemetry-sdk/src/testing/metrics/in_memory_exporter.rs @@ -29,6 +29,7 @@ use std::sync::{Arc, Mutex}; /// ``` ///# use opentelemetry_sdk::{metrics, runtime}; ///# use opentelemetry::{KeyValue}; +///# use opentelemetry::AttributeSet; ///# use opentelemetry::metrics::MeterProvider; ///# use opentelemetry_sdk::testing::metrics::InMemoryMetricsExporter; ///# use opentelemetry_sdk::metrics::PeriodicReader; @@ -46,7 +47,8 @@ use std::sync::{Arc, Mutex}; /// // Create and record metrics using the MeterProvider /// let meter = meter_provider.meter(std::borrow::Cow::Borrowed("example")); /// let counter = meter.u64_counter("my_counter").init(); -/// counter.add(1, &[KeyValue::new("key", "value")]); +/// let attributes = AttributeSet::from(&[KeyValue::new("key", "value")]); +/// counter.add(1, attributes); /// /// meter_provider.force_flush().unwrap(); /// diff --git a/opentelemetry-stdout/src/common.rs b/opentelemetry-stdout/src/common.rs index 0098b1a6b31..0897fbb22a5 100644 --- a/opentelemetry-stdout/src/common.rs +++ 
b/opentelemetry-stdout/src/common.rs @@ -12,8 +12,8 @@ use serde::{Serialize, Serializer}; #[derive(Debug, Serialize, Clone, Hash, Eq, PartialEq)] pub(crate) struct AttributeSet(pub BTreeMap); -impl From<&opentelemetry_sdk::AttributeSet> for AttributeSet { - fn from(value: &opentelemetry_sdk::AttributeSet) -> Self { +impl From<&opentelemetry::AttributeSet> for AttributeSet { + fn from(value: &opentelemetry::AttributeSet) -> Self { AttributeSet( value .iter() diff --git a/opentelemetry-stdout/src/logs/transform.rs b/opentelemetry-stdout/src/logs/transform.rs index 9612cf1ff65..dba3b830ca1 100644 --- a/opentelemetry-stdout/src/logs/transform.rs +++ b/opentelemetry-stdout/src/logs/transform.rs @@ -4,7 +4,7 @@ use crate::common::{ as_human_readable, as_opt_human_readable, as_opt_unix_nano, as_unix_nano, KeyValue, Resource, Scope, Value, }; -use opentelemetry_sdk::AttributeSet; +use opentelemetry::AttributeSet; use serde::Serialize; /// Transformed logs data that can be serialized. @@ -26,7 +26,7 @@ impl From> for LogData { let resource: Resource = sdk_log.resource.as_ref().into(); let rl = resource_logs - .entry(sdk_log.resource.as_ref().into()) + .entry(sdk_log.resource.as_ref().to_attribute_set()) .or_insert_with(move || ResourceLogs { resource, scope_logs: Vec::with_capacity(1), diff --git a/opentelemetry-stdout/src/trace/transform.rs b/opentelemetry-stdout/src/trace/transform.rs index 07725c9483d..9af26986c60 100644 --- a/opentelemetry-stdout/src/trace/transform.rs +++ b/opentelemetry-stdout/src/trace/transform.rs @@ -1,5 +1,5 @@ use crate::common::{as_human_readable, as_unix_nano, KeyValue, Resource, Scope}; -use opentelemetry_sdk::AttributeSet; +use opentelemetry::AttributeSet; use serde::{Serialize, Serializer}; use std::{borrow::Cow, collections::HashMap, time::SystemTime}; @@ -20,7 +20,7 @@ impl From> for SpanData { let resource = sdk_span.resource.as_ref().into(); let rs = resource_spans - .entry(sdk_span.resource.as_ref().into()) + 
.entry(sdk_span.resource.as_ref().to_attribute_set()) .or_insert_with(move || ResourceSpans { resource, scope_spans: Vec::with_capacity(1), diff --git a/opentelemetry/CHANGELOG.md b/opentelemetry/CHANGELOG.md index b57647e5ab1..db83c515ae4 100644 --- a/opentelemetry/CHANGELOG.md +++ b/opentelemetry/CHANGELOG.md @@ -17,6 +17,16 @@ gains, and avoids `IndexMap` dependency. This affects `body` and `attributes` of `LogRecord`. [#1353](https://github.com/open-telemetry/opentelemetry-rust/pull/1353) - Add `TextMapCompositePropagator` [#1373](https://github.com/open-telemetry/opentelemetry-rust/pull/1373) +- `Counters`, `UpDownCounters`, `Gauges`, and `Histograms` now take an + `Into` as the parameter type for recording metric values. This + allows passing in a precreated `AttributeSet` for better performance when the + same set of attributes are used across instruments. This is backward + compatible with previous calls passing in `&[KeyValue]`. + [#1421](https://github.com/open-telemetry/opentelemetry-rust/pull/1421) +- Observable instruments no longer accept `&[KeyValue]` parameters for + `observe()` calls, and only accept a precreated `AttributeSet` + value. + [#1421](https://github.com/open-telemetry/opentelemetry-rust/pull/1421) ### Removed diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml index 388cc24cce0..8a4ad4b6c01 100644 --- a/opentelemetry/Cargo.toml +++ b/opentelemetry/Cargo.toml @@ -24,6 +24,7 @@ rustdoc-args = ["--cfg", "docsrs"] futures-core = { workspace = true } futures-sink = "0.3" once_cell = "1.13.0" +ordered-float = "4.0" pin-project-lite = { workspace = true, optional = true } thiserror = { workspace = true } urlencoding = "2.1.2" diff --git a/opentelemetry/src/attributes/mod.rs b/opentelemetry/src/attributes/mod.rs new file mode 100644 index 00000000000..ca3029549bb --- /dev/null +++ b/opentelemetry/src/attributes/mod.rs @@ -0,0 +1,5 @@ +//! 
Utilities for managing attributes for metrics + +mod set; + +pub use set::{AttributeSet, ToKeyValue}; diff --git a/opentelemetry/src/attributes/set.rs b/opentelemetry/src/attributes/set.rs new file mode 100644 index 00000000000..4f20c8fdcbf --- /dev/null +++ b/opentelemetry/src/attributes/set.rs @@ -0,0 +1,406 @@ +use once_cell::sync::Lazy; +use std::collections::hash_map::DefaultHasher; +use std::collections::HashSet; +use std::sync::Arc; +use std::{ + cmp::Ordering, + hash::{Hash, Hasher}, +}; + +use crate::{Array, Key, KeyValue, Value}; +use ordered_float::OrderedFloat; + +#[derive(Clone, Debug)] +struct HashKeyValue(KeyValue); + +impl Hash for HashKeyValue { + fn hash(&self, state: &mut H) { + self.0.key.hash(state); + match &self.0.value { + Value::F64(f) => OrderedFloat(*f).hash(state), + Value::Array(a) => match a { + Array::Bool(b) => b.hash(state), + Array::I64(i) => i.hash(state), + Array::F64(f) => f.iter().for_each(|f| OrderedFloat(*f).hash(state)), + Array::String(s) => s.hash(state), + }, + Value::Bool(b) => b.hash(state), + Value::I64(i) => i.hash(state), + Value::String(s) => s.hash(state), + }; + } +} + +impl PartialOrd for HashKeyValue { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for HashKeyValue { + fn cmp(&self, other: &Self) -> Ordering { + match self.0.key.cmp(&other.0.key) { + Ordering::Equal => match type_order(&self.0.value).cmp(&type_order(&other.0.value)) { + Ordering::Equal => match (&self.0.value, &other.0.value) { + (Value::F64(f), Value::F64(of)) => OrderedFloat(*f).cmp(&OrderedFloat(*of)), + (Value::Array(Array::Bool(b)), Value::Array(Array::Bool(ob))) => b.cmp(ob), + (Value::Array(Array::I64(i)), Value::Array(Array::I64(oi))) => i.cmp(oi), + (Value::Array(Array::String(s)), Value::Array(Array::String(os))) => s.cmp(os), + (Value::Array(Array::F64(f)), Value::Array(Array::F64(of))) => { + match f.len().cmp(&of.len()) { + Ordering::Equal => f + .iter() + .map(|x| OrderedFloat(*x)) + 
.collect::>() + .cmp(&of.iter().map(|x| OrderedFloat(*x)).collect()), + other => other, + } + } + (Value::Bool(b), Value::Bool(ob)) => b.cmp(ob), + (Value::I64(i), Value::I64(oi)) => i.cmp(oi), + (Value::String(s), Value::String(os)) => s.cmp(os), + _ => Ordering::Equal, + }, + other => other, // 2nd order by value types + }, + other => other, // 1st order by key + } + } +} + +fn type_order(v: &Value) -> u8 { + match v { + Value::Bool(_) => 1, + Value::I64(_) => 2, + Value::F64(_) => 3, + Value::String(_) => 4, + Value::Array(a) => match a { + Array::Bool(_) => 5, + Array::I64(_) => 6, + Array::F64(_) => 7, + Array::String(_) => 8, + }, + } +} + +impl PartialEq for HashKeyValue { + fn eq(&self, other: &Self) -> bool { + self.0.key == other.0.key + && match (&self.0.value, &other.0.value) { + (Value::F64(f), Value::F64(of)) => OrderedFloat(*f).eq(&OrderedFloat(*of)), + (Value::Array(Array::F64(f)), Value::Array(Array::F64(of))) => { + f.len() == of.len() + && f.iter() + .zip(of.iter()) + .all(|(f, of)| OrderedFloat(*f).eq(&OrderedFloat(*of))) + } + (non_float, other_non_float) => non_float.eq(other_non_float), + } + } +} + +impl Eq for HashKeyValue {} + +static EMPTY_SET: Lazy> = + Lazy::new(|| Arc::new(InternalAttributeSet::new(Vec::with_capacity(0)))); + +#[derive(Eq, PartialEq, Debug)] +struct InternalAttributeSet { + key_values: Vec, + hash: u64, +} + +impl InternalAttributeSet { + fn new(mut values: Vec) -> Self { + values.sort_unstable(); + let mut hasher = DefaultHasher::new(); + values.iter().fold(&mut hasher, |mut hasher, item| { + item.hash(&mut hasher); + hasher + }); + + InternalAttributeSet { + key_values: values, + hash: hasher.finish(), + } + } +} + +impl> From for InternalAttributeSet +where + ::IntoIter: DoubleEndedIterator + ExactSizeIterator, +{ + fn from(value: I) -> Self { + let iter = value.into_iter(); + let mut seen_keys = HashSet::with_capacity(iter.len()); + let vec = iter + .into_iter() + .rev() + .filter_map(|kv| { + if 
seen_keys.contains(&kv.key) { + None + } else { + seen_keys.insert(kv.key.clone()); + Some(HashKeyValue(kv)) + } + }) + .collect::>(); + + InternalAttributeSet::new(vec) + } +} + +impl Hash for InternalAttributeSet { + fn hash(&self, state: &mut H) { + state.write_u64(self.hash) + } +} + +/// Trait declaring that a type can be converted into a `KeyValue` +pub trait ToKeyValue { + /// Create a `KeyValue` from the current instance. + fn to_key_value(self) -> KeyValue; +} + +impl ToKeyValue for KeyValue { + fn to_key_value(self) -> KeyValue { + self + } +} + +impl ToKeyValue for &KeyValue { + fn to_key_value(self) -> KeyValue { + self.clone() + } +} + +/// A unique set of attributes that can be used as instrument identifiers. +/// +/// Cloning of an attribute set is cheap, as all clones share a reference to the underlying +/// attribute data. +/// +/// This must implement [Hash], [PartialEq], and [Eq] so it may be used as +/// HashMap keys and other de-duplication methods. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct AttributeSet(Arc); + +impl From for AttributeSet +where + KV: ToKeyValue, + I: IntoIterator, + ::IntoIter: DoubleEndedIterator + ExactSizeIterator, +{ + fn from(values: I) -> Self { + AttributeSet(Arc::new(InternalAttributeSet::from( + values.into_iter().map(ToKeyValue::to_key_value), + ))) + } +} + +impl AttributeSet { + /// Returns the number of elements in the set. + pub fn len(&self) -> usize { + self.0.key_values.len() + } + + /// Returns `true` if the set contains no elements. + pub fn is_empty(&self) -> bool { + self.0.key_values.is_empty() + } + + /// Creates a new attribute set that retains only the attributes specified by the predicate. 
+ pub fn clone_with(&self, f: F) -> AttributeSet + where + F: Fn(&KeyValue) -> bool, + { + let key_values = self + .0 + .key_values + .iter() + .filter(|kv| f(&kv.0)) + .cloned() + .collect::>(); + + AttributeSet(Arc::new(InternalAttributeSet::new(key_values))) + } + + /// Iterate over key value pairs in the set + pub fn iter(&self) -> impl Iterator { + self.0.key_values.iter().map(|kv| (&kv.0.key, &kv.0.value)) + } +} + +impl Default for AttributeSet { + fn default() -> Self { + AttributeSet(EMPTY_SET.clone()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::StringValue; + + #[test] + fn can_create_attribute_set_from_array() { + let array = [KeyValue::new("key1", "value1"), KeyValue::new("key2", 3)]; + + let set = AttributeSet::from(&array); + let mut kvs = set.iter().collect::>(); + + assert_eq!(kvs.len(), 2, "Incorrect number of attributes"); + + kvs.sort_by(|kv1, kv2| kv1.0.cmp(kv2.0)); + assert_eq!(kvs[0].0, &Key::from("key1"), "Unexpected first key"); + assert_eq!( + kvs[0].1, + &Value::String("value1".into()), + "Unexpected first value" + ); + assert_eq!(kvs[1].0, &Key::from("key2"), "Unexpected second key"); + assert_eq!(kvs[1].1, &Value::I64(3), "Unexpected second value"); + } + + #[test] + fn can_create_attribute_set_from_owned_vec() { + let vec = vec![KeyValue::new("key1", "value1"), KeyValue::new("key2", 3)]; + + let set = AttributeSet::from(vec); + let mut kvs = set.iter().collect::>(); + + assert_eq!(kvs.len(), 2, "Incorrect number of attributes"); + + kvs.sort_by(|kv1, kv2| kv1.0.cmp(kv2.0)); + assert_eq!(kvs[0].0, &Key::from("key1"), "Unexpected first key"); + assert_eq!( + kvs[0].1, + &Value::String("value1".into()), + "Unexpected first value" + ); + assert_eq!(kvs[1].0, &Key::from("key2"), "Unexpected second key"); + assert_eq!(kvs[1].1, &Value::I64(3), "Unexpected second value"); + } + + #[test] + fn two_sets_with_same_key_values_in_different_orders_are_equal() { + let array1 = [ + KeyValue::new("key1", "value1"), + 
KeyValue::new("key2", 3), + KeyValue::new("key3", Value::Array(Array::Bool(vec![true]))), + KeyValue::new("key4", Value::Array(Array::F64(vec![1.5]))), + KeyValue::new("key5", Value::Array(Array::I64(vec![15]))), + KeyValue::new( + "key6", + Value::Array(Array::String(vec![StringValue::from("test")])), + ), + ]; + + let array2 = [ + KeyValue::new( + "key6", + Value::Array(Array::String(vec![StringValue::from("test")])), + ), + KeyValue::new("key1", "value1"), + KeyValue::new("key3", Value::Array(Array::Bool(vec![true]))), + KeyValue::new("key4", Value::Array(Array::F64(vec![1.5]))), + KeyValue::new("key5", Value::Array(Array::I64(vec![15]))), + KeyValue::new("key2", 3), + ]; + + let set1 = AttributeSet::from(&array1); + let set2 = AttributeSet::from(&array2); + + assert_eq!(set1, set2); + } + + #[test] + fn two_sets_with_same_key_values_in_different_orders_have_same_hash() { + let array1 = [ + KeyValue::new("key1", "value1"), + KeyValue::new("key2", 3), + KeyValue::new("key3", Value::Array(Array::Bool(vec![true]))), + KeyValue::new("key4", Value::Array(Array::F64(vec![1.5]))), + KeyValue::new("key5", Value::Array(Array::I64(vec![15]))), + KeyValue::new( + "key6", + Value::Array(Array::String(vec![StringValue::from("test")])), + ), + ]; + + let array2 = [ + KeyValue::new( + "key6", + Value::Array(Array::String(vec![StringValue::from("test")])), + ), + KeyValue::new("key1", "value1"), + KeyValue::new("key3", Value::Array(Array::Bool(vec![true]))), + KeyValue::new("key4", Value::Array(Array::F64(vec![1.5]))), + KeyValue::new("key5", Value::Array(Array::I64(vec![15]))), + KeyValue::new("key2", 3), + ]; + + let set1 = AttributeSet::from(&array1); + let set2 = AttributeSet::from(&array2); + + let mut hasher1 = DefaultHasher::new(); + let mut hasher2 = DefaultHasher::new(); + set1.hash(&mut hasher1); + set2.hash(&mut hasher2); + + assert_eq!(hasher1.finish(), hasher2.finish()); + } + + #[test] + fn clone_with_removes_unspecified_key_values() { + let array = [ + 
KeyValue::new("key1", "value1"), + KeyValue::new("key2", 3), + KeyValue::new("key3", 4), + ]; + + let set = AttributeSet::from(&array); + let set2 = set.clone_with(|kv| kv.key == Key::new("key2")); + + assert_ne!(set, set2, "Both sets were unexpectedly equal"); + assert_eq!(set2.len(), 1, "Expected only one attribute in new set"); + + let kvs = set2.iter().collect::>(); + assert_eq!(kvs[0].0, &Key::from("key2"), "Unexpected key"); + assert_eq!(kvs[0].1, &Value::I64(3), "Unexpected value"); + } + + #[test] + fn len_returns_accurate_value() { + let array = [KeyValue::new("key1", "value1"), KeyValue::new("key2", 3)]; + + let set = AttributeSet::from(&array); + let kvs = set.iter().collect::>(); + + assert_eq!(set.len(), kvs.len()); + } + + #[test] + fn empty_when_no_attributes_provided() { + let set = AttributeSet::from(&[]); + assert!(set.is_empty()); + } + + #[test] + fn default_set_has_no_attributes() { + let set = AttributeSet::default(); + assert!(set.is_empty()); + assert_eq!(set.len(), 0); + } + + #[test] + fn last_key_wins_for_deduplication() { + let array = [KeyValue::new("key1", "value1"), KeyValue::new("key1", 3)]; + + let set = AttributeSet::from(&array); + let kvs = set.iter().collect::>(); + + assert_eq!(set.len(), 1, "Expected only a single key value pair"); + assert_eq!(kvs[0].0, &Key::new("key1"), "Unexpected key"); + assert_eq!(kvs[0].1, &Value::I64(3), "Unexpected value"); + } +} diff --git a/opentelemetry/src/global/mod.rs b/opentelemetry/src/global/mod.rs index 790343968c3..3af9145dc54 100644 --- a/opentelemetry/src/global/mod.rs +++ b/opentelemetry/src/global/mod.rs @@ -92,7 +92,7 @@ //! # #[cfg(feature="metrics")] //! # { //! use opentelemetry::metrics::{Meter, noop::NoopMeterProvider}; -//! use opentelemetry::{global, KeyValue}; +//! use opentelemetry::{AttributeSet, global, KeyValue}; //! //! fn init_meter() { //! let provider = NoopMeterProvider::new(); @@ -108,7 +108,8 @@ //! let counter = meter.u64_counter("my_counter").init(); //! //! 
// record metrics -//! counter.add(1, &[KeyValue::new("mykey", "myvalue")]); +//! let attributes = AttributeSet::from(&[KeyValue::new("mykey", "myvalue")]); +//! counter.add(1, attributes); //! } //! //! // in main or other app start @@ -122,7 +123,7 @@ //! ``` //! # #[cfg(feature="metrics")] //! # { -//! use opentelemetry::{global, KeyValue}; +//! use opentelemetry::{AttributeSet, global, KeyValue}; //! //! pub fn my_traced_library_function() { //! // End users of your library will configure their global meter provider @@ -131,7 +132,8 @@ //! let counter = tracer.u64_counter("my_counter").init(); //! //! // record metrics -//! counter.add(1, &[KeyValue::new("mykey", "myvalue")]); +//! let attributes = AttributeSet::from(&[KeyValue::new("mykey", "myvalue")]); +//! counter.add(1, attributes); //! } //! # } //! ``` diff --git a/opentelemetry/src/lib.rs b/opentelemetry/src/lib.rs index 9d2088f2ace..c6753174fe6 100644 --- a/opentelemetry/src/lib.rs +++ b/opentelemetry/src/lib.rs @@ -72,7 +72,7 @@ //! ``` //! # #[cfg(feature = "metrics")] //! # { -//! use opentelemetry::{global, KeyValue}; +//! use opentelemetry::{AttributeSet, global, KeyValue}; //! //! // get a meter from a provider //! let meter = global::meter("my_service"); @@ -80,8 +80,11 @@ //! // create an instrument //! let counter = meter.u64_counter("my_counter").init(); //! +//! // Form the attributes +//! let attributes = AttributeSet::from(&[KeyValue::new("http.client_ip", "83.164.160.102")]); +//! //! // record a measurement -//! counter.add(1, &[KeyValue::new("http.client_ip", "83.164.160.102")]); +//! counter.add(1, attributes); //! # } //! ``` //! 
@@ -206,6 +209,9 @@ pub mod global; pub mod baggage; +mod attributes; +pub use attributes::{AttributeSet, ToKeyValue}; + mod context; pub use context::{Context, ContextGuard}; diff --git a/opentelemetry/src/metrics/instruments/counter.rs b/opentelemetry/src/metrics/instruments/counter.rs index 171b6a49eab..f437e080994 100644 --- a/opentelemetry/src/metrics/instruments/counter.rs +++ b/opentelemetry/src/metrics/instruments/counter.rs @@ -1,7 +1,5 @@ -use crate::{ - metrics::{AsyncInstrument, AsyncInstrumentBuilder, InstrumentBuilder, MetricsError}, - KeyValue, -}; +use crate::attributes::AttributeSet; +use crate::metrics::{AsyncInstrument, AsyncInstrumentBuilder, InstrumentBuilder, MetricsError}; use core::fmt; use std::sync::Arc; use std::{any::Any, convert::TryFrom}; @@ -9,7 +7,7 @@ use std::{any::Any, convert::TryFrom}; /// An SDK implemented instrument that records increasing values. pub trait SyncCounter { /// Records an increment to the counter. - fn add(&self, value: T, attributes: &[KeyValue]); + fn add(&self, value: T, attributes: AttributeSet); } /// An instrument that records increasing values. @@ -32,8 +30,8 @@ impl Counter { } /// Records an increment to the counter. - pub fn add(&self, value: T, attributes: &[KeyValue]) { - self.0.add(value, attributes) + pub fn add(&self, value: T, attributes: impl Into) { + self.0.add(value, attributes.into()) } } @@ -87,7 +85,7 @@ impl ObservableCounter { /// It is only valid to call this within a callback. If called outside of the /// registered callback it should have no effect on the instrument, and an /// error will be reported via the error handler. 
- pub fn observe(&self, value: T, attributes: &[KeyValue]) { + pub fn observe(&self, value: T, attributes: AttributeSet) { self.0.observe(value, attributes) } @@ -98,7 +96,7 @@ impl ObservableCounter { } impl AsyncInstrument for ObservableCounter { - fn observe(&self, measurement: T, attributes: &[KeyValue]) { + fn observe(&self, measurement: T, attributes: AttributeSet) { self.0.observe(measurement, attributes) } diff --git a/opentelemetry/src/metrics/instruments/gauge.rs b/opentelemetry/src/metrics/instruments/gauge.rs index ab9fb2e05da..0f64c01018f 100644 --- a/opentelemetry/src/metrics/instruments/gauge.rs +++ b/opentelemetry/src/metrics/instruments/gauge.rs @@ -1,6 +1,6 @@ use crate::{ + attributes::AttributeSet, metrics::{AsyncInstrument, AsyncInstrumentBuilder, InstrumentBuilder, MetricsError}, - KeyValue, }; use core::fmt; use std::sync::Arc; @@ -9,7 +9,7 @@ use std::{any::Any, convert::TryFrom}; /// An SDK implemented instrument that records independent values pub trait SyncGauge { /// Records an independent value. - fn record(&self, value: T, attributes: &[KeyValue]); + fn record(&self, value: T, attributes: AttributeSet); } /// An instrument that records independent values @@ -32,8 +32,8 @@ impl Gauge { } /// Records an independent value. - pub fn record(&self, value: T, attributes: &[KeyValue]) { - self.0.record(value, attributes) + pub fn record(&self, value: T, attributes: impl Into) { + self.0.record(value, attributes.into()) } } @@ -92,7 +92,7 @@ impl ObservableGauge { /// It is only valid to call this within a callback. If called outside of the /// registered callback it should have no effect on the instrument, and an /// error will be reported via the error handler. 
- pub fn observe(&self, measurement: T, attributes: &[KeyValue]) { + pub fn observe(&self, measurement: T, attributes: AttributeSet) { self.0.observe(measurement, attributes) } @@ -103,7 +103,7 @@ impl ObservableGauge { } impl AsyncInstrument for ObservableGauge { - fn observe(&self, measurement: M, attributes: &[KeyValue]) { + fn observe(&self, measurement: M, attributes: AttributeSet) { self.observe(measurement, attributes) } diff --git a/opentelemetry/src/metrics/instruments/histogram.rs b/opentelemetry/src/metrics/instruments/histogram.rs index c6246ebee29..d4ec5cad619 100644 --- a/opentelemetry/src/metrics/instruments/histogram.rs +++ b/opentelemetry/src/metrics/instruments/histogram.rs @@ -1,7 +1,5 @@ -use crate::{ - metrics::{InstrumentBuilder, MetricsError}, - KeyValue, -}; +use crate::attributes::AttributeSet; +use crate::metrics::{InstrumentBuilder, MetricsError}; use core::fmt; use std::convert::TryFrom; use std::sync::Arc; @@ -9,7 +7,7 @@ use std::sync::Arc; /// An SDK implemented instrument that records a distribution of values. pub trait SyncHistogram { /// Adds an additional value to the distribution. - fn record(&self, value: T, attributes: &[KeyValue]); + fn record(&self, value: T, attributes: AttributeSet); } /// An instrument that records a distribution of values. @@ -32,8 +30,8 @@ impl Histogram { } /// Adds an additional value to the distribution. 
- pub fn record(&self, value: T, attributes: &[KeyValue]) { - self.0.record(value, attributes) + pub fn record(&self, value: T, attributes: impl Into) { + self.0.record(value, attributes.into()) } } diff --git a/opentelemetry/src/metrics/instruments/mod.rs b/opentelemetry/src/metrics/instruments/mod.rs index 137712d7358..446e60c7056 100644 --- a/opentelemetry/src/metrics/instruments/mod.rs +++ b/opentelemetry/src/metrics/instruments/mod.rs @@ -1,5 +1,5 @@ +use crate::attributes::AttributeSet; use crate::metrics::{Meter, MetricsError, Result, Unit}; -use crate::KeyValue; use core::fmt; use std::any::Any; use std::borrow::Cow; @@ -17,7 +17,7 @@ pub trait AsyncInstrument: Send + Sync { /// Observes the state of the instrument. /// /// It is only valid to call this within a callback. - fn observe(&self, measurement: T, attributes: &[KeyValue]); + fn observe(&self, measurement: T, attributes: AttributeSet); /// Used for SDKs to downcast instruments in callbacks. fn as_any(&self) -> Arc; diff --git a/opentelemetry/src/metrics/instruments/up_down_counter.rs b/opentelemetry/src/metrics/instruments/up_down_counter.rs index 1134ecedadf..0ac1833d555 100644 --- a/opentelemetry/src/metrics/instruments/up_down_counter.rs +++ b/opentelemetry/src/metrics/instruments/up_down_counter.rs @@ -1,7 +1,5 @@ -use crate::{ - metrics::{InstrumentBuilder, MetricsError}, - KeyValue, -}; +use crate::attributes::AttributeSet; +use crate::metrics::{InstrumentBuilder, MetricsError}; use core::fmt; use std::sync::Arc; use std::{any::Any, convert::TryFrom}; @@ -11,7 +9,7 @@ use super::{AsyncInstrument, AsyncInstrumentBuilder}; /// An SDK implemented instrument that records increasing or decreasing values. pub trait SyncUpDownCounter { /// Records an increment or decrement to the counter. - fn add(&self, value: T, attributes: &[KeyValue]); + fn add(&self, value: T, attributes: AttributeSet); } /// An instrument that records increasing or decreasing values. 
@@ -37,8 +35,8 @@ impl UpDownCounter { } /// Records an increment or decrement to the counter. - pub fn add(&self, value: T, attributes: &[KeyValue]) { - self.0.add(value, attributes) + pub fn add(&self, value: T, attributes: impl Into) { + self.0.add(value, attributes.into()) } } @@ -93,7 +91,7 @@ impl ObservableUpDownCounter { /// It is only valid to call this within a callback. If called outside of the /// registered callback it should have no effect on the instrument, and an /// error will be reported via the error handler. - pub fn observe(&self, value: T, attributes: &[KeyValue]) { + pub fn observe(&self, value: T, attributes: AttributeSet) { self.0.observe(value, attributes) } @@ -104,7 +102,7 @@ impl ObservableUpDownCounter { } impl AsyncInstrument for ObservableUpDownCounter { - fn observe(&self, measurement: T, attributes: &[KeyValue]) { + fn observe(&self, measurement: T, attributes: AttributeSet) { self.0.observe(measurement, attributes) } diff --git a/opentelemetry/src/metrics/meter.rs b/opentelemetry/src/metrics/meter.rs index f64fae69762..999c0e9b1f0 100644 --- a/opentelemetry/src/metrics/meter.rs +++ b/opentelemetry/src/metrics/meter.rs @@ -1,3 +1,4 @@ +use crate::attributes::AttributeSet; use core::fmt; use std::any::Any; use std::borrow::Cow; @@ -71,7 +72,7 @@ pub trait MeterProvider { /// Provides access to instrument instances for recording measurements. 
/// /// ``` -/// use opentelemetry::{global, KeyValue}; +/// use opentelemetry::{AttributeSet, global, KeyValue}; /// /// let meter = global::meter("my-meter"); /// @@ -80,181 +81,107 @@ pub trait MeterProvider { /// // u64 Counter /// let u64_counter = meter.u64_counter("my_u64_counter").init(); /// -/// // Record measurements using the counter instrument add() -/// u64_counter.add( -/// 10, -/// [ +/// // Define the attributes the counters will use +/// let attributes = AttributeSet::from(&[ /// KeyValue::new("mykey1", "myvalue1"), /// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref() -/// ); +/// ]); +/// +/// // Record measurements using the counter instrument add() +/// u64_counter.add(10, attributes.clone()); /// /// // f64 Counter /// let f64_counter = meter.f64_counter("my_f64_counter").init(); /// /// // Record measurements using the counter instrument add() -/// f64_counter.add( -/// 3.15, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref() -/// ); +/// f64_counter.add(3.15, attributes.clone()); /// /// // u6 observable counter /// let observable_u4_counter = meter.u64_observable_counter("my_observable_u64_counter").init(); /// /// // Register a callback to this meter for an asynchronous instrument to record measurements +/// let observer_attributes = attributes.clone(); /// meter.register_callback(&[observable_u4_counter.as_any()], move |observer| { -/// observer.observe_u64( -/// &observable_u4_counter, -/// 1, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ) +/// observer.observe_u64(&observable_u4_counter, 1, observer_attributes.clone()) /// }); /// /// // f64 observable counter /// let observable_f64_counter = meter.f64_observable_counter("my_observable_f64_counter").init(); /// /// // Register a callback to this meter for an asynchronous instrument to record measurements +/// let observer_attributes = attributes.clone(); /// 
meter.register_callback(&[observable_f64_counter.as_any()], move |observer| { -/// observer.observe_f64( -/// &observable_f64_counter, -/// 1.55, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ) +/// observer.observe_f64(&observable_f64_counter, 1.55, observer_attributes.clone()) /// }); /// /// // i64 updown counter /// let updown_i64_counter = meter.i64_up_down_counter("my_updown_i64_counter").init(); /// /// // Record measurements using the updown counter instrument add() -/// updown_i64_counter.add( -/// -10, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ); +/// updown_i64_counter.add(-10, attributes.clone()); /// /// // f64 updown counter /// let updown_f64_counter = meter.f64_up_down_counter("my_updown_f64_counter").init(); /// /// // Record measurements using the updown counter instrument add() -/// updown_f64_counter.add( -/// -10.67, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ); +/// updown_f64_counter.add(-10.67, attributes.clone()); /// /// // i64 observable updown counter /// let observable_i64_up_down_counter = meter.i64_observable_up_down_counter("my_observable_i64_updown_counter").init(); /// /// // Register a callback to this meter for an asynchronous instrument to record measurements +/// let observer_attributes = attributes.clone(); /// meter.register_callback(&[observable_i64_up_down_counter.as_any()], move |observer| { -/// observer.observe_i64( -/// &observable_i64_up_down_counter, -/// 1, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ) +/// observer.observe_i64(&observable_i64_up_down_counter, 1, observer_attributes.clone()) /// }); /// /// // f64 observable updown counter /// let observable_f64_up_down_counter = 
meter.f64_observable_up_down_counter("my_observable_f64_updown_counter").init(); /// /// // Register a callback to this meter for an asynchronous instrument to record measurements +/// let observer_attributes = attributes.clone(); /// meter.register_callback(&[observable_f64_up_down_counter.as_any()], move |observer| { -/// observer.observe_f64( -/// &observable_f64_up_down_counter, -/// 1.16, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ) +/// observer.observe_f64(&observable_f64_up_down_counter, 1.16, observer_attributes.clone()) /// }); /// /// // Observable f64 gauge /// let f64_gauge = meter.f64_observable_gauge("my_f64_gauge").init(); /// /// // Register a callback to this meter for an asynchronous instrument to record measurements +/// let observer_attributes = attributes.clone(); /// meter.register_callback(&[f64_gauge.as_any()], move |observer| { -/// observer.observe_f64( -/// &f64_gauge, -/// 2.32, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ) +/// observer.observe_f64(&f64_gauge, 2.32, observer_attributes.clone()) /// }); /// /// // Observable i64 gauge /// let i64_gauge = meter.i64_observable_gauge("my_i64_gauge").init(); /// /// // Register a callback to this meter for an asynchronous instrument to record measurements +/// let observer_attributes = attributes.clone(); /// meter.register_callback(&[i64_gauge.as_any()], move |observer| { -/// observer.observe_i64( -/// &i64_gauge, -/// 12, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ) +/// observer.observe_i64(&i64_gauge, 12, observer_attributes.clone()) /// }); /// /// // Observable u64 gauge /// let u64_gauge = meter.u64_observable_gauge("my_u64_gauge").init(); /// /// // Register a callback to this meter for an asynchronous instrument to record measurements +/// let observer_attributes = 
attributes.clone(); /// meter.register_callback(&[u64_gauge.as_any()], move |observer| { -/// observer.observe_u64( -/// &u64_gauge, -/// 1, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ].as_ref(), -/// ) +/// observer.observe_u64(&u64_gauge, 1, observer_attributes.clone()) /// }); /// /// // f64 histogram /// let f64_histogram = meter.f64_histogram("my_f64_histogram").init(); /// /// // Record measurements using the histogram instrument record() -/// f64_histogram.record( -/// 10.5, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ] -/// .as_ref(), -/// ); +/// f64_histogram.record(10.5, attributes.clone()); /// /// // u64 histogram /// let u64_histogram = meter.u64_histogram("my_u64_histogram").init(); /// /// // Record measurements using the histogram instrument record() -/// u64_histogram.record( -/// 12, -/// [ -/// KeyValue::new("mykey1", "myvalue1"), -/// KeyValue::new("mykey2", "myvalue2"), -/// ] -/// .as_ref(), -/// ); +/// u64_histogram.record(12, attributes); /// /// ``` #[derive(Clone)] @@ -438,13 +365,13 @@ pub trait CallbackRegistration: Send + Sync { /// Records measurements for multiple instruments in a callback. pub trait Observer { /// Records the f64 value with attributes for the observable. - fn observe_f64(&self, inst: &dyn AsyncInstrument, measurement: f64, attrs: &[KeyValue]); + fn observe_f64(&self, inst: &dyn AsyncInstrument, measurement: f64, attrs: AttributeSet); /// Records the u64 value with attributes for the observable. - fn observe_u64(&self, inst: &dyn AsyncInstrument, measurement: u64, attrs: &[KeyValue]); + fn observe_u64(&self, inst: &dyn AsyncInstrument, measurement: u64, attrs: AttributeSet); /// Records the i64 value with attributes for the observable. 
- fn observe_i64(&self, inst: &dyn AsyncInstrument, measurement: i64, attrs: &[KeyValue]); + fn observe_i64(&self, inst: &dyn AsyncInstrument, measurement: i64, attrs: AttributeSet); } impl fmt::Debug for Meter { diff --git a/opentelemetry/src/metrics/noop.rs b/opentelemetry/src/metrics/noop.rs index adf4b03da3b..3f675ed4623 100644 --- a/opentelemetry/src/metrics/noop.rs +++ b/opentelemetry/src/metrics/noop.rs @@ -3,6 +3,7 @@ //! This implementation is returned as the global Meter if no `Meter` //! has been set. It is also useful for testing purposes as it is intended //! to have minimal resource utilization and runtime impact. +use crate::attributes::AttributeSet; use crate::{ metrics::{ AsyncInstrument, CallbackRegistration, InstrumentProvider, Meter, MeterProvider, Observer, @@ -93,25 +94,25 @@ impl NoopSyncInstrument { } impl SyncCounter for NoopSyncInstrument { - fn add(&self, _value: T, _attributes: &[KeyValue]) { + fn add(&self, _value: T, _attributes: AttributeSet) { // Ignored } } impl SyncUpDownCounter for NoopSyncInstrument { - fn add(&self, _value: T, _attributes: &[KeyValue]) { + fn add(&self, _value: T, _attributes: AttributeSet) { // Ignored } } impl SyncHistogram for NoopSyncInstrument { - fn record(&self, _value: T, _attributes: &[KeyValue]) { + fn record(&self, _value: T, _attributes: AttributeSet) { // Ignored } } impl SyncGauge for NoopSyncInstrument { - fn record(&self, _value: T, _attributes: &[KeyValue]) { + fn record(&self, _value: T, _attributes: AttributeSet) { // Ignored } } @@ -130,7 +131,7 @@ impl NoopAsyncInstrument { } impl AsyncInstrument for NoopAsyncInstrument { - fn observe(&self, _value: T, _attributes: &[KeyValue]) { + fn observe(&self, _value: T, _attributes: AttributeSet) { // Ignored } diff --git a/stress/Cargo.toml b/stress/Cargo.toml index bd9c2911cd8..a6383a7f6ac 100644 --- a/stress/Cargo.toml +++ b/stress/Cargo.toml @@ -9,6 +9,11 @@ name = "metrics" path = "src/metrics.rs" doc = false +[[bin]] # Bin to run the metrics 
cached attributes stress tests +name = "metrics_cached_attrs" +path = "src/metrics_cached_attrs.rs" +doc = false + [[bin]] # Bin to run the logs stress tests name = "logs" path = "src/logs.rs" diff --git a/stress/src/metrics_cached_attrs.rs b/stress/src/metrics_cached_attrs.rs new file mode 100644 index 00000000000..867ffa2e3ec --- /dev/null +++ b/stress/src/metrics_cached_attrs.rs @@ -0,0 +1,53 @@ +use lazy_static::lazy_static; +use opentelemetry::AttributeSet; +use opentelemetry::{ + metrics::{Counter, MeterProvider as _}, + KeyValue, +}; +use opentelemetry_sdk::metrics::{ManualReader, SdkMeterProvider}; +use rand::{rngs::SmallRng, Rng, SeedableRng}; +use std::borrow::Cow; + +mod throughput; + +lazy_static! { + static ref PROVIDER: SdkMeterProvider = SdkMeterProvider::builder() + .with_reader(ManualReader::builder().build()) + .build(); + static ref ATTRIBUTE_VALUES: [&'static str; 10] = [ + "value1", "value2", "value3", "value4", "value5", "value6", "value7", "value8", "value9", + "value10" + ]; + static ref COUNTER: Counter = PROVIDER + .meter(<&str as Into>>::into("test")) + .u64_counter("hello") + .init(); + static ref ATTRIBUTE_SETS: Vec = { + let mut vec = Vec::new(); + for i0 in 0..ATTRIBUTE_VALUES.len() { + for i1 in 0..ATTRIBUTE_VALUES.len() { + for i2 in 0..ATTRIBUTE_VALUES.len() { + vec.push(AttributeSet::from(&[ + KeyValue::new("attribute1", ATTRIBUTE_VALUES[i0]), + KeyValue::new("attribute2", ATTRIBUTE_VALUES[i1]), + KeyValue::new("attribute3", ATTRIBUTE_VALUES[i2]), + ])) + } + } + } + + vec + }; +} + +fn main() { + throughput::test_throughput(test_counter); +} + +fn test_counter() { + let mut rng = SmallRng::from_entropy(); + let len = ATTRIBUTE_SETS.len(); + let index = rng.gen_range(0..len); + + COUNTER.add(1, ATTRIBUTE_SETS[index].clone()); +} From de8e7506c594c27199794ea9496a46133d23cbb4 Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Wed, 24 Jan 2024 17:55:52 -0800 Subject: [PATCH 06/13] Add note about stdout exporter (#1491) --- .cspell.json 
| 1 - CONTRIBUTING.md | 2 +- opentelemetry-stdout/src/lib.rs | 18 ++++++++++++++++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.cspell.json b/.cspell.json index da552a52652..613c082c229 100644 --- a/.cspell.json +++ b/.cspell.json @@ -42,7 +42,6 @@ "Lalit", "msrv", "Ochtman", - "openetelemetry", "opentelemetry", "OTLP", "protoc", diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ec88c2f9982..2d2aa4a5241 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -126,7 +126,7 @@ For a deeper discussion, see: Currently, the Opentelemetry Rust SDK has two ways to handle errors. In the situation where errors are not allowed to return. One should call global error handler to process the errors. Otherwise, one should return the errors. -The Opentelemetry Rust SDK comes with an error type `openetelemetry::Error`. For different function, one error has been defined. All error returned by trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`. +The Opentelemetry Rust SDK comes with an error type `opentelemetry::Error`. For different function, one error has been defined. All error returned by trace module MUST be wrapped in `opentelemetry::trace::TraceError`. All errors returned by metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`. All errors returned by logs module MUST be wrapped in `opentelemetry::logs::LogsError`. For users that want to implement their own exporters. It's RECOMMENDED to wrap all errors from the exporter into a crate-level error type, and implement `ExporterError` trait. diff --git a/opentelemetry-stdout/src/lib.rs b/opentelemetry-stdout/src/lib.rs index 22690da2c38..31fc2e2d99d 100644 --- a/opentelemetry-stdout/src/lib.rs +++ b/opentelemetry-stdout/src/lib.rs @@ -1,9 +1,12 @@ //! Export telemetry signals to stdout. +//! This exporter is designed for debugging and learning purposes. It is not +//! 
recommended for use in production environments. The output format might not be +//! exhaustive and is subject to change at any time. //! //! # Examples //! //! ```no_run -//! # #[cfg(all(feature = "metrics", feature = "trace"))] +//! # #[cfg(all(feature = "metrics", feature = "trace", feature = "logs"))] //! { //! use opentelemetry::metrics::MeterProvider; //! use opentelemetry::trace::{Span, Tracer, TracerProvider as _}; @@ -13,6 +16,8 @@ //! use opentelemetry_sdk::runtime; //! use opentelemetry_sdk::trace::TracerProvider; //! +//! use opentelemetry_sdk::logs::LoggerProvider; +//! //! fn init_trace() -> TracerProvider { //! let exporter = opentelemetry_stdout::SpanExporter::default(); //! TracerProvider::builder() @@ -26,13 +31,22 @@ //! SdkMeterProvider::builder().with_reader(reader).build() //! } //! +//! fn init_logs() -> LoggerProvider { +//! let exporter = opentelemetry_stdout::LogExporter::default(); +//! LoggerProvider::builder() +//! .with_simple_exporter(exporter) +//! .build() +//! } +//! //! let tracer_provider = init_trace(); //! let meter_provider = init_metrics(); +//! let logger_provider = init_logs(); //! -//! // recorded traces and metrics will now be sent to stdout: +//! // recorded traces, metrics and logs will now be sent to stdout: //! //! // {"resourceMetrics":{"resource":{"attributes":[{"key":"service.name","value":{"str.. //! // {"resourceSpans":[{"resource":{"attributes":[{"key":"service.name","value":{"stri.. +//! // {"resourceLogs": [{"resource": {"attributes": [{"key": "service.name", "value": {"str.. //! # } //! 
``` #![warn(missing_debug_implementations, missing_docs)] From c7ab26b6efe68291171f92f6fbda811db934b865 Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Wed, 24 Jan 2024 18:26:41 -0800 Subject: [PATCH 07/13] Code coverage config cleanup (#1492) --- .github/codecov.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/codecov.yaml b/.github/codecov.yaml index de1952b664b..317422a6f22 100644 --- a/.github/codecov.yaml +++ b/.github/codecov.yaml @@ -22,9 +22,9 @@ ignore: - "opentelemetry-jaeger/examples" - "opentelemetry-zipkin/examples" - "opentelemetry-otlp/examples" - - "opentelemetry-aws/examples" - - "opentelemetry-datadog/examples" - - "opentelemetry-dynatrace/examples" - "opentelemetry-http/examples" - "opentelemetry-prometheus/examples" - - "opentelemetry-zpages/examples" + - "opentelemetry-appender-tracing/examples" + - "opentelemetry-appender-log/examples" + # stress test + - "stress" From 27b19b60261f342cec0559f26634ca8f02ed02ac Mon Sep 17 00:00:00 2001 From: Lalit Kumar Bhasin Date: Wed, 24 Jan 2024 20:08:55 -0800 Subject: [PATCH 08/13] Update OTLP proto version to 1.1.0 (#1482) --- opentelemetry-proto/CHANGELOG.md | 1 + .../grpcio/opentelemetry.proto.logs.v1.rs | 14 +++- .../grpcio/opentelemetry.proto.metrics.v1.rs | 8 +- .../grpcio/opentelemetry.proto.trace.v1.rs | 75 ++++++++++++++++++ .../src/proto/opentelemetry-proto | 2 +- .../tonic/opentelemetry.proto.logs.v1.rs | 14 +++- .../tonic/opentelemetry.proto.metrics.v1.rs | 8 +- .../tonic/opentelemetry.proto.trace.v1.rs | 78 +++++++++++++++++++ opentelemetry-proto/src/transform/trace.rs | 4 + opentelemetry-stdout/src/trace/transform.rs | 2 + 10 files changed, 197 insertions(+), 9 deletions(-) diff --git a/opentelemetry-proto/CHANGELOG.md b/opentelemetry-proto/CHANGELOG.md index b0f410a9cb4..f0a0d916c5a 100644 --- a/opentelemetry-proto/CHANGELOG.md +++ b/opentelemetry-proto/CHANGELOG.md @@ -5,6 +5,7 @@ ### Added - Add `schemars::JsonSchema` trait support with 
`with-schemars` feature (#1419) +- Update protobuf definitions to v1.1.0 (#1154) ## v0.4.0 diff --git a/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.logs.v1.rs b/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.logs.v1.rs index 41cc23b1e35..3c2310e3e8e 100644 --- a/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.logs.v1.rs +++ b/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.logs.v1.rs @@ -30,6 +30,9 @@ pub struct ResourceLogs { /// A list of ScopeLogs that originate from a resource. #[prost(message, repeated, tag = "2")] pub scope_logs: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the resource data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_logs" field which have their own schema_url field. #[prost(string, tag = "3")] @@ -47,6 +50,9 @@ pub struct ScopeLogs { /// A list of log records. #[prost(message, repeated, tag = "2")] pub log_records: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the log data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to all logs in the "logs" field. #[prost(string, tag = "3")] pub schema_url: ::prost::alloc::string::String, @@ -230,9 +236,11 @@ impl SeverityNumber { } } } -/// LogRecordFlags is defined as a protobuf 'uint32' type and is to be used as -/// bit-fields. Each non-zero value defined in this enum is a bit-mask. -/// To extract the bit-field, for example, use an expression like: +/// LogRecordFlags represents constants used to interpret the +/// LogRecord.flags field, which is protobuf 'fixed32' type and is to +/// be used as bit-fields. Each non-zero value defined in this enum is +/// a bit-mask. 
To extract the bit-field, for example, use an +/// expression like: /// /// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) /// diff --git a/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.metrics.v1.rs b/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.metrics.v1.rs index 3322eb69834..ce044ccc659 100644 --- a/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.metrics.v1.rs +++ b/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.metrics.v1.rs @@ -30,6 +30,9 @@ pub struct ResourceMetrics { /// A list of metrics that originate from a resource. #[prost(message, repeated, tag = "2")] pub scope_metrics: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the resource data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_metrics" field which have their own schema_url field. #[prost(string, tag = "3")] @@ -47,6 +50,9 @@ pub struct ScopeMetrics { /// A list of metrics that originate from an instrumentation library. #[prost(message, repeated, tag = "2")] pub metrics: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the metric data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to all metrics in the "metrics" field. #[prost(string, tag = "3")] pub schema_url: ::prost::alloc::string::String, @@ -139,7 +145,7 @@ pub struct ScopeMetrics { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Metric { - /// name of the metric, including its DNS name prefix. It must be unique. + /// name of the metric. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// description of the metric, which can be used in documentation. 
diff --git a/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.trace.v1.rs index 3a5411d1b8a..9843c9e000f 100644 --- a/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.trace.v1.rs +++ b/opentelemetry-proto/src/proto/grpcio/opentelemetry.proto.trace.v1.rs @@ -30,6 +30,9 @@ pub struct ResourceSpans { /// A list of ScopeSpans that originate from a resource. #[prost(message, repeated, tag = "2")] pub scope_spans: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the resource data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_spans" field which have their own schema_url field. #[prost(string, tag = "3")] @@ -47,6 +50,9 @@ pub struct ScopeSpans { /// A list of Spans that originate from an instrumentation scope. #[prost(message, repeated, tag = "2")] pub spans: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the span data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to all spans and span events in the "spans" field. #[prost(string, tag = "3")] pub schema_url: ::prost::alloc::string::String, @@ -82,6 +88,22 @@ pub struct Span { /// field must be empty. The ID is an 8-byte array. #[prost(bytes = "vec", tag = "4")] pub parent_span_id: ::prost::alloc::vec::Vec, + /// Flags, a bit field. 8 least significant bits are the trace + /// flags as defined in W3C Trace Context specification. Readers + /// MUST not assume that 24 most significant bits will be zero. + /// To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. 
+ /// + /// When creating span messages, if the message is logically forwarded from another source + /// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + /// be copied as-is. If creating from a source that does not have an equivalent flags field + /// (such as a runtime representation of an OpenTelemetry span), the high 24 bits MUST + /// be set to zero. + /// + /// \[Optional\]. + /// + /// See for the flag definitions. + #[prost(fixed32, tag = "16")] + pub flags: u32, /// A description of the span's operation. /// /// For example, the name can be a qualified method name or a file name @@ -209,6 +231,16 @@ pub mod span { /// then no attributes were dropped. #[prost(uint32, tag = "5")] pub dropped_attributes_count: u32, + /// Flags, a bit field. 8 least significant bits are the trace + /// flags as defined in W3C Trace Context specification. Readers + /// MUST not assume that 24 most significant bits will be zero. + /// When creating new spans, the most-significant 24-bits MUST be + /// zero. To read the 8-bit W3C trace flag (use flags & + /// SPAN_FLAGS_TRACE_FLAGS_MASK). \[Optional\]. + /// + /// See for the flag definitions. + #[prost(fixed32, tag = "6")] + pub flags: u32, } /// SpanKind is the type of span. Can be used to specify additional relationships between spans /// in addition to a parent/child relationship. @@ -335,3 +367,46 @@ pub mod status { } } } +/// SpanFlags represents constants used to interpret the +/// Span.flags field, which is protobuf 'fixed32' type and is to +/// be used as bit-fields. Each non-zero value defined in this enum is +/// a bit-mask. To extract the bit-field, for example, use an +/// expression like: +/// +/// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +/// +/// See for the flag definitions. +/// +/// Note that Span flags were introduced in version 1.1 of the +/// OpenTelemetry protocol. 
Older Span producers do not set this +/// field, consequently consumers should not rely on the absence of a +/// particular flag bit to indicate the presence of a particular feature. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SpanFlags { + /// The zero value for the enum. Should not be used for comparisons. + /// Instead use bitwise "and" with the appropriate mask as shown above. + DoNotUse = 0, + /// Bits 0-7 are used for trace flags. + TraceFlagsMask = 255, +} +impl SpanFlags { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SpanFlags::DoNotUse => "SPAN_FLAGS_DO_NOT_USE", + SpanFlags::TraceFlagsMask => "SPAN_FLAGS_TRACE_FLAGS_MASK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SPAN_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse), + "SPAN_FLAGS_TRACE_FLAGS_MASK" => Some(Self::TraceFlagsMask), + _ => None, + } + } +} diff --git a/opentelemetry-proto/src/proto/opentelemetry-proto b/opentelemetry-proto/src/proto/opentelemetry-proto index c4dfbc51f3c..4ca4f0335c6 160000 --- a/opentelemetry-proto/src/proto/opentelemetry-proto +++ b/opentelemetry-proto/src/proto/opentelemetry-proto @@ -1 +1 @@ -Subproject commit c4dfbc51f3cd4089778555a2ac5d9bc093ed2956 +Subproject commit 4ca4f0335c63cda7ab31ea7ed70d6553aee14dce diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs index f35572d1643..f8f5f6d0f07 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.logs.v1.rs @@ -36,6 +36,9 @@ pub struct ResourceLogs { /// A list of ScopeLogs that originate from a resource. #[prost(message, repeated, tag = "2")] pub scope_logs: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the resource data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_logs" field which have their own schema_url field. #[prost(string, tag = "3")] @@ -56,6 +59,9 @@ pub struct ScopeLogs { /// A list of log records. #[prost(message, repeated, tag = "2")] pub log_records: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the log data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to all logs in the "logs" field. 
#[prost(string, tag = "3")] pub schema_url: ::prost::alloc::string::String, @@ -245,9 +251,11 @@ impl SeverityNumber { } } } -/// LogRecordFlags is defined as a protobuf 'uint32' type and is to be used as -/// bit-fields. Each non-zero value defined in this enum is a bit-mask. -/// To extract the bit-field, for example, use an expression like: +/// LogRecordFlags represents constants used to interpret the +/// LogRecord.flags field, which is protobuf 'fixed32' type and is to +/// be used as bit-fields. Each non-zero value defined in this enum is +/// a bit-mask. To extract the bit-field, for example, use an +/// expression like: /// /// (logRecord.flags & LOG_RECORD_FLAGS_TRACE_FLAGS_MASK) /// diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs index 8f3675479bb..46d262bc190 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.metrics.v1.rs @@ -36,6 +36,9 @@ pub struct ResourceMetrics { /// A list of metrics that originate from a resource. #[prost(message, repeated, tag = "2")] pub scope_metrics: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the resource data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_metrics" field which have their own schema_url field. #[prost(string, tag = "3")] @@ -56,6 +59,9 @@ pub struct ScopeMetrics { /// A list of metrics that originate from an instrumentation library. #[prost(message, repeated, tag = "2")] pub metrics: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the metric data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to all metrics in the "metrics" field. 
#[prost(string, tag = "3")] pub schema_url: ::prost::alloc::string::String, @@ -151,7 +157,7 @@ pub struct ScopeMetrics { #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Metric { - /// name of the metric, including its DNS name prefix. It must be unique. + /// name of the metric. #[prost(string, tag = "1")] pub name: ::prost::alloc::string::String, /// description of the metric, which can be used in documentation. diff --git a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs index 80e2067e9d2..c8a6d72c8e9 100644 --- a/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs +++ b/opentelemetry-proto/src/proto/tonic/opentelemetry.proto.trace.v1.rs @@ -37,6 +37,9 @@ pub struct ResourceSpans { /// A list of ScopeSpans that originate from a resource. #[prost(message, repeated, tag = "2")] pub scope_spans: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the resource data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to the data in the "resource" field. It does not apply /// to the data in the "scope_spans" field which have their own schema_url field. #[prost(string, tag = "3")] @@ -58,6 +61,9 @@ pub struct ScopeSpans { /// A list of Spans that originate from an instrumentation scope. #[prost(message, repeated, tag = "2")] pub spans: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the span data + /// is recorded in. To learn more about Schema URL see + /// /// This schema_url applies to all spans and span events in the "spans" field. #[prost(string, tag = "3")] pub schema_url: ::prost::alloc::string::String, @@ -118,6 +124,22 @@ pub struct Span { ) )] pub parent_span_id: ::prost::alloc::vec::Vec, + /// Flags, a bit field. 
8 least significant bits are the trace + /// flags as defined in W3C Trace Context specification. Readers + /// MUST not assume that 24 most significant bits will be zero. + /// To read the 8-bit W3C trace flag, use `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + /// + /// When creating span messages, if the message is logically forwarded from another source + /// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + /// be copied as-is. If creating from a source that does not have an equivalent flags field + /// (such as a runtime representation of an OpenTelemetry span), the high 24 bits MUST + /// be set to zero. + /// + /// \[Optional\]. + /// + /// See for the flag definitions. + #[prost(fixed32, tag = "16")] + pub flags: u32, /// A description of the span's operation. /// /// For example, the name can be a qualified method name or a file name @@ -253,6 +275,16 @@ pub mod span { /// then no attributes were dropped. #[prost(uint32, tag = "5")] pub dropped_attributes_count: u32, + /// Flags, a bit field. 8 least significant bits are the trace + /// flags as defined in W3C Trace Context specification. Readers + /// MUST not assume that 24 most significant bits will be zero. + /// When creating new spans, the most-significant 24-bits MUST be + /// zero. To read the 8-bit W3C trace flag (use flags & + /// SPAN_FLAGS_TRACE_FLAGS_MASK). \[Optional\]. + /// + /// See for the flag definitions. + #[prost(fixed32, tag = "6")] + pub flags: u32, } /// SpanKind is the type of span. Can be used to specify additional relationships between spans /// in addition to a parent/child relationship. @@ -388,3 +420,49 @@ pub mod status { } } } +/// SpanFlags represents constants used to interpret the +/// Span.flags field, which is protobuf 'fixed32' type and is to +/// be used as bit-fields. Each non-zero value defined in this enum is +/// a bit-mask. 
To extract the bit-field, for example, use an +/// expression like: +/// +/// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +/// +/// See for the flag definitions. +/// +/// Note that Span flags were introduced in version 1.1 of the +/// OpenTelemetry protocol. Older Span producers do not set this +/// field, consequently consumers should not rely on the absence of a +/// particular flag bit to indicate the presence of a particular feature. +#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))] +#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SpanFlags { + /// The zero value for the enum. Should not be used for comparisons. + /// Instead use bitwise "and" with the appropriate mask as shown above. + DoNotUse = 0, + /// Bits 0-7 are used for trace flags. + TraceFlagsMask = 255, +} +impl SpanFlags { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SpanFlags::DoNotUse => "SPAN_FLAGS_DO_NOT_USE", + SpanFlags::TraceFlagsMask => "SPAN_FLAGS_TRACE_FLAGS_MASK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SPAN_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse), + "SPAN_FLAGS_TRACE_FLAGS_MASK" => Some(Self::TraceFlagsMask), + _ => None, + } + } +} diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs index a2dbf2dfe7a..a6597a50a56 100644 --- a/opentelemetry-proto/src/transform/trace.rs +++ b/opentelemetry-proto/src/transform/trace.rs @@ -40,6 +40,7 @@ pub mod tonic { trace_state: link.span_context.trace_state().header(), attributes: Attributes::from(link.attributes).0, dropped_attributes_count: link.dropped_attributes_count, + flags: link.span_context.trace_flags().to_u8() as u32, } } } @@ -76,6 +77,7 @@ pub mod tonic { vec![] } }, + flags: source_span.span_context.trace_flags().to_u8() as u32, name: source_span.name.into_owned(), kind: span_kind as i32, start_time_unix_nano: to_nanos(source_span.start_time), @@ -151,6 +153,7 @@ pub mod grpcio { trace_state: link.span_context.trace_state().header(), attributes: Attributes::from(link.attributes).0, dropped_attributes_count: link.dropped_attributes_count, + flags: link.span_context.trace_flags().to_u8() as u32, } } } @@ -187,6 +190,7 @@ pub mod grpcio { vec![] } }, + flags: source_span.span_context.trace_flags().to_u8() as u32, name: source_span.name.into_owned(), kind: span_kind as i32, start_time_unix_nano: to_nanos(source_span.start_time), diff --git a/opentelemetry-stdout/src/trace/transform.rs b/opentelemetry-stdout/src/trace/transform.rs index 9af26986c60..335ddc12858 100644 --- a/opentelemetry-stdout/src/trace/transform.rs +++ b/opentelemetry-stdout/src/trace/transform.rs @@ -84,6 +84,7 @@ struct Span { #[serde(skip_serializing_if = "Vec::is_empty")] events: Vec, dropped_events_count: u32, + flags: u32, #[serde(skip_serializing_if = "Vec::is_empty")] links: Vec, dropped_links_count: u32, @@ -108,6 +109,7 @@ impl From for Span { dropped_attributes_count: value.dropped_attributes_count, 
attributes: value.attributes.into_iter().map(Into::into).collect(), dropped_events_count: value.events.dropped_count, + flags: value.span_context.trace_flags().to_u8() as u32, events: value.events.into_iter().map(Into::into).collect(), dropped_links_count: value.links.dropped_count, links: value.links.iter().map(Into::into).collect(), From 26600476b3c235450928c1fdc9af457e1b803686 Mon Sep 17 00:00:00 2001 From: Cosmin Lazar Date: Thu, 25 Jan 2024 18:17:47 +0100 Subject: [PATCH 09/13] Consolidate BatchConfig creation and validation via BatchConfigBuilder (#1480) --- .../src/exporter/config/agent.rs | 6 +- .../src/exporter/config/collector/mod.rs | 6 +- opentelemetry-sdk/CHANGELOG.md | 2 + .../benches/batch_span_processor.rs | 10 +- opentelemetry-sdk/src/logs/log_processor.rs | 175 +++++----- opentelemetry-sdk/src/logs/mod.rs | 3 +- opentelemetry-sdk/src/trace/mod.rs | 3 +- opentelemetry-sdk/src/trace/span_processor.rs | 319 ++++++++++++------ 8 files changed, 320 insertions(+), 204 deletions(-) diff --git a/opentelemetry-jaeger/src/exporter/config/agent.rs b/opentelemetry-jaeger/src/exporter/config/agent.rs index 8bc3945c71c..f65b31625c6 100644 --- a/opentelemetry-jaeger/src/exporter/config/agent.rs +++ b/opentelemetry-jaeger/src/exporter/config/agent.rs @@ -237,11 +237,13 @@ impl AgentPipeline { /// # Examples /// Set max queue size. 
/// ```rust - /// use opentelemetry_sdk::trace::BatchConfig; + /// use opentelemetry_sdk::trace::BatchConfigBuilder; /// /// let pipeline = opentelemetry_jaeger::new_agent_pipeline() /// .with_batch_processor_config( - /// BatchConfig::default().with_max_queue_size(200) + /// BatchConfigBuilder::default() + /// .with_max_queue_size(200) + /// .build() /// ); /// /// ``` diff --git a/opentelemetry-jaeger/src/exporter/config/collector/mod.rs b/opentelemetry-jaeger/src/exporter/config/collector/mod.rs index 0b6794ffffe..f97ba931a50 100644 --- a/opentelemetry-jaeger/src/exporter/config/collector/mod.rs +++ b/opentelemetry-jaeger/src/exporter/config/collector/mod.rs @@ -380,11 +380,13 @@ impl CollectorPipeline { /// # Examples /// Set max queue size. /// ```rust - /// use opentelemetry_sdk::trace::BatchConfig; + /// use opentelemetry_sdk::trace::BatchConfigBuilder; /// /// let pipeline = opentelemetry_jaeger::new_collector_pipeline() /// .with_batch_processor_config( - /// BatchConfig::default().with_max_queue_size(200) + /// BatchConfigBuilder::default() + /// .with_max_queue_size(200) + /// .build() /// ); /// /// ``` diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index a68a812d9c1..27e8bee4c7b 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -29,6 +29,8 @@ - **Breaking** Remove `TextMapCompositePropagator` [#1373](https://github.com/open-telemetry/opentelemetry-rust/pull/1373). Use `TextMapCompositePropagator` in opentelemetry API. - [#1375](https://github.com/open-telemetry/opentelemetry-rust/pull/1375/) Fix metric collections during PeriodicReader shutdown +- **Breaking** [#1480](https://github.com/open-telemetry/opentelemetry-rust/pull/1480) Remove fine grained `BatchConfig` configurations from `BatchLogProcessorBuilder` and `BatchSpanProcessorBuilder`. 
Use `BatchConfigBuilder` to construct a `BatchConfig` instance and pass it using `BatchLogProcessorBuilder::with_batch_config` or `BatchSpanProcessorBuilder::with_batch_config`. +- **Breaking** [#1480](https://github.com/open-telemetry/opentelemetry-rust/pull/1480) Remove mutating functions from `BatchConfig`, use `BatchConfigBuilder` to construct a `BatchConfig` instance. ## v0.21.2 diff --git a/opentelemetry-sdk/benches/batch_span_processor.rs b/opentelemetry-sdk/benches/batch_span_processor.rs index 4e2301e203d..7b6c096f4e5 100644 --- a/opentelemetry-sdk/benches/batch_span_processor.rs +++ b/opentelemetry-sdk/benches/batch_span_processor.rs @@ -5,7 +5,9 @@ use opentelemetry::trace::{ use opentelemetry_sdk::export::trace::SpanData; use opentelemetry_sdk::runtime::Tokio; use opentelemetry_sdk::testing::trace::NoopSpanExporter; -use opentelemetry_sdk::trace::{BatchSpanProcessor, SpanEvents, SpanLinks, SpanProcessor}; +use opentelemetry_sdk::trace::{ + BatchConfigBuilder, BatchSpanProcessor, SpanEvents, SpanLinks, SpanProcessor, +}; use opentelemetry_sdk::Resource; use std::borrow::Cow; use std::sync::Arc; @@ -52,7 +54,11 @@ fn criterion_benchmark(c: &mut Criterion) { rt.block_on(async move { let span_processor = BatchSpanProcessor::builder(NoopSpanExporter::new(), Tokio) - .with_max_queue_size(10_000) + .with_batch_config( + BatchConfigBuilder::default() + .with_max_queue_size(10_000) + .build(), + ) .build(); let mut shared_span_processor = Arc::new(span_processor); let mut handles = Vec::with_capacity(10); diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index f62e68413b4..82f0ce39d63 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -13,7 +13,7 @@ use opentelemetry::{ global, logs::{LogError, LogResult}, }; -use std::{env, sync::Mutex}; +use std::{cmp::min, env, sync::Mutex}; use std::{ fmt::{self, Debug, Formatter}, str::FromStr, @@ -246,7 
+246,7 @@ impl BatchLogProcessor { { BatchLogProcessorBuilder { exporter, - config: BatchConfig::default(), + config: Default::default(), runtime, } } @@ -276,7 +276,8 @@ where } } -/// Batch log processor configuration +/// Batch log processor configuration. +/// Use [`BatchConfigBuilder`] to configure your own instance of [`BatchConfig`]. #[derive(Debug)] pub struct BatchConfig { /// The maximum queue size to buffer logs for delayed processing. If the @@ -299,55 +300,36 @@ pub struct BatchConfig { impl Default for BatchConfig { fn default() -> Self { - let mut config = BatchConfig { + BatchConfigBuilder::default().build() + } +} + +/// A builder for creating [`BatchConfig`] instances. +#[derive(Debug)] +pub struct BatchConfigBuilder { + max_queue_size: usize, + scheduled_delay: Duration, + max_export_batch_size: usize, + max_export_timeout: Duration, +} + +impl Default for BatchConfigBuilder { + /// Create a new [`BatchConfigBuilder`] initialized with default batch config values as per the specs. + /// The values are overriden by environment variables if set. + /// For a list of supported environment variables see [Batch LogRecord Processor](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#batch-logrecord-processor). 
+ fn default() -> Self { + BatchConfigBuilder { max_queue_size: OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, scheduled_delay: Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT), max_export_batch_size: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, max_export_timeout: Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT), - }; - - if let Some(max_queue_size) = env::var(OTEL_BLRP_MAX_QUEUE_SIZE) - .ok() - .and_then(|queue_size| usize::from_str(&queue_size).ok()) - { - config.max_queue_size = max_queue_size; - } - - if let Some(max_export_batch_size) = env::var(OTEL_BLRP_MAX_EXPORT_BATCH_SIZE) - .ok() - .and_then(|batch_size| usize::from_str(&batch_size).ok()) - { - config.max_export_batch_size = max_export_batch_size; } - - // max export batch size must be less or equal to max queue size. - // we set max export batch size to max queue size if it's larger than max queue size. - if config.max_export_batch_size > config.max_queue_size { - config.max_export_batch_size = config.max_queue_size; - } - - if let Some(scheduled_delay) = env::var(OTEL_BLRP_SCHEDULE_DELAY) - .ok() - .or_else(|| env::var("OTEL_BLRP_SCHEDULE_DELAY_MILLIS").ok()) - .and_then(|delay| u64::from_str(&delay).ok()) - { - config.scheduled_delay = Duration::from_millis(scheduled_delay); - } - - if let Some(max_export_timeout) = env::var(OTEL_BLRP_EXPORT_TIMEOUT) - .ok() - .or_else(|| env::var("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS").ok()) - .and_then(|s| u64::from_str(&s).ok()) - { - config.max_export_timeout = Duration::from_millis(max_export_timeout); - } - - config + .init_from_env_vars() } } -impl BatchConfig { - /// Set max_queue_size for [`BatchConfig`]. +impl BatchConfigBuilder { + /// Set max_queue_size for [`BatchConfigBuilder`]. /// It's the maximum queue size to buffer logs for delayed processing. /// If the queue gets full it will drop the logs. /// The default value of is 2048. @@ -356,7 +338,7 @@ impl BatchConfig { self } - /// Set scheduled_delay for [`BatchConfig`]. 
+ /// Set scheduled_delay for [`BatchConfigBuilder`]. /// It's the delay interval in milliseconds between two consecutive processing of batches. /// The default value is 1000 milliseconds. pub fn with_scheduled_delay(mut self, scheduled_delay: Duration) -> Self { @@ -364,7 +346,7 @@ impl BatchConfig { self } - /// Set max_export_timeout for [`BatchConfig`]. + /// Set max_export_timeout for [`BatchConfigBuilder`]. /// It's the maximum duration to export a batch of data. /// The default value is 30000 milliseconds. pub fn with_max_export_timeout(mut self, max_export_timeout: Duration) -> Self { @@ -372,7 +354,7 @@ impl BatchConfig { self } - /// Set max_export_batch_size for [`BatchConfig`]. + /// Set max_export_batch_size for [`BatchConfigBuilder`]. /// It's the maximum number of logs to process in a single batch. If there are /// more than one batch worth of logs then it processes multiple batches /// of logs one batch after the other without any delay. @@ -381,6 +363,55 @@ impl BatchConfig { self.max_export_batch_size = max_export_batch_size; self } + + /// Builds a `BatchConfig` enforcing the following invariants: + /// * `max_export_batch_size` must be less than or equal to `max_queue_size`. + pub fn build(self) -> BatchConfig { + // max export batch size must be less or equal to max queue size. + // we set max export batch size to max queue size if it's larger than max queue size. 
+ let max_export_batch_size = min(self.max_export_batch_size, self.max_queue_size); + + BatchConfig { + max_queue_size: self.max_queue_size, + scheduled_delay: self.scheduled_delay, + max_export_timeout: self.max_export_timeout, + max_export_batch_size, + } + } + + fn init_from_env_vars(mut self) -> Self { + if let Some(max_queue_size) = env::var(OTEL_BLRP_MAX_QUEUE_SIZE) + .ok() + .and_then(|queue_size| usize::from_str(&queue_size).ok()) + { + self.max_queue_size = max_queue_size; + } + + if let Some(max_export_batch_size) = env::var(OTEL_BLRP_MAX_EXPORT_BATCH_SIZE) + .ok() + .and_then(|batch_size| usize::from_str(&batch_size).ok()) + { + self.max_export_batch_size = max_export_batch_size; + } + + if let Some(scheduled_delay) = env::var(OTEL_BLRP_SCHEDULE_DELAY) + .ok() + .or_else(|| env::var("OTEL_BLRP_SCHEDULE_DELAY_MILLIS").ok()) + .and_then(|delay| u64::from_str(&delay).ok()) + { + self.scheduled_delay = Duration::from_millis(scheduled_delay); + } + + if let Some(max_export_timeout) = env::var(OTEL_BLRP_EXPORT_TIMEOUT) + .ok() + .or_else(|| env::var("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS").ok()) + .and_then(|s| u64::from_str(&s).ok()) + { + self.max_export_timeout = Duration::from_millis(max_export_timeout); + } + + self + } } /// A builder for creating [`BatchLogProcessor`] instances. @@ -397,44 +428,6 @@ where E: LogExporter + 'static, R: RuntimeChannel, { - /// Set max queue size for batches - pub fn with_max_queue_size(self, size: usize) -> Self { - let mut config = self.config; - config.max_queue_size = size; - - BatchLogProcessorBuilder { config, ..self } - } - - /// Set scheduled delay for batches - pub fn with_scheduled_delay(self, delay: Duration) -> Self { - let mut config = self.config; - config.scheduled_delay = delay; - - BatchLogProcessorBuilder { config, ..self } - } - - /// Set max timeout for exporting. 
- pub fn with_max_timeout(self, timeout: Duration) -> Self { - let mut config = self.config; - config.max_export_timeout = timeout; - - BatchLogProcessorBuilder { config, ..self } - } - - /// Set max export size for batches, should always less than or equals to max queue size. - /// - /// If input is larger than max queue size, will lower it to be equal to max queue size - pub fn with_max_export_batch_size(self, size: usize) -> Self { - let mut config = self.config; - if size > config.max_queue_size { - config.max_export_batch_size = config.max_queue_size; - } else { - config.max_export_batch_size = size; - } - - BatchLogProcessorBuilder { config, ..self } - } - /// Set the BatchConfig for [`BatchLogProcessorBuilder`] pub fn with_batch_config(self, config: BatchConfig) -> Self { BatchLogProcessorBuilder { config, ..self } @@ -471,7 +464,7 @@ mod tests { OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, }, - BatchConfig, + BatchConfig, BatchConfigBuilder, }, runtime, testing::logs::InMemoryLogsExporter, @@ -590,11 +583,12 @@ mod tests { #[test] fn test_batch_config_with_fields() { - let batch = BatchConfig::default() + let batch = BatchConfigBuilder::default() .with_max_export_batch_size(1) .with_scheduled_delay(Duration::from_millis(2)) .with_max_export_timeout(Duration::from_millis(3)) - .with_max_queue_size(4); + .with_max_queue_size(4) + .build(); assert_eq!(batch.max_export_batch_size, 1); assert_eq!(batch.scheduled_delay, Duration::from_millis(2)); @@ -640,11 +634,12 @@ mod tests { #[test] fn test_build_batch_log_processor_builder_with_custom_config() { - let expected = BatchConfig::default() + let expected = BatchConfigBuilder::default() .with_max_export_batch_size(1) .with_scheduled_delay(Duration::from_millis(2)) .with_max_export_timeout(Duration::from_millis(3)) - .with_max_queue_size(4); + .with_max_queue_size(4) + .build(); let builder = 
BatchLogProcessor::builder(InMemoryLogsExporter::default(), runtime::Tokio) .with_batch_config(expected); diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs index 2b92f9a9730..e4c476d15c9 100644 --- a/opentelemetry-sdk/src/logs/mod.rs +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -7,7 +7,8 @@ mod log_processor; pub use config::{config, Config}; pub use log_emitter::{Builder, Logger, LoggerProvider}; pub use log_processor::{ - BatchConfig, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, SimpleLogProcessor, + BatchConfig, BatchConfigBuilder, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, + SimpleLogProcessor, }; #[cfg(all(test, feature = "testing"))] diff --git a/opentelemetry-sdk/src/trace/mod.rs b/opentelemetry-sdk/src/trace/mod.rs index ea1ab74856f..df9478a6af1 100644 --- a/opentelemetry-sdk/src/trace/mod.rs +++ b/opentelemetry-sdk/src/trace/mod.rs @@ -26,7 +26,8 @@ pub use sampler::{Sampler, ShouldSample}; pub use span::Span; pub use span_limit::SpanLimits; pub use span_processor::{ - BatchConfig, BatchSpanProcessor, BatchSpanProcessorBuilder, SimpleSpanProcessor, SpanProcessor, + BatchConfig, BatchConfigBuilder, BatchSpanProcessor, BatchSpanProcessorBuilder, + SimpleSpanProcessor, SpanProcessor, }; pub use tracer::Tracer; diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index cd156fd9524..53f497eedff 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -49,6 +49,7 @@ use opentelemetry::{ trace::{TraceError, TraceResult}, Context, }; +use std::cmp::min; use std::{env, fmt, str::FromStr, thread, time::Duration}; /// Delay interval between two consecutive exports. 
@@ -227,6 +228,7 @@ enum Message { /// # { /// use opentelemetry::global; /// use opentelemetry_sdk::{runtime, testing::trace::NoopSpanExporter, trace}; +/// use opentelemetry_sdk::trace::BatchConfigBuilder; /// use std::time::Duration; /// /// #[tokio::main] @@ -236,7 +238,7 @@ enum Message { /// /// // Create a batch span processor using an exporter and a runtime /// let batch = trace::BatchSpanProcessor::builder(exporter, runtime::Tokio) -/// .with_max_queue_size(4096) +/// .with_batch_config(BatchConfigBuilder::default().with_max_queue_size(4096).build()) /// .build(); /// /// // Then use the `with_batch_exporter` method to have the provider export spans in batches. @@ -497,13 +499,14 @@ impl BatchSpanProcessor { { BatchSpanProcessorBuilder { exporter, - config: BatchConfig::default(), + config: Default::default(), runtime, } } } -/// Batch span processor configuration +/// Batch span processor configuration. +/// Use [`BatchConfigBuilder`] to configure your own instance of [`BatchConfig`]. #[derive(Debug)] pub struct BatchConfig { /// The maximum queue size to buffer spans for delayed processing. If the @@ -533,63 +536,38 @@ pub struct BatchConfig { impl Default for BatchConfig { fn default() -> Self { - let mut config = BatchConfig { + BatchConfigBuilder::default().build() + } +} + +/// A builder for creating [`BatchConfig`] instances. +#[derive(Debug)] +pub struct BatchConfigBuilder { + max_queue_size: usize, + scheduled_delay: Duration, + max_export_batch_size: usize, + max_export_timeout: Duration, + max_concurrent_exports: usize, +} + +impl Default for BatchConfigBuilder { + /// Create a new [`BatchConfigBuilder`] initialized with default batch config values as per the specs. + /// The values are overriden by environment variables if set. 
+ /// For a list of supported environment variables see [Batch LogRecord Processor](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#batch-span-processor). + fn default() -> Self { + BatchConfigBuilder { max_queue_size: OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT, scheduled_delay: Duration::from_millis(OTEL_BSP_SCHEDULE_DELAY_DEFAULT), max_export_batch_size: OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT, max_export_timeout: Duration::from_millis(OTEL_BSP_EXPORT_TIMEOUT_DEFAULT), max_concurrent_exports: OTEL_BSP_MAX_CONCURRENT_EXPORTS_DEFAULT, - }; - - if let Some(max_concurrent_exports) = env::var(OTEL_BSP_MAX_CONCURRENT_EXPORTS) - .ok() - .and_then(|max_concurrent_exports| usize::from_str(&max_concurrent_exports).ok()) - { - config.max_concurrent_exports = max_concurrent_exports; - } - - if let Some(max_queue_size) = env::var(OTEL_BSP_MAX_QUEUE_SIZE) - .ok() - .and_then(|queue_size| usize::from_str(&queue_size).ok()) - { - config.max_queue_size = max_queue_size; - } - - if let Some(scheduled_delay) = env::var(OTEL_BSP_SCHEDULE_DELAY) - .ok() - .or_else(|| env::var("OTEL_BSP_SCHEDULE_DELAY_MILLIS").ok()) - .and_then(|delay| u64::from_str(&delay).ok()) - { - config.scheduled_delay = Duration::from_millis(scheduled_delay); - } - - if let Some(max_export_batch_size) = env::var(OTEL_BSP_MAX_EXPORT_BATCH_SIZE) - .ok() - .and_then(|batch_size| usize::from_str(&batch_size).ok()) - { - config.max_export_batch_size = max_export_batch_size; - } - - // max export batch size must be less or equal to max queue size. - // we set max export batch size to max queue size if it's larger than max queue size. 
- if config.max_export_batch_size > config.max_queue_size { - config.max_export_batch_size = config.max_queue_size; - } - - if let Some(max_export_timeout) = env::var(OTEL_BSP_EXPORT_TIMEOUT) - .ok() - .or_else(|| env::var("OTEL_BSP_EXPORT_TIMEOUT_MILLIS").ok()) - .and_then(|timeout| u64::from_str(&timeout).ok()) - { - config.max_export_timeout = Duration::from_millis(max_export_timeout); } - - config + .init_from_env_vars() } } -impl BatchConfig { - /// Set max_queue_size for [`BatchConfig`]. +impl BatchConfigBuilder { + /// Set max_queue_size for [`BatchConfigBuilder`]. /// It's the maximum queue size to buffer spans for delayed processing. /// If the queue gets full it will drops the spans. /// The default value of is 2048. @@ -598,7 +576,7 @@ impl BatchConfig { self } - /// Set max_export_batch_size for [`BatchConfig`]. + /// Set max_export_batch_size for [`BatchConfigBuilder`]. /// It's the maximum number of spans to process in a single batch. If there are /// more than one batch worth of spans then it processes multiple batches /// of spans one batch after the other without any delay. The default value @@ -608,7 +586,7 @@ impl BatchConfig { self } - /// Set max_concurrent_exports for [`BatchConfig`]. + /// Set max_concurrent_exports for [`BatchConfigBuilder`]. /// It's the maximum number of concurrent exports. /// Limits the number of spawned tasks for exports and thus memory consumed by an exporter. /// The default value is 1. @@ -619,7 +597,7 @@ impl BatchConfig { self } - /// Set scheduled_delay_duration for [`BatchConfig`]. + /// Set scheduled_delay_duration for [`BatchConfigBuilder`]. /// It's the delay interval in milliseconds between two consecutive processing of batches. /// The default value is 5000 milliseconds. pub fn with_scheduled_delay(mut self, scheduled_delay: Duration) -> Self { @@ -627,13 +605,76 @@ impl BatchConfig { self } - /// Set max_export_timeout for [`BatchConfig`]. + /// Set max_export_timeout for [`BatchConfigBuilder`]. 
/// It's the maximum duration to export a batch of data. /// The The default value is 30000 milliseconds. pub fn with_max_export_timeout(mut self, max_export_timeout: Duration) -> Self { self.max_export_timeout = max_export_timeout; self } + + /// Builds a `BatchConfig` enforcing the following invariants: + /// * `max_export_batch_size` must be less than or equal to `max_queue_size`. + pub fn build(self) -> BatchConfig { + // max export batch size must be less or equal to max queue size. + // we set max export batch size to max queue size if it's larger than max queue size. + let max_export_batch_size = min(self.max_export_batch_size, self.max_queue_size); + + BatchConfig { + max_queue_size: self.max_queue_size, + scheduled_delay: self.scheduled_delay, + max_export_timeout: self.max_export_timeout, + max_concurrent_exports: self.max_concurrent_exports, + max_export_batch_size, + } + } + + fn init_from_env_vars(mut self) -> Self { + if let Some(max_concurrent_exports) = env::var(OTEL_BSP_MAX_CONCURRENT_EXPORTS) + .ok() + .and_then(|max_concurrent_exports| usize::from_str(&max_concurrent_exports).ok()) + { + self.max_concurrent_exports = max_concurrent_exports; + } + + if let Some(max_queue_size) = env::var(OTEL_BSP_MAX_QUEUE_SIZE) + .ok() + .and_then(|queue_size| usize::from_str(&queue_size).ok()) + { + self.max_queue_size = max_queue_size; + } + + if let Some(scheduled_delay) = env::var(OTEL_BSP_SCHEDULE_DELAY) + .ok() + .or_else(|| env::var("OTEL_BSP_SCHEDULE_DELAY_MILLIS").ok()) + .and_then(|delay| u64::from_str(&delay).ok()) + { + self.scheduled_delay = Duration::from_millis(scheduled_delay); + } + + if let Some(max_export_batch_size) = env::var(OTEL_BSP_MAX_EXPORT_BATCH_SIZE) + .ok() + .and_then(|batch_size| usize::from_str(&batch_size).ok()) + { + self.max_export_batch_size = max_export_batch_size; + } + + // max export batch size must be less or equal to max queue size. + // we set max export batch size to max queue size if it's larger than max queue size. 
+ if self.max_export_batch_size > self.max_queue_size { + self.max_export_batch_size = self.max_queue_size; + } + + if let Some(max_export_timeout) = env::var(OTEL_BSP_EXPORT_TIMEOUT) + .ok() + .or_else(|| env::var("OTEL_BSP_EXPORT_TIMEOUT_MILLIS").ok()) + .and_then(|timeout| u64::from_str(&timeout).ok()) + { + self.max_export_timeout = Duration::from_millis(max_export_timeout); + } + + self + } } /// A builder for creating [`BatchSpanProcessor`] instances. @@ -650,54 +691,6 @@ where E: SpanExporter + 'static, R: RuntimeChannel, { - /// Set max queue size for batches - pub fn with_max_queue_size(self, size: usize) -> Self { - let mut config = self.config; - config.max_queue_size = size; - - BatchSpanProcessorBuilder { config, ..self } - } - - /// Set scheduled delay for batches - pub fn with_scheduled_delay(self, delay: Duration) -> Self { - let mut config = self.config; - config.scheduled_delay = delay; - - BatchSpanProcessorBuilder { config, ..self } - } - - /// Set max timeout for exporting. - pub fn with_max_timeout(self, timeout: Duration) -> Self { - let mut config = self.config; - config.max_export_timeout = timeout; - - BatchSpanProcessorBuilder { config, ..self } - } - - /// Set max export size for batches, should always less than or equals to max queue size. - /// - /// If input is larger than max queue size, will lower it to be equal to max queue size - pub fn with_max_export_batch_size(self, size: usize) -> Self { - let mut config = self.config; - if size > config.max_queue_size { - config.max_export_batch_size = config.max_queue_size; - } else { - config.max_export_batch_size = size; - } - - BatchSpanProcessorBuilder { config, ..self } - } - - /// Set the maximum number of concurrent exports - /// - /// This setting may be useful for limiting network throughput or memory - /// consumption. 
- pub fn with_max_concurrent_exports(self, max: usize) -> Self { - let mut config = self.config; - config.max_concurrent_exports = max; - BatchSpanProcessorBuilder { config, ..self } - } - /// Set the BatchConfig for [BatchSpanProcessorBuilder] pub fn with_batch_config(self, config: BatchConfig) -> Self { BatchSpanProcessorBuilder { config, ..self } @@ -721,7 +714,10 @@ mod tests { use crate::testing::trace::{ new_test_export_span_data, new_test_exporter, new_tokio_test_exporter, }; - use crate::trace::{BatchConfig, SpanEvents, SpanLinks}; + use crate::trace::span_processor::{ + OTEL_BSP_EXPORT_TIMEOUT_DEFAULT, OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT, + }; + use crate::trace::{BatchConfig, BatchConfigBuilder, SpanEvents, SpanLinks}; use async_trait::async_trait; use opentelemetry::trace::{SpanContext, SpanId, SpanKind, Status}; use std::fmt::Debug; @@ -768,14 +764,125 @@ mod tests { assert!(rx_shutdown.try_recv().is_ok()); } + #[test] + fn test_default_const_values() { + assert_eq!(OTEL_BSP_MAX_QUEUE_SIZE, "OTEL_BSP_MAX_QUEUE_SIZE"); + assert_eq!(OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT, 2048); + assert_eq!(OTEL_BSP_SCHEDULE_DELAY, "OTEL_BSP_SCHEDULE_DELAY"); + assert_eq!(OTEL_BSP_SCHEDULE_DELAY_DEFAULT, 5000); + assert_eq!( + OTEL_BSP_MAX_EXPORT_BATCH_SIZE, + "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + ); + assert_eq!(OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT, 512); + assert_eq!(OTEL_BSP_EXPORT_TIMEOUT, "OTEL_BSP_EXPORT_TIMEOUT"); + assert_eq!(OTEL_BSP_EXPORT_TIMEOUT_DEFAULT, 30000); + } + + #[test] + fn test_default_batch_config_adheres_to_specification() { + let config = BatchConfig::default(); + + assert_eq!( + config.scheduled_delay, + Duration::from_millis(OTEL_BSP_SCHEDULE_DELAY_DEFAULT) + ); + assert_eq!( + config.max_export_timeout, + Duration::from_millis(OTEL_BSP_EXPORT_TIMEOUT_DEFAULT) + ); + assert_eq!(config.max_queue_size, OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT); + assert_eq!( + config.max_export_batch_size, + OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT + ); + } + + #[test] + fn 
test_batch_config_configurable_by_env_vars() { + let env_vars = vec![ + (OTEL_BSP_SCHEDULE_DELAY, Some("2000")), + (OTEL_BSP_EXPORT_TIMEOUT, Some("60000")), + (OTEL_BSP_MAX_QUEUE_SIZE, Some("4096")), + (OTEL_BSP_MAX_EXPORT_BATCH_SIZE, Some("1024")), + ]; + + let config = temp_env::with_vars(env_vars, BatchConfig::default); + + assert_eq!(config.scheduled_delay, Duration::from_millis(2000)); + assert_eq!(config.max_export_timeout, Duration::from_millis(60000)); + assert_eq!(config.max_queue_size, 4096); + assert_eq!(config.max_export_batch_size, 1024); + } + + #[test] + fn test_batch_config_configurable_by_env_vars_millis() { + let env_vars = vec![ + ("OTEL_BSP_SCHEDULE_DELAY_MILLIS", Some("3000")), + ("OTEL_BSP_EXPORT_TIMEOUT_MILLIS", Some("70000")), + ]; + + let config = temp_env::with_vars(env_vars, BatchConfig::default); + + assert_eq!(config.scheduled_delay, Duration::from_millis(3000)); + assert_eq!(config.max_export_timeout, Duration::from_millis(70000)); + assert_eq!(config.max_queue_size, OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT); + assert_eq!( + config.max_export_batch_size, + OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT + ); + } + + #[test] + fn test_batch_config_configurable_by_env_vars_precedence() { + let env_vars = vec![ + (OTEL_BSP_SCHEDULE_DELAY, Some("2000")), + ("OTEL_BSP_SCHEDULE_DELAY_MILLIS", Some("3000")), + (OTEL_BSP_EXPORT_TIMEOUT, Some("60000")), + ("OTEL_BSP_EXPORT_TIMEOUT_MILLIS", Some("70000")), + ]; + + let config = temp_env::with_vars(env_vars, BatchConfig::default); + + assert_eq!(config.scheduled_delay, Duration::from_millis(2000)); + assert_eq!(config.max_export_timeout, Duration::from_millis(60000)); + assert_eq!(config.max_queue_size, OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT); + assert_eq!( + config.max_export_batch_size, + OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT + ); + } + + #[test] + fn test_batch_config_max_export_batch_size_validation() { + let env_vars = vec![ + (OTEL_BSP_MAX_QUEUE_SIZE, Some("256")), + (OTEL_BSP_MAX_EXPORT_BATCH_SIZE, Some("1024")), 
+ ]; + + let config = temp_env::with_vars(env_vars, BatchConfig::default); + + assert_eq!(config.max_queue_size, 256); + assert_eq!(config.max_export_batch_size, 256); + assert_eq!( + config.scheduled_delay, + Duration::from_millis(OTEL_BSP_SCHEDULE_DELAY_DEFAULT) + ); + assert_eq!( + config.max_export_timeout, + Duration::from_millis(OTEL_BSP_EXPORT_TIMEOUT_DEFAULT) + ); + } + #[test] fn test_batch_config_with_fields() { - let batch = BatchConfig::default() + let batch = BatchConfigBuilder::default() .with_max_export_batch_size(10) .with_scheduled_delay(Duration::from_millis(10)) .with_max_export_timeout(Duration::from_millis(10)) .with_max_concurrent_exports(10) - .with_max_queue_size(10); + .with_max_queue_size(10) + .build(); assert_eq!(batch.max_export_batch_size, 10); assert_eq!(batch.scheduled_delay, Duration::from_millis(10)); assert_eq!(batch.max_export_timeout, Duration::from_millis(10)); From 85f678a6012661eaf3b6dd272a54ce9b82958d74 Mon Sep 17 00:00:00 2001 From: Cosmin Lazar Date: Thu, 25 Jan 2024 23:12:31 +0100 Subject: [PATCH 10/13] Remove Batch LogRecord&Span Processor configuration via non-standard environment variables (#1495) --- opentelemetry-sdk/CHANGELOG.md | 8 ++++ opentelemetry-sdk/src/logs/log_processor.rs | 46 ++---------------- opentelemetry-sdk/src/trace/span_processor.rs | 47 +++---------------- 3 files changed, 19 insertions(+), 82 deletions(-) diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md index 27e8bee4c7b..8eb3fd3d391 100644 --- a/opentelemetry-sdk/CHANGELOG.md +++ b/opentelemetry-sdk/CHANGELOG.md @@ -31,6 +31,14 @@ - [#1375](https://github.com/open-telemetry/opentelemetry-rust/pull/1375/) Fix metric collections during PeriodicReader shutdown - **Breaking** [#1480](https://github.com/open-telemetry/opentelemetry-rust/pull/1480) Remove fine grained `BatchConfig` configurations from `BatchLogProcessorBuilder` and `BatchSpanProcessorBuilder`. 
Use `BatchConfigBuilder` to construct a `BatchConfig` instance and pass it using `BatchLogProcessorBuilder::with_batch_config` or `BatchSpanProcessorBuilder::with_batch_config`. - **Breaking** [#1480](https://github.com/open-telemetry/opentelemetry-rust/pull/1480) Remove mutating functions from `BatchConfig`, use `BatchConfigBuilder` to construct a `BatchConfig` instance. +- **Breaking** [#1495](https://github.com/open-telemetry/opentelemetry-rust/pull/1495) Remove Batch LogRecord&Span Processor configuration via non-standard environment variables. Use the following table to migrate from the no longer supported non-standard environment variables to the standard ones. + +| No longer supported | Standard equivalent | +|---------------------------------|---------------------------| +| OTEL_BLRP_SCHEDULE_DELAY_MILLIS | OTEL_BLRP_SCHEDULE_DELAY | +| OTEL_BLRP_EXPORT_TIMEOUT_MILLIS | OTEL_BLRP_EXPORT_TIMEOUT | +| OTEL_BSP_SCHEDULE_DELAY_MILLIS | OTEL_BSP_SCHEDULE_DELAY | +| OTEL_BSP_EXPORT_TIMEOUT_MILLIS | OTEL_BSP_EXPORT_TIMEOUT | ## v0.21.2 diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs index 82f0ce39d63..4b7c10499b0 100644 --- a/opentelemetry-sdk/src/logs/log_processor.rs +++ b/opentelemetry-sdk/src/logs/log_processor.rs @@ -316,7 +316,11 @@ pub struct BatchConfigBuilder { impl Default for BatchConfigBuilder { /// Create a new [`BatchConfigBuilder`] initialized with default batch config values as per the specs. /// The values are overriden by environment variables if set. - /// For a list of supported environment variables see [Batch LogRecord Processor](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#batch-logrecord-processor). 
+ /// The supported environment variables are: + /// * `OTEL_BLRP_MAX_QUEUE_SIZE` + /// * `OTEL_BLRP_SCHEDULE_DELAY` + /// * `OTEL_BLRP_MAX_EXPORT_BATCH_SIZE` + /// * `OTEL_BLRP_EXPORT_TIMEOUT` fn default() -> Self { BatchConfigBuilder { max_queue_size: OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, @@ -396,7 +400,6 @@ impl BatchConfigBuilder { if let Some(scheduled_delay) = env::var(OTEL_BLRP_SCHEDULE_DELAY) .ok() - .or_else(|| env::var("OTEL_BLRP_SCHEDULE_DELAY_MILLIS").ok()) .and_then(|delay| u64::from_str(&delay).ok()) { self.scheduled_delay = Duration::from_millis(scheduled_delay); @@ -404,7 +407,6 @@ impl BatchConfigBuilder { if let Some(max_export_timeout) = env::var(OTEL_BLRP_EXPORT_TIMEOUT) .ok() - .or_else(|| env::var("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS").ok()) .and_then(|s| u64::from_str(&s).ok()) { self.max_export_timeout = Duration::from_millis(max_export_timeout); @@ -522,44 +524,6 @@ mod tests { assert_eq!(config.max_export_batch_size, 1024); } - #[test] - fn test_batch_config_configurable_by_env_vars_millis() { - let env_vars = vec![ - ("OTEL_BLRP_SCHEDULE_DELAY_MILLIS", Some("3000")), - ("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS", Some("70000")), - ]; - - let config = temp_env::with_vars(env_vars, BatchConfig::default); - - assert_eq!(config.scheduled_delay, Duration::from_millis(3000)); - assert_eq!(config.max_export_timeout, Duration::from_millis(70000)); - assert_eq!(config.max_queue_size, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT); - assert_eq!( - config.max_export_batch_size, - OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT - ); - } - - #[test] - fn test_batch_config_configurable_by_env_vars_precedence() { - let env_vars = vec![ - (OTEL_BLRP_SCHEDULE_DELAY, Some("2000")), - ("OTEL_BLRP_SCHEDULE_DELAY_MILLIS", Some("3000")), - (OTEL_BLRP_EXPORT_TIMEOUT, Some("60000")), - ("OTEL_BLRP_EXPORT_TIMEOUT_MILLIS", Some("70000")), - ]; - - let config = temp_env::with_vars(env_vars, BatchConfig::default); - - assert_eq!(config.scheduled_delay, Duration::from_millis(2000)); - 
assert_eq!(config.max_export_timeout, Duration::from_millis(60000)); - assert_eq!(config.max_queue_size, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT); - assert_eq!( - config.max_export_batch_size, - OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT - ); - } - #[test] fn test_batch_config_max_export_batch_size_validation() { let env_vars = vec![ diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs index 53f497eedff..c9230a16037 100644 --- a/opentelemetry-sdk/src/trace/span_processor.rs +++ b/opentelemetry-sdk/src/trace/span_processor.rs @@ -553,7 +553,12 @@ pub struct BatchConfigBuilder { impl Default for BatchConfigBuilder { /// Create a new [`BatchConfigBuilder`] initialized with default batch config values as per the specs. /// The values are overriden by environment variables if set. - /// For a list of supported environment variables see [Batch LogRecord Processor](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#batch-span-processor). 
+ /// The supported environment variables are: + /// * `OTEL_BSP_MAX_QUEUE_SIZE` + /// * `OTEL_BSP_SCHEDULE_DELAY` + /// * `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` + /// * `OTEL_BSP_EXPORT_TIMEOUT` + /// * `OTEL_BSP_MAX_CONCURRENT_EXPORTS` fn default() -> Self { BatchConfigBuilder { max_queue_size: OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT, @@ -646,7 +651,6 @@ impl BatchConfigBuilder { if let Some(scheduled_delay) = env::var(OTEL_BSP_SCHEDULE_DELAY) .ok() - .or_else(|| env::var("OTEL_BSP_SCHEDULE_DELAY_MILLIS").ok()) .and_then(|delay| u64::from_str(&delay).ok()) { self.scheduled_delay = Duration::from_millis(scheduled_delay); @@ -667,7 +671,6 @@ impl BatchConfigBuilder { if let Some(max_export_timeout) = env::var(OTEL_BSP_EXPORT_TIMEOUT) .ok() - .or_else(|| env::var("OTEL_BSP_EXPORT_TIMEOUT_MILLIS").ok()) .and_then(|timeout| u64::from_str(&timeout).ok()) { self.max_export_timeout = Duration::from_millis(max_export_timeout); @@ -815,44 +818,6 @@ mod tests { assert_eq!(config.max_export_batch_size, 1024); } - #[test] - fn test_batch_config_configurable_by_env_vars_millis() { - let env_vars = vec![ - ("OTEL_BSP_SCHEDULE_DELAY_MILLIS", Some("3000")), - ("OTEL_BSP_EXPORT_TIMEOUT_MILLIS", Some("70000")), - ]; - - let config = temp_env::with_vars(env_vars, BatchConfig::default); - - assert_eq!(config.scheduled_delay, Duration::from_millis(3000)); - assert_eq!(config.max_export_timeout, Duration::from_millis(70000)); - assert_eq!(config.max_queue_size, OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT); - assert_eq!( - config.max_export_batch_size, - OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT - ); - } - - #[test] - fn test_batch_config_configurable_by_env_vars_precedence() { - let env_vars = vec![ - (OTEL_BSP_SCHEDULE_DELAY, Some("2000")), - ("OTEL_BSP_SCHEDULE_DELAY_MILLIS", Some("3000")), - (OTEL_BSP_EXPORT_TIMEOUT, Some("60000")), - ("OTEL_BSP_EXPORT_TIMEOUT_MILLIS", Some("70000")), - ]; - - let config = temp_env::with_vars(env_vars, BatchConfig::default); - - assert_eq!(config.scheduled_delay, 
Duration::from_millis(2000)); - assert_eq!(config.max_export_timeout, Duration::from_millis(60000)); - assert_eq!(config.max_queue_size, OTEL_BSP_MAX_QUEUE_SIZE_DEFAULT); - assert_eq!( - config.max_export_batch_size, - OTEL_BSP_MAX_EXPORT_BATCH_SIZE_DEFAULT - ); - } - #[test] fn test_batch_config_max_export_batch_size_validation() { let env_vars = vec![ From 433c1b34e27ed2b4659b89f69fc1559dbd4e887e Mon Sep 17 00:00:00 2001 From: Cijo Thomas Date: Thu, 25 Jan 2024 17:53:23 -0800 Subject: [PATCH 11/13] Fix tracing grpc example (#1493) Cleaned up readme as we already removed tracing-opentelemetry and jaeger from it. --- examples/README.md | 5 +++-- examples/tracing-grpc/Cargo.toml | 1 + examples/tracing-grpc/README.md | 23 ++++++++++++----------- examples/tracing-grpc/src/client.rs | 12 ++++++++++-- examples/tracing-grpc/src/server.rs | 12 ++++++++++-- 5 files changed, 36 insertions(+), 17 deletions(-) diff --git a/examples/README.md b/examples/README.md index d35a65eaa38..4984d12298f 100644 --- a/examples/README.md +++ b/examples/README.md @@ -32,9 +32,10 @@ This example uses following crates from this repo: **Tracing** This example uses following crates from this repo: + - opentelemetry(tracing) -- opentelemetry-jaeger +- opentelemetry-stdout The application is built using `tokio`. -Check this example if you want to understand *how to integrate tracing with opentelemetry*. \ No newline at end of file +Check this example if you want to understand *how to create spans and propagate/restore context in OpenTelemetry*. 
diff --git a/examples/tracing-grpc/Cargo.toml b/examples/tracing-grpc/Cargo.toml index 400a8f4e695..786fa7bd2e9 100644 --- a/examples/tracing-grpc/Cargo.toml +++ b/examples/tracing-grpc/Cargo.toml @@ -20,6 +20,7 @@ opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace prost = { workspace = true } tokio = { workspace = true, features = ["full"] } tonic = { workspace = true } +serde_json = { workspace = true } [build-dependencies] tonic-build = "0.9.2" diff --git a/examples/tracing-grpc/README.md b/examples/tracing-grpc/README.md index 4e37a7370c1..69c25503017 100644 --- a/examples/tracing-grpc/README.md +++ b/examples/tracing-grpc/README.md @@ -1,24 +1,25 @@ # GRPC example -Example showing [Tonic] client and server interaction with OpenTelemetry context propagation. [tracing_opentelemetry](https://docs.rs/tracing-opentelemetry/0.4.0/tracing_opentelemetry/) is used to hook into the [tracing](https://github.com/tokio-rs/tracing) ecosystem, which enables drop-in replacements for [log](https://github.com/rust-lang/log) macros and an `#[instrument]` macro that will automatically add spans to your functions. +Example showing [Tonic] client and server interaction with OpenTelemetry context +propagation. Traces are exported to stdout. [Tonic]: https://github.com/hyperium/tonic -Examples --------- +## Running the example ```shell -# Run jaeger in background -$ docker run -d -p6831:6831/udp -p6832:6832/udp -p16686:16686 jaegertracing/all-in-one:latest - -# Run the server +# Run the server first $ cargo run --bin grpc-server # Now run the client to make a request to the server $ cargo run --bin grpc-client - -# View spans (see the image below) -$ firefox http://localhost:16686/ ``` -![Jaeger UI](trace.png) +Observe that the traces are exported to stdout, and that they share the same +TraceId. Also, the server span would be parented to the client span. 
The example +demonstrates how to propagate and restore OpenTelemetry context when making +out-of-process calls, so as to ensure the same trace is continued in the next +process. The client here initiates the trace by creating the root client span, +and it propagates its context to the server. The server, extracts the context, +and creates its own server span using the extracted context, ensuring both spans +are correlated. diff --git a/examples/tracing-grpc/src/client.rs b/examples/tracing-grpc/src/client.rs index 02e15dea6f3..c35dee079e2 100644 --- a/examples/tracing-grpc/src/client.rs +++ b/examples/tracing-grpc/src/client.rs @@ -4,7 +4,7 @@ use opentelemetry::{global, propagation::Injector}; use opentelemetry_sdk::{ propagation::TraceContextPropagator, runtime::Tokio, trace::TracerProvider, }; -use opentelemetry_stdout::SpanExporter; +use opentelemetry_stdout::SpanExporterBuilder; use opentelemetry::{ trace::{SpanKind, TraceContextExt, Tracer}, @@ -15,7 +15,15 @@ fn init_tracer() { global::set_text_map_propagator(TraceContextPropagator::new()); // Install stdout exporter pipeline to be able to retrieve the collected spans. 
let provider = TracerProvider::builder() - .with_batch_exporter(SpanExporter::default(), Tokio) + .with_batch_exporter( + SpanExporterBuilder::default() + .with_encoder(|writer, data| { + serde_json::to_writer_pretty(writer, &data).unwrap(); + Ok(()) + }) + .build(), + Tokio, + ) .build(); global::set_tracer_provider(provider); diff --git a/examples/tracing-grpc/src/server.rs b/examples/tracing-grpc/src/server.rs index 3831907cf00..c3ffe444729 100644 --- a/examples/tracing-grpc/src/server.rs +++ b/examples/tracing-grpc/src/server.rs @@ -8,14 +8,22 @@ use opentelemetry::{ use opentelemetry_sdk::{ propagation::TraceContextPropagator, runtime::Tokio, trace::TracerProvider, }; -use opentelemetry_stdout::SpanExporter; +use opentelemetry_stdout::SpanExporterBuilder; use tonic::{transport::Server, Request, Response, Status}; fn init_tracer() { global::set_text_map_propagator(TraceContextPropagator::new()); // Install stdout exporter pipeline to be able to retrieve the collected spans. let provider = TracerProvider::builder() - .with_batch_exporter(SpanExporter::default(), Tokio) + .with_batch_exporter( + SpanExporterBuilder::default() + .with_encoder(|writer, data| { + serde_json::to_writer_pretty(writer, &data).unwrap(); + Ok(()) + }) + .build(), + Tokio, + ) .build(); global::set_tracer_provider(provider); From 0101233973ca8d635970bf7231c7eccda0e9764e Mon Sep 17 00:00:00 2001 From: Matthew Boddewyn <31598686+mattbodd@users.noreply.github.com> Date: Fri, 26 Jan 2024 08:54:16 -0800 Subject: [PATCH 12/13] Create new opentelemetry-jaeger-propagator library (#1487) --- Cargo.toml | 1 + opentelemetry-jaeger-propagator/CHANGELOG.md | 9 + opentelemetry-jaeger-propagator/Cargo.toml | 37 + opentelemetry-jaeger-propagator/LICENSE | 201 +++++ opentelemetry-jaeger-propagator/README.md | 39 + opentelemetry-jaeger-propagator/src/lib.rs | 95 +++ .../src/propagator.rs | 725 ++++++++++++++++++ .../src/testing/jaeger_api_v2.rs | 448 +++++++++++ .../src/testing/mod.rs | 85 ++ 
opentelemetry-jaeger/CHANGELOG.md | 5 + opentelemetry-jaeger/Cargo.toml | 3 +- opentelemetry-jaeger/README.md | 8 +- opentelemetry-jaeger/src/lib.rs | 540 +------------ scripts/lint.sh | 2 + 14 files changed, 1664 insertions(+), 534 deletions(-) create mode 100644 opentelemetry-jaeger-propagator/CHANGELOG.md create mode 100644 opentelemetry-jaeger-propagator/Cargo.toml create mode 100644 opentelemetry-jaeger-propagator/LICENSE create mode 100644 opentelemetry-jaeger-propagator/README.md create mode 100644 opentelemetry-jaeger-propagator/src/lib.rs create mode 100644 opentelemetry-jaeger-propagator/src/propagator.rs create mode 100644 opentelemetry-jaeger-propagator/src/testing/jaeger_api_v2.rs create mode 100644 opentelemetry-jaeger-propagator/src/testing/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 9cb1ec12466..e5254c884d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "opentelemetry-jaeger", "opentelemetry-jaeger/examples/actix-udp", "opentelemetry-jaeger/examples/remote-sampler", + "opentelemetry-jaeger-propagator", "opentelemetry-appender-log", "opentelemetry-appender-tracing", "opentelemetry-otlp", diff --git a/opentelemetry-jaeger-propagator/CHANGELOG.md b/opentelemetry-jaeger-propagator/CHANGELOG.md new file mode 100644 index 00000000000..bf7d1c9a424 --- /dev/null +++ b/opentelemetry-jaeger-propagator/CHANGELOG.md @@ -0,0 +1,9 @@ +# Changelog + +## vNext + +## v0.1.0 + +### Added + +- As part of the gradual deprecation of the exporter functionality of the opentelemetry-jaeger crate, move the opentelemetry-jaeger propagator functionality to a new crate named opentelemetry-jaeger-propagator [#1487](https://github.com/open-telemetry/opentelemetry-rust/pull/1487) \ No newline at end of file diff --git a/opentelemetry-jaeger-propagator/Cargo.toml b/opentelemetry-jaeger-propagator/Cargo.toml new file mode 100644 index 00000000000..55da916ad8b --- /dev/null +++ b/opentelemetry-jaeger-propagator/Cargo.toml @@ -0,0 +1,37 @@ 
+[package] +name = "opentelemetry-jaeger-propagator" +version = "0.1.0" +description = "Jaeger propagator for OpenTelemetry" +homepage = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger-propagator" +repository = "https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger-propagator" +readme = "README.md" +categories = [ + "development-tools::debugging", + "development-tools::profiling", + "asynchronous", +] +keywords = ["opentelemetry", "jaeger", "propagator"] +license = "Apache-2.0" +edition = "2021" +rust-version = "1.65" + + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +opentelemetry = { version = "0.21", default-features = false, features = [ + "trace", +], path = "../opentelemetry" } + +tonic = { workspace = true, optional = true } +prost = { version = "0.11.6", optional = true } +prost-types = { version = "0.11.6", optional = true } + +[dev-dependencies] +opentelemetry_sdk = { features = ["testing"], path = "../opentelemetry-sdk" } + +[features] +default = [] +integration_test = [] diff --git a/opentelemetry-jaeger-propagator/LICENSE b/opentelemetry-jaeger-propagator/LICENSE new file mode 100644 index 00000000000..23a2acabc4e --- /dev/null +++ b/opentelemetry-jaeger-propagator/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2023 The OpenTelemetry Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/opentelemetry-jaeger-propagator/README.md b/opentelemetry-jaeger-propagator/README.md new file mode 100644 index 00000000000..191f1aad3f7 --- /dev/null +++ b/opentelemetry-jaeger-propagator/README.md @@ -0,0 +1,39 @@ +![OpenTelemetry — An observability framework for cloud-native software.][splash] + +[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png + +# OpenTelemetry Jaeger Propagator + +[`Jaeger`] propagator integration for applications instrumented with [`OpenTelemetry`]. To export telemetry to Jaeger, use the opentelemetry-otlp crate. 
+ +[![Crates.io: opentelemetry-jaeger-propagator](https://img.shields.io/crates/v/opentelemetry-jaeger-propagator.svg)](https://crates.io/crates/opentelemetry-jaeger-propagator) +[![Documentation](https://docs.rs/opentelemetry-jaeger-propagator/badge.svg)](https://docs.rs/opentelemetry-jaeger-propagator) +[![LICENSE](https://img.shields.io/crates/l/opentelemetry-jaeger-propagator)](./LICENSE) +[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) + +## Overview + +[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, +generate, collect, and export telemetry data (metrics, logs, and traces) for +analysis in order to understand your software's performance and behavior. This +crate provides the ability to create and interact with a Jaeger propagator. + +*Compiler support: [requires `rustc` 1.64+][msrv]* + +[`Jaeger`]: https://www.jaegertracing.io/ +[`OpenTelemetry`]: https://crates.io/crates/opentelemetry +[msrv]: #supported-rust-versions + +## Supported Rust Versions + +OpenTelemetry is built against the latest stable release. The minimum supported +version is 1.64. The current OpenTelemetry version is not guaranteed to build +on Rust versions earlier than the minimum supported version. + +The current stable Rust compiler and the three most recent minor versions +before it will always be supported. For example, if the current stable compiler +version is 1.64, the minimum supported version will not be increased past 1.61, +three minor versions prior. Increasing the minimum supported compiler version +is not considered a semver breaking change as long as doing so complies with +this policy.
diff --git a/opentelemetry-jaeger-propagator/src/lib.rs b/opentelemetry-jaeger-propagator/src/lib.rs new file mode 100644 index 00000000000..79f08a3355d --- /dev/null +++ b/opentelemetry-jaeger-propagator/src/lib.rs @@ -0,0 +1,95 @@ +//! *Compiler support: [requires `rustc` 1.64+][msrv]* +//! +//! [Jaeger Docs]: https://www.jaegertracing.io/docs/ +//! [jaeger-deprecation]: https://github.com/open-telemetry/opentelemetry-specification/pull/2858/files +//! [jaeger-otlp]: https://www.jaegertracing.io/docs/1.38/apis/#opentelemetry-protocol-stable +//! [otlp-exporter]: https://docs.rs/opentelemetry-otlp/latest/opentelemetry_otlp/ +//! [msrv]: #supported-rust-versions +//! [jaeger propagation format]: https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format +//! +//! # Supported Rust Versions +//! +//! OpenTelemetry is built against the latest stable release. The minimum +//! supported version is 1.64. The current OpenTelemetry version is not +//! guaranteed to build on Rust versions earlier than the minimum supported +//! version. +//! +//! The current stable Rust compiler and the three most recent minor versions +//! before it will always be supported. For example, if the current stable +//! compiler version is 1.64, the minimum supported version will not be +//! increased past 1.61, three minor versions prior. Increasing the minimum +//! supported compiler version is not considered a semver breaking change as +//! long as doing so complies with this policy.
+#![warn( + future_incompatible, + missing_debug_implementations, + missing_docs, + nonstandard_style, + rust_2018_idioms, + unreachable_pub, + unused +)] +#![cfg_attr( + docsrs, + feature(doc_cfg, doc_auto_cfg), + deny(rustdoc::broken_intra_doc_links) +)] +#![doc( + html_logo_url = "https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo.svg" +)] +#![cfg_attr(test, deny(warnings))] + +/// The Jaeger propagator propagates span contexts in [Jaeger propagation format]. +/// +/// Cross-cutting concerns send their state to the next process using `Propagator`s, +/// which are defined as objects used to read and write context data to and from messages +/// exchanged by the applications. Each concern creates a set of `Propagator`s for every +/// supported `Propagator` type. +/// +/// Note that a jaeger header can be set in http header or encoded as url. +/// +/// ## Examples +/// ``` +/// # use opentelemetry::{global, trace::{Tracer, TraceContextExt}, Context}; +/// # use opentelemetry_jaeger_propagator::Propagator as JaegerPropagator; +/// # fn send_request() { +/// // setup jaeger propagator +/// global::set_text_map_propagator(JaegerPropagator::default()); +/// // You also can init propagator with custom header name +/// // global::set_text_map_propagator(JaegerPropagator::with_custom_header("my-custom-header")); +/// +/// // before sending requests to downstream services. +/// let mut headers = std::collections::HashMap::new(); // replace by http header of the outgoing request +/// let caller_span = global::tracer("caller").start("say hello"); +/// let cx = Context::current_with_span(caller_span); +/// global::get_text_map_propagator(|propagator| { +/// propagator.inject_context(&cx, &mut headers); // propagator serialize the tracing context +/// }); +/// // Send the request.. +/// # } +/// +/// +/// # fn receive_request() { +/// // Receive the request sent above on the other service... 
+/// // setup jaeger propagator +/// global::set_text_map_propagator(JaegerPropagator::new()); +/// // You also can init propagator with custom header name +/// // global::set_text_map_propagator(JaegerPropagator::with_custom_header("my-custom-header")); +/// +/// let headers = std::collections::HashMap::new(); // replace this with http header map from incoming requests. +/// let parent_context = global::get_text_map_propagator(|propagator| { +/// propagator.extract(&headers) +/// }); +/// +/// // this span's parent span will be caller_span in send_request functions. +/// let receiver_span = global::tracer("receiver").start_with_context("hello", &parent_context); +/// # } +/// ``` +/// +/// [jaeger propagation format]: https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format +pub mod propagator; +#[cfg(feature = "integration_test")] +#[doc(hidden)] +pub mod testing; + +pub use propagator::Propagator; diff --git a/opentelemetry-jaeger-propagator/src/propagator.rs b/opentelemetry-jaeger-propagator/src/propagator.rs new file mode 100644 index 00000000000..f6dcc864535 --- /dev/null +++ b/opentelemetry-jaeger-propagator/src/propagator.rs @@ -0,0 +1,725 @@ +use opentelemetry::{ + global::{self, Error}, + propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, + trace::{SpanContext, SpanId, TraceContextExt, TraceError, TraceFlags, TraceId, TraceState}, + Context, +}; +use std::borrow::Cow; +use std::str::FromStr; + +const JAEGER_HEADER: &str = "uber-trace-id"; +const JAEGER_BAGGAGE_PREFIX: &str = "uberctx-"; +const DEPRECATED_PARENT_SPAN: &str = "0"; + +const TRACE_FLAG_DEBUG: TraceFlags = TraceFlags::new(0x04); + +/// `Propagator` implements the [Jaeger propagation format]. 
+#[derive(Clone, Debug)] +pub struct Propagator { + baggage_prefix: &'static str, + header_name: &'static str, + fields: [String; 1], +} + +// Implement default using Propagator::new() to not break compatibility with previous versions +impl Default for Propagator { + fn default() -> Self { + Propagator::new() + } +} + +impl Propagator { + /// Create a Jaeger propagator + pub fn new() -> Self { + Self::with_custom_header_and_baggage(JAEGER_HEADER, JAEGER_BAGGAGE_PREFIX) + } + + /// Create a Jaeger propagator with custom header name + pub fn with_custom_header(custom_header_name: &'static str) -> Self { + Self::with_custom_header_and_baggage(custom_header_name, JAEGER_BAGGAGE_PREFIX) + } + + /// Create a Jaeger propagator with custom header name and baggage prefix + /// + /// NOTE: it'll implicitly fallback to the default header names when the name of provided custom_* is empty + /// Default header-name is `uber-trace-id` and baggage-prefix is `uberctx-` + /// The format of serialized contexts and baggages stays unchanged and does not depend + /// on provided header name and prefix. 
+ pub fn with_custom_header_and_baggage( + custom_header_name: &'static str, + custom_baggage_prefix: &'static str, + ) -> Self { + let custom_header_name = if custom_header_name.trim().is_empty() { + JAEGER_HEADER + } else { + custom_header_name + }; + + let custom_baggage_prefix = if custom_baggage_prefix.trim().is_empty() { + JAEGER_BAGGAGE_PREFIX + } else { + custom_baggage_prefix + }; + + Propagator { + baggage_prefix: custom_baggage_prefix.trim(), + header_name: custom_header_name.trim(), + fields: [custom_header_name.to_owned()], + } + } + + /// Extract span context from header value + fn extract_span_context(&self, extractor: &dyn Extractor) -> Result { + let mut header_value = Cow::from(extractor.get(self.header_name).unwrap_or("")); + // if there is no :, it means header_value could be encoded as url, try decode first + if !header_value.contains(':') { + header_value = Cow::from(header_value.replace("%3A", ":")); + } + + let parts = header_value.split_terminator(':').collect::>(); + if parts.len() != 4 { + return Err(()); + } + + // extract trace id + let trace_id = self.extract_trace_id(parts[0])?; + let span_id = self.extract_span_id(parts[1])?; + // Ignore parent span id since it's deprecated. + let flags = self.extract_trace_flags(parts[3])?; + let state = self.extract_trace_state(extractor)?; + + Ok(SpanContext::new(trace_id, span_id, flags, true, state)) + } + + /// Extract trace id from the header. + fn extract_trace_id(&self, trace_id: &str) -> Result { + if trace_id.len() > 32 { + return Err(()); + } + + TraceId::from_hex(trace_id).map_err(|_| ()) + } + + /// Extract span id from the header. + fn extract_span_id(&self, span_id: &str) -> Result { + match span_id.len() { + // exact 16 + 16 => SpanId::from_hex(span_id).map_err(|_| ()), + // more than 16 is invalid + 17.. 
=> Err(()), + // less than 16 will result in padding on left + _ => { + let padded = format!("{span_id:0>16}"); + SpanId::from_hex(&padded).map_err(|_| ()) + } + } + } + + /// Extract flag from the header + /// + /// First bit controls whether to sample + /// Second bit controls whether it's a debug trace + /// Third bit is not used. + /// Forth bit is firehose flag, which is not supported in OT now. + fn extract_trace_flags(&self, flag: &str) -> Result { + if flag.len() > 2 { + return Err(()); + } + let flag = u8::from_str(flag).map_err(|_| ())?; + if flag & 0x01 == 0x01 { + if flag & 0x02 == 0x02 { + Ok(TraceFlags::SAMPLED | TRACE_FLAG_DEBUG) + } else { + Ok(TraceFlags::SAMPLED) + } + } else { + // Debug flag should only be set when sampled flag is set. + // So if debug flag is set alone. We will just use not sampled flag + Ok(TraceFlags::default()) + } + } + + fn extract_trace_state(&self, extractor: &dyn Extractor) -> Result { + let baggage_keys = extractor + .keys() + .into_iter() + .filter(|key| key.starts_with(self.baggage_prefix)) + .filter_map(|key| { + extractor + .get(key) + .map(|value| (key.to_string(), value.to_string())) + }); + + match TraceState::from_key_value(baggage_keys) { + Ok(trace_state) => Ok(trace_state), + Err(trace_state_err) => { + global::handle_error(Error::Trace(TraceError::Other(Box::new(trace_state_err)))); + Err(()) //todo: assign an error type instead of using () + } + } + } +} + +impl TextMapPropagator for Propagator { + fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) { + let span = cx.span(); + let span_context = span.span_context(); + if span_context.is_valid() { + let flag: u8 = if span_context.is_sampled() { + if span_context.trace_flags() & TRACE_FLAG_DEBUG == TRACE_FLAG_DEBUG { + 0x03 + } else { + 0x01 + } + } else { + 0x00 + }; + let header_value = format!( + "{}:{}:{:01}:{:01x}", + span_context.trace_id(), + span_context.span_id(), + DEPRECATED_PARENT_SPAN, + flag, + ); + 
injector.set(self.header_name, header_value); + } + } + + fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { + self.extract_span_context(extractor) + .map(|sc| cx.with_remote_span_context(sc)) + .unwrap_or_else(|_| cx.clone()) + } + + fn fields(&self) -> FieldIter<'_> { + FieldIter::new(self.fields.as_ref()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use opentelemetry::{ + propagation::{Injector, TextMapPropagator}, + testing::trace::TestSpan, + trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, + Context, + }; + use std::collections::HashMap; + + const LONG_TRACE_ID_STR: &str = "000000000000004d0000000000000016"; + const SHORT_TRACE_ID_STR: &str = "4d0000000000000016"; + const TRACE_ID: u128 = 0x0000_0000_0000_004d_0000_0000_0000_0016; + const SPAN_ID_STR: &str = "0000000000017c29"; + const SHORT_SPAN_ID_STR: &str = "17c29"; + const SPAN_ID: u64 = 0x0000_0000_0001_7c29; + + fn get_extract_data() -> Vec<(&'static str, &'static str, u8, SpanContext)> { + vec![ + ( + LONG_TRACE_ID_STR, + SPAN_ID_STR, + 1, + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TraceFlags::SAMPLED, + true, + TraceState::default(), + ), + ), + ( + SHORT_TRACE_ID_STR, + SPAN_ID_STR, + 1, + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TraceFlags::SAMPLED, + true, + TraceState::default(), + ), + ), + ( + SHORT_TRACE_ID_STR, + SHORT_SPAN_ID_STR, + 1, + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TraceFlags::SAMPLED, + true, + TraceState::default(), + ), + ), + ( + LONG_TRACE_ID_STR, + SPAN_ID_STR, + 3, + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TRACE_FLAG_DEBUG | TraceFlags::SAMPLED, + true, + TraceState::default(), + ), + ), + ( + LONG_TRACE_ID_STR, + SPAN_ID_STR, + 0, + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TraceFlags::default(), + 
true, + TraceState::default(), + ), + ), + ( + "invalidtractid", + SPAN_ID_STR, + 0, + SpanContext::empty_context(), + ), + ( + LONG_TRACE_ID_STR, + "invalidspanID", + 0, + SpanContext::empty_context(), + ), + ( + LONG_TRACE_ID_STR, + SPAN_ID_STR, + 120, + SpanContext::empty_context(), + ), + ] + } + + fn get_inject_data() -> Vec<(SpanContext, String)> { + vec![ + ( + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TraceFlags::SAMPLED, + true, + TraceState::default(), + ), + format!("{}:{}:0:1", LONG_TRACE_ID_STR, SPAN_ID_STR), + ), + ( + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TraceFlags::default(), + true, + TraceState::default(), + ), + format!("{}:{}:0:0", LONG_TRACE_ID_STR, SPAN_ID_STR), + ), + ( + SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TRACE_FLAG_DEBUG | TraceFlags::SAMPLED, + true, + TraceState::default(), + ), + format!("{}:{}:0:3", LONG_TRACE_ID_STR, SPAN_ID_STR), + ), + ] + } + + /// Try to extract the context using the created Propagator with custom header name + /// from the Extractor under the `context_key` key. + fn _test_extract_with_header(construct_header: &'static str, context_key: &'static str) { + let propagator = Propagator::with_custom_header(construct_header); + for (trace_id, span_id, flag, expected) in get_extract_data() { + let mut map: HashMap = HashMap::new(); + map.set(context_key, format!("{}:{}:0:{}", trace_id, span_id, flag)); + let context = propagator.extract(&map); + assert_eq!(context.span().span_context(), &expected); + } + } + + /// Try to inject the context using the created Propagator with custom header name + /// and expect the serialized context existence under `expect_header` key. 
+ fn _test_inject_with_header(construct_header: &'static str, expect_header: &'static str) { + let propagator = Propagator::with_custom_header(construct_header); + for (span_context, header_value) in get_inject_data() { + let mut injector = HashMap::new(); + propagator.inject_context( + &Context::current_with_span(TestSpan(span_context)), + &mut injector, + ); + assert_eq!(injector.get(expect_header), Some(&header_value)); + } + } + + #[test] + fn test_propagator_creation_methods() { + // Without specifying any custom header or baggage prefix, the header and prefix wil be the default values + let default_propagator = Propagator::new(); + assert_eq!(default_propagator.header_name, JAEGER_HEADER); + assert_eq!(default_propagator.baggage_prefix, JAEGER_BAGGAGE_PREFIX); + + // Propagators are cloneable + let cloned_propagator = default_propagator.clone(); + assert_eq!( + default_propagator.header_name, + cloned_propagator.header_name + ); + + // Propagators implement debug + assert_eq!( + format!("{:?}", default_propagator), + format!( + "Propagator {{ baggage_prefix: \"{}\", header_name: \"{}\", fields: [\"{}\"] }}", + JAEGER_BAGGAGE_PREFIX, JAEGER_HEADER, JAEGER_HEADER + ) + ); + + let custom_header_propagator = Propagator::with_custom_header("custom-header"); + assert_eq!(custom_header_propagator.header_name, "custom-header"); + assert_eq!( + custom_header_propagator.baggage_prefix, + JAEGER_BAGGAGE_PREFIX + ); + + // An empty custom header will result in the default header name + let propgator_with_empty_custom_header = Propagator::with_custom_header(""); + assert_eq!( + propgator_with_empty_custom_header.header_name, + JAEGER_HEADER + ); + assert_eq!( + propgator_with_empty_custom_header.baggage_prefix, + JAEGER_BAGGAGE_PREFIX + ); + + let propagator_with_custom_header_and_baggage_prefixes = + Propagator::with_custom_header_and_baggage("custom-header", "custom-baggage-prefix"); + assert_eq!( + propagator_with_custom_header_and_baggage_prefixes.header_name, + 
"custom-header" + ); + assert_eq!( + propagator_with_custom_header_and_baggage_prefixes.baggage_prefix, + "custom-baggage-prefix" + ); + + let propagator_with_empty_prefix = + Propagator::with_custom_header_and_baggage("custom-header", ""); + assert_eq!(propagator_with_empty_prefix.header_name, "custom-header"); + assert_eq!( + propagator_with_empty_prefix.baggage_prefix, + JAEGER_BAGGAGE_PREFIX + ); + } + + #[test] + fn test_extract_span_context() { + let propagator_with_custom_header = + Propagator::with_custom_header_and_baggage("custom_header", "custom_baggage"); + let mut map: HashMap = HashMap::new(); + map.insert( + "custom_header".to_owned(), + "12345:54321:ignored_parent_span_id:3".to_owned(), + ); + assert_eq!( + propagator_with_custom_header.extract_span_context(&map), + Ok(SpanContext::new( + TraceId::from_hex("12345").unwrap(), + SpanId::from_hex("54321").unwrap(), + TRACE_FLAG_DEBUG | TraceFlags::SAMPLED, + true, + TraceState::default(), + )) + ); + + map.clear(); + let mut map: HashMap = HashMap::new(); + map.insert( + "custom_header".to_owned(), + "12345%3A54321%3Aignored_parent_span_id%3A3".to_owned(), // URL encoded + ); + assert_eq!( + propagator_with_custom_header.extract_span_context(&map), + Ok(SpanContext::new( + TraceId::from_hex("12345").unwrap(), + SpanId::from_hex("54321").unwrap(), + TRACE_FLAG_DEBUG | TraceFlags::SAMPLED, + true, + TraceState::default(), + )) + ); + + map.clear(); + map.set( + "custom_header", + "not:4:parts:long:delimited:by:colons".to_owned(), + ); + assert_eq!( + propagator_with_custom_header.extract_span_context(&map), + Err(()) + ); + + map.clear(); + map.set( + "custom_header", + "invalid_trace_id:54321:ignored_parent_span_id:3".to_owned(), + ); + assert_eq!( + propagator_with_custom_header.extract_span_context(&map), + Err(()) + ); + + map.clear(); + map.set( + "custom_header", + "12345:invalid_span_id:ignored_parent_span_id:3".to_owned(), + ); + assert_eq!( + 
propagator_with_custom_header.extract_span_context(&map), + Err(()) + ); + + map.clear(); + map.set( + "custom_header", + "12345:54321:ignored_parent_span_id:invalid_flag".to_owned(), + ); + assert_eq!( + propagator_with_custom_header.extract_span_context(&map), + Err(()) + ); + + map.clear(); + let mut map: HashMap = HashMap::new(); + map.set( + "custom_header", + "12345%3A54321%3Aignored_parent_span_id%3A3".to_owned(), // URL encoded + ); + let too_long_baggage_key = format!("{}{}", "custom_baggage", "_".repeat(256)); // A baggage key cannot be longer than 256 characters + map.set(&too_long_baggage_key, "baggage_value".to_owned()); + assert_eq!( + propagator_with_custom_header.extract_span_context(&map), + Err(()) + ); + } + + #[test] + fn test_extract_trace_id() { + let propagator = Propagator::new(); + + assert_eq!( + propagator.extract_trace_id("12345"), + Ok(TraceId::from_hex("12345").unwrap()) + ); + + // A trace cannot be more than 32 characters + assert_eq!( + propagator.extract_trace_id("1".repeat(33).as_str()), + Err(()) + ); + + // A trace id must be a valid hex-string + assert_eq!(propagator.extract_trace_id("invalid"), Err(())); + } + + #[test] + fn test_extract_span_id() { + let propgator = Propagator::new(); + assert_eq!( + propgator.extract_span_id("12345"), + Ok(SpanId::from_u64(74565)) + ); + + // Fail to extract span id with an invalid hex-string + assert_eq!(propgator.extract_span_id("invalid"), Err(())); + + // Fail to extract span id with a hex-string that is too long + assert_eq!(propgator.extract_span_id("1".repeat(17).as_str()), Err(())); + } + + #[test] + fn test_extract_trace_flags() { + let propgator = Propagator::new(); + + // Extract TraceFlags::SAMPLED flag + assert_eq!(propgator.extract_trace_flags("1"), Ok(TraceFlags::SAMPLED)); + + // Extract TraceFlags::DEBUG flag - requires TraceFlags::SAMPLED to be set + assert_eq!( + propgator.extract_trace_flags("3"), + Ok(TRACE_FLAG_DEBUG | TraceFlags::SAMPLED) + ); + + // Attempt to extract 
the TraceFlags::DEBUG flag without the TraceFlags::SAMPLED flag and receive the default TraceFlags + assert_eq!( + propgator.extract_trace_flags("2"), + Ok(TraceFlags::default()) + ); + } + + #[test] + fn test_extract_trace_state() { + let propagator = Propagator::with_custom_header_and_baggage("header", "baggage"); + + // When a type that implements Extractor has keys that start with the custom baggage prefix, they and their associated + // values are extracted into a TraceState + // In this case, no keys start with the custom baggage prefix + let mut map_of_keys_without_custom_baggage_prefix: HashMap = HashMap::new(); + map_of_keys_without_custom_baggage_prefix.set("different_prefix_1", "value_1".to_string()); + let empty_trace_state = propagator + .extract_trace_state(&map_of_keys_without_custom_baggage_prefix) + .unwrap(); + assert_eq!(empty_trace_state, TraceState::NONE); + + // In this case, all keys start with the custom baggage prefix + let mut map_of_keys_with_custom_baggage_prefix: HashMap = HashMap::new(); + map_of_keys_with_custom_baggage_prefix.set("baggage_1", "value_1".to_string()); + + let trace_state = propagator + .extract_trace_state(&map_of_keys_with_custom_baggage_prefix) + .unwrap(); + assert_eq!( + trace_state, + TraceState::from_key_value(vec![("baggage_1", "value_1")]).unwrap() + ); + + // If a key that starts with the custom baggage prefix is an invalid TraceState, the result will be Err(()) + let too_long_baggage_key = format!("{}{}", "baggage_1", "_".repeat(256)); // A baggage key cannot be longer than 256 characters + let mut map_of_invalid_keys_with_custom_baggage_prefix: HashMap = + HashMap::new(); + map_of_invalid_keys_with_custom_baggage_prefix + .set(&too_long_baggage_key, "value_1".to_string()); + assert_eq!( + propagator.extract_trace_state(&map_of_invalid_keys_with_custom_baggage_prefix), + Err(()) + ); + } + + #[test] + fn test_extract_empty() { + let map: HashMap = HashMap::new(); + let propagator = Propagator::new(); + let 
context = propagator.extract(&map); + assert_eq!(context.span().span_context(), &SpanContext::empty_context()) + } + + #[test] + fn test_inject_extract_with_default() { + let propagator = Propagator::default(); + for (span_context, header_value) in get_inject_data() { + let mut injector = HashMap::new(); + propagator.inject_context( + &Context::current_with_span(TestSpan(span_context)), + &mut injector, + ); + assert_eq!(injector.get(JAEGER_HEADER), Some(&header_value)); + } + for (trace_id, span_id, flag, expected) in get_extract_data() { + let mut map: HashMap = HashMap::new(); + map.set( + JAEGER_HEADER, + format!("{}:{}:0:{}", trace_id, span_id, flag), + ); + let context = propagator.extract(&map); + assert_eq!(context.span().span_context(), &expected); + } + } + + #[test] + fn test_extract_too_many_parts() { + let mut map: HashMap = HashMap::new(); + map.set( + JAEGER_HEADER, + format!("{}:{}:0:1:aa", LONG_TRACE_ID_STR, SPAN_ID_STR), + ); + let propagator = Propagator::new(); + let context = propagator.extract(&map); + assert_eq!(context.span().span_context(), &SpanContext::empty_context()); + } + + #[test] + fn test_extract_invalid_flag() { + let mut map: HashMap = HashMap::new(); + map.set( + JAEGER_HEADER, + format!("{}:{}:0:aa", LONG_TRACE_ID_STR, SPAN_ID_STR), + ); + let propagator = Propagator::new(); + let context = propagator.extract(&map); + assert_eq!(context.span().span_context(), &SpanContext::empty_context()); + } + + #[test] + fn test_extract_from_url() { + let mut map: HashMap = HashMap::new(); + map.set( + JAEGER_HEADER, + format!("{}%3A{}%3A0%3A1", LONG_TRACE_ID_STR, SPAN_ID_STR), + ); + let propagator = Propagator::new(); + let context = propagator.extract(&map); + assert_eq!( + context.span().span_context(), + &SpanContext::new( + TraceId::from_u128(TRACE_ID), + SpanId::from_u64(SPAN_ID), + TraceFlags::SAMPLED, + true, + TraceState::default(), + ) + ); + } + + #[test] + fn test_extract() { + _test_extract_with_header(JAEGER_HEADER, 
JAEGER_HEADER) + } + + #[test] + fn test_inject() { + _test_inject_with_header(JAEGER_HEADER, JAEGER_HEADER) + } + + #[test] + fn test_extract_with_invalid_header() { + for construct in &["", " "] { + _test_extract_with_header(construct, JAEGER_HEADER) + } + } + + #[test] + fn test_extract_with_valid_header() { + for construct in &["custom-header", "custom-header ", " custom-header "] { + _test_extract_with_header(construct, "custom-header") + } + } + + #[test] + fn test_inject_with_invalid_header() { + for construct in &["", " "] { + _test_inject_with_header(construct, JAEGER_HEADER) + } + } + + #[test] + fn test_inject_with_valid_header() { + for construct in &["custom-header", "custom-header ", " custom-header "] { + _test_inject_with_header(construct, "custom-header") + } + } + + #[test] + fn test_fields() { + let propagator = Propagator::new(); + let fields = propagator.fields().collect::>(); + assert_eq!(fields.len(), 1); + assert_eq!(fields.first().unwrap(), &JAEGER_HEADER); + } +} diff --git a/opentelemetry-jaeger-propagator/src/testing/jaeger_api_v2.rs b/opentelemetry-jaeger-propagator/src/testing/jaeger_api_v2.rs new file mode 100644 index 00000000000..467ef9f2b14 --- /dev/null +++ b/opentelemetry-jaeger-propagator/src/testing/jaeger_api_v2.rs @@ -0,0 +1,448 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyValue { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(enumeration = "ValueType", tag = "2")] + pub v_type: i32, + #[prost(string, tag = "3")] + pub v_str: ::prost::alloc::string::String, + #[prost(bool, tag = "4")] + pub v_bool: bool, + #[prost(int64, tag = "5")] + pub v_int64: i64, + #[prost(double, tag = "6")] + pub v_float64: f64, + #[prost(bytes = "vec", tag = "7")] + pub v_binary: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Log { + #[prost(message, optional, 
tag = "1")] + pub timestamp: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, repeated, tag = "2")] + pub fields: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SpanRef { + #[prost(bytes = "vec", tag = "1")] + pub trace_id: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub span_id: ::prost::alloc::vec::Vec, + #[prost(enumeration = "SpanRefType", tag = "3")] + pub ref_type: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Process { + #[prost(string, tag = "1")] + pub service_name: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub tags: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Span { + #[prost(bytes = "vec", tag = "1")] + pub trace_id: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "2")] + pub span_id: ::prost::alloc::vec::Vec, + #[prost(string, tag = "3")] + pub operation_name: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "4")] + pub references: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "5")] + pub flags: u32, + #[prost(message, optional, tag = "6")] + pub start_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "7")] + pub duration: ::core::option::Option<::prost_types::Duration>, + #[prost(message, repeated, tag = "8")] + pub tags: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "9")] + pub logs: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "10")] + pub process: ::core::option::Option, + #[prost(string, tag = "11")] + pub process_id: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "12")] + pub warnings: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, 
PartialEq, ::prost::Message)] +pub struct Trace { + #[prost(message, repeated, tag = "1")] + pub spans: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "2")] + pub process_map: ::prost::alloc::vec::Vec, + #[prost(string, repeated, tag = "3")] + pub warnings: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Nested message and enum types in `Trace`. +pub mod trace { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct ProcessMapping { + #[prost(string, tag = "1")] + pub process_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub process: ::core::option::Option, + } +} +/// Note that both Span and Batch may contain a Process. +/// This is different from the Thrift model which was only used +/// for transport, because Proto model is also used by the backend +/// as the domain model, where once a batch is received it is split +/// into individual spans which are all processed independently, +/// and therefore they all need a Process. As far as on-the-wire +/// semantics, both Batch and Spans in the same message may contain +/// their own instances of Process, with span.Process taking priority +/// over batch.Process. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Batch { + #[prost(message, repeated, tag = "1")] + pub spans: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub process: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DependencyLink { + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub child: ::prost::alloc::string::String, + #[prost(uint64, tag = "3")] + pub call_count: u64, + #[prost(string, tag = "4")] + pub source: ::prost::alloc::string::String, +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ValueType { + String = 0, + Bool = 1, + Int64 = 2, + Float64 = 3, + Binary = 4, +} +impl ValueType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ValueType::String => "STRING", + ValueType::Bool => "BOOL", + ValueType::Int64 => "INT64", + ValueType::Float64 => "FLOAT64", + ValueType::Binary => "BINARY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STRING" => Some(Self::String), + "BOOL" => Some(Self::Bool), + "INT64" => Some(Self::Int64), + "FLOAT64" => Some(Self::Float64), + "BINARY" => Some(Self::Binary), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SpanRefType { + ChildOf = 0, + FollowsFrom = 1, +} +impl SpanRefType { + /// String value of the enum field names used in the ProtoBuf definition. 
+ /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SpanRefType::ChildOf => "CHILD_OF", + SpanRefType::FollowsFrom => "FOLLOWS_FROM", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CHILD_OF" => Some(Self::ChildOf), + "FOLLOWS_FROM" => Some(Self::FollowsFrom), + _ => None, + } + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTraceRequest { + #[prost(bytes = "vec", tag = "1")] + pub trace_id: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SpansResponseChunk { + #[prost(message, repeated, tag = "1")] + pub spans: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArchiveTraceRequest { + #[prost(bytes = "vec", tag = "1")] + pub trace_id: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArchiveTraceResponse {} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TraceQueryParameters { + #[prost(string, tag = "1")] + pub service_name: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub operation_name: ::prost::alloc::string::String, + #[prost(map = "string, string", tag = "3")] + pub tags: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, + #[prost(message, optional, tag = "4")] + pub start_time_min: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "5")] + pub start_time_max: 
::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "6")] + pub duration_min: ::core::option::Option<::prost_types::Duration>, + #[prost(message, optional, tag = "7")] + pub duration_max: ::core::option::Option<::prost_types::Duration>, + #[prost(int32, tag = "8")] + pub search_depth: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FindTracesRequest { + #[prost(message, optional, tag = "1")] + pub query: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetServicesRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetServicesResponse { + #[prost(string, repeated, tag = "1")] + pub services: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOperationsRequest { + #[prost(string, tag = "1")] + pub service: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub span_kind: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Operation { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub span_kind: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOperationsResponse { + /// deprecated + #[prost(string, repeated, tag = "1")] + pub operation_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(message, repeated, tag = "2")] + pub operations: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDependenciesRequest { + #[prost(message, optional, tag = 
"1")] + pub start_time: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "2")] + pub end_time: ::core::option::Option<::prost_types::Timestamp>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDependenciesResponse { + #[prost(message, repeated, tag = "1")] + pub dependencies: ::prost::alloc::vec::Vec, +} +/// Generated client implementations. +pub mod query_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::http::Uri; + use tonic::codegen::*; + #[derive(Debug, Clone)] + pub struct QueryServiceClient { + inner: tonic::client::Grpc, + } + impl QueryServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + >>::Error: + Into + Send + Sync, + { + QueryServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + pub async fn get_trace( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/jaeger.api_v2.QueryService/GetTrace"); + self.inner + .server_streaming(request.into_request(), path, codec) + .await + } + pub async fn archive_trace( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/jaeger.api_v2.QueryService/ArchiveTrace"); + self.inner.unary(request.into_request(), path, codec).await + } + pub async fn find_traces( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result< + tonic::Response>, + tonic::Status, + > { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/jaeger.api_v2.QueryService/FindTraces"); + self.inner + .server_streaming(request.into_request(), path, codec) + .await + } + pub async fn get_services( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + 
tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/jaeger.api_v2.QueryService/GetServices"); + self.inner.unary(request.into_request(), path, codec).await + } + pub async fn get_operations( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/jaeger.api_v2.QueryService/GetOperations"); + self.inner.unary(request.into_request(), path, codec).await + } + pub async fn get_dependencies( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/jaeger.api_v2.QueryService/GetDependencies"); + self.inner.unary(request.into_request(), path, codec).await + } + } +} diff --git a/opentelemetry-jaeger-propagator/src/testing/mod.rs b/opentelemetry-jaeger-propagator/src/testing/mod.rs new file mode 100644 index 00000000000..36d2c3a222c --- /dev/null +++ b/opentelemetry-jaeger-propagator/src/testing/mod.rs @@ -0,0 +1,85 @@ +#[allow(unused, missing_docs, clippy::derive_partial_eq_without_eq)] +// tonic don't derive Eq. 
We shouldn't manually change it.)] +pub mod jaeger_api_v2; + +#[allow(missing_docs)] +pub mod jaeger_client { + use crate::testing::jaeger_api_v2::query_service_client::QueryServiceClient; + use crate::testing::jaeger_api_v2::{ + FindTracesRequest, GetServicesRequest, GetTraceRequest, Span as JaegerSpan, + TraceQueryParameters, + }; + use tonic::transport::Channel; + + #[derive(Debug)] + pub struct JaegerTestClient { + query_service_client: QueryServiceClient, + } + + impl JaegerTestClient { + pub fn new(jaeger_url: &'static str) -> JaegerTestClient { + let channel = Channel::from_static(jaeger_url).connect_lazy(); + + JaegerTestClient { + query_service_client: QueryServiceClient::new(channel), + } + } + + /// Check if the jaeger contains the service + pub async fn contain_service(&mut self, service_name: &String) -> bool { + self.query_service_client + .get_services(GetServicesRequest {}) + .await + .unwrap() + .get_ref() + .services + .iter() + .any(|svc_name| *svc_name == *service_name) + } + + /// Find trace by trace id. + /// Note that `trace_id` should be a u128 in hex. + pub async fn get_trace(&mut self, trace_id: String) -> Vec { + let trace_id = u128::from_str_radix(trace_id.as_ref(), 16).expect("invalid trace id"); + let mut resp = self + .query_service_client + .get_trace(GetTraceRequest { + trace_id: trace_id.to_be_bytes().into(), + }) + .await + .unwrap(); + + if let Some(spans) = resp + .get_mut() + .message() + .await + .expect("jaeger returns error") + { + spans.spans + } else { + vec![] + } + } + + /// Find traces belongs the service. + /// It assumes the service exists. 
+ pub async fn find_traces_from_services(&mut self, service_name: &str) -> Vec { + let request = FindTracesRequest { + query: Some(TraceQueryParameters { + service_name: service_name.to_owned(), + ..Default::default() + }), + }; + self.query_service_client + .find_traces(request) + .await + .unwrap() + .get_mut() + .message() + .await + .expect("jaeger returns error") + .unwrap_or_default() + .spans + } + } +} diff --git a/opentelemetry-jaeger/CHANGELOG.md b/opentelemetry-jaeger/CHANGELOG.md index c109b677dbb..88fe37d60f7 100644 --- a/opentelemetry-jaeger/CHANGELOG.md +++ b/opentelemetry-jaeger/CHANGELOG.md @@ -2,6 +2,11 @@ ## vNext +## v0.21.0 + +### Changed +- Previously, the opentelemetry-jaeger crate exposed both a Jaeger exporter and a Jaeger propagator. Going forwards, the Jaeger propagator functionality has been moved to a new crate [opentelemetry-jaeger-propagator](../opentelemetry-jaeger-propagator/) to prepare for opentelemetry-jaeger exporter deprecation. Starting with [Jaeger v1.35](https://github.com/jaegertracing/jaeger/releases/tag/v1.35.0), Jaeger supports the OpenTelemetry Protocol (OTLP). [OpenTelemetry has recommended](https://opentelemetry.io/blog/2022/jaeger-native-otlp/) that Jaeger exporters be deprecated from OpenTelemetry SDKs in favor of sending traces to Jaeger clients using OTLP. An example and further discussion of how to consume OpenTelemetry spans with Jaeger can be found at [Introducing native support for OpenTelemetry in Jaeger](https://medium.com/jaegertracing/introducing-native-support-for-opentelemetry-in-jaeger-eb661be8183c). 
+ ## v0.20.0 ### Changed diff --git a/opentelemetry-jaeger/Cargo.toml b/opentelemetry-jaeger/Cargo.toml index 805cf9df6ce..54bc16e5543 100644 --- a/opentelemetry-jaeger/Cargo.toml +++ b/opentelemetry-jaeger/Cargo.toml @@ -33,6 +33,7 @@ opentelemetry = { version = "0.21", default-features = false, features = ["trace opentelemetry_sdk = { version = "0.21", default-features = false, features = ["trace"], path = "../opentelemetry-sdk" } opentelemetry-http = { version = "0.10", path = "../opentelemetry-http", optional = true } opentelemetry-semantic-conventions = { version = "0.13", path = "../opentelemetry-semantic-conventions" } +opentelemetry-jaeger-propagator = { version = "0.1", path = "../opentelemetry-jaeger-propagator" } pin-project-lite = { workspace = true, optional = true } reqwest = { workspace = true, default-features = false, optional = true } surf = { workspace = true, optional = true } @@ -83,7 +84,7 @@ full = [ "rt-tokio", "rt-tokio-current-thread", "rt-async-std", - "integration_test" + "integration_test", ] default = [] collector_client = ["http", "opentelemetry-http"] diff --git a/opentelemetry-jaeger/README.md b/opentelemetry-jaeger/README.md index 8678fc25205..aaa79ec4a99 100644 --- a/opentelemetry-jaeger/README.md +++ b/opentelemetry-jaeger/README.md @@ -12,6 +12,11 @@ [![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) [![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) +**WARNING** +[Jaeger](https://www.jaegertracing.io/) supports the OpenTelemetry Protocol (OTLP) as of [v1.35.0](https://github.com/jaegertracing/jaeger/releases/tag/v1.35.0) and as a result, language specific Jaeger exporters within OpenTelemetry SDKs are [recommended for deprecation by the OpenTelemetry 
project](https://opentelemetry.io/blog/2022/jaeger-native-otlp/). More information and examples of using OTLP with Jaeger can be found in [Introducing native support for OpenTelemetry in Jaeger](https://medium.com/jaegertracing/introducing-native-support-for-opentelemetry-in-jaeger-eb661be8183c) and [Exporting OTLP traces to Jaeger](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/tracing-jaeger). + +The opentelemetry-jaeger crate previously contained both a Jaeger exporter and a Jaeger propagator. To prepare for the deprecation of the Jaeger exporter, the Jaeger propagator implementation has been migrated to [opentelemetry-jaeger-propagator](../opentelemetry-jaeger-propagator/). + ## Overview [`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, @@ -41,9 +46,10 @@ exporting telemetry: ```rust use opentelemetry::global; use opentelemetry::trace::Tracer; +use opentelemetry_jaeger_propagator; fn main() -> Result<(), Box> { - global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); + global::set_text_map_propagator(opentelemetry_jaeger_propagator::Propagator::new()); let tracer = opentelemetry_jaeger::new_agent_pipeline().install_simple()?; tracer.in_span("doing_work", |cx| { diff --git a/opentelemetry-jaeger/src/lib.rs b/opentelemetry-jaeger/src/lib.rs index 3febd3a0de5..151dab020ca 100644 --- a/opentelemetry-jaeger/src/lib.rs +++ b/opentelemetry-jaeger/src/lib.rs @@ -29,10 +29,11 @@ //! //! ```no_run //! use opentelemetry::{global, trace::{Tracer, TraceError}}; +//! use opentelemetry_jaeger_propagator; //! //! #[tokio::main] //! async fn main() -> Result<(), TraceError> { -//! global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); +//! global::set_text_map_propagator(opentelemetry_jaeger_propagator::Propagator::new()); //! let tracer = opentelemetry_jaeger::new_agent_pipeline().install_simple()?; //! //! tracer.in_span("doing_work", |cx| { @@ -49,9 +50,10 @@ //! ```no_run //! 
use opentelemetry::{global, trace::{Tracer, TraceError}}; //! use opentelemetry_sdk::runtime::Tokio; +//! use opentelemetry_jaeger_propagator; //! //! fn main() -> Result<(), TraceError> { -//! global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); +//! global::set_text_map_propagator(opentelemetry_jaeger_propagator::Propagator::new()); //! let tracer = opentelemetry_jaeger::new_agent_pipeline().install_batch(Tokio)?; //! //! tracer.in_span("doing_work", |cx| { @@ -174,9 +176,10 @@ //! ```no_run //! use opentelemetry::{global, KeyValue, trace::{Tracer, TraceError}}; //! use opentelemetry_sdk::{trace::{config, RandomIdGenerator, Sampler}, Resource}; +//! use opentelemetry_jaeger_propagator; //! //! fn main() -> Result<(), TraceError> { -//! global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); +//! global::set_text_map_propagator(opentelemetry_jaeger_propagator::Propagator::new()); //! let tracer = opentelemetry_jaeger::new_agent_pipeline() //! .with_endpoint("localhost:6831") //! .with_service_name("my_app") @@ -211,9 +214,10 @@ //! ```ignore //! use opentelemetry::{global, KeyValue, trace::{Tracer, TraceError}}; //! use opentelemetry_sdk::{trace::{config, RandomIdGenerator, Sampler}, Resource}; +//! use opentelemetry_jaeger_propagator; //! //! fn main() -> Result<(), TraceError> { -//! global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); +//! global::set_text_map_propagator(opentelemetry_jaeger_propagator::Propagator::new()); //! let tracer = opentelemetry_jaeger::new_collector_pipeline() //! .with_endpoint("http://localhost:14250/api/trace") // set collector endpoint //! 
.with_service_name("my_app") // the name of the application @@ -322,537 +326,9 @@ pub use exporter::config::collector::new_wasm_collector_pipeline; pub use exporter::{ config::agent::new_agent_pipeline, runtime::JaegerTraceRuntime, Error, Exporter, Process, }; -pub use propagator::Propagator; mod exporter; #[cfg(feature = "integration_test")] #[doc(hidden)] pub mod testing; - -mod propagator { - use opentelemetry::{ - global::{self, Error}, - propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, - trace::{ - SpanContext, SpanId, TraceContextExt, TraceError, TraceFlags, TraceId, TraceState, - }, - Context, - }; - use std::borrow::Cow; - use std::str::FromStr; - - const JAEGER_HEADER: &str = "uber-trace-id"; - const JAEGER_BAGGAGE_PREFIX: &str = "uberctx-"; - const DEPRECATED_PARENT_SPAN: &str = "0"; - - const TRACE_FLAG_DEBUG: TraceFlags = TraceFlags::new(0x04); - - /// The Jaeger propagator propagates span contexts in [Jaeger propagation format]. - /// - /// Cross-cutting concerns send their state to the next process using `Propagator`s, - /// which are defined as objects used to read and write context data to and from messages - /// exchanged by the applications. Each concern creates a set of `Propagator`s for every - /// supported `Propagator` type. - /// - /// Note that jaeger header can be set in http header or encoded as url. - /// - /// ## Examples - /// ``` - /// # use opentelemetry::{global, trace::{Tracer, TraceContextExt}, Context}; - /// # use opentelemetry_jaeger::Propagator as JaegerPropagator; - /// # fn send_request() { - /// // setup jaeger propagator - /// global::set_text_map_propagator(JaegerPropagator::default()); - /// // You also can init propagator with custom header name - /// // global::set_text_map_propagator(JaegerPropagator::with_custom_header("my-custom-header")); - /// - /// // before sending requests to downstream services. 
- /// let mut headers = std::collections::HashMap::new(); // replace by http header of the outgoing request - /// let caller_span = global::tracer("caller").start("say hello"); - /// let cx = Context::current_with_span(caller_span); - /// global::get_text_map_propagator(|propagator| { - /// propagator.inject_context(&cx, &mut headers); // propagator serialize the tracing context - /// }); - /// // Send the request.. - /// # } - /// - /// - /// # fn receive_request() { - /// // Receive the request sent above on the other service... - /// // setup jaeger propagator - /// global::set_text_map_propagator(JaegerPropagator::new()); - /// // You also can init propagator with custom header name - /// // global::set_text_map_propagator(JaegerPropagator::with_custom_header("my-custom-header")); - /// - /// let headers = std::collections::HashMap::new(); // replace this with http header map from incoming requests. - /// let parent_context = global::get_text_map_propagator(|propagator| { - /// propagator.extract(&headers) - /// }); - /// - /// // this span's parent span will be caller_span in send_request functions. 
- /// let receiver_span = global::tracer("receiver").start_with_context("hello", &parent_context); - /// # } - /// ``` - /// - /// [jaeger propagation format]: https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format - #[derive(Clone, Debug)] - pub struct Propagator { - baggage_prefix: &'static str, - header_name: &'static str, - fields: [String; 1], - } - - // Implement default using Propagator::new() to not break compatibility with previous versions - impl Default for Propagator { - fn default() -> Self { - Propagator::new() - } - } - - impl Propagator { - /// Create a Jaeger propagator - pub fn new() -> Self { - Self::with_custom_header_and_baggage(JAEGER_HEADER, JAEGER_BAGGAGE_PREFIX) - } - - /// Create a Jaeger propagator with custom header name - pub fn with_custom_header(custom_header_name: &'static str) -> Self { - Self::with_custom_header_and_baggage(custom_header_name, JAEGER_BAGGAGE_PREFIX) - } - - /// Create a Jaeger propagator with custom header name and baggage prefix - /// - /// NOTE: it's implicitly fallback to the default header names when the ane of provided custom_* is empty - /// Default header-name is `uber-trace-id` and baggage-prefix is `uberctx-` - /// The format of serialized context and baggage's stays unchanged and not depending - /// on provided header name and prefix. 
- pub fn with_custom_header_and_baggage( - custom_header_name: &'static str, - custom_baggage_prefix: &'static str, - ) -> Self { - let custom_header_name = if custom_header_name.trim().is_empty() { - JAEGER_HEADER - } else { - custom_header_name - }; - - let custom_baggage_prefix = if custom_baggage_prefix.trim().is_empty() { - JAEGER_BAGGAGE_PREFIX - } else { - custom_baggage_prefix - }; - - Propagator { - baggage_prefix: custom_baggage_prefix.trim(), - header_name: custom_header_name.trim(), - fields: [custom_header_name.to_owned()], - } - } - - /// Extract span context from header value - fn extract_span_context(&self, extractor: &dyn Extractor) -> Result { - let mut header_value = Cow::from(extractor.get(self.header_name).unwrap_or("")); - // if there is no :, it means header_value could be encoded as url, try decode first - if !header_value.contains(':') { - header_value = Cow::from(header_value.replace("%3A", ":")); - } - - let parts = header_value.split_terminator(':').collect::>(); - if parts.len() != 4 { - return Err(()); - } - - // extract trace id - let trace_id = self.extract_trace_id(parts[0])?; - let span_id = self.extract_span_id(parts[1])?; - // Ignore parent span id since it's deprecated. - let flags = self.extract_trace_flags(parts[3])?; - let state = self.extract_trace_state(extractor)?; - - Ok(SpanContext::new(trace_id, span_id, flags, true, state)) - } - - /// Extract trace id from the header. - fn extract_trace_id(&self, trace_id: &str) -> Result { - if trace_id.len() > 32 { - return Err(()); - } - - TraceId::from_hex(trace_id).map_err(|_| ()) - } - - /// Extract span id from the header. - fn extract_span_id(&self, span_id: &str) -> Result { - match span_id.len() { - // exact 16 - 16 => SpanId::from_hex(span_id).map_err(|_| ()), - // more than 16 is invalid - 17.. 
=> Err(()), - // less than 16 will result padding on left - _ => { - let padded = format!("{span_id:0>16}"); - SpanId::from_hex(&padded).map_err(|_| ()) - } - } - } - - /// Extract flag from the header - /// - /// First bit control whether to sample - /// Second bit control whether it's a debug trace - /// Third bit is not used. - /// Forth bit is firehose flag, which is not supported in OT now. - fn extract_trace_flags(&self, flag: &str) -> Result { - if flag.len() > 2 { - return Err(()); - } - let flag = u8::from_str(flag).map_err(|_| ())?; - if flag & 0x01 == 0x01 { - if flag & 0x02 == 0x02 { - Ok(TraceFlags::SAMPLED | TRACE_FLAG_DEBUG) - } else { - Ok(TraceFlags::SAMPLED) - } - } else { - // Debug flag should only be set when sampled flag is set. - // So if debug flag is set alone. We will just use not sampled flag - Ok(TraceFlags::default()) - } - } - - fn extract_trace_state(&self, extractor: &dyn Extractor) -> Result { - let baggage_keys = extractor - .keys() - .into_iter() - .filter(|key| key.starts_with(self.baggage_prefix)) - .filter_map(|key| { - extractor - .get(key) - .map(|value| (key.to_string(), value.to_string())) - }); - - match TraceState::from_key_value(baggage_keys) { - Ok(trace_state) => Ok(trace_state), - Err(trace_state_err) => { - global::handle_error(Error::Trace(TraceError::Other(Box::new( - trace_state_err, - )))); - Err(()) //todo: assign an error type instead of using () - } - } - } - } - - impl TextMapPropagator for Propagator { - fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) { - let span = cx.span(); - let span_context = span.span_context(); - if span_context.is_valid() { - let flag: u8 = if span_context.is_sampled() { - if span_context.trace_flags() & TRACE_FLAG_DEBUG == TRACE_FLAG_DEBUG { - 0x03 - } else { - 0x01 - } - } else { - 0x00 - }; - let header_value = format!( - "{}:{}:{:01}:{:01x}", - span_context.trace_id(), - span_context.span_id(), - DEPRECATED_PARENT_SPAN, - flag, - ); - 
injector.set(self.header_name, header_value); - } - } - - fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { - self.extract_span_context(extractor) - .map(|sc| cx.with_remote_span_context(sc)) - .unwrap_or_else(|_| cx.clone()) - } - - fn fields(&self) -> FieldIter<'_> { - FieldIter::new(self.fields.as_ref()) - } - } - - #[cfg(test)] - mod tests { - use super::*; - use opentelemetry::{ - propagation::{Injector, TextMapPropagator}, - testing::trace::TestSpan, - trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, - Context, - }; - use std::collections::HashMap; - - const LONG_TRACE_ID_STR: &str = "000000000000004d0000000000000016"; - const SHORT_TRACE_ID_STR: &str = "4d0000000000000016"; - const TRACE_ID: u128 = 0x0000_0000_0000_004d_0000_0000_0000_0016; - const SPAN_ID_STR: &str = "0000000000017c29"; - const SHORT_SPAN_ID_STR: &str = "17c29"; - const SPAN_ID: u64 = 0x0000_0000_0001_7c29; - - fn get_extract_data() -> Vec<(&'static str, &'static str, u8, SpanContext)> { - vec![ - ( - LONG_TRACE_ID_STR, - SPAN_ID_STR, - 1, - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TraceFlags::SAMPLED, - true, - TraceState::default(), - ), - ), - ( - SHORT_TRACE_ID_STR, - SPAN_ID_STR, - 1, - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TraceFlags::SAMPLED, - true, - TraceState::default(), - ), - ), - ( - SHORT_TRACE_ID_STR, - SHORT_SPAN_ID_STR, - 1, - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TraceFlags::SAMPLED, - true, - TraceState::default(), - ), - ), - ( - LONG_TRACE_ID_STR, - SPAN_ID_STR, - 3, - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TRACE_FLAG_DEBUG | TraceFlags::SAMPLED, - true, - TraceState::default(), - ), - ), - ( - LONG_TRACE_ID_STR, - SPAN_ID_STR, - 0, - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TraceFlags::default(), - 
true, - TraceState::default(), - ), - ), - ( - "invalidtractid", - SPAN_ID_STR, - 0, - SpanContext::empty_context(), - ), - ( - LONG_TRACE_ID_STR, - "invalidspanID", - 0, - SpanContext::empty_context(), - ), - ( - LONG_TRACE_ID_STR, - SPAN_ID_STR, - 120, - SpanContext::empty_context(), - ), - ] - } - - fn get_inject_data() -> Vec<(SpanContext, String)> { - vec![ - ( - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TraceFlags::SAMPLED, - true, - TraceState::default(), - ), - format!("{}:{}:0:1", LONG_TRACE_ID_STR, SPAN_ID_STR), - ), - ( - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TraceFlags::default(), - true, - TraceState::default(), - ), - format!("{}:{}:0:0", LONG_TRACE_ID_STR, SPAN_ID_STR), - ), - ( - SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TRACE_FLAG_DEBUG | TraceFlags::SAMPLED, - true, - TraceState::default(), - ), - format!("{}:{}:0:3", LONG_TRACE_ID_STR, SPAN_ID_STR), - ), - ] - } - - /// Try to extract the context using the created Propagator with custom header name - /// from the Extractor under the `context_key` key. - fn _test_extract_with_header(construct_header: &'static str, context_key: &'static str) { - let propagator = Propagator::with_custom_header(construct_header); - for (trace_id, span_id, flag, expected) in get_extract_data() { - let mut map: HashMap = HashMap::new(); - map.set(context_key, format!("{}:{}:0:{}", trace_id, span_id, flag)); - let context = propagator.extract(&map); - assert_eq!(context.span().span_context(), &expected); - } - } - - /// Try to inject the context using the created Propagator with custom header name - /// and expect the serialized context existence under `expect_header` key. 
- fn _test_inject_with_header(construct_header: &'static str, expect_header: &'static str) { - let propagator = Propagator::with_custom_header(construct_header); - for (span_context, header_value) in get_inject_data() { - let mut injector = HashMap::new(); - propagator.inject_context( - &Context::current_with_span(TestSpan(span_context)), - &mut injector, - ); - assert_eq!(injector.get(expect_header), Some(&header_value)); - } - } - - #[test] - fn test_extract_empty() { - let map: HashMap = HashMap::new(); - let propagator = Propagator::new(); - let context = propagator.extract(&map); - assert_eq!(context.span().span_context(), &SpanContext::empty_context()) - } - - #[test] - fn test_inject_extract_with_default() { - let propagator = Propagator::default(); - for (span_context, header_value) in get_inject_data() { - let mut injector = HashMap::new(); - propagator.inject_context( - &Context::current_with_span(TestSpan(span_context)), - &mut injector, - ); - assert_eq!(injector.get(JAEGER_HEADER), Some(&header_value)); - } - for (trace_id, span_id, flag, expected) in get_extract_data() { - let mut map: HashMap = HashMap::new(); - map.set( - JAEGER_HEADER, - format!("{}:{}:0:{}", trace_id, span_id, flag), - ); - let context = propagator.extract(&map); - assert_eq!(context.span().span_context(), &expected); - } - } - - #[test] - fn test_extract_too_many_parts() { - let mut map: HashMap = HashMap::new(); - map.set( - JAEGER_HEADER, - format!("{}:{}:0:1:aa", LONG_TRACE_ID_STR, SPAN_ID_STR), - ); - let propagator = Propagator::new(); - let context = propagator.extract(&map); - assert_eq!(context.span().span_context(), &SpanContext::empty_context()); - } - - #[test] - fn test_extract_invalid_flag() { - let mut map: HashMap = HashMap::new(); - map.set( - JAEGER_HEADER, - format!("{}:{}:0:aa", LONG_TRACE_ID_STR, SPAN_ID_STR), - ); - let propagator = Propagator::new(); - let context = propagator.extract(&map); - assert_eq!(context.span().span_context(), 
&SpanContext::empty_context()); - } - - #[test] - fn test_extract_from_url() { - let mut map: HashMap = HashMap::new(); - map.set( - JAEGER_HEADER, - format!("{}%3A{}%3A0%3A1", LONG_TRACE_ID_STR, SPAN_ID_STR), - ); - let propagator = Propagator::new(); - let context = propagator.extract(&map); - assert_eq!( - context.span().span_context(), - &SpanContext::new( - TraceId::from_u128(TRACE_ID), - SpanId::from_u64(SPAN_ID), - TraceFlags::SAMPLED, - true, - TraceState::default(), - ) - ); - } - - #[test] - fn test_extract() { - _test_extract_with_header(JAEGER_HEADER, JAEGER_HEADER) - } - - #[test] - fn test_inject() { - _test_inject_with_header(JAEGER_HEADER, JAEGER_HEADER) - } - - #[test] - fn test_extract_with_invalid_header() { - for construct in &["", " "] { - _test_extract_with_header(construct, JAEGER_HEADER) - } - } - - #[test] - fn test_extract_with_valid_header() { - for construct in &["custom-header", "custom-header ", " custom-header "] { - _test_extract_with_header(construct, "custom-header") - } - } - - #[test] - fn test_inject_with_invalid_header() { - for construct in &["", " "] { - _test_inject_with_header(construct, JAEGER_HEADER) - } - } - - #[test] - fn test_inject_with_valid_header() { - for construct in &["custom-header", "custom-header ", " custom-header "] { - _test_inject_with_header(construct, "custom-header") - } - } - } -} diff --git a/scripts/lint.sh b/scripts/lint.sh index 1a01f2a0cad..009b420731f 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -40,6 +40,8 @@ if rustup component add clippy; then cargo_feature opentelemetry-jaeger "collector_client, wasm_collector_client" cargo_feature opentelemetry-jaeger "default" + cargo_feature opentelemetry-jaeger-propagator "default" + cargo_feature opentelemetry-proto "default" cargo_feature opentelemetry-proto "full" cargo_feature opentelemetry-proto "gen-tonic,trace" From 8237f87f42aa6d4935ed6a7e9fb9b225fb20640e Mon Sep 17 00:00:00 2001 From: Bhargav Date: Fri, 26 Jan 2024 15:21:26 -0800 
Subject: [PATCH 13/13] Add event_name field to `LogRecord` (#1488) --- opentelemetry/src/logs/record.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/opentelemetry/src/logs/record.rs b/opentelemetry/src/logs/record.rs index 1d380cff487..ca705bac588 100644 --- a/opentelemetry/src/logs/record.rs +++ b/opentelemetry/src/logs/record.rs @@ -9,6 +9,9 @@ use std::{borrow::Cow, collections::HashMap, time::SystemTime}; /// LogRecord represents all data carried by a log record, and /// is provided to `LogExporter`s as input. pub struct LogRecord { + /// Event name. Optional as not all the logging API support it. + pub event_name: Option>, + /// Record timestamp pub timestamp: Option, @@ -33,6 +36,7 @@ pub struct LogRecord { impl Default for LogRecord { fn default() -> Self { LogRecord { + event_name: None, timestamp: None, observed_timestamp: SystemTime::now(), trace_context: None, @@ -368,6 +372,16 @@ impl LogRecordBuilder { self } + /// Sets the `event_name` of a record. + pub fn with_name(self, name: Cow<'static, str>) -> Self { + Self { + record: LogRecord { + event_name: Some(name), + ..self.record + }, + } + } + /// Build the record, consuming the Builder pub fn build(self) -> LogRecord { self.record