From 6eaf89fb124c421b66b43b195879d458a3a31f86 Mon Sep 17 00:00:00 2001 From: Anchit Jain <112778471+anchitj@users.noreply.github.com> Date: Mon, 8 Jul 2024 13:17:43 +0530 Subject: [PATCH] KIP 714 with compression support (#4721) KIP 714 with compression support (#4721) implemented GetTelemetrySubscriptions and PushTelemetry to send client telemetry to the requesting broker. Available metrics: * producer.connection.creation.rate * producer.connection.creation.total * producer.node.request.latency.avg * producer.node.request.latency.max * producer.produce.throttle.time.avg * producer.produce.throttle.time.max * producer.record.queue.time.avg * producer.record.queue.time.max * consumer.connection.creation.rate * consumer.connection.creation.total * consumer.node.request.latency.avg * consumer.node.request.latency.max * consumer.coordinator.assigned.partitions Compression is supported with zstd, zlib, lz4, or snappy. --------- Co-authored-by: Milind L --- .formatignore | 13 + CHANGELOG.md | 2 + CONFIGURATION.md | 3 +- INTRODUCTION.md | 3 + LICENSE.nanopb | 22 + LICENSE.opentelemetry | 203 ++++ LICENSES.txt | 232 ++++ Makefile | 2 +- src/CMakeLists.txt | 9 + src/Makefile | 8 +- src/nanopb/pb.h | 917 +++++++++++++++ src/nanopb/pb_common.c | 388 +++++++ src/nanopb/pb_common.h | 49 + src/nanopb/pb_decode.c | 1727 +++++++++++++++++++++++++++++ src/nanopb/pb_decode.h | 193 ++++ src/nanopb/pb_encode.c | 1000 +++++++++++++++++ src/nanopb/pb_encode.h | 185 +++ src/opentelemetry/common.pb.c | 32 + src/opentelemetry/common.pb.h | 170 +++ src/opentelemetry/metrics.options | 2 + src/opentelemetry/metrics.pb.c | 67 ++ src/opentelemetry/metrics.pb.h | 966 ++++++++++++++++ src/opentelemetry/resource.pb.c | 12 + src/opentelemetry/resource.pb.h | 58 + src/rd.h | 5 + src/rdkafka.c | 37 +- src/rdkafka.h | 6 + src/rdkafka_broker.c | 148 ++- src/rdkafka_broker.h | 49 + src/rdkafka_conf.c | 8 +- src/rdkafka_conf.h | 2 + src/rdkafka_int.h | 82 ++ src/rdkafka_mock.c | 66 +- src/rdkafka_mock.h | 27 + src/rdkafka_mock_handlers.c | 214 ++++ src/rdkafka_mock_int.h | 11 +- src/rdkafka_msgset.h | 16 + src/rdkafka_msgset_writer.c | 126 ++- src/rdkafka_op.c | 24 +- src/rdkafka_op.h | 15 + src/rdkafka_proto.h | 35 +- src/rdkafka_protocol.h | 8 +- src/rdkafka_request.c | 238 +++- src/rdkafka_request.h | 37 + src/rdkafka_telemetry.c | 697 ++++++++++++ src/rdkafka_telemetry.h | 52 + src/rdkafka_telemetry_decode.c | 559 ++++++++++ src/rdkafka_telemetry_decode.h | 59 + src/rdkafka_telemetry_encode.c | 833 ++++++++++++++ src/rdkafka_telemetry_encode.h | 214 ++++ src/rdunittest.c | 3 + tests/0150-telemetry_mock.c | 546 +++++++++ tests/CMakeLists.txt | 1 + tests/test.c | 2 + win32/librdkafka.vcxproj | 20 +- win32/tests/tests.vcxproj | 1 + 56 files changed, 10282 insertions(+), 122 deletions(-) create mode 100644 LICENSE.nanopb create mode 100644 LICENSE.opentelemetry create mode 100644 src/nanopb/pb.h create mode 100644 src/nanopb/pb_common.c create mode 100644 src/nanopb/pb_common.h create mode 100644 src/nanopb/pb_decode.c create mode 100644 src/nanopb/pb_decode.h create mode 100644 src/nanopb/pb_encode.c create mode 100644 src/nanopb/pb_encode.h create mode 100644 src/opentelemetry/common.pb.c create mode 100644 src/opentelemetry/common.pb.h create mode 100644 src/opentelemetry/metrics.options create mode 100644 src/opentelemetry/metrics.pb.c create mode 100644 src/opentelemetry/metrics.pb.h create mode 100644 src/opentelemetry/resource.pb.c create mode 100644 src/opentelemetry/resource.pb.h create mode 100644 
src/rdkafka_telemetry.c create mode 100644 src/rdkafka_telemetry.h create mode 100644 src/rdkafka_telemetry_decode.c create mode 100644 src/rdkafka_telemetry_decode.h create mode 100644 src/rdkafka_telemetry_encode.c create mode 100644 src/rdkafka_telemetry_encode.h create mode 100644 tests/0150-telemetry_mock.c diff --git a/.formatignore b/.formatignore index c417327912..ed5d1b43d2 100644 --- a/.formatignore +++ b/.formatignore @@ -18,3 +18,16 @@ src/snappy_compat.h src/tinycthread.c src/tinycthread.h src/regexp.h +src/nanopb/pb_common.c +src/nanopb/pb_common.h +src/nanopb/pb_decode.c +src/nanopb/pb_decode.h +src/nanopb/pb_encode.c +src/nanopb/pb_encode.h +src/nanopb/pb.h +src/opentelemetry/common.pb.c +src/opentelemetry/common.pb.h +src/opentelemetry/metrics.pb.c +src/opentelemetry/metrics.pb.h +src/opentelemetry/resource.pb.c +src/opentelemetry/resource.pb.h diff --git a/CHANGELOG.md b/CHANGELOG.md index 177bba8a55..68142d0d3c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ librdkafka v2.5.0 is a feature release. * Fix for an idempotent producer error, with a message batch not reconstructed identically when retried (#4750) * Removed support for CentOS 6 and CentOS 7 (#4775). +* [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) Client + metrics and observability (#4721). ## Upgrade considerations diff --git a/CONFIGURATION.md b/CONFIGURATION.md index 749342c284..4a44ee9797 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -19,7 +19,7 @@ topic.metadata.refresh.fast.cnt | * | 0 .. 1000 | 10 topic.metadata.refresh.sparse | * | true, false | true | low | Sparse metadata requests (consumes less network bandwidth)
*Type: boolean* topic.metadata.propagation.max.ms | * | 0 .. 3600000 | 30000 | low | Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().
*Type: integer* topic.blacklist | * | | | low | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.
*Type: pattern list* -debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, mock, assignor, conf, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* +debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, mock, assignor, conf, telemetry, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* socket.timeout.ms | * | 10 .. 300000 | 60000 | low | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.
*Type: integer* socket.blocking.max.ms | * | 1 .. 60000 | 1000 | low | **DEPRECATED** No longer used.
*Type: integer* socket.send.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket send buffer size. System default is used if 0.
*Type: integer* @@ -156,6 +156,7 @@ dr_cb | P | | dr_msg_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())
*Type: see dedicated API* sticky.partitioning.linger.ms | P | 0 .. 900000 | 10 | low | Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages.
*Type: integer* client.dns.lookup | * | use_all_dns_ips, resolve_canonical_bootstrap_servers_only | use_all_dns_ips | low | Controls how the client uses DNS lookups. By default, when the lookup returns multiple IP addresses for a hostname, they will all be attempted for connection before the connection is considered failed. This applies to both bootstrap and advertised servers. If the value is set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names. **WARNING**: `resolve_canonical_bootstrap_servers_only` must only be used with `GSSAPI` (Kerberos) as `sasl.mechanism`, as it's the only purpose of this configuration value. **NOTE**: Default here is different from the Java client's default behavior, which connects only to the first IP address returned for a hostname.
*Type: enum value* +enable.metrics.push | * | true, false | true | low | Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client
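Not part of the upstream patch: a minimal, hypothetical sketch of how an application could act on the two knobs this change adds — the `enable.metrics.push` property documented in the entry above and the new `telemetry` debug context — using librdkafka's public configuration API (`rd_kafka_conf_set()`):

```c
#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Hypothetical helper: create a producer that opts out of KIP-714
 * metrics pushing and enables telemetry debug logging. */
static rd_kafka_t *create_producer(const char *brokers) {
        char errstr[512];
        rd_kafka_conf_t *conf = rd_kafka_conf_new();

        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            /* Defaults to true; "false" disables KIP-714 metric pushes. */
            rd_kafka_conf_set(conf, "enable.metrics.push", "false",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            /* "telemetry" is the debug context added by this patch. */
            rd_kafka_conf_set(conf, "debug", "telemetry",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "config error: %s\n", errstr);
                rd_kafka_conf_destroy(conf);
                return NULL;
        }

        /* On success rd_kafka_new() takes ownership of conf. */
        return rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
}
```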
*Type: boolean* ## Topic configuration properties diff --git a/INTRODUCTION.md b/INTRODUCTION.md index 1449d01dd6..cbe9516071 100644 --- a/INTRODUCTION.md +++ b/INTRODUCTION.md @@ -2051,6 +2051,7 @@ The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/conf | KIP-602 - Use all resolved addresses by default | 2.6.0 | Supported | | KIP-651 - Support PEM format for SSL certs and keys | 2.7.0 | Supported | | KIP-654 - Aborted txns with non-flushed msgs should not be fatal | 2.7.0 | Supported | +| KIP-714 - Client metrics and observability | 3.7.0 | Supported | | KIP-735 - Increase default consumer session timeout | 3.0.0 | Supported | | KIP-768 - SASL/OAUTHBEARER OIDC support | 3.0 | Supported | | KIP-881 - Rack-aware Partition Assignment for Kafka Consumers | 3.5.0 | Supported | @@ -2106,6 +2107,8 @@ release of librdkafka. | 50 | DescribeUserScramCredentials | 0 | 0 | | 51 | AlterUserScramCredentials | 0 | 0 | | 68 | ConsumerGroupHeartbeat | 0 | 0 | +| 71 | GetTelemetrySubscriptions | 0 | 0 | +| 72 | PushTelemetry | 0 | 0 | # Recommendations for language binding developers diff --git a/LICENSE.nanopb b/LICENSE.nanopb new file mode 100644 index 0000000000..497ec8cd79 --- /dev/null +++ b/LICENSE.nanopb @@ -0,0 +1,22 @@ +For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt + +Copyright (c) 2011 Petteri Aimonen + +This software is provided 'as-is', without any express or +implied warranty. In no event will the authors be held liable +for any damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. + +3. This notice may not be removed or altered from any source + distribution. diff --git a/LICENSE.opentelemetry b/LICENSE.opentelemetry new file mode 100644 index 0000000000..819ea6a0eb --- /dev/null +++ b/LICENSE.opentelemetry @@ -0,0 +1,203 @@ +For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/LICENSES.txt b/LICENSES.txt index 53ffbe8ba9..1621ba0996 100644 --- a/LICENSES.txt +++ b/LICENSES.txt @@ -198,6 +198,238 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+LICENSE.nanopb +-------------------------------------------------------------- +For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt + +Copyright (c) 2011 Petteri Aimonen + +This software is provided 'as-is', without any express or +implied warranty. In no event will the authors be held liable +for any damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. + +3. This notice may not be removed or altered from any source + distribution. + + +LICENSE.opentelemetry +-------------------------------------------------------------- +For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ LICENSE.pycrc -------------------------------------------------------------- The following license applies to the files rdcrc32.c and rdcrc32.h which diff --git a/Makefile b/Makefile index d5e168b783..3188b84a2e 100755 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ VERSION?= $(shell python3 packaging/get_version.py src/rdkafka.h) BUILD_NUMBER ?= 1 # Skip copyright check in the following paths -MKL_COPYRIGHT_SKIP?=^(tests|packaging) +MKL_COPYRIGHT_SKIP?=^(tests|packaging|src/nanopb|src/opentelemetry) .PHONY: diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 5737b71547..bbe63cff48 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -53,6 +53,15 @@ set( rdkafka_mock_cgrp.c rdkafka_error.c rdkafka_fetcher.c + rdkafka_telemetry.c + rdkafka_telemetry_decode.c + rdkafka_telemetry_encode.c + nanopb/pb_encode.c + nanopb/pb_decode.c + nanopb/pb_common.c + opentelemetry/metrics.pb.c + opentelemetry/common.pb.c + opentelemetry/resource.pb.c rdlist.c rdlog.c rdmurmur2.c diff --git a/src/Makefile b/src/Makefile index 1c43f0b017..0d0635ce30 100644 --- a/src/Makefile +++ b/src/Makefile @@ -9,6 +9,9 @@ ifneq ($(wildcard ../.git),) CPPFLAGS += -DLIBRDKAFKA_GIT_VERSION="\"$(shell git describe --abbrev=6 --dirty --tags 2>/dev/null)\"" endif +CPPFLAGS += -I. + + SRCS_$(WITH_SASL_CYRUS) += rdkafka_sasl_cyrus.c SRCS_$(WITH_SASL_SCRAM) += rdkafka_sasl_scram.c SRCS_$(WITH_SASL_OAUTHBEARER) += rdkafka_sasl_oauthbearer.c @@ -55,7 +58,10 @@ SRCS= rdkafka.c rdkafka_broker.c rdkafka_msg.c rdkafka_topic.c \ rdkafka_txnmgr.c rdkafka_coord.c rdbase64.c \ rdvarint.c rdbuf.c rdmap.c rdunittest.c \ rdkafka_mock.c rdkafka_mock_handlers.c rdkafka_mock_cgrp.c \ - rdkafka_error.c rdkafka_fetcher.c \ + rdkafka_error.c rdkafka_fetcher.c rdkafka_telemetry.c \ + rdkafka_telemetry_encode.c rdkafka_telemetry_decode.c \ + nanopb/pb_encode.c nanopb/pb_decode.c nanopb/pb_common.c \ + opentelemetry/metrics.pb.c opentelemetry/common.pb.c opentelemetry/resource.pb.c \ $(SRCS_y) HDRS= rdkafka.h rdkafka_mock.h diff --git a/src/nanopb/pb.h b/src/nanopb/pb.h new file mode 100644 index 0000000000..ef3d83e95a --- /dev/null +++ b/src/nanopb/pb.h @@ -0,0 +1,917 @@ +/* Common parts of the nanopb library. Most of these are quite low-level + * stuff. For the high-level interface, see pb_encode.h and pb_decode.h. + */ + +#ifndef PB_H_INCLUDED +#define PB_H_INCLUDED + +/***************************************************************** + * Nanopb compilation time options. You can change these here by * + * uncommenting the lines, or on the compiler command line. * + *****************************************************************/ + +/* Enable support for dynamically allocated fields */ +/* #define PB_ENABLE_MALLOC 1 */ + +/* Define this if your CPU / compiler combination does not support + * unaligned memory access to packed structures. Note that packed + * structures are only used when requested in .proto options. */ +/* #define PB_NO_PACKED_STRUCTS 1 */ + +/* Increase the number of required fields that are tracked. + * A compiler warning will tell if you need this. */ +/* #define PB_MAX_REQUIRED_FIELDS 256 */ + +/* Add support for tag numbers > 65536 and fields larger than 65536 bytes. */ +/* #define PB_FIELD_32BIT 1 */ + +/* Disable support for error messages in order to save some code space. */ +/* #define PB_NO_ERRMSG 1 */ + +/* Disable support for custom streams (support only memory buffers). 
*/
+/* #define PB_BUFFER_ONLY 1 */
+
+/* Disable support for 64-bit datatypes, for compilers without int64_t
+ or to save some code space. */
+/* #define PB_WITHOUT_64BIT 1 */
+
+/* Don't encode scalar arrays as packed. This is only to be used when
+ * the decoder on the receiving side cannot process packed scalar arrays.
+ * One such example is older protobuf.js. */
+/* #define PB_ENCODE_ARRAYS_UNPACKED 1 */
+
+/* Enable conversion of doubles to floats for platforms that do not
+ * support 64-bit doubles. Most commonly AVR. */
+/* #define PB_CONVERT_DOUBLE_FLOAT 1 */
+
+/* Check whether incoming strings are valid UTF-8 sequences. Slows down
+ * the string processing slightly and slightly increases code size. */
+/* #define PB_VALIDATE_UTF8 1 */
+
+/* This can be defined if the platform is little-endian and has 8-bit bytes.
+ * Normally it is automatically detected based on __BYTE_ORDER__ macro. */
+/* #define PB_LITTLE_ENDIAN_8BIT 1 */
+
+/* Configure static assert mechanism. Instead of changing these, set your
+ * compiler to C11 standard mode if possible. */
+/* #define PB_C99_STATIC_ASSERT 1 */
+/* #define PB_NO_STATIC_ASSERT 1 */
+
+/******************************************************************
+ * You usually don't need to change anything below this line. *
+ * Feel free to look around and use the defined macros, though. *
+ ******************************************************************/
+
+
+/* Version of the nanopb library. Just in case you want to check it in
+ * your own program. */
+#define NANOPB_VERSION "nanopb-0.4.8-dev"
+
+/* Include all the system headers needed by nanopb. You will need the
+ * definitions of the following:
+ * - strlen, memcpy, memset functions
+ * - [u]int_least8_t, uint_fast8_t, [u]int_least16_t, [u]int32_t, [u]int64_t
+ * - size_t
+ * - bool
+ *
+ * If you don't have the standard header files, you can instead provide
+ * a custom header that defines or includes all this. In that case,
+ * define PB_SYSTEM_HEADER to the path of this file.
+ */
+#ifdef PB_SYSTEM_HEADER
+#include PB_SYSTEM_HEADER
+#else
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <limits.h>
+
+#ifdef PB_ENABLE_MALLOC
+#include <stdlib.h>
+#endif
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Macro for defining packed structures (compiler dependent).
+ * This just reduces memory requirements, but is not required.
+ */
+#if defined(PB_NO_PACKED_STRUCTS)
+ /* Disable struct packing */
+# define PB_PACKED_STRUCT_START
+# define PB_PACKED_STRUCT_END
+# define pb_packed
+#elif defined(__GNUC__) || defined(__clang__)
+ /* For GCC and clang */
+# define PB_PACKED_STRUCT_START
+# define PB_PACKED_STRUCT_END
+# define pb_packed __attribute__((packed))
+#elif defined(__ICCARM__) || defined(__CC_ARM)
+ /* For IAR ARM and Keil MDK-ARM compilers */
+# define PB_PACKED_STRUCT_START _Pragma("pack(push, 1)")
+# define PB_PACKED_STRUCT_END _Pragma("pack(pop)")
+# define pb_packed
+#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
+ /* For Microsoft Visual C++ */
+# define PB_PACKED_STRUCT_START __pragma(pack(push, 1))
+# define PB_PACKED_STRUCT_END __pragma(pack(pop))
+# define pb_packed
+#else
+ /* Unknown compiler */
+# define PB_PACKED_STRUCT_START
+# define PB_PACKED_STRUCT_END
+# define pb_packed
+#endif
+
+/* Detect endianness */
+#ifndef PB_LITTLE_ENDIAN_8BIT
+#if ((defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
+ (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || \
+ defined(__LITTLE_ENDIAN__) || defined(__ARMEL__) || \
+ defined(__THUMBEL__) || defined(__AARCH64EL__) || defined(_MIPSEL) || \
+ defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM)) \
+ && CHAR_BIT == 8
+#define PB_LITTLE_ENDIAN_8BIT 1
+#endif
+#endif
+
+/* Handy macro for suppressing unreferenced-parameter compiler warnings. */
+#ifndef PB_UNUSED
+#define PB_UNUSED(x) (void)(x)
+#endif
+
+/* Harvard-architecture processors may need special attributes for storing
+ * field information in program memory. */
+#ifndef PB_PROGMEM
+#ifdef __AVR__
+#include <avr/pgmspace.h>
+#define PB_PROGMEM PROGMEM
+#define PB_PROGMEM_READU32(x) pgm_read_dword(&x)
+#else
+#define PB_PROGMEM
+#define PB_PROGMEM_READU32(x) (x)
+#endif
+#endif
+
+/* Compile-time assertion, used for checking compatible compilation options.
+ * If this does not work properly on your compiler, use
+ * #define PB_NO_STATIC_ASSERT to disable it.
+ *
+ * But before doing that, check carefully the error message / place where it
+ * comes from to see if the error has a real cause. Unfortunately the error
+ * message is not always very clear to read, but you can see the reason better
+ * in the place where the PB_STATIC_ASSERT macro was called.
+ */
+#ifndef PB_NO_STATIC_ASSERT
+# ifndef PB_STATIC_ASSERT
+# if defined(__ICCARM__)
+ /* IAR has static_assert keyword but no _Static_assert */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# elif defined(_MSC_VER) && (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112)
+ /* MSVC in C89 mode supports static_assert() keyword anyway */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# elif defined(PB_C99_STATIC_ASSERT)
+ /* Classic negative-size-array static assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) typedef char PB_STATIC_ASSERT_MSG(MSG, __LINE__, __COUNTER__)[(COND)?1:-1];
+# define PB_STATIC_ASSERT_MSG(MSG, LINE, COUNTER) PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER)
+# define PB_STATIC_ASSERT_MSG_(MSG, LINE, COUNTER) pb_static_assertion_##MSG##_##LINE##_##COUNTER
+# elif defined(__cplusplus)
+ /* C++11 standard static_assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) static_assert(COND,#MSG);
+# else
+ /* C11 standard _Static_assert mechanism */
+# define PB_STATIC_ASSERT(COND,MSG) _Static_assert(COND,#MSG);
+# endif
+# endif
+#else
+ /* Static asserts disabled by PB_NO_STATIC_ASSERT */
+# define PB_STATIC_ASSERT(COND,MSG)
+#endif
+
+/* Test that PB_STATIC_ASSERT works
+ * If you get errors here, you may need to do one of these:
+ * - Enable C11 standard support in your compiler
+ * - Define PB_C99_STATIC_ASSERT to enable C99 standard support
+ * - Define PB_NO_STATIC_ASSERT to disable static asserts altogether
+ */
+PB_STATIC_ASSERT(1, STATIC_ASSERT_IS_NOT_WORKING)
+
+/* Number of required fields to keep track of. */
+#ifndef PB_MAX_REQUIRED_FIELDS
+#define PB_MAX_REQUIRED_FIELDS 64
+#endif
+
+#if PB_MAX_REQUIRED_FIELDS < 64
+#error You should not lower PB_MAX_REQUIRED_FIELDS from the default value (64).
+#endif
+
+#ifdef PB_WITHOUT_64BIT
+#ifdef PB_CONVERT_DOUBLE_FLOAT
+/* Cannot use doubles without 64-bit types */
+#undef PB_CONVERT_DOUBLE_FLOAT
+#endif
+#endif
+
+/* List of possible field types. These are used in the autogenerated code.
+ * Least-significant 4 bits tell the scalar type
+ * Most-significant 4 bits specify repeated/required/packed etc.
+ */
+
+typedef uint_least8_t pb_type_t;
+
+/**** Field data types ****/
+
+/* Numeric types */
+#define PB_LTYPE_BOOL 0x00U /* bool */
+#define PB_LTYPE_VARINT 0x01U /* int32, int64, enum, bool */
+#define PB_LTYPE_UVARINT 0x02U /* uint32, uint64 */
+#define PB_LTYPE_SVARINT 0x03U /* sint32, sint64 */
+#define PB_LTYPE_FIXED32 0x04U /* fixed32, sfixed32, float */
+#define PB_LTYPE_FIXED64 0x05U /* fixed64, sfixed64, double */
+
+/* Marker for last packable field type. */
+#define PB_LTYPE_LAST_PACKABLE 0x05U
+
+/* Byte array with pre-allocated buffer.
+ * data_size is the length of the allocated PB_BYTES_ARRAY structure. */
+#define PB_LTYPE_BYTES 0x06U
+
+/* String with pre-allocated buffer.
+ * data_size is the maximum length. */
+#define PB_LTYPE_STRING 0x07U
+
+/* Submessage
+ * submsg_fields is pointer to field descriptions */
+#define PB_LTYPE_SUBMESSAGE 0x08U
+
+/* Submessage with pre-decoding callback
+ * The pre-decoding callback is stored as pb_callback_t right before pSize.
+ * submsg_fields is pointer to field descriptions */
+#define PB_LTYPE_SUBMSG_W_CB 0x09U
+
+/* Extension pseudo-field
+ * The field contains a pointer to pb_extension_t */
+#define PB_LTYPE_EXTENSION 0x0AU
+
+/* Byte array with inline, pre-allocated buffer.
+ * data_size is the length of the inline, allocated buffer.
+ * This differs from PB_LTYPE_BYTES by defining the element as
+ * pb_byte_t[data_size] rather than pb_bytes_array_t. */
+#define PB_LTYPE_FIXED_LENGTH_BYTES 0x0BU
+
+/* Number of declared LTYPES */
+#define PB_LTYPES_COUNT 0x0CU
+#define PB_LTYPE_MASK 0x0FU
+
+/**** Field repetition rules ****/
+
+#define PB_HTYPE_REQUIRED 0x00U
+#define PB_HTYPE_OPTIONAL 0x10U
+#define PB_HTYPE_SINGULAR 0x10U
+#define PB_HTYPE_REPEATED 0x20U
+#define PB_HTYPE_FIXARRAY 0x20U
+#define PB_HTYPE_ONEOF 0x30U
+#define PB_HTYPE_MASK 0x30U
+
+/**** Field allocation types ****/
+
+#define PB_ATYPE_STATIC 0x00U
+#define PB_ATYPE_POINTER 0x80U
+#define PB_ATYPE_CALLBACK 0x40U
+#define PB_ATYPE_MASK 0xC0U
+
+#define PB_ATYPE(x) ((x) & PB_ATYPE_MASK)
+#define PB_HTYPE(x) ((x) & PB_HTYPE_MASK)
+#define PB_LTYPE(x) ((x) & PB_LTYPE_MASK)
+#define PB_LTYPE_IS_SUBMSG(x) (PB_LTYPE(x) == PB_LTYPE_SUBMESSAGE || \
+ PB_LTYPE(x) == PB_LTYPE_SUBMSG_W_CB)
+
+/* Data type used for storing sizes of struct fields
+ * and array counts.
+ */
+#if defined(PB_FIELD_32BIT)
+ typedef uint32_t pb_size_t;
+ typedef int32_t pb_ssize_t;
+#else
+ typedef uint_least16_t pb_size_t;
+ typedef int_least16_t pb_ssize_t;
+#endif
+#define PB_SIZE_MAX ((pb_size_t)-1)
+
+/* Data type for storing encoded data and other byte streams.
+ * This typedef exists to support platforms where uint8_t does not exist.
+ * You can regard it as equivalent to uint8_t on other platforms.
+ */
+typedef uint_least8_t pb_byte_t;
+
+/* Forward declaration of struct types */
+typedef struct pb_istream_s pb_istream_t;
+typedef struct pb_ostream_s pb_ostream_t;
+typedef struct pb_field_iter_s pb_field_iter_t;
+
+/* This structure is used in auto-generated constants
+ * to specify struct fields.
+ */
+typedef struct pb_msgdesc_s pb_msgdesc_t;
+struct pb_msgdesc_s {
+ const uint32_t *field_info;
+ const pb_msgdesc_t * const * submsg_info;
+ const pb_byte_t *default_value;
+
+ bool (*field_callback)(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_iter_t *field);
+
+ pb_size_t field_count;
+ pb_size_t required_field_count;
+ pb_size_t largest_tag;
+};
+
+/* Iterator for message descriptor */
+struct pb_field_iter_s {
+ const pb_msgdesc_t *descriptor; /* Pointer to message descriptor constant */
+ void *message; /* Pointer to start of the structure */
+
+ pb_size_t index; /* Index of the field */
+ pb_size_t field_info_index; /* Index to descriptor->field_info array */
+ pb_size_t required_field_index; /* Index that counts only the required fields */
+ pb_size_t submessage_index; /* Index that counts only submessages */
+
+ pb_size_t tag; /* Tag of current field */
+ pb_size_t data_size; /* sizeof() of a single item */
+ pb_size_t array_size; /* Number of array entries */
+ pb_type_t type; /* Type of current field */
+
+ void *pField; /* Pointer to current field in struct */
+ void *pData; /* Pointer to current data contents. Different than pField for arrays and pointers. */
+ void *pSize; /* Pointer to count/has field */
+
+ const pb_msgdesc_t *submsg_desc; /* For submessage fields, pointer to field descriptor for the submessage. */
+};
+
+/* For compatibility with legacy code */
+typedef pb_field_iter_t pb_field_t;
+
+/* Make sure that the standard integer types are of the expected sizes.
+ * Otherwise fixed32/fixed64 fields can break.
+ *
+ * If you get errors here, it probably means that your stdint.h is not
+ * correct for your platform.
+ */ +#ifndef PB_WITHOUT_64BIT +PB_STATIC_ASSERT(sizeof(int64_t) == 2 * sizeof(int32_t), INT64_T_WRONG_SIZE) +PB_STATIC_ASSERT(sizeof(uint64_t) == 2 * sizeof(uint32_t), UINT64_T_WRONG_SIZE) +#endif + +/* This structure is used for 'bytes' arrays. + * It has the number of bytes in the beginning, and after that an array. + * Note that actual structs used will have a different length of bytes array. + */ +#define PB_BYTES_ARRAY_T(n) struct { pb_size_t size; pb_byte_t bytes[n]; } +#define PB_BYTES_ARRAY_T_ALLOCSIZE(n) ((size_t)n + offsetof(pb_bytes_array_t, bytes)) + +struct pb_bytes_array_s { + pb_size_t size; + pb_byte_t bytes[1]; +}; +typedef struct pb_bytes_array_s pb_bytes_array_t; + +/* This structure is used for giving the callback function. + * It is stored in the message structure and filled in by the method that + * calls pb_decode. + * + * The decoding callback will be given a limited-length stream + * If the wire type was string, the length is the length of the string. + * If the wire type was a varint/fixed32/fixed64, the length is the length + * of the actual value. + * The function may be called multiple times (especially for repeated types, + * but also otherwise if the message happens to contain the field multiple + * times.) + * + * The encoding callback will receive the actual output stream. + * It should write all the data in one call, including the field tag and + * wire type. It can write multiple fields. + * + * The callback can be null if you want to skip a field. + */ +typedef struct pb_callback_s pb_callback_t; +struct pb_callback_s { + /* Callback functions receive a pointer to the arg field. + * You can access the value of the field as *arg, and modify it if needed. + */ + union { + bool (*decode)(pb_istream_t *stream, const pb_field_t *field, void **arg); + bool (*encode)(pb_ostream_t *stream, const pb_field_t *field, void * const *arg); + } funcs; + + /* Free arg for use by callback */ + void *arg; +}; + +extern bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field); + +/* Wire types. Library user needs these only in encoder callbacks. */ +typedef enum { + PB_WT_VARINT = 0, + PB_WT_64BIT = 1, + PB_WT_STRING = 2, + PB_WT_32BIT = 5, + PB_WT_PACKED = 255 /* PB_WT_PACKED is internal marker for packed arrays. */ +} pb_wire_type_t; + +/* Structure for defining the handling of unknown/extension fields. + * Usually the pb_extension_type_t structure is automatically generated, + * while the pb_extension_t structure is created by the user. However, + * if you want to catch all unknown fields, you can also create a custom + * pb_extension_type_t with your own callback. + */ +typedef struct pb_extension_type_s pb_extension_type_t; +typedef struct pb_extension_s pb_extension_t; +struct pb_extension_type_s { + /* Called for each unknown field in the message. + * If you handle the field, read off all of its data and return true. + * If you do not handle the field, do not read anything and return true. + * If you run into an error, return false. + * Set to NULL for default handler. + */ + bool (*decode)(pb_istream_t *stream, pb_extension_t *extension, + uint32_t tag, pb_wire_type_t wire_type); + + /* Called once after all regular fields have been encoded. + * If you have something to write, do so and return true. + * If you do not have anything to write, just return true. + * If you run into an error, return false. + * Set to NULL for default handler. 
+ */ + bool (*encode)(pb_ostream_t *stream, const pb_extension_t *extension); + + /* Free field for use by the callback. */ + const void *arg; +}; + +struct pb_extension_s { + /* Type describing the extension field. Usually you'll initialize + * this to a pointer to the automatically generated structure. */ + const pb_extension_type_t *type; + + /* Destination for the decoded data. This must match the datatype + * of the extension field. */ + void *dest; + + /* Pointer to the next extension handler, or NULL. + * If this extension does not match a field, the next handler is + * automatically called. */ + pb_extension_t *next; + + /* The decoder sets this to true if the extension was found. + * Ignored for encoding. */ + bool found; +}; + +#define pb_extension_init_zero {NULL,NULL,NULL,false} + +/* Memory allocation functions to use. You can define pb_realloc and + * pb_free to custom functions if you want. */ +#ifdef PB_ENABLE_MALLOC +# ifndef pb_realloc +# define pb_realloc(ptr, size) realloc(ptr, size) +# endif +# ifndef pb_free +# define pb_free(ptr) free(ptr) +# endif +#endif + +/* This is used to inform about need to regenerate .pb.h/.pb.c files. */ +#define PB_PROTO_HEADER_VERSION 40 + +/* These macros are used to declare pb_field_t's in the constant array. */ +/* Size of a structure member, in bytes. */ +#define pb_membersize(st, m) (sizeof ((st*)0)->m) +/* Number of entries in an array. */ +#define pb_arraysize(st, m) (pb_membersize(st, m) / pb_membersize(st, m[0])) +/* Delta from start of one member to the start of another member. */ +#define pb_delta(st, m1, m2) ((int)offsetof(st, m1) - (int)offsetof(st, m2)) + +/* Force expansion of macro value */ +#define PB_EXPAND(x) x + +/* Binding of a message field set into a specific structure */ +#define PB_BIND(msgname, structname, width) \ + const uint32_t structname ## _field_info[] PB_PROGMEM = \ + { \ + msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ ## width, structname) \ + 0 \ + }; \ + const pb_msgdesc_t* const structname ## _submsg_info[] = \ + { \ + msgname ## _FIELDLIST(PB_GEN_SUBMSG_INFO, structname) \ + NULL \ + }; \ + const pb_msgdesc_t structname ## _msg = \ + { \ + structname ## _field_info, \ + structname ## _submsg_info, \ + msgname ## _DEFAULT, \ + msgname ## _CALLBACK, \ + 0 msgname ## _FIELDLIST(PB_GEN_FIELD_COUNT, structname), \ + 0 msgname ## _FIELDLIST(PB_GEN_REQ_FIELD_COUNT, structname), \ + 0 msgname ## _FIELDLIST(PB_GEN_LARGEST_TAG, structname), \ + }; \ + msgname ## _FIELDLIST(PB_GEN_FIELD_INFO_ASSERT_ ## width, structname) + +#define PB_GEN_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) +1 +#define PB_GEN_REQ_FIELD_COUNT(structname, atype, htype, ltype, fieldname, tag) \ + + (PB_HTYPE_ ## htype == PB_HTYPE_REQUIRED) +#define PB_GEN_LARGEST_TAG(structname, atype, htype, ltype, fieldname, tag) \ + * 0 + tag + +/* X-macro for generating the entries in struct_field_info[] array. 
*/ +#define PB_GEN_FIELD_INFO_1(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_2(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_4(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_8(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_AUTO(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \ + tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_FIELDINFO_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \ + PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) + +#define PB_FIELDINFO_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \ + PB_FIELDINFO_ ## width(tag, type, data_offset, data_size, size_offset, array_size) + +/* X-macro for generating asserts that entries fit in struct_field_info[] array. + * The structure of macros here must match the structure above in PB_GEN_FIELD_INFO_x(), + * but it is not easily reused because of how macro substitutions work. 
*/ +#define PB_GEN_FIELD_INFO_ASSERT_1(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_1(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_2(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_2(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_4(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_4(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_8(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_8(tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_GEN_FIELD_INFO_ASSERT_AUTO(structname, atype, htype, ltype, fieldname, tag) \ + PB_FIELDINFO_ASSERT_AUTO2(PB_FIELDINFO_WIDTH_AUTO(_PB_ATYPE_ ## atype, _PB_HTYPE_ ## htype, _PB_LTYPE_ ## ltype), \ + tag, PB_ATYPE_ ## atype | PB_HTYPE_ ## htype | PB_LTYPE_MAP_ ## ltype, \ + PB_DATA_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_DATA_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_SIZE_OFFSET_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname), \ + PB_ARRAY_SIZE_ ## atype(_PB_HTYPE_ ## htype, structname, fieldname)) + +#define PB_FIELDINFO_ASSERT_AUTO2(width, tag, type, data_offset, data_size, size_offset, array_size) \ + PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) + +#define PB_FIELDINFO_ASSERT_AUTO3(width, tag, type, data_offset, data_size, size_offset, array_size) \ + PB_FIELDINFO_ASSERT_ ## width(tag, type, data_offset, data_size, size_offset, array_size) + +#define PB_DATA_OFFSET_STATIC(htype, structname, fieldname) PB_DO ## htype(structname, fieldname) +#define PB_DATA_OFFSET_POINTER(htype, structname, fieldname) PB_DO ## htype(structname, fieldname) +#define PB_DATA_OFFSET_CALLBACK(htype, structname, fieldname) PB_DO ## htype(structname, fieldname) +#define PB_DO_PB_HTYPE_REQUIRED(structname, fieldname) offsetof(structname, fieldname) +#define PB_DO_PB_HTYPE_SINGULAR(structname, fieldname) offsetof(structname, fieldname) +#define PB_DO_PB_HTYPE_ONEOF(structname, fieldname) offsetof(structname, PB_ONEOF_NAME(FULL, fieldname)) +#define PB_DO_PB_HTYPE_OPTIONAL(structname, fieldname) offsetof(structname, 
fieldname) +#define PB_DO_PB_HTYPE_REPEATED(structname, fieldname) offsetof(structname, fieldname) +#define PB_DO_PB_HTYPE_FIXARRAY(structname, fieldname) offsetof(structname, fieldname) + +#define PB_SIZE_OFFSET_STATIC(htype, structname, fieldname) PB_SO ## htype(structname, fieldname) +#define PB_SIZE_OFFSET_POINTER(htype, structname, fieldname) PB_SO_PTR ## htype(structname, fieldname) +#define PB_SIZE_OFFSET_CALLBACK(htype, structname, fieldname) PB_SO_CB ## htype(structname, fieldname) +#define PB_SO_PB_HTYPE_REQUIRED(structname, fieldname) 0 +#define PB_SO_PB_HTYPE_SINGULAR(structname, fieldname) 0 +#define PB_SO_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF2(structname, PB_ONEOF_NAME(FULL, fieldname), PB_ONEOF_NAME(UNION, fieldname)) +#define PB_SO_PB_HTYPE_ONEOF2(structname, fullname, unionname) PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname) +#define PB_SO_PB_HTYPE_ONEOF3(structname, fullname, unionname) pb_delta(structname, fullname, which_ ## unionname) +#define PB_SO_PB_HTYPE_OPTIONAL(structname, fieldname) pb_delta(structname, fieldname, has_ ## fieldname) +#define PB_SO_PB_HTYPE_REPEATED(structname, fieldname) pb_delta(structname, fieldname, fieldname ## _count) +#define PB_SO_PB_HTYPE_FIXARRAY(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname) +#define PB_SO_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 0 +#define PB_SO_PTR_PB_HTYPE_REPEATED(structname, fieldname) PB_SO_PB_HTYPE_REPEATED(structname, fieldname) +#define PB_SO_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_REQUIRED(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_SINGULAR(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_ONEOF(structname, fieldname) PB_SO_PB_HTYPE_ONEOF(structname, fieldname) +#define PB_SO_CB_PB_HTYPE_OPTIONAL(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_REPEATED(structname, fieldname) 0 +#define PB_SO_CB_PB_HTYPE_FIXARRAY(structname, fieldname) 0 + +#define PB_ARRAY_SIZE_STATIC(htype, structname, fieldname) PB_AS ## htype(structname, fieldname) +#define PB_ARRAY_SIZE_POINTER(htype, structname, fieldname) PB_AS_PTR ## htype(structname, fieldname) +#define PB_ARRAY_SIZE_CALLBACK(htype, structname, fieldname) 1 +#define PB_AS_PB_HTYPE_REQUIRED(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_SINGULAR(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_OPTIONAL(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_ONEOF(structname, fieldname) 1 +#define PB_AS_PB_HTYPE_REPEATED(structname, fieldname) pb_arraysize(structname, fieldname) +#define PB_AS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname) +#define PB_AS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_ONEOF(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_REPEATED(structname, fieldname) 1 +#define PB_AS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_arraysize(structname, fieldname[0]) + +#define PB_DATA_SIZE_STATIC(htype, structname, fieldname) PB_DS ## htype(structname, fieldname) +#define PB_DATA_SIZE_POINTER(htype, structname, fieldname) PB_DS_PTR ## htype(structname, fieldname) +#define PB_DATA_SIZE_CALLBACK(htype, structname, fieldname) PB_DS_CB ## htype(structname, fieldname) +#define 
PB_DS_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)) +#define PB_DS_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)[0]) +#define PB_DS_PTR_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname[0]) +#define PB_DS_PTR_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname[0][0]) +#define PB_DS_CB_PB_HTYPE_REQUIRED(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_SINGULAR(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_OPTIONAL(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_ONEOF(structname, fieldname) pb_membersize(structname, PB_ONEOF_NAME(FULL, fieldname)) +#define PB_DS_CB_PB_HTYPE_REPEATED(structname, fieldname) pb_membersize(structname, fieldname) +#define PB_DS_CB_PB_HTYPE_FIXARRAY(structname, fieldname) pb_membersize(structname, fieldname) + +#define PB_ONEOF_NAME(type, tuple) PB_EXPAND(PB_ONEOF_NAME_ ## type tuple) +#define PB_ONEOF_NAME_UNION(unionname,membername,fullname) unionname +#define PB_ONEOF_NAME_MEMBER(unionname,membername,fullname) membername +#define PB_ONEOF_NAME_FULL(unionname,membername,fullname) fullname + +#define PB_GEN_SUBMSG_INFO(structname, atype, htype, ltype, fieldname, tag) \ + PB_SUBMSG_INFO_ ## htype(_PB_LTYPE_ ## ltype, structname, fieldname) + +#define PB_SUBMSG_INFO_REQUIRED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_SINGULAR(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_OPTIONAL(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_ONEOF(ltype, structname, fieldname) PB_SUBMSG_INFO_ONEOF2(ltype, structname, PB_ONEOF_NAME(UNION, fieldname), PB_ONEOF_NAME(MEMBER, fieldname)) +#define PB_SUBMSG_INFO_ONEOF2(ltype, structname, unionname, membername) PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername) +#define PB_SUBMSG_INFO_ONEOF3(ltype, structname, unionname, membername) PB_SI ## ltype(structname ## _ ## unionname ## _ ## membername ## _MSGTYPE) +#define PB_SUBMSG_INFO_REPEATED(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SUBMSG_INFO_FIXARRAY(ltype, structname, fieldname) PB_SI ## ltype(structname ## _ ## fieldname ## _MSGTYPE) +#define PB_SI_PB_LTYPE_BOOL(t) +#define PB_SI_PB_LTYPE_BYTES(t) +#define PB_SI_PB_LTYPE_DOUBLE(t) +#define PB_SI_PB_LTYPE_ENUM(t) +#define PB_SI_PB_LTYPE_UENUM(t) +#define PB_SI_PB_LTYPE_FIXED32(t) +#define PB_SI_PB_LTYPE_FIXED64(t) +#define PB_SI_PB_LTYPE_FLOAT(t) 
+#define PB_SI_PB_LTYPE_INT32(t)
+#define PB_SI_PB_LTYPE_INT64(t)
+#define PB_SI_PB_LTYPE_MESSAGE(t) PB_SUBMSG_DESCRIPTOR(t)
+#define PB_SI_PB_LTYPE_MSG_W_CB(t) PB_SUBMSG_DESCRIPTOR(t)
+#define PB_SI_PB_LTYPE_SFIXED32(t)
+#define PB_SI_PB_LTYPE_SFIXED64(t)
+#define PB_SI_PB_LTYPE_SINT32(t)
+#define PB_SI_PB_LTYPE_SINT64(t)
+#define PB_SI_PB_LTYPE_STRING(t)
+#define PB_SI_PB_LTYPE_UINT32(t)
+#define PB_SI_PB_LTYPE_UINT64(t)
+#define PB_SI_PB_LTYPE_EXTENSION(t)
+#define PB_SI_PB_LTYPE_FIXED_LENGTH_BYTES(t)
+#define PB_SUBMSG_DESCRIPTOR(t) &(t ## _msg),
+
+/* The field descriptors use a variable width format, with width of either
+ * 1, 2, 4 or 8 of 32-bit words. The two lowest bytes of the first word always
+ * encode the descriptor size, 6 lowest bits of field tag number, and 8 bits
+ * of the field type.
+ *
+ * Descriptor size is encoded as 0 = 1 word, 1 = 2 words, 2 = 4 words, 3 = 8 words.
+ *
+ * Formats, listed starting with the least significant bit of the first word.
+ * 1 word:  [2-bit len] [6-bit tag] [8-bit type] [8-bit data_offset] [4-bit size_offset] [4-bit data_size]
+ *
+ * 2 words: [2-bit len] [6-bit tag] [8-bit type] [12-bit array_size] [4-bit size_offset]
+ *          [16-bit data_offset] [12-bit data_size] [4-bit tag>>6]
+ *
+ * 4 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit array_size]
+ *          [8-bit size_offset] [24-bit tag>>6]
+ *          [32-bit data_offset]
+ *          [32-bit data_size]
+ *
+ * 8 words: [2-bit len] [6-bit tag] [8-bit type] [16-bit reserved]
+ *          [8-bit size_offset] [24-bit tag>>6]
+ *          [32-bit data_offset]
+ *          [32-bit data_size]
+ *          [32-bit array_size]
+ *          [32-bit reserved]
+ *          [32-bit reserved]
+ *          [32-bit reserved]
+ */
+
+#define PB_FIELDINFO_1(tag, type, data_offset, data_size, size_offset, array_size) \
+    (0 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(data_offset) & 0xFF) << 16) | \
+     (((uint32_t)(size_offset) & 0x0F) << 24) | (((uint32_t)(data_size) & 0x0F) << 28)),
+
+#define PB_FIELDINFO_2(tag, type, data_offset, data_size, size_offset, array_size) \
+    (1 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFF) << 16) | (((uint32_t)(size_offset) & 0x0F) << 28)), \
+    (((uint32_t)(data_offset) & 0xFFFF) | (((uint32_t)(data_size) & 0xFFF) << 16) | (((uint32_t)(tag) & 0x3c0) << 22)),
+
+#define PB_FIELDINFO_4(tag, type, data_offset, data_size, size_offset, array_size) \
+    (2 | (((tag) << 2) & 0xFF) | ((type) << 8) | (((uint32_t)(array_size) & 0xFFFF) << 16)), \
+    ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
+    (data_offset), (data_size),
+
+#define PB_FIELDINFO_8(tag, type, data_offset, data_size, size_offset, array_size) \
+    (3 | (((tag) << 2) & 0xFF) | ((type) << 8)), \
+    ((uint32_t)(int_least8_t)(size_offset) | (((uint32_t)(tag) << 2) & 0xFFFFFF00)), \
+    (data_offset), (data_size), (array_size), 0, 0, 0,
+
+/* These assertions verify that the field information fits in the allocated space.
+ * The generator tries to automatically determine the correct width that can fit all
+ * data associated with a message. These asserts will fail only if there has been a
+ * problem in the automatic logic - this may be worth reporting as a bug. As a workaround,
+ * you can increase the descriptor width by defining PB_FIELDINFO_WIDTH or by setting
+ * descriptorsize option in .options file.
+ */
+#define PB_FITS(value,bits) ((uint32_t)(value) < ((uint32_t)1<<bits))
+#define PB_FIELDINFO_ASSERT_1(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,6) && PB_FITS(data_offset,8) && PB_FITS(size_offset,4) && PB_FITS(data_size,4) && PB_FITS(array_size,1), FIELDINFO_DOES_NOT_FIT_width1_field ## tag)
+
+#define PB_FIELDINFO_ASSERT_2(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,10) && PB_FITS(data_offset,16) && PB_FITS(size_offset,4) && PB_FITS(data_size,12) && PB_FITS(array_size,12), FIELDINFO_DOES_NOT_FIT_width2_field ## tag)
+
+#ifndef PB_FIELD_32BIT
+/* Maximum field sizes are still 16-bit if pb_size_t is 16-bit */
+#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS(size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag)
+
+#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \
+    PB_STATIC_ASSERT(PB_FITS(tag,16) && PB_FITS(data_offset,16) && PB_FITS(size_offset,8) && PB_FITS(data_size,16) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width8_field ## tag)
+#else
+/* Up to 32-bit fields supported.
+ * Note that the checks are against 31 bits to avoid compiler warnings about shift
+ * wider than type in the test. I expect no-one to have >2GB messages with nanopb anyway.
+ */ +#define PB_FIELDINFO_ASSERT_4(tag, type, data_offset, data_size, size_offset, array_size) \ + PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,16), FIELDINFO_DOES_NOT_FIT_width4_field ## tag) + +#define PB_FIELDINFO_ASSERT_8(tag, type, data_offset, data_size, size_offset, array_size) \ + PB_STATIC_ASSERT(PB_FITS(tag,30) && PB_FITS(data_offset,31) && PB_FITS(size_offset,8) && PB_FITS(data_size,31) && PB_FITS(array_size,31), FIELDINFO_DOES_NOT_FIT_width8_field ## tag) +#endif + + +/* Automatic picking of FIELDINFO width: + * Uses width 1 when possible, otherwise resorts to width 2. + * This is used when PB_BIND() is called with "AUTO" as the argument. + * The generator will give explicit size argument when it knows that a message + * structure grows beyond 1-word format limits. + */ +#define PB_FIELDINFO_WIDTH_AUTO(atype, htype, ltype) PB_FI_WIDTH ## atype(htype, ltype) +#define PB_FI_WIDTH_PB_ATYPE_STATIC(htype, ltype) PB_FI_WIDTH ## htype(ltype) +#define PB_FI_WIDTH_PB_ATYPE_POINTER(htype, ltype) PB_FI_WIDTH ## htype(ltype) +#define PB_FI_WIDTH_PB_ATYPE_CALLBACK(htype, ltype) 2 +#define PB_FI_WIDTH_PB_HTYPE_REQUIRED(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_SINGULAR(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_OPTIONAL(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_ONEOF(ltype) PB_FI_WIDTH ## ltype +#define PB_FI_WIDTH_PB_HTYPE_REPEATED(ltype) 2 +#define PB_FI_WIDTH_PB_HTYPE_FIXARRAY(ltype) 2 +#define PB_FI_WIDTH_PB_LTYPE_BOOL 1 +#define PB_FI_WIDTH_PB_LTYPE_BYTES 2 +#define PB_FI_WIDTH_PB_LTYPE_DOUBLE 1 +#define PB_FI_WIDTH_PB_LTYPE_ENUM 1 +#define PB_FI_WIDTH_PB_LTYPE_UENUM 1 +#define PB_FI_WIDTH_PB_LTYPE_FIXED32 1 +#define PB_FI_WIDTH_PB_LTYPE_FIXED64 1 +#define PB_FI_WIDTH_PB_LTYPE_FLOAT 1 +#define PB_FI_WIDTH_PB_LTYPE_INT32 1 +#define PB_FI_WIDTH_PB_LTYPE_INT64 1 +#define PB_FI_WIDTH_PB_LTYPE_MESSAGE 2 +#define PB_FI_WIDTH_PB_LTYPE_MSG_W_CB 2 +#define PB_FI_WIDTH_PB_LTYPE_SFIXED32 1 +#define PB_FI_WIDTH_PB_LTYPE_SFIXED64 1 +#define PB_FI_WIDTH_PB_LTYPE_SINT32 1 +#define PB_FI_WIDTH_PB_LTYPE_SINT64 1 +#define PB_FI_WIDTH_PB_LTYPE_STRING 2 +#define PB_FI_WIDTH_PB_LTYPE_UINT32 1 +#define PB_FI_WIDTH_PB_LTYPE_UINT64 1 +#define PB_FI_WIDTH_PB_LTYPE_EXTENSION 1 +#define PB_FI_WIDTH_PB_LTYPE_FIXED_LENGTH_BYTES 2 + +/* The mapping from protobuf types to LTYPEs is done using these macros. */ +#define PB_LTYPE_MAP_BOOL PB_LTYPE_BOOL +#define PB_LTYPE_MAP_BYTES PB_LTYPE_BYTES +#define PB_LTYPE_MAP_DOUBLE PB_LTYPE_FIXED64 +#define PB_LTYPE_MAP_ENUM PB_LTYPE_VARINT +#define PB_LTYPE_MAP_UENUM PB_LTYPE_UVARINT +#define PB_LTYPE_MAP_FIXED32 PB_LTYPE_FIXED32 +#define PB_LTYPE_MAP_FIXED64 PB_LTYPE_FIXED64 +#define PB_LTYPE_MAP_FLOAT PB_LTYPE_FIXED32 +#define PB_LTYPE_MAP_INT32 PB_LTYPE_VARINT +#define PB_LTYPE_MAP_INT64 PB_LTYPE_VARINT +#define PB_LTYPE_MAP_MESSAGE PB_LTYPE_SUBMESSAGE +#define PB_LTYPE_MAP_MSG_W_CB PB_LTYPE_SUBMSG_W_CB +#define PB_LTYPE_MAP_SFIXED32 PB_LTYPE_FIXED32 +#define PB_LTYPE_MAP_SFIXED64 PB_LTYPE_FIXED64 +#define PB_LTYPE_MAP_SINT32 PB_LTYPE_SVARINT +#define PB_LTYPE_MAP_SINT64 PB_LTYPE_SVARINT +#define PB_LTYPE_MAP_STRING PB_LTYPE_STRING +#define PB_LTYPE_MAP_UINT32 PB_LTYPE_UVARINT +#define PB_LTYPE_MAP_UINT64 PB_LTYPE_UVARINT +#define PB_LTYPE_MAP_EXTENSION PB_LTYPE_EXTENSION +#define PB_LTYPE_MAP_FIXED_LENGTH_BYTES PB_LTYPE_FIXED_LENGTH_BYTES + +/* These macros are used for giving out error messages. 
+ * They are mostly a debugging aid; the main error information
+ * is the true/false return value from functions.
+ * Some code space can be saved by disabling the error
+ * messages if not used.
+ *
+ * PB_SET_ERROR() sets the error message if none has been set yet.
+ *                msg must be a constant string literal.
+ * PB_GET_ERROR() always returns a pointer to a string.
+ * PB_RETURN_ERROR() sets the error and returns false from current
+ *                   function.
+ */
+#ifdef PB_NO_ERRMSG
+#define PB_SET_ERROR(stream, msg) PB_UNUSED(stream)
+#define PB_GET_ERROR(stream) "(errmsg disabled)"
+#else
+#define PB_SET_ERROR(stream, msg) (stream->errmsg = (stream)->errmsg ? (stream)->errmsg : (msg))
+#define PB_GET_ERROR(stream) ((stream)->errmsg ? (stream)->errmsg : "(none)")
+#endif
+
+#define PB_RETURN_ERROR(stream, msg) return PB_SET_ERROR(stream, msg), false
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#ifdef __cplusplus
+#if __cplusplus >= 201103L
+#define PB_CONSTEXPR constexpr
+#else // __cplusplus >= 201103L
+#define PB_CONSTEXPR
+#endif // __cplusplus >= 201103L
+
+#if __cplusplus >= 201703L
+#define PB_INLINE_CONSTEXPR inline constexpr
+#else // __cplusplus >= 201703L
+#define PB_INLINE_CONSTEXPR PB_CONSTEXPR
+#endif // __cplusplus >= 201703L
+
+extern "C++"
+{
+namespace nanopb {
+// Each type will be partially specialized by the generator.
+template <typename T>
+struct MessageDescriptor;
+} // namespace nanopb
+}
+#endif /* __cplusplus */
+
+#endif
diff --git a/src/nanopb/pb_common.c b/src/nanopb/pb_common.c
new file mode 100644
index 0000000000..e4765d8a6c
--- /dev/null
+++ b/src/nanopb/pb_common.c
@@ -0,0 +1,388 @@
+/* pb_common.c: Common support functions for pb_encode.c and pb_decode.c.
+ *
+ * 2014 Petteri Aimonen <jpa@kapsi.fi>
+ */
+
+#include "nanopb/pb_common.h"
+
+static bool load_descriptor_values(pb_field_iter_t *iter)
+{
+    uint32_t word0;
+    uint32_t data_offset;
+    int_least8_t size_offset;
+
+    if (iter->index >= iter->descriptor->field_count)
+        return false;
+
+    word0 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]);
+    iter->type = (pb_type_t)((word0 >> 8) & 0xFF);
+
+    switch(word0 & 3)
+    {
+        case 0: {
+            /* 1-word format */
+            iter->array_size = 1;
+            iter->tag = (pb_size_t)((word0 >> 2) & 0x3F);
+            size_offset = (int_least8_t)((word0 >> 24) & 0x0F);
+            data_offset = (word0 >> 16) & 0xFF;
+            iter->data_size = (pb_size_t)((word0 >> 28) & 0x0F);
+            break;
+        }
+
+        case 1: {
+            /* 2-word format */
+            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+
+            iter->array_size = (pb_size_t)((word0 >> 16) & 0x0FFF);
+            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 28) << 6));
+            size_offset = (int_least8_t)((word0 >> 28) & 0x0F);
+            data_offset = word1 & 0xFFFF;
+            iter->data_size = (pb_size_t)((word1 >> 16) & 0x0FFF);
+            break;
+        }
+
+        case 2: {
+            /* 4-word format */
+            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+            uint32_t word2 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]);
+            uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]);
+
+            iter->array_size = (pb_size_t)(word0 >> 16);
+            iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6));
+            size_offset = (int_least8_t)(word1 & 0xFF);
+            data_offset = word2;
+            iter->data_size = (pb_size_t)word3;
+            break;
+        }
+
+        default: {
+            /* 8-word format */
+            uint32_t word1 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 1]);
+            uint32_t word2 =
PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 2]); + uint32_t word3 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 3]); + uint32_t word4 = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index + 4]); + + iter->array_size = (pb_size_t)word4; + iter->tag = (pb_size_t)(((word0 >> 2) & 0x3F) | ((word1 >> 8) << 6)); + size_offset = (int_least8_t)(word1 & 0xFF); + data_offset = word2; + iter->data_size = (pb_size_t)word3; + break; + } + } + + if (!iter->message) + { + /* Avoid doing arithmetic on null pointers, it is undefined */ + iter->pField = NULL; + iter->pSize = NULL; + } + else + { + iter->pField = (char*)iter->message + data_offset; + + if (size_offset) + { + iter->pSize = (char*)iter->pField - size_offset; + } + else if (PB_HTYPE(iter->type) == PB_HTYPE_REPEATED && + (PB_ATYPE(iter->type) == PB_ATYPE_STATIC || + PB_ATYPE(iter->type) == PB_ATYPE_POINTER)) + { + /* Fixed count array */ + iter->pSize = &iter->array_size; + } + else + { + iter->pSize = NULL; + } + + if (PB_ATYPE(iter->type) == PB_ATYPE_POINTER && iter->pField != NULL) + { + iter->pData = *(void**)iter->pField; + } + else + { + iter->pData = iter->pField; + } + } + + if (PB_LTYPE_IS_SUBMSG(iter->type)) + { + iter->submsg_desc = iter->descriptor->submsg_info[iter->submessage_index]; + } + else + { + iter->submsg_desc = NULL; + } + + return true; +} + +static void advance_iterator(pb_field_iter_t *iter) +{ + iter->index++; + + if (iter->index >= iter->descriptor->field_count) + { + /* Restart */ + iter->index = 0; + iter->field_info_index = 0; + iter->submessage_index = 0; + iter->required_field_index = 0; + } + else + { + /* Increment indexes based on previous field type. + * All field info formats have the following fields: + * - lowest 2 bits tell the amount of words in the descriptor (2^n words) + * - bits 2..7 give the lowest bits of tag number. + * - bits 8..15 give the field type. + */ + uint32_t prev_descriptor = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]); + pb_type_t prev_type = (prev_descriptor >> 8) & 0xFF; + pb_size_t descriptor_len = (pb_size_t)(1 << (prev_descriptor & 3)); + + /* Add to fields. + * The cast to pb_size_t is needed to avoid -Wconversion warning. + * Because the data is is constants from generator, there is no danger of overflow. + */ + iter->field_info_index = (pb_size_t)(iter->field_info_index + descriptor_len); + iter->required_field_index = (pb_size_t)(iter->required_field_index + (PB_HTYPE(prev_type) == PB_HTYPE_REQUIRED)); + iter->submessage_index = (pb_size_t)(iter->submessage_index + PB_LTYPE_IS_SUBMSG(prev_type)); + } +} + +bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message) +{ + memset(iter, 0, sizeof(*iter)); + + iter->descriptor = desc; + iter->message = message; + + return load_descriptor_values(iter); +} + +bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension) +{ + const pb_msgdesc_t *msg = (const pb_msgdesc_t*)extension->type->arg; + bool status; + + uint32_t word0 = PB_PROGMEM_READU32(msg->field_info[0]); + if (PB_ATYPE(word0 >> 8) == PB_ATYPE_POINTER) + { + /* For pointer extensions, the pointer is stored directly + * in the extension structure. This avoids having an extra + * indirection. 
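+ * In other words, &extension->dest itself is passed below as the message
+ * pointer, so the single generated field reads and writes extension->dest
+ * directly instead of dereferencing it first.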
*/ + status = pb_field_iter_begin(iter, msg, &extension->dest); + } + else + { + status = pb_field_iter_begin(iter, msg, extension->dest); + } + + iter->pSize = &extension->found; + return status; +} + +bool pb_field_iter_next(pb_field_iter_t *iter) +{ + advance_iterator(iter); + (void)load_descriptor_values(iter); + return iter->index != 0; +} + +bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag) +{ + if (iter->tag == tag) + { + return true; /* Nothing to do, correct field already. */ + } + else if (tag > iter->descriptor->largest_tag) + { + return false; + } + else + { + pb_size_t start = iter->index; + uint32_t fieldinfo; + + if (tag < iter->tag) + { + /* Fields are in tag number order, so we know that tag is between + * 0 and our start position. Setting index to end forces + * advance_iterator() call below to restart from beginning. */ + iter->index = iter->descriptor->field_count; + } + + do + { + /* Advance iterator but don't load values yet */ + advance_iterator(iter); + + /* Do fast check for tag number match */ + fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]); + + if (((fieldinfo >> 2) & 0x3F) == (tag & 0x3F)) + { + /* Good candidate, check further */ + (void)load_descriptor_values(iter); + + if (iter->tag == tag && + PB_LTYPE(iter->type) != PB_LTYPE_EXTENSION) + { + /* Found it */ + return true; + } + } + } while (iter->index != start); + + /* Searched all the way back to start, and found nothing. */ + (void)load_descriptor_values(iter); + return false; + } +} + +bool pb_field_iter_find_extension(pb_field_iter_t *iter) +{ + if (PB_LTYPE(iter->type) == PB_LTYPE_EXTENSION) + { + return true; + } + else + { + pb_size_t start = iter->index; + uint32_t fieldinfo; + + do + { + /* Advance iterator but don't load values yet */ + advance_iterator(iter); + + /* Do fast check for field type */ + fieldinfo = PB_PROGMEM_READU32(iter->descriptor->field_info[iter->field_info_index]); + + if (PB_LTYPE((fieldinfo >> 8) & 0xFF) == PB_LTYPE_EXTENSION) + { + return load_descriptor_values(iter); + } + } while (iter->index != start); + + /* Searched all the way back to start, and found nothing. */ + (void)load_descriptor_values(iter); + return false; + } +} + +static void *pb_const_cast(const void *p) +{ + /* Note: this casts away const, in order to use the common field iterator + * logic for both encoding and decoding. The cast is done using union + * to avoid spurious compiler warnings. 
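+ * A direct cast would discard the const qualifier and can trigger
+ * -Wcast-qual style diagnostics; round-tripping through the union
+ * sidesteps that.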
*/ + union { + void *p1; + const void *p2; + } t; + t.p2 = p; + return t.p1; +} + +bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message) +{ + return pb_field_iter_begin(iter, desc, pb_const_cast(message)); +} + +bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension) +{ + return pb_field_iter_begin_extension(iter, (pb_extension_t*)pb_const_cast(extension)); +} + +bool pb_default_field_callback(pb_istream_t *istream, pb_ostream_t *ostream, const pb_field_t *field) +{ + if (field->data_size == sizeof(pb_callback_t)) + { + pb_callback_t *pCallback = (pb_callback_t*)field->pData; + + if (pCallback != NULL) + { + if (istream != NULL && pCallback->funcs.decode != NULL) + { + return pCallback->funcs.decode(istream, field, &pCallback->arg); + } + + if (ostream != NULL && pCallback->funcs.encode != NULL) + { + return pCallback->funcs.encode(ostream, field, &pCallback->arg); + } + } + } + + return true; /* Success, but didn't do anything */ + +} + +#ifdef PB_VALIDATE_UTF8 + +/* This function checks whether a string is valid UTF-8 text. + * + * Algorithm is adapted from https://www.cl.cam.ac.uk/~mgk25/ucs/utf8_check.c + * Original copyright: Markus Kuhn 2005-03-30 + * Licensed under "Short code license", which allows use under MIT license or + * any compatible with it. + */ + +bool pb_validate_utf8(const char *str) +{ + const pb_byte_t *s = (const pb_byte_t*)str; + while (*s) + { + if (*s < 0x80) + { + /* 0xxxxxxx */ + s++; + } + else if ((s[0] & 0xe0) == 0xc0) + { + /* 110XXXXx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[0] & 0xfe) == 0xc0) /* overlong? */ + return false; + else + s += 2; + } + else if ((s[0] & 0xf0) == 0xe0) + { + /* 1110XXXX 10Xxxxxx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + (s[0] == 0xe0 && (s[1] & 0xe0) == 0x80) || /* overlong? */ + (s[0] == 0xed && (s[1] & 0xe0) == 0xa0) || /* surrogate? */ + (s[0] == 0xef && s[1] == 0xbf && + (s[2] & 0xfe) == 0xbe)) /* U+FFFE or U+FFFF? */ + return false; + else + s += 3; + } + else if ((s[0] & 0xf8) == 0xf0) + { + /* 11110XXX 10XXxxxx 10xxxxxx 10xxxxxx */ + if ((s[1] & 0xc0) != 0x80 || + (s[2] & 0xc0) != 0x80 || + (s[3] & 0xc0) != 0x80 || + (s[0] == 0xf0 && (s[1] & 0xf0) == 0x80) || /* overlong? */ + (s[0] == 0xf4 && s[1] > 0x8f) || s[0] > 0xf4) /* > U+10FFFF? */ + return false; + else + s += 4; + } + else + { + return false; + } + } + + return true; +} + +#endif + diff --git a/src/nanopb/pb_common.h b/src/nanopb/pb_common.h new file mode 100644 index 0000000000..dda3af3b96 --- /dev/null +++ b/src/nanopb/pb_common.h @@ -0,0 +1,49 @@ +/* pb_common.h: Common support functions for pb_encode.c and pb_decode.c. + * These functions are rarely needed by applications directly. + */ + +#ifndef PB_COMMON_H_INCLUDED +#define PB_COMMON_H_INCLUDED + +#include "nanopb/pb.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Initialize the field iterator structure to beginning. + * Returns false if the message type is empty. */ +bool pb_field_iter_begin(pb_field_iter_t *iter, const pb_msgdesc_t *desc, void *message); + +/* Get a field iterator for extension field. */ +bool pb_field_iter_begin_extension(pb_field_iter_t *iter, pb_extension_t *extension); + +/* Same as pb_field_iter_begin(), but for const message pointer. + * Note that the pointers in pb_field_iter_t will be non-const but shouldn't + * be written to when using these functions. 
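+ *
+ * A minimal read-only traversal sketch (assuming a decoded variable 'msg'
+ * of a hypothetical message type with generated descriptor MyMsg_msg):
+ *
+ *   pb_field_iter_t it;
+ *   if (pb_field_iter_begin_const(&it, &MyMsg_msg, &msg))
+ *   {
+ *       do {
+ *           printf("field tag %u\n", (unsigned)it.tag);
+ *       } while (pb_field_iter_next(&it));
+ *   }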
*/ +bool pb_field_iter_begin_const(pb_field_iter_t *iter, const pb_msgdesc_t *desc, const void *message); +bool pb_field_iter_begin_extension_const(pb_field_iter_t *iter, const pb_extension_t *extension); + +/* Advance the iterator to the next field. + * Returns false when the iterator wraps back to the first field. */ +bool pb_field_iter_next(pb_field_iter_t *iter); + +/* Advance the iterator until it points at a field with the given tag. + * Returns false if no such field exists. */ +bool pb_field_iter_find(pb_field_iter_t *iter, uint32_t tag); + +/* Find a field with type PB_LTYPE_EXTENSION, or return false if not found. + * There can be only one extension range field per message. */ +bool pb_field_iter_find_extension(pb_field_iter_t *iter); + +#ifdef PB_VALIDATE_UTF8 +/* Validate UTF-8 text string */ +bool pb_validate_utf8(const char *s); +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif + diff --git a/src/nanopb/pb_decode.c b/src/nanopb/pb_decode.c new file mode 100644 index 0000000000..28ad344f57 --- /dev/null +++ b/src/nanopb/pb_decode.c @@ -0,0 +1,1727 @@ +/* pb_decode.c -- decode a protobuf using minimal resources + * + * 2011 Petteri Aimonen + */ + +/* Use the GCC warn_unused_result attribute to check that all return values + * are propagated correctly. On other compilers and gcc before 3.4.0 just + * ignore the annotation. + */ +#if !defined(__GNUC__) || ( __GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 4) + #define checkreturn +#else + #define checkreturn __attribute__((warn_unused_result)) +#endif + +#include "nanopb/pb.h" +#include "nanopb/pb_decode.h" +#include "nanopb/pb_common.h" + +/************************************** + * Declarations internal to this file * + **************************************/ + +static bool checkreturn buf_read(pb_istream_t *stream, pb_byte_t *buf, size_t count); +static bool checkreturn pb_decode_varint32_eof(pb_istream_t *stream, uint32_t *dest, bool *eof); +static bool checkreturn read_raw_value(pb_istream_t *stream, pb_wire_type_t wire_type, pb_byte_t *buf, size_t *size); +static bool checkreturn decode_basic_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field); +static bool checkreturn decode_static_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field); +static bool checkreturn decode_pointer_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field); +static bool checkreturn decode_callback_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field); +static bool checkreturn decode_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field); +static bool checkreturn default_extension_decoder(pb_istream_t *stream, pb_extension_t *extension, uint32_t tag, pb_wire_type_t wire_type); +static bool checkreturn decode_extension(pb_istream_t *stream, uint32_t tag, pb_wire_type_t wire_type, pb_extension_t *extension); +static bool pb_field_set_to_default(pb_field_iter_t *field); +static bool pb_message_set_to_defaults(pb_field_iter_t *iter); +static bool checkreturn pb_dec_bool(pb_istream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_dec_bytes(pb_istream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_dec_string(pb_istream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_dec_submessage(pb_istream_t *stream, const pb_field_iter_t *field); +static bool checkreturn 
pb_dec_fixed_length_bytes(pb_istream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_skip_varint(pb_istream_t *stream); +static bool checkreturn pb_skip_string(pb_istream_t *stream); + +#ifdef PB_ENABLE_MALLOC +static bool checkreturn allocate_field(pb_istream_t *stream, void *pData, size_t data_size, size_t array_size); +static void initialize_pointer_field(void *pItem, pb_field_iter_t *field); +static bool checkreturn pb_release_union_field(pb_istream_t *stream, pb_field_iter_t *field); +static void pb_release_single_field(pb_field_iter_t *field); +#endif + +#ifdef PB_WITHOUT_64BIT +#define pb_int64_t int32_t +#define pb_uint64_t uint32_t +#else +#define pb_int64_t int64_t +#define pb_uint64_t uint64_t +#endif + +typedef struct { + uint32_t bitfield[(PB_MAX_REQUIRED_FIELDS + 31) / 32]; +} pb_fields_seen_t; + +/******************************* + * pb_istream_t implementation * + *******************************/ + +static bool checkreturn buf_read(pb_istream_t *stream, pb_byte_t *buf, size_t count) +{ + const pb_byte_t *source = (const pb_byte_t*)stream->state; + stream->state = (pb_byte_t*)stream->state + count; + + if (buf != NULL) + { + memcpy(buf, source, count * sizeof(pb_byte_t)); + } + + return true; +} + +bool checkreturn pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count) +{ + if (count == 0) + return true; + +#ifndef PB_BUFFER_ONLY + if (buf == NULL && stream->callback != buf_read) + { + /* Skip input bytes */ + pb_byte_t tmp[16]; + while (count > 16) + { + if (!pb_read(stream, tmp, 16)) + return false; + + count -= 16; + } + + return pb_read(stream, tmp, count); + } +#endif + + if (stream->bytes_left < count) + PB_RETURN_ERROR(stream, "end-of-stream"); + +#ifndef PB_BUFFER_ONLY + if (!stream->callback(stream, buf, count)) + PB_RETURN_ERROR(stream, "io error"); +#else + if (!buf_read(stream, buf, count)) + return false; +#endif + + if (stream->bytes_left < count) + stream->bytes_left = 0; + else + stream->bytes_left -= count; + + return true; +} + +/* Read a single byte from input stream. buf may not be NULL. + * This is an optimization for the varint decoding. */ +static bool checkreturn pb_readbyte(pb_istream_t *stream, pb_byte_t *buf) +{ + if (stream->bytes_left == 0) + PB_RETURN_ERROR(stream, "end-of-stream"); + +#ifndef PB_BUFFER_ONLY + if (!stream->callback(stream, buf, 1)) + PB_RETURN_ERROR(stream, "io error"); +#else + *buf = *(const pb_byte_t*)stream->state; + stream->state = (pb_byte_t*)stream->state + 1; +#endif + + stream->bytes_left--; + + return true; +} + +pb_istream_t pb_istream_from_buffer(const pb_byte_t *buf, size_t msglen) +{ + pb_istream_t stream; + /* Cast away the const from buf without a compiler error. We are + * careful to use it only in a const manner in the callbacks. 
+ */ + union { + void *state; + const void *c_state; + } state; +#ifdef PB_BUFFER_ONLY + stream.callback = NULL; +#else + stream.callback = &buf_read; +#endif + state.c_state = buf; + stream.state = state.state; + stream.bytes_left = msglen; +#ifndef PB_NO_ERRMSG + stream.errmsg = NULL; +#endif + return stream; +} + +/******************** + * Helper functions * + ********************/ + +static bool checkreturn pb_decode_varint32_eof(pb_istream_t *stream, uint32_t *dest, bool *eof) +{ + pb_byte_t byte; + uint32_t result; + + if (!pb_readbyte(stream, &byte)) + { + if (stream->bytes_left == 0) + { + if (eof) + { + *eof = true; + } + } + + return false; + } + + if ((byte & 0x80) == 0) + { + /* Quick case, 1 byte value */ + result = byte; + } + else + { + /* Multibyte case */ + uint_fast8_t bitpos = 7; + result = byte & 0x7F; + + do + { + if (!pb_readbyte(stream, &byte)) + return false; + + if (bitpos >= 32) + { + /* Note: The varint could have trailing 0x80 bytes, or 0xFF for negative. */ + pb_byte_t sign_extension = (bitpos < 63) ? 0xFF : 0x01; + bool valid_extension = ((byte & 0x7F) == 0x00 || + ((result >> 31) != 0 && byte == sign_extension)); + + if (bitpos >= 64 || !valid_extension) + { + PB_RETURN_ERROR(stream, "varint overflow"); + } + } + else if (bitpos == 28) + { + if ((byte & 0x70) != 0 && (byte & 0x78) != 0x78) + { + PB_RETURN_ERROR(stream, "varint overflow"); + } + result |= (uint32_t)(byte & 0x0F) << bitpos; + } + else + { + result |= (uint32_t)(byte & 0x7F) << bitpos; + } + bitpos = (uint_fast8_t)(bitpos + 7); + } while (byte & 0x80); + } + + *dest = result; + return true; +} + +bool checkreturn pb_decode_varint32(pb_istream_t *stream, uint32_t *dest) +{ + return pb_decode_varint32_eof(stream, dest, NULL); +} + +#ifndef PB_WITHOUT_64BIT +bool checkreturn pb_decode_varint(pb_istream_t *stream, uint64_t *dest) +{ + pb_byte_t byte; + uint_fast8_t bitpos = 0; + uint64_t result = 0; + + do + { + if (!pb_readbyte(stream, &byte)) + return false; + + if (bitpos >= 63 && (byte & 0xFE) != 0) + PB_RETURN_ERROR(stream, "varint overflow"); + + result |= (uint64_t)(byte & 0x7F) << bitpos; + bitpos = (uint_fast8_t)(bitpos + 7); + } while (byte & 0x80); + + *dest = result; + return true; +} +#endif + +bool checkreturn pb_skip_varint(pb_istream_t *stream) +{ + pb_byte_t byte; + do + { + if (!pb_read(stream, &byte, 1)) + return false; + } while (byte & 0x80); + return true; +} + +bool checkreturn pb_skip_string(pb_istream_t *stream) +{ + uint32_t length; + if (!pb_decode_varint32(stream, &length)) + return false; + + if ((size_t)length != length) + { + PB_RETURN_ERROR(stream, "size too large"); + } + + return pb_read(stream, NULL, (size_t)length); +} + +bool checkreturn pb_decode_tag(pb_istream_t *stream, pb_wire_type_t *wire_type, uint32_t *tag, bool *eof) +{ + uint32_t temp; + *eof = false; + *wire_type = (pb_wire_type_t) 0; + *tag = 0; + + if (!pb_decode_varint32_eof(stream, &temp, eof)) + { + return false; + } + + *tag = temp >> 3; + *wire_type = (pb_wire_type_t)(temp & 7); + return true; +} + +bool checkreturn pb_skip_field(pb_istream_t *stream, pb_wire_type_t wire_type) +{ + switch (wire_type) + { + case PB_WT_VARINT: return pb_skip_varint(stream); + case PB_WT_64BIT: return pb_read(stream, NULL, 8); + case PB_WT_STRING: return pb_skip_string(stream); + case PB_WT_32BIT: return pb_read(stream, NULL, 4); + default: PB_RETURN_ERROR(stream, "invalid wire_type"); + } +} + +/* Read a raw value to buffer, for the purpose of passing it to callback as + * a substream. 
Size is maximum size on call, and actual size on return. + */ +static bool checkreturn read_raw_value(pb_istream_t *stream, pb_wire_type_t wire_type, pb_byte_t *buf, size_t *size) +{ + size_t max_size = *size; + switch (wire_type) + { + case PB_WT_VARINT: + *size = 0; + do + { + (*size)++; + if (*size > max_size) + PB_RETURN_ERROR(stream, "varint overflow"); + + if (!pb_read(stream, buf, 1)) + return false; + } while (*buf++ & 0x80); + return true; + + case PB_WT_64BIT: + *size = 8; + return pb_read(stream, buf, 8); + + case PB_WT_32BIT: + *size = 4; + return pb_read(stream, buf, 4); + + case PB_WT_STRING: + /* Calling read_raw_value with a PB_WT_STRING is an error. + * Explicitly handle this case and fallthrough to default to avoid + * compiler warnings. + */ + + default: PB_RETURN_ERROR(stream, "invalid wire_type"); + } +} + +/* Decode string length from stream and return a substream with limited length. + * Remember to close the substream using pb_close_string_substream(). + */ +bool checkreturn pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream) +{ + uint32_t size; + if (!pb_decode_varint32(stream, &size)) + return false; + + *substream = *stream; + if (substream->bytes_left < size) + PB_RETURN_ERROR(stream, "parent stream too short"); + + substream->bytes_left = (size_t)size; + stream->bytes_left -= (size_t)size; + return true; +} + +bool checkreturn pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream) +{ + if (substream->bytes_left) { + if (!pb_read(substream, NULL, substream->bytes_left)) + return false; + } + + stream->state = substream->state; + +#ifndef PB_NO_ERRMSG + stream->errmsg = substream->errmsg; +#endif + return true; +} + +/************************* + * Decode a single field * + *************************/ + +static bool checkreturn decode_basic_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ + switch (PB_LTYPE(field->type)) + { + case PB_LTYPE_BOOL: + if (wire_type != PB_WT_VARINT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_bool(stream, field); + + case PB_LTYPE_VARINT: + case PB_LTYPE_UVARINT: + case PB_LTYPE_SVARINT: + if (wire_type != PB_WT_VARINT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_varint(stream, field); + + case PB_LTYPE_FIXED32: + if (wire_type != PB_WT_32BIT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_decode_fixed32(stream, field->pData); + + case PB_LTYPE_FIXED64: + if (wire_type != PB_WT_64BIT && wire_type != PB_WT_PACKED) + PB_RETURN_ERROR(stream, "wrong wire type"); + +#ifdef PB_CONVERT_DOUBLE_FLOAT + if (field->data_size == sizeof(float)) + { + return pb_decode_double_as_float(stream, (float*)field->pData); + } +#endif + +#ifdef PB_WITHOUT_64BIT + PB_RETURN_ERROR(stream, "invalid data_size"); +#else + return pb_decode_fixed64(stream, field->pData); +#endif + + case PB_LTYPE_BYTES: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_bytes(stream, field); + + case PB_LTYPE_STRING: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_string(stream, field); + + case PB_LTYPE_SUBMESSAGE: + case PB_LTYPE_SUBMSG_W_CB: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + + return pb_dec_submessage(stream, field); + + case PB_LTYPE_FIXED_LENGTH_BYTES: + if (wire_type != PB_WT_STRING) + PB_RETURN_ERROR(stream, "wrong wire type"); + 
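+        /* Same length-delimited wire format as BYTES; only the
+         * in-memory representation (a fixed-size array) differs. */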
+ return pb_dec_fixed_length_bytes(stream, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +static bool checkreturn decode_static_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ + switch (PB_HTYPE(field->type)) + { + case PB_HTYPE_REQUIRED: + return decode_basic_field(stream, wire_type, field); + + case PB_HTYPE_OPTIONAL: + if (field->pSize != NULL) + *(bool*)field->pSize = true; + return decode_basic_field(stream, wire_type, field); + + case PB_HTYPE_REPEATED: + if (wire_type == PB_WT_STRING + && PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE) + { + /* Packed array */ + bool status = true; + pb_istream_t substream; + pb_size_t *size = (pb_size_t*)field->pSize; + field->pData = (char*)field->pField + field->data_size * (*size); + + if (!pb_make_string_substream(stream, &substream)) + return false; + + while (substream.bytes_left > 0 && *size < field->array_size) + { + if (!decode_basic_field(&substream, PB_WT_PACKED, field)) + { + status = false; + break; + } + (*size)++; + field->pData = (char*)field->pData + field->data_size; + } + + if (substream.bytes_left != 0) + PB_RETURN_ERROR(stream, "array overflow"); + if (!pb_close_string_substream(stream, &substream)) + return false; + + return status; + } + else + { + /* Repeated field */ + pb_size_t *size = (pb_size_t*)field->pSize; + field->pData = (char*)field->pField + field->data_size * (*size); + + if ((*size)++ >= field->array_size) + PB_RETURN_ERROR(stream, "array overflow"); + + return decode_basic_field(stream, wire_type, field); + } + + case PB_HTYPE_ONEOF: + if (PB_LTYPE_IS_SUBMSG(field->type) && + *(pb_size_t*)field->pSize != field->tag) + { + /* We memset to zero so that any callbacks are set to NULL. + * This is because the callbacks might otherwise have values + * from some other union field. + * If callbacks are needed inside oneof field, use .proto + * option submsg_callback to have a separate callback function + * that can set the fields before submessage is decoded. + * pb_dec_submessage() will set any default values. */ + memset(field->pData, 0, (size_t)field->data_size); + + /* Set default values for the submessage fields. */ + if (field->submsg_desc->default_value != NULL || + field->submsg_desc->field_callback != NULL || + field->submsg_desc->submsg_info[0] != NULL) + { + pb_field_iter_t submsg_iter; + if (pb_field_iter_begin(&submsg_iter, field->submsg_desc, field->pData)) + { + if (!pb_message_set_to_defaults(&submsg_iter)) + PB_RETURN_ERROR(stream, "failed to set defaults"); + } + } + } + *(pb_size_t*)field->pSize = field->tag; + + return decode_basic_field(stream, wire_type, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +#ifdef PB_ENABLE_MALLOC +/* Allocate storage for the field and store the pointer at iter->pData. + * array_size is the number of entries to reserve in an array. + * Zero size is not allowed, use pb_free() for releasing. + */ +static bool checkreturn allocate_field(pb_istream_t *stream, void *pData, size_t data_size, size_t array_size) +{ + void *ptr = *(void**)pData; + + if (data_size == 0 || array_size == 0) + PB_RETURN_ERROR(stream, "invalid size"); + +#ifdef __AVR__ + /* Workaround for AVR libc bug 53284: http://savannah.nongnu.org/bugs/?53284 + * Realloc to size of 1 byte can cause corruption of the malloc structures. + */ + if (data_size == 1 && array_size == 1) + { + data_size = 2; + } +#endif + + /* Check for multiplication overflows. 
+ * This code avoids the costly division if the sizes are small enough. + * Multiplication is safe as long as only half of bits are set + * in either multiplicand. + */ + { + const size_t check_limit = (size_t)1 << (sizeof(size_t) * 4); + if (data_size >= check_limit || array_size >= check_limit) + { + const size_t size_max = (size_t)-1; + if (size_max / array_size < data_size) + { + PB_RETURN_ERROR(stream, "size too large"); + } + } + } + + /* Allocate new or expand previous allocation */ + /* Note: on failure the old pointer will remain in the structure, + * the message must be freed by caller also on error return. */ + ptr = pb_realloc(ptr, array_size * data_size); + if (ptr == NULL) + PB_RETURN_ERROR(stream, "realloc failed"); + + *(void**)pData = ptr; + return true; +} + +/* Clear a newly allocated item in case it contains a pointer, or is a submessage. */ +static void initialize_pointer_field(void *pItem, pb_field_iter_t *field) +{ + if (PB_LTYPE(field->type) == PB_LTYPE_STRING || + PB_LTYPE(field->type) == PB_LTYPE_BYTES) + { + *(void**)pItem = NULL; + } + else if (PB_LTYPE_IS_SUBMSG(field->type)) + { + /* We memset to zero so that any callbacks are set to NULL. + * Default values will be set by pb_dec_submessage(). */ + memset(pItem, 0, field->data_size); + } +} +#endif + +static bool checkreturn decode_pointer_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ +#ifndef PB_ENABLE_MALLOC + PB_UNUSED(wire_type); + PB_UNUSED(field); + PB_RETURN_ERROR(stream, "no malloc support"); +#else + switch (PB_HTYPE(field->type)) + { + case PB_HTYPE_REQUIRED: + case PB_HTYPE_OPTIONAL: + case PB_HTYPE_ONEOF: + if (PB_LTYPE_IS_SUBMSG(field->type) && *(void**)field->pField != NULL) + { + /* Duplicate field, have to release the old allocation first. */ + /* FIXME: Does this work correctly for oneofs? */ + pb_release_single_field(field); + } + + if (PB_HTYPE(field->type) == PB_HTYPE_ONEOF) + { + *(pb_size_t*)field->pSize = field->tag; + } + + if (PB_LTYPE(field->type) == PB_LTYPE_STRING || + PB_LTYPE(field->type) == PB_LTYPE_BYTES) + { + /* pb_dec_string and pb_dec_bytes handle allocation themselves */ + field->pData = field->pField; + return decode_basic_field(stream, wire_type, field); + } + else + { + if (!allocate_field(stream, field->pField, field->data_size, 1)) + return false; + + field->pData = *(void**)field->pField; + initialize_pointer_field(field->pData, field); + return decode_basic_field(stream, wire_type, field); + } + + case PB_HTYPE_REPEATED: + if (wire_type == PB_WT_STRING + && PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE) + { + /* Packed array, multiple items come in at once. */ + bool status = true; + pb_size_t *size = (pb_size_t*)field->pSize; + size_t allocated_size = *size; + pb_istream_t substream; + + if (!pb_make_string_substream(stream, &substream)) + return false; + + while (substream.bytes_left) + { + if (*size == PB_SIZE_MAX) + { +#ifndef PB_NO_ERRMSG + stream->errmsg = "too many array entries"; +#endif + status = false; + break; + } + + if ((size_t)*size + 1 > allocated_size) + { + /* Allocate more storage. This tries to guess the + * number of remaining entries. Round the division + * upwards. 
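+ * For example, with 13 bytes left in the substream and 4-byte items this
+ * reserves (13 - 1) / 4 + 1 = 4 more entries.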
*/ + size_t remain = (substream.bytes_left - 1) / field->data_size + 1; + if (remain < PB_SIZE_MAX - allocated_size) + allocated_size += remain; + else + allocated_size += 1; + + if (!allocate_field(&substream, field->pField, field->data_size, allocated_size)) + { + status = false; + break; + } + } + + /* Decode the array entry */ + field->pData = *(char**)field->pField + field->data_size * (*size); + if (field->pData == NULL) + { + /* Shouldn't happen, but satisfies static analyzers */ + status = false; + break; + } + initialize_pointer_field(field->pData, field); + if (!decode_basic_field(&substream, PB_WT_PACKED, field)) + { + status = false; + break; + } + + (*size)++; + } + if (!pb_close_string_substream(stream, &substream)) + return false; + + return status; + } + else + { + /* Normal repeated field, i.e. only one item at a time. */ + pb_size_t *size = (pb_size_t*)field->pSize; + + if (*size == PB_SIZE_MAX) + PB_RETURN_ERROR(stream, "too many array entries"); + + if (!allocate_field(stream, field->pField, field->data_size, (size_t)(*size + 1))) + return false; + + field->pData = *(char**)field->pField + field->data_size * (*size); + (*size)++; + initialize_pointer_field(field->pData, field); + return decode_basic_field(stream, wire_type, field); + } + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +#endif +} + +static bool checkreturn decode_callback_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ + if (!field->descriptor->field_callback) + return pb_skip_field(stream, wire_type); + + if (wire_type == PB_WT_STRING) + { + pb_istream_t substream; + size_t prev_bytes_left; + + if (!pb_make_string_substream(stream, &substream)) + return false; + + do + { + prev_bytes_left = substream.bytes_left; + if (!field->descriptor->field_callback(&substream, NULL, field)) + { + PB_SET_ERROR(stream, substream.errmsg ? substream.errmsg : "callback failed"); + return false; + } + } while (substream.bytes_left > 0 && substream.bytes_left < prev_bytes_left); + + if (!pb_close_string_substream(stream, &substream)) + return false; + + return true; + } + else + { + /* Copy the single scalar value to stack. + * This is required so that we can limit the stream length, + * which in turn allows to use same callback for packed and + * not-packed fields. */ + pb_istream_t substream; + pb_byte_t buffer[10]; + size_t size = sizeof(buffer); + + if (!read_raw_value(stream, wire_type, buffer, &size)) + return false; + substream = pb_istream_from_buffer(buffer, size); + + return field->descriptor->field_callback(&substream, NULL, field); + } +} + +static bool checkreturn decode_field(pb_istream_t *stream, pb_wire_type_t wire_type, pb_field_iter_t *field) +{ +#ifdef PB_ENABLE_MALLOC + /* When decoding an oneof field, check if there is old data that must be + * released first. */ + if (PB_HTYPE(field->type) == PB_HTYPE_ONEOF) + { + if (!pb_release_union_field(stream, field)) + return false; + } +#endif + + switch (PB_ATYPE(field->type)) + { + case PB_ATYPE_STATIC: + return decode_static_field(stream, wire_type, field); + + case PB_ATYPE_POINTER: + return decode_pointer_field(stream, wire_type, field); + + case PB_ATYPE_CALLBACK: + return decode_callback_field(stream, wire_type, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +/* Default handler for extension fields. Expects to have a pb_msgdesc_t + * pointer in the extension->type->arg field, pointing to a message with + * only one field in it. 
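+ *
+ * A sketch of wiring up an extension before decoding (names here are
+ * illustrative, not part of this patch):
+ *
+ *   int32_t value = 0;
+ *   pb_extension_t ext = pb_extension_init_zero;
+ *   ext.type = &my_ext_type;   (the generated pb_extension_type_t)
+ *   ext.dest = &value;
+ *   mymsg.extensions = &ext;   (then pb_decode() as usual)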
*/ +static bool checkreturn default_extension_decoder(pb_istream_t *stream, + pb_extension_t *extension, uint32_t tag, pb_wire_type_t wire_type) +{ + pb_field_iter_t iter; + + if (!pb_field_iter_begin_extension(&iter, extension)) + PB_RETURN_ERROR(stream, "invalid extension"); + + if (iter.tag != tag || !iter.message) + return true; + + extension->found = true; + return decode_field(stream, wire_type, &iter); +} + +/* Try to decode an unknown field as an extension field. Tries each extension + * decoder in turn, until one of them handles the field or loop ends. */ +static bool checkreturn decode_extension(pb_istream_t *stream, + uint32_t tag, pb_wire_type_t wire_type, pb_extension_t *extension) +{ + size_t pos = stream->bytes_left; + + while (extension != NULL && pos == stream->bytes_left) + { + bool status; + if (extension->type->decode) + status = extension->type->decode(stream, extension, tag, wire_type); + else + status = default_extension_decoder(stream, extension, tag, wire_type); + + if (!status) + return false; + + extension = extension->next; + } + + return true; +} + +/* Initialize message fields to default values, recursively */ +static bool pb_field_set_to_default(pb_field_iter_t *field) +{ + pb_type_t type; + type = field->type; + + if (PB_LTYPE(type) == PB_LTYPE_EXTENSION) + { + pb_extension_t *ext = *(pb_extension_t* const *)field->pData; + while (ext != NULL) + { + pb_field_iter_t ext_iter; + if (pb_field_iter_begin_extension(&ext_iter, ext)) + { + ext->found = false; + if (!pb_message_set_to_defaults(&ext_iter)) + return false; + } + ext = ext->next; + } + } + else if (PB_ATYPE(type) == PB_ATYPE_STATIC) + { + bool init_data = true; + if (PB_HTYPE(type) == PB_HTYPE_OPTIONAL && field->pSize != NULL) + { + /* Set has_field to false. Still initialize the optional field + * itself also. */ + *(bool*)field->pSize = false; + } + else if (PB_HTYPE(type) == PB_HTYPE_REPEATED || + PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + /* REPEATED: Set array count to 0, no need to initialize contents. + ONEOF: Set which_field to 0. */ + *(pb_size_t*)field->pSize = 0; + init_data = false; + } + + if (init_data) + { + if (PB_LTYPE_IS_SUBMSG(field->type) && + (field->submsg_desc->default_value != NULL || + field->submsg_desc->field_callback != NULL || + field->submsg_desc->submsg_info[0] != NULL)) + { + /* Initialize submessage to defaults. + * Only needed if it has default values + * or callback/submessage fields. */ + pb_field_iter_t submsg_iter; + if (pb_field_iter_begin(&submsg_iter, field->submsg_desc, field->pData)) + { + if (!pb_message_set_to_defaults(&submsg_iter)) + return false; + } + } + else + { + /* Initialize to zeros */ + memset(field->pData, 0, (size_t)field->data_size); + } + } + } + else if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + /* Initialize the pointer to NULL. */ + *(void**)field->pField = NULL; + + /* Initialize array count to 0. 
*/ + if (PB_HTYPE(type) == PB_HTYPE_REPEATED || + PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + *(pb_size_t*)field->pSize = 0; + } + } + else if (PB_ATYPE(type) == PB_ATYPE_CALLBACK) + { + /* Don't overwrite callback */ + } + + return true; +} + +static bool pb_message_set_to_defaults(pb_field_iter_t *iter) +{ + pb_istream_t defstream = PB_ISTREAM_EMPTY; + uint32_t tag = 0; + pb_wire_type_t wire_type = PB_WT_VARINT; + bool eof; + + if (iter->descriptor->default_value) + { + defstream = pb_istream_from_buffer(iter->descriptor->default_value, (size_t)-1); + if (!pb_decode_tag(&defstream, &wire_type, &tag, &eof)) + return false; + } + + do + { + if (!pb_field_set_to_default(iter)) + return false; + + if (tag != 0 && iter->tag == tag) + { + /* We have a default value for this field in the defstream */ + if (!decode_field(&defstream, wire_type, iter)) + return false; + if (!pb_decode_tag(&defstream, &wire_type, &tag, &eof)) + return false; + + if (iter->pSize) + *(bool*)iter->pSize = false; + } + } while (pb_field_iter_next(iter)); + + return true; +} + +/********************* + * Decode all fields * + *********************/ + +static bool checkreturn pb_decode_inner(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags) +{ + uint32_t extension_range_start = 0; + pb_extension_t *extensions = NULL; + + /* 'fixed_count_field' and 'fixed_count_size' track position of a repeated fixed + * count field. This can only handle _one_ repeated fixed count field that + * is unpacked and unordered among other (non repeated fixed count) fields. + */ + pb_size_t fixed_count_field = PB_SIZE_MAX; + pb_size_t fixed_count_size = 0; + pb_size_t fixed_count_total_size = 0; + + pb_fields_seen_t fields_seen = {{0, 0}}; + const uint32_t allbits = ~(uint32_t)0; + pb_field_iter_t iter; + + if (pb_field_iter_begin(&iter, fields, dest_struct)) + { + if ((flags & PB_DECODE_NOINIT) == 0) + { + if (!pb_message_set_to_defaults(&iter)) + PB_RETURN_ERROR(stream, "failed to set defaults"); + } + } + + while (stream->bytes_left) + { + uint32_t tag; + pb_wire_type_t wire_type; + bool eof; + + if (!pb_decode_tag(stream, &wire_type, &tag, &eof)) + { + if (eof) + break; + else + return false; + } + + if (tag == 0) + { + if (flags & PB_DECODE_NULLTERMINATED) + { + break; + } + else + { + PB_RETURN_ERROR(stream, "zero tag"); + } + } + + if (!pb_field_iter_find(&iter, tag) || PB_LTYPE(iter.type) == PB_LTYPE_EXTENSION) + { + /* No match found, check if it matches an extension. */ + if (extension_range_start == 0) + { + if (pb_field_iter_find_extension(&iter)) + { + extensions = *(pb_extension_t* const *)iter.pData; + extension_range_start = iter.tag; + } + + if (!extensions) + { + extension_range_start = (uint32_t)-1; + } + } + + if (tag >= extension_range_start) + { + size_t pos = stream->bytes_left; + + if (!decode_extension(stream, tag, wire_type, extensions)) + return false; + + if (pos != stream->bytes_left) + { + /* The field was handled */ + continue; + } + } + + /* No match found, skip data */ + if (!pb_skip_field(stream, wire_type)) + return false; + continue; + } + + /* If a repeated fixed count field was found, get size from + * 'fixed_count_field' as there is no counter contained in the struct. + */ + if (PB_HTYPE(iter.type) == PB_HTYPE_REPEATED && iter.pSize == &iter.array_size) + { + if (fixed_count_field != iter.index) { + /* If the new fixed count field does not match the previous one, + * check that the previous one is NULL or that it finished + * receiving all the expected data. 
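+ * (A fixed count field is a repeated field generated with the
+ * fixed_count:true option: it holds exactly max_count elements and has no
+ * _count member, which is why iter.pSize aliases iter.array_size here.)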
+ */ + if (fixed_count_field != PB_SIZE_MAX && + fixed_count_size != fixed_count_total_size) + { + PB_RETURN_ERROR(stream, "wrong size for fixed count field"); + } + + fixed_count_field = iter.index; + fixed_count_size = 0; + fixed_count_total_size = iter.array_size; + } + + iter.pSize = &fixed_count_size; + } + + if (PB_HTYPE(iter.type) == PB_HTYPE_REQUIRED + && iter.required_field_index < PB_MAX_REQUIRED_FIELDS) + { + uint32_t tmp = ((uint32_t)1 << (iter.required_field_index & 31)); + fields_seen.bitfield[iter.required_field_index >> 5] |= tmp; + } + + if (!decode_field(stream, wire_type, &iter)) + return false; + } + + /* Check that all elements of the last decoded fixed count field were present. */ + if (fixed_count_field != PB_SIZE_MAX && + fixed_count_size != fixed_count_total_size) + { + PB_RETURN_ERROR(stream, "wrong size for fixed count field"); + } + + /* Check that all required fields were present. */ + { + pb_size_t req_field_count = iter.descriptor->required_field_count; + + if (req_field_count > 0) + { + pb_size_t i; + + if (req_field_count > PB_MAX_REQUIRED_FIELDS) + req_field_count = PB_MAX_REQUIRED_FIELDS; + + /* Check the whole words */ + for (i = 0; i < (req_field_count >> 5); i++) + { + if (fields_seen.bitfield[i] != allbits) + PB_RETURN_ERROR(stream, "missing required field"); + } + + /* Check the remaining bits (if any) */ + if ((req_field_count & 31) != 0) + { + if (fields_seen.bitfield[req_field_count >> 5] != + (allbits >> (uint_least8_t)(32 - (req_field_count & 31)))) + { + PB_RETURN_ERROR(stream, "missing required field"); + } + } + } + } + + return true; +} + +bool checkreturn pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags) +{ + bool status; + + if ((flags & PB_DECODE_DELIMITED) == 0) + { + status = pb_decode_inner(stream, fields, dest_struct, flags); + } + else + { + pb_istream_t substream; + if (!pb_make_string_substream(stream, &substream)) + return false; + + status = pb_decode_inner(&substream, fields, dest_struct, flags); + + if (!pb_close_string_substream(stream, &substream)) + return false; + } + +#ifdef PB_ENABLE_MALLOC + if (!status) + pb_release(fields, dest_struct); +#endif + + return status; +} + +bool checkreturn pb_decode(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct) +{ + bool status; + + status = pb_decode_inner(stream, fields, dest_struct, 0); + +#ifdef PB_ENABLE_MALLOC + if (!status) + pb_release(fields, dest_struct); +#endif + + return status; +} + +#ifdef PB_ENABLE_MALLOC +/* Given an oneof field, if there has already been a field inside this oneof, + * release it before overwriting with a different one. */ +static bool pb_release_union_field(pb_istream_t *stream, pb_field_iter_t *field) +{ + pb_field_iter_t old_field = *field; + pb_size_t old_tag = *(pb_size_t*)field->pSize; /* Previous which_ value */ + pb_size_t new_tag = field->tag; /* New which_ value */ + + if (old_tag == 0) + return true; /* Ok, no old data in union */ + + if (old_tag == new_tag) + return true; /* Ok, old data is of same type => merge */ + + /* Release old data. The find can fail if the message struct contains + * invalid data. */ + if (!pb_field_iter_find(&old_field, old_tag)) + PB_RETURN_ERROR(stream, "invalid union tag"); + + pb_release_single_field(&old_field); + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { + /* Initialize the pointer to NULL to make sure it is valid + * even in case of error return. 
*/ + *(void**)field->pField = NULL; + field->pData = NULL; + } + + return true; +} + +static void pb_release_single_field(pb_field_iter_t *field) +{ + pb_type_t type; + type = field->type; + + if (PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + if (*(pb_size_t*)field->pSize != field->tag) + return; /* This is not the current field in the union */ + } + + /* Release anything contained inside an extension or submsg. + * This has to be done even if the submsg itself is statically + * allocated. */ + if (PB_LTYPE(type) == PB_LTYPE_EXTENSION) + { + /* Release fields from all extensions in the linked list */ + pb_extension_t *ext = *(pb_extension_t**)field->pData; + while (ext != NULL) + { + pb_field_iter_t ext_iter; + if (pb_field_iter_begin_extension(&ext_iter, ext)) + { + pb_release_single_field(&ext_iter); + } + ext = ext->next; + } + } + else if (PB_LTYPE_IS_SUBMSG(type) && PB_ATYPE(type) != PB_ATYPE_CALLBACK) + { + /* Release fields in submessage or submsg array */ + pb_size_t count = 1; + + if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + field->pData = *(void**)field->pField; + } + else + { + field->pData = field->pField; + } + + if (PB_HTYPE(type) == PB_HTYPE_REPEATED) + { + count = *(pb_size_t*)field->pSize; + + if (PB_ATYPE(type) == PB_ATYPE_STATIC && count > field->array_size) + { + /* Protect against corrupted _count fields */ + count = field->array_size; + } + } + + if (field->pData) + { + for (; count > 0; count--) + { + pb_release(field->submsg_desc, field->pData); + field->pData = (char*)field->pData + field->data_size; + } + } + } + + if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + if (PB_HTYPE(type) == PB_HTYPE_REPEATED && + (PB_LTYPE(type) == PB_LTYPE_STRING || + PB_LTYPE(type) == PB_LTYPE_BYTES)) + { + /* Release entries in repeated string or bytes array */ + void **pItem = *(void***)field->pField; + pb_size_t count = *(pb_size_t*)field->pSize; + for (; count > 0; count--) + { + pb_free(*pItem); + *pItem++ = NULL; + } + } + + if (PB_HTYPE(type) == PB_HTYPE_REPEATED) + { + /* We are going to release the array, so set the size to 0 */ + *(pb_size_t*)field->pSize = 0; + } + + /* Release main pointer */ + pb_free(*(void**)field->pField); + *(void**)field->pField = NULL; + } +} + +void pb_release(const pb_msgdesc_t *fields, void *dest_struct) +{ + pb_field_iter_t iter; + + if (!dest_struct) + return; /* Ignore NULL pointers, similar to free() */ + + if (!pb_field_iter_begin(&iter, fields, dest_struct)) + return; /* Empty message type */ + + do + { + pb_release_single_field(&iter); + } while (pb_field_iter_next(&iter)); +} +#else +void pb_release(const pb_msgdesc_t *fields, void *dest_struct) +{ + /* Nothing to release without PB_ENABLE_MALLOC. 
*/ + PB_UNUSED(fields); + PB_UNUSED(dest_struct); +} +#endif + +/* Field decoders */ + +bool pb_decode_bool(pb_istream_t *stream, bool *dest) +{ + uint32_t value; + if (!pb_decode_varint32(stream, &value)) + return false; + + *(bool*)dest = (value != 0); + return true; +} + +bool pb_decode_svarint(pb_istream_t *stream, pb_int64_t *dest) +{ + pb_uint64_t value; + if (!pb_decode_varint(stream, &value)) + return false; + + if (value & 1) + *dest = (pb_int64_t)(~(value >> 1)); + else + *dest = (pb_int64_t)(value >> 1); + + return true; +} + +bool pb_decode_fixed32(pb_istream_t *stream, void *dest) +{ + union { + uint32_t fixed32; + pb_byte_t bytes[4]; + } u; + + if (!pb_read(stream, u.bytes, 4)) + return false; + +#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1 + /* fast path - if we know that we're on little endian, assign directly */ + *(uint32_t*)dest = u.fixed32; +#else + *(uint32_t*)dest = ((uint32_t)u.bytes[0] << 0) | + ((uint32_t)u.bytes[1] << 8) | + ((uint32_t)u.bytes[2] << 16) | + ((uint32_t)u.bytes[3] << 24); +#endif + return true; +} + +#ifndef PB_WITHOUT_64BIT +bool pb_decode_fixed64(pb_istream_t *stream, void *dest) +{ + union { + uint64_t fixed64; + pb_byte_t bytes[8]; + } u; + + if (!pb_read(stream, u.bytes, 8)) + return false; + +#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1 + /* fast path - if we know that we're on little endian, assign directly */ + *(uint64_t*)dest = u.fixed64; +#else + *(uint64_t*)dest = ((uint64_t)u.bytes[0] << 0) | + ((uint64_t)u.bytes[1] << 8) | + ((uint64_t)u.bytes[2] << 16) | + ((uint64_t)u.bytes[3] << 24) | + ((uint64_t)u.bytes[4] << 32) | + ((uint64_t)u.bytes[5] << 40) | + ((uint64_t)u.bytes[6] << 48) | + ((uint64_t)u.bytes[7] << 56); +#endif + return true; +} +#endif + +static bool checkreturn pb_dec_bool(pb_istream_t *stream, const pb_field_iter_t *field) +{ + return pb_decode_bool(stream, (bool*)field->pData); +} + +static bool checkreturn pb_dec_varint(pb_istream_t *stream, const pb_field_iter_t *field) +{ + if (PB_LTYPE(field->type) == PB_LTYPE_UVARINT) + { + pb_uint64_t value, clamped; + if (!pb_decode_varint(stream, &value)) + return false; + + /* Cast to the proper field size, while checking for overflows */ + if (field->data_size == sizeof(pb_uint64_t)) + clamped = *(pb_uint64_t*)field->pData = value; + else if (field->data_size == sizeof(uint32_t)) + clamped = *(uint32_t*)field->pData = (uint32_t)value; + else if (field->data_size == sizeof(uint_least16_t)) + clamped = *(uint_least16_t*)field->pData = (uint_least16_t)value; + else if (field->data_size == sizeof(uint_least8_t)) + clamped = *(uint_least8_t*)field->pData = (uint_least8_t)value; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + if (clamped != value) + PB_RETURN_ERROR(stream, "integer too large"); + + return true; + } + else + { + pb_uint64_t value; + pb_int64_t svalue; + pb_int64_t clamped; + + if (PB_LTYPE(field->type) == PB_LTYPE_SVARINT) + { + if (!pb_decode_svarint(stream, &svalue)) + return false; + } + else + { + if (!pb_decode_varint(stream, &value)) + return false; + + /* See issue 97: Google's C++ protobuf allows negative varint values to + * be cast as int32_t, instead of the int64_t that should be used when + * encoding. Nanopb versions before 0.2.5 had a bug in encoding. In order to + * not break decoding of such messages, we cast <=32 bit fields to + * int32_t first to get the sign correct. 
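+             * For example, a negative int32 encoded that way arrives as the
+             * 5-byte varint FF FF FF FF 0F for -1, i.e. the 32-bit value
+             * 0xFFFFFFFF: casting through int32_t recovers -1, whereas a
+             * direct cast to int64_t would yield 4294967295.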
+ */ + if (field->data_size == sizeof(pb_int64_t)) + svalue = (pb_int64_t)value; + else + svalue = (int32_t)value; + } + + /* Cast to the proper field size, while checking for overflows */ + if (field->data_size == sizeof(pb_int64_t)) + clamped = *(pb_int64_t*)field->pData = svalue; + else if (field->data_size == sizeof(int32_t)) + clamped = *(int32_t*)field->pData = (int32_t)svalue; + else if (field->data_size == sizeof(int_least16_t)) + clamped = *(int_least16_t*)field->pData = (int_least16_t)svalue; + else if (field->data_size == sizeof(int_least8_t)) + clamped = *(int_least8_t*)field->pData = (int_least8_t)svalue; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + if (clamped != svalue) + PB_RETURN_ERROR(stream, "integer too large"); + + return true; + } +} + +static bool checkreturn pb_dec_bytes(pb_istream_t *stream, const pb_field_iter_t *field) +{ + uint32_t size; + size_t alloc_size; + pb_bytes_array_t *dest; + + if (!pb_decode_varint32(stream, &size)) + return false; + + if (size > PB_SIZE_MAX) + PB_RETURN_ERROR(stream, "bytes overflow"); + + alloc_size = PB_BYTES_ARRAY_T_ALLOCSIZE(size); + if (size > alloc_size) + PB_RETURN_ERROR(stream, "size too large"); + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { +#ifndef PB_ENABLE_MALLOC + PB_RETURN_ERROR(stream, "no malloc support"); +#else + if (stream->bytes_left < size) + PB_RETURN_ERROR(stream, "end-of-stream"); + + if (!allocate_field(stream, field->pData, alloc_size, 1)) + return false; + dest = *(pb_bytes_array_t**)field->pData; +#endif + } + else + { + if (alloc_size > field->data_size) + PB_RETURN_ERROR(stream, "bytes overflow"); + dest = (pb_bytes_array_t*)field->pData; + } + + dest->size = (pb_size_t)size; + return pb_read(stream, dest->bytes, (size_t)size); +} + +static bool checkreturn pb_dec_string(pb_istream_t *stream, const pb_field_iter_t *field) +{ + uint32_t size; + size_t alloc_size; + pb_byte_t *dest = (pb_byte_t*)field->pData; + + if (!pb_decode_varint32(stream, &size)) + return false; + + if (size == (uint32_t)-1) + PB_RETURN_ERROR(stream, "size too large"); + + /* Space for null terminator */ + alloc_size = (size_t)(size + 1); + + if (alloc_size < size) + PB_RETURN_ERROR(stream, "size too large"); + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { +#ifndef PB_ENABLE_MALLOC + PB_RETURN_ERROR(stream, "no malloc support"); +#else + if (stream->bytes_left < size) + PB_RETURN_ERROR(stream, "end-of-stream"); + + if (!allocate_field(stream, field->pData, alloc_size, 1)) + return false; + dest = *(pb_byte_t**)field->pData; +#endif + } + else + { + if (alloc_size > field->data_size) + PB_RETURN_ERROR(stream, "string overflow"); + } + + dest[size] = 0; + + if (!pb_read(stream, dest, (size_t)size)) + return false; + +#ifdef PB_VALIDATE_UTF8 + if (!pb_validate_utf8((const char*)dest)) + PB_RETURN_ERROR(stream, "invalid utf8"); +#endif + + return true; +} + +static bool checkreturn pb_dec_submessage(pb_istream_t *stream, const pb_field_iter_t *field) +{ + bool status = true; + bool submsg_consumed = false; + pb_istream_t substream; + + if (!pb_make_string_substream(stream, &substream)) + return false; + + if (field->submsg_desc == NULL) + PB_RETURN_ERROR(stream, "invalid field descriptor"); + + /* Submessages can have a separate message-level callback that is called + * before decoding the message. Typically it is used to set callback fields + * inside oneofs. */ + if (PB_LTYPE(field->type) == PB_LTYPE_SUBMSG_W_CB && field->pSize != NULL) + { + /* Message callback is stored right before pSize. 
*/ + pb_callback_t *callback = (pb_callback_t*)field->pSize - 1; + if (callback->funcs.decode) + { + status = callback->funcs.decode(&substream, field, &callback->arg); + + if (substream.bytes_left == 0) + { + submsg_consumed = true; + } + } + } + + /* Now decode the submessage contents */ + if (status && !submsg_consumed) + { + unsigned int flags = 0; + + /* Static required/optional fields are already initialized by top-level + * pb_decode(), no need to initialize them again. */ + if (PB_ATYPE(field->type) == PB_ATYPE_STATIC && + PB_HTYPE(field->type) != PB_HTYPE_REPEATED) + { + flags = PB_DECODE_NOINIT; + } + + status = pb_decode_inner(&substream, field->submsg_desc, field->pData, flags); + } + + if (!pb_close_string_substream(stream, &substream)) + return false; + + return status; +} + +static bool checkreturn pb_dec_fixed_length_bytes(pb_istream_t *stream, const pb_field_iter_t *field) +{ + uint32_t size; + + if (!pb_decode_varint32(stream, &size)) + return false; + + if (size > PB_SIZE_MAX) + PB_RETURN_ERROR(stream, "bytes overflow"); + + if (size == 0) + { + /* As a special case, treat empty bytes string as all zeros for fixed_length_bytes. */ + memset(field->pData, 0, (size_t)field->data_size); + return true; + } + + if (size != field->data_size) + PB_RETURN_ERROR(stream, "incorrect fixed length bytes size"); + + return pb_read(stream, (pb_byte_t*)field->pData, (size_t)field->data_size); +} + +#ifdef PB_CONVERT_DOUBLE_FLOAT +bool pb_decode_double_as_float(pb_istream_t *stream, float *dest) +{ + uint_least8_t sign; + int exponent; + uint32_t mantissa; + uint64_t value; + union { float f; uint32_t i; } out; + + if (!pb_decode_fixed64(stream, &value)) + return false; + + /* Decompose input value */ + sign = (uint_least8_t)((value >> 63) & 1); + exponent = (int)((value >> 52) & 0x7FF) - 1023; + mantissa = (value >> 28) & 0xFFFFFF; /* Highest 24 bits */ + + /* Figure if value is in range representable by floats. */ + if (exponent == 1024) + { + /* Special value */ + exponent = 128; + mantissa >>= 1; + } + else + { + if (exponent > 127) + { + /* Too large, convert to infinity */ + exponent = 128; + mantissa = 0; + } + else if (exponent < -150) + { + /* Too small, convert to zero */ + exponent = -127; + mantissa = 0; + } + else if (exponent < -126) + { + /* Denormalized */ + mantissa |= 0x1000000; + mantissa >>= (-126 - exponent); + exponent = -127; + } + + /* Round off mantissa */ + mantissa = (mantissa + 1) >> 1; + + /* Check if mantissa went over 2.0 */ + if (mantissa & 0x800000) + { + exponent += 1; + mantissa &= 0x7FFFFF; + mantissa >>= 1; + } + } + + /* Combine fields */ + out.i = mantissa; + out.i |= (uint32_t)(exponent + 127) << 23; + out.i |= (uint32_t)sign << 31; + + *dest = out.f; + return true; +} +#endif diff --git a/src/nanopb/pb_decode.h b/src/nanopb/pb_decode.h new file mode 100644 index 0000000000..02f11653a2 --- /dev/null +++ b/src/nanopb/pb_decode.h @@ -0,0 +1,193 @@ +/* pb_decode.h: Functions to decode protocol buffers. Depends on pb_decode.c. + * The main function is pb_decode. You also need an input stream, and the + * field descriptions created by nanopb_generator.py. + */ + +#ifndef PB_DECODE_H_INCLUDED +#define PB_DECODE_H_INCLUDED + +#include "nanopb/pb.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure for defining custom input streams. You will need to provide + * a callback function to read the bytes from your storage, which can be + * for example a file or a network socket. 
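+ *
+ * A minimal sketch of such a callback (illustrative only; assumes the
+ * state pointer holds a stdio FILE handle, and my_file_read is a
+ * hypothetical name):
+ *
+ *    static bool my_file_read(pb_istream_t *stream, pb_byte_t *buf, size_t count)
+ *    {
+ *        FILE *f = (FILE*)stream->state;
+ *        return fread(buf, 1, count, f) == count;
+ *    }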
+ *
+ * The callback must conform to these rules:
+ *
+ * 1) Return false on IO errors. This will cause decoding to abort.
+ * 2) You can use state to store your own data (e.g. buffer pointer),
+ *    and rely on pb_read to verify that nobody reads past bytes_left.
+ * 3) Your callback may be used with substreams, in which case bytes_left
+ *    is different from the main stream. Don't use bytes_left to compute
+ *    any pointers.
+ */
+struct pb_istream_s
+{
+#ifdef PB_BUFFER_ONLY
+    /* Callback pointer is not used in buffer-only configuration.
+     * Having an int pointer here allows binary compatibility but
+     * gives an error if someone tries to assign callback function.
+     */
+    int *callback;
+#else
+    bool (*callback)(pb_istream_t *stream, pb_byte_t *buf, size_t count);
+#endif
+
+    void *state; /* Free field for use by callback implementation */
+    size_t bytes_left;
+
+#ifndef PB_NO_ERRMSG
+    const char *errmsg;
+#endif
+};
+
+#ifndef PB_NO_ERRMSG
+#define PB_ISTREAM_EMPTY {0,0,0,0}
+#else
+#define PB_ISTREAM_EMPTY {0,0,0}
+#endif
+
+/***************************
+ * Main decoding functions *
+ ***************************/
+
+/* Decode a single protocol buffers message from input stream into a C structure.
+ * Returns true on success, false on any failure.
+ * The actual struct pointed to by dest must match the description in fields.
+ * Callback fields of the destination structure must be initialized by caller.
+ * All other fields will be initialized by this function.
+ *
+ * Example usage:
+ *    MyMessage msg = {};
+ *    uint8_t buffer[64];
+ *    pb_istream_t stream;
+ *
+ *    // ... read some data into buffer ...
+ *
+ *    stream = pb_istream_from_buffer(buffer, count);
+ *    pb_decode(&stream, MyMessage_fields, &msg);
+ */
+bool pb_decode(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct);
+
+/* Extended version of pb_decode, with several options to control
+ * the decoding process:
+ *
+ * PB_DECODE_NOINIT:         Do not initialize the fields to default values.
+ *                           This is slightly faster if you do not need the default
+ *                           values and instead initialize the structure to 0 using
+ *                           e.g. memset(). This can also be used for merging two
+ *                           messages, i.e. combine already existing data with new
+ *                           values.
+ *
+ * PB_DECODE_DELIMITED:      Input message starts with the message size as varint.
+ *                           Corresponds to parseDelimitedFrom() in Google's
+ *                           protobuf API.
+ *
+ * PB_DECODE_NULLTERMINATED: Stop reading when field tag is read as 0. This allows
+ *                           reading null terminated messages.
+ *                           NOTE: Until nanopb-0.4.0, pb_decode() also allowed
+ *                           null-termination. This behaviour is not supported in
+ *                           most other protobuf implementations, so PB_DECODE_DELIMITED
+ *                           is a better option for compatibility.
+ *
+ * Multiple flags can be combined with bitwise or (| operator)
+ */
+#define PB_DECODE_NOINIT          0x01U
+#define PB_DECODE_DELIMITED       0x02U
+#define PB_DECODE_NULLTERMINATED  0x04U
+bool pb_decode_ex(pb_istream_t *stream, const pb_msgdesc_t *fields, void *dest_struct, unsigned int flags);
+
+/* Defines for backwards compatibility with code written before nanopb-0.4.0 */
+#define pb_decode_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NOINIT)
+#define pb_decode_delimited(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED)
+#define pb_decode_delimited_noinit(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_DELIMITED | PB_DECODE_NOINIT)
+#define pb_decode_nullterminated(s,f,d) pb_decode_ex(s,f,d, PB_DECODE_NULLTERMINATED)
+
+/* Release any allocated pointer fields.
If you use dynamic allocation, you should
+ * call this for any successfully decoded message when you are done with it. If
+ * pb_decode() returns with an error, the message is already released.
+ */
+void pb_release(const pb_msgdesc_t *fields, void *dest_struct);
+
+/**************************************
+ * Functions for manipulating streams *
+ **************************************/
+
+/* Create an input stream for reading from a memory buffer.
+ *
+ * msglen should be the actual length of the message, not the full size of
+ * allocated buffer.
+ *
+ * Alternatively, you can use a custom stream that reads directly from e.g.
+ * a file or a network socket.
+ */
+pb_istream_t pb_istream_from_buffer(const pb_byte_t *buf, size_t msglen);
+
+/* Function to read from a pb_istream_t. You can use this if you need to
+ * read some custom header data, or to read data in field callbacks.
+ */
+bool pb_read(pb_istream_t *stream, pb_byte_t *buf, size_t count);
+
+
+/************************************************
+ * Helper functions for writing field callbacks *
+ ************************************************/
+
+/* Decode the tag for the next field in the stream. Gives the wire type and
+ * field tag. At end of the message, returns false and sets eof to true. */
+bool pb_decode_tag(pb_istream_t *stream, pb_wire_type_t *wire_type, uint32_t *tag, bool *eof);
+
+/* Skip the field payload data, given the wire type. */
+bool pb_skip_field(pb_istream_t *stream, pb_wire_type_t wire_type);
+
+/* Decode an integer in the varint format. This works for enum, int32,
+ * int64, uint32 and uint64 field types. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_decode_varint(pb_istream_t *stream, uint64_t *dest);
+#else
+#define pb_decode_varint pb_decode_varint32
+#endif
+
+/* Decode an integer in the varint format. This works for enum, int32,
+ * and uint32 field types. */
+bool pb_decode_varint32(pb_istream_t *stream, uint32_t *dest);
+
+/* Decode a bool value in varint format. */
+bool pb_decode_bool(pb_istream_t *stream, bool *dest);
+
+/* Decode an integer in the zig-zagged svarint format. This works for sint32
+ * and sint64. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_decode_svarint(pb_istream_t *stream, int64_t *dest);
+#else
+bool pb_decode_svarint(pb_istream_t *stream, int32_t *dest);
+#endif
+
+/* Decode a fixed32, sfixed32 or float value. You need to pass a pointer to
+ * a 4-byte wide C variable. */
+bool pb_decode_fixed32(pb_istream_t *stream, void *dest);
+
+#ifndef PB_WITHOUT_64BIT
+/* Decode a fixed64, sfixed64 or double value. You need to pass a pointer to
+ * an 8-byte wide C variable. */
+bool pb_decode_fixed64(pb_istream_t *stream, void *dest);
+#endif
+
+#ifdef PB_CONVERT_DOUBLE_FLOAT
+/* Decode a double value into a float variable. */
+bool pb_decode_double_as_float(pb_istream_t *stream, float *dest);
+#endif
+
+/* Make a limited-length substream for reading a PB_WT_STRING field.
*/ +bool pb_make_string_substream(pb_istream_t *stream, pb_istream_t *substream); +bool pb_close_string_substream(pb_istream_t *stream, pb_istream_t *substream); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/nanopb/pb_encode.c b/src/nanopb/pb_encode.c new file mode 100644 index 0000000000..d85e03185a --- /dev/null +++ b/src/nanopb/pb_encode.c @@ -0,0 +1,1000 @@ +/* pb_encode.c -- encode a protobuf using minimal resources + * + * 2011 Petteri Aimonen + */ + +#include "nanopb/pb.h" +#include "nanopb/pb_encode.h" +#include "nanopb/pb_common.h" + +/* Use the GCC warn_unused_result attribute to check that all return values + * are propagated correctly. On other compilers and gcc before 3.4.0 just + * ignore the annotation. + */ +#if !defined(__GNUC__) || ( __GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 4) + #define checkreturn +#else + #define checkreturn __attribute__((warn_unused_result)) +#endif + +/************************************** + * Declarations internal to this file * + **************************************/ +static bool checkreturn buf_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count); +static bool checkreturn encode_array(pb_ostream_t *stream, pb_field_iter_t *field); +static bool checkreturn pb_check_proto3_default_value(const pb_field_iter_t *field); +static bool checkreturn encode_basic_field(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn encode_callback_field(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn encode_field(pb_ostream_t *stream, pb_field_iter_t *field); +static bool checkreturn encode_extension_field(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn default_extension_encoder(pb_ostream_t *stream, const pb_extension_t *extension); +static bool checkreturn pb_encode_varint_32(pb_ostream_t *stream, uint32_t low, uint32_t high); +static bool checkreturn pb_enc_bool(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_enc_varint(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_enc_fixed(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_enc_bytes(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_enc_string(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_enc_submessage(pb_ostream_t *stream, const pb_field_iter_t *field); +static bool checkreturn pb_enc_fixed_length_bytes(pb_ostream_t *stream, const pb_field_iter_t *field); + +#ifdef PB_WITHOUT_64BIT +#define pb_int64_t int32_t +#define pb_uint64_t uint32_t +#else +#define pb_int64_t int64_t +#define pb_uint64_t uint64_t +#endif + +/******************************* + * pb_ostream_t implementation * + *******************************/ + +static bool checkreturn buf_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count) +{ + pb_byte_t *dest = (pb_byte_t*)stream->state; + stream->state = dest + count; + + memcpy(dest, buf, count * sizeof(pb_byte_t)); + + return true; +} + +pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize) +{ + pb_ostream_t stream; +#ifdef PB_BUFFER_ONLY + /* In PB_BUFFER_ONLY configuration the callback pointer is just int*. + * NULL pointer marks a sizing field, so put a non-NULL value to mark a buffer stream. 
+ */
+    static const int marker = 0;
+    stream.callback = &marker;
+#else
+    stream.callback = &buf_write;
+#endif
+    stream.state = buf;
+    stream.max_size = bufsize;
+    stream.bytes_written = 0;
+#ifndef PB_NO_ERRMSG
+    stream.errmsg = NULL;
+#endif
+    return stream;
+}
+
+bool checkreturn pb_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count)
+{
+    if (count > 0 && stream->callback != NULL)
+    {
+        if (stream->bytes_written + count < stream->bytes_written ||
+            stream->bytes_written + count > stream->max_size)
+        {
+            PB_RETURN_ERROR(stream, "stream full");
+        }
+
+#ifdef PB_BUFFER_ONLY
+        if (!buf_write(stream, buf, count))
+            PB_RETURN_ERROR(stream, "io error");
+#else
+        if (!stream->callback(stream, buf, count))
+            PB_RETURN_ERROR(stream, "io error");
+#endif
+    }
+
+    stream->bytes_written += count;
+    return true;
+}
+
+/*************************
+ * Encode a single field *
+ *************************/
+
+/* Read a bool value without causing undefined behavior even if the value
+ * is invalid. See issue #434 and
+ * https://stackoverflow.com/questions/27661768/weird-results-for-conditional
+ */
+static bool safe_read_bool(const void *pSize)
+{
+    const char *p = (const char *)pSize;
+    size_t i;
+    for (i = 0; i < sizeof(bool); i++)
+    {
+        if (p[i] != 0)
+            return true;
+    }
+    return false;
+}
+
+/* Encode a static array. Handles the size calculations and possible packing. */
+static bool checkreturn encode_array(pb_ostream_t *stream, pb_field_iter_t *field)
+{
+    pb_size_t i;
+    pb_size_t count;
+#ifndef PB_ENCODE_ARRAYS_UNPACKED
+    size_t size;
+#endif
+
+    count = *(pb_size_t*)field->pSize;
+
+    if (count == 0)
+        return true;
+
+    if (PB_ATYPE(field->type) != PB_ATYPE_POINTER && count > field->array_size)
+        PB_RETURN_ERROR(stream, "array max size exceeded");
+
+#ifndef PB_ENCODE_ARRAYS_UNPACKED
+    /* We always pack arrays if the datatype allows it. */
+    if (PB_LTYPE(field->type) <= PB_LTYPE_LAST_PACKABLE)
+    {
+        if (!pb_encode_tag(stream, PB_WT_STRING, field->tag))
+            return false;
+
+        /* Determine the total size of packed array. */
+        if (PB_LTYPE(field->type) == PB_LTYPE_FIXED32)
+        {
+            size = 4 * (size_t)count;
+        }
+        else if (PB_LTYPE(field->type) == PB_LTYPE_FIXED64)
+        {
+            size = 8 * (size_t)count;
+        }
+        else
+        {
+            pb_ostream_t sizestream = PB_OSTREAM_SIZING;
+            void *pData_orig = field->pData;
+            for (i = 0; i < count; i++)
+            {
+                if (!pb_enc_varint(&sizestream, field))
+                    PB_RETURN_ERROR(stream, PB_GET_ERROR(&sizestream));
+                field->pData = (char*)field->pData + field->data_size;
+            }
+            field->pData = pData_orig;
+            size = sizestream.bytes_written;
+        }
+
+        if (!pb_encode_varint(stream, (pb_uint64_t)size))
+            return false;
+
+        if (stream->callback == NULL)
+            return pb_write(stream, NULL, size); /* Just sizing.. */
+
+        /* Write the data */
+        for (i = 0; i < count; i++)
+        {
+            if (PB_LTYPE(field->type) == PB_LTYPE_FIXED32 || PB_LTYPE(field->type) == PB_LTYPE_FIXED64)
+            {
+                if (!pb_enc_fixed(stream, field))
+                    return false;
+            }
+            else
+            {
+                if (!pb_enc_varint(stream, field))
+                    return false;
+            }
+
+            field->pData = (char*)field->pData + field->data_size;
+        }
+    }
+    else /* Unpacked fields */
+#endif
+    {
+        for (i = 0; i < count; i++)
+        {
+            /* Normally the data is stored directly in the array entries, but
+             * for pointer-type string and bytes fields, the array entries are
+             * themselves pointers. So we have to dereference once
+             * more to get to the actual data.
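+             * (For instance, a repeated pointer-type string field is stored
+             * as a char** array: field->pData points at a char* entry, which
+             * in turn points at the string bytes.)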
*/ + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER && + (PB_LTYPE(field->type) == PB_LTYPE_STRING || + PB_LTYPE(field->type) == PB_LTYPE_BYTES)) + { + bool status; + void *pData_orig = field->pData; + field->pData = *(void* const*)field->pData; + + if (!field->pData) + { + /* Null pointer in array is treated as empty string / bytes */ + status = pb_encode_tag_for_field(stream, field) && + pb_encode_varint(stream, 0); + } + else + { + status = encode_basic_field(stream, field); + } + + field->pData = pData_orig; + + if (!status) + return false; + } + else + { + if (!encode_basic_field(stream, field)) + return false; + } + field->pData = (char*)field->pData + field->data_size; + } + } + + return true; +} + +/* In proto3, all fields are optional and are only encoded if their value is "non-zero". + * This function implements the check for the zero value. */ +static bool checkreturn pb_check_proto3_default_value(const pb_field_iter_t *field) +{ + pb_type_t type = field->type; + + if (PB_ATYPE(type) == PB_ATYPE_STATIC) + { + if (PB_HTYPE(type) == PB_HTYPE_REQUIRED) + { + /* Required proto2 fields inside proto3 submessage, pretty rare case */ + return false; + } + else if (PB_HTYPE(type) == PB_HTYPE_REPEATED) + { + /* Repeated fields inside proto3 submessage: present if count != 0 */ + return *(const pb_size_t*)field->pSize == 0; + } + else if (PB_HTYPE(type) == PB_HTYPE_ONEOF) + { + /* Oneof fields */ + return *(const pb_size_t*)field->pSize == 0; + } + else if (PB_HTYPE(type) == PB_HTYPE_OPTIONAL && field->pSize != NULL) + { + /* Proto2 optional fields inside proto3 message, or proto3 + * submessage fields. */ + return safe_read_bool(field->pSize) == false; + } + else if (field->descriptor->default_value) + { + /* Proto3 messages do not have default values, but proto2 messages + * can contain optional fields without has_fields (generator option 'proto3'). + * In this case they must always be encoded, to make sure that the + * non-zero default value is overwritten. + */ + return false; + } + + /* Rest is proto3 singular fields */ + if (PB_LTYPE(type) <= PB_LTYPE_LAST_PACKABLE) + { + /* Simple integer / float fields */ + pb_size_t i; + const char *p = (const char*)field->pData; + for (i = 0; i < field->data_size; i++) + { + if (p[i] != 0) + { + return false; + } + } + + return true; + } + else if (PB_LTYPE(type) == PB_LTYPE_BYTES) + { + const pb_bytes_array_t *bytes = (const pb_bytes_array_t*)field->pData; + return bytes->size == 0; + } + else if (PB_LTYPE(type) == PB_LTYPE_STRING) + { + return *(const char*)field->pData == '\0'; + } + else if (PB_LTYPE(type) == PB_LTYPE_FIXED_LENGTH_BYTES) + { + /* Fixed length bytes is only empty if its length is fixed + * as 0. Which would be pretty strange, but we can check + * it anyway. */ + return field->data_size == 0; + } + else if (PB_LTYPE_IS_SUBMSG(type)) + { + /* Check all fields in the submessage to find if any of them + * are non-zero. The comparison cannot be done byte-per-byte + * because the C struct may contain padding bytes that must + * be skipped. Note that usually proto3 submessages have + * a separate has_field that is checked earlier in this if. 
+ */ + pb_field_iter_t iter; + if (pb_field_iter_begin(&iter, field->submsg_desc, field->pData)) + { + do + { + if (!pb_check_proto3_default_value(&iter)) + { + return false; + } + } while (pb_field_iter_next(&iter)); + } + return true; + } + } + else if (PB_ATYPE(type) == PB_ATYPE_POINTER) + { + return field->pData == NULL; + } + else if (PB_ATYPE(type) == PB_ATYPE_CALLBACK) + { + if (PB_LTYPE(type) == PB_LTYPE_EXTENSION) + { + const pb_extension_t *extension = *(const pb_extension_t* const *)field->pData; + return extension == NULL; + } + else if (field->descriptor->field_callback == pb_default_field_callback) + { + pb_callback_t *pCallback = (pb_callback_t*)field->pData; + return pCallback->funcs.encode == NULL; + } + else + { + return field->descriptor->field_callback == NULL; + } + } + + return false; /* Not typically reached, safe default for weird special cases. */ +} + +/* Encode a field with static or pointer allocation, i.e. one whose data + * is available to the encoder directly. */ +static bool checkreturn encode_basic_field(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (!field->pData) + { + /* Missing pointer field */ + return true; + } + + if (!pb_encode_tag_for_field(stream, field)) + return false; + + switch (PB_LTYPE(field->type)) + { + case PB_LTYPE_BOOL: + return pb_enc_bool(stream, field); + + case PB_LTYPE_VARINT: + case PB_LTYPE_UVARINT: + case PB_LTYPE_SVARINT: + return pb_enc_varint(stream, field); + + case PB_LTYPE_FIXED32: + case PB_LTYPE_FIXED64: + return pb_enc_fixed(stream, field); + + case PB_LTYPE_BYTES: + return pb_enc_bytes(stream, field); + + case PB_LTYPE_STRING: + return pb_enc_string(stream, field); + + case PB_LTYPE_SUBMESSAGE: + case PB_LTYPE_SUBMSG_W_CB: + return pb_enc_submessage(stream, field); + + case PB_LTYPE_FIXED_LENGTH_BYTES: + return pb_enc_fixed_length_bytes(stream, field); + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } +} + +/* Encode a field with callback semantics. This means that a user function is + * called to provide and encode the actual data. */ +static bool checkreturn encode_callback_field(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (field->descriptor->field_callback != NULL) + { + if (!field->descriptor->field_callback(NULL, stream, field)) + PB_RETURN_ERROR(stream, "callback error"); + } + return true; +} + +/* Encode a single field of any callback, pointer or static type. 
*/ +static bool checkreturn encode_field(pb_ostream_t *stream, pb_field_iter_t *field) +{ + /* Check field presence */ + if (PB_HTYPE(field->type) == PB_HTYPE_ONEOF) + { + if (*(const pb_size_t*)field->pSize != field->tag) + { + /* Different type oneof field */ + return true; + } + } + else if (PB_HTYPE(field->type) == PB_HTYPE_OPTIONAL) + { + if (field->pSize) + { + if (safe_read_bool(field->pSize) == false) + { + /* Missing optional field */ + return true; + } + } + else if (PB_ATYPE(field->type) == PB_ATYPE_STATIC) + { + /* Proto3 singular field */ + if (pb_check_proto3_default_value(field)) + return true; + } + } + + if (!field->pData) + { + if (PB_HTYPE(field->type) == PB_HTYPE_REQUIRED) + PB_RETURN_ERROR(stream, "missing required field"); + + /* Pointer field set to NULL */ + return true; + } + + /* Then encode field contents */ + if (PB_ATYPE(field->type) == PB_ATYPE_CALLBACK) + { + return encode_callback_field(stream, field); + } + else if (PB_HTYPE(field->type) == PB_HTYPE_REPEATED) + { + return encode_array(stream, field); + } + else + { + return encode_basic_field(stream, field); + } +} + +/* Default handler for extension fields. Expects to have a pb_msgdesc_t + * pointer in the extension->type->arg field, pointing to a message with + * only one field in it. */ +static bool checkreturn default_extension_encoder(pb_ostream_t *stream, const pb_extension_t *extension) +{ + pb_field_iter_t iter; + + if (!pb_field_iter_begin_extension_const(&iter, extension)) + PB_RETURN_ERROR(stream, "invalid extension"); + + return encode_field(stream, &iter); +} + + +/* Walk through all the registered extensions and give them a chance + * to encode themselves. */ +static bool checkreturn encode_extension_field(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + const pb_extension_t *extension = *(const pb_extension_t* const *)field->pData; + + while (extension) + { + bool status; + if (extension->type->encode) + status = extension->type->encode(stream, extension); + else + status = default_extension_encoder(stream, extension); + + if (!status) + return false; + + extension = extension->next; + } + + return true; +} + +/********************* + * Encode all fields * + *********************/ + +bool checkreturn pb_encode(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct) +{ + pb_field_iter_t iter; + if (!pb_field_iter_begin_const(&iter, fields, src_struct)) + return true; /* Empty message type */ + + do { + if (PB_LTYPE(iter.type) == PB_LTYPE_EXTENSION) + { + /* Special case for the extension field placeholder */ + if (!encode_extension_field(stream, &iter)) + return false; + } + else + { + /* Regular field */ + if (!encode_field(stream, &iter)) + return false; + } + } while (pb_field_iter_next(&iter)); + + return true; +} + +bool checkreturn pb_encode_ex(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct, unsigned int flags) +{ + if ((flags & PB_ENCODE_DELIMITED) != 0) + { + return pb_encode_submessage(stream, fields, src_struct); + } + else if ((flags & PB_ENCODE_NULLTERMINATED) != 0) + { + const pb_byte_t zero = 0; + + if (!pb_encode(stream, fields, src_struct)) + return false; + + return pb_write(stream, &zero, 1); + } + else + { + return pb_encode(stream, fields, src_struct); + } +} + +bool pb_get_encoded_size(size_t *size, const pb_msgdesc_t *fields, const void *src_struct) +{ + pb_ostream_t stream = PB_OSTREAM_SIZING; + + if (!pb_encode(&stream, fields, src_struct)) + return false; + + *size = stream.bytes_written; + return true; +} + 
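+/* Example usage of the sizing + encoding pair above (an illustrative
+ * sketch only; MyMessage and MyMessage_fields stand for any
+ * nanopb-generated message type):
+ *
+ *    MyMessage msg = MyMessage_init_zero;
+ *    pb_byte_t buffer[64];
+ *    size_t size;
+ *
+ *    if (pb_get_encoded_size(&size, MyMessage_fields, &msg) &&
+ *        size <= sizeof(buffer))
+ *    {
+ *        pb_ostream_t stream = pb_ostream_from_buffer(buffer, sizeof(buffer));
+ *        if (!pb_encode(&stream, MyMessage_fields, &msg))
+ *        {
+ *            // inspect PB_GET_ERROR(&stream)
+ *        }
+ *    }
+ */
+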
+/********************
+ * Helper functions *
+ ********************/
+
+/* This function avoids 64-bit shifts as they are quite slow on many platforms. */
+static bool checkreturn pb_encode_varint_32(pb_ostream_t *stream, uint32_t low, uint32_t high)
+{
+    size_t i = 0;
+    pb_byte_t buffer[10];
+    pb_byte_t byte = (pb_byte_t)(low & 0x7F);
+    low >>= 7;
+
+    while (i < 4 && (low != 0 || high != 0))
+    {
+        byte |= 0x80;
+        buffer[i++] = byte;
+        byte = (pb_byte_t)(low & 0x7F);
+        low >>= 7;
+    }
+
+    if (high)
+    {
+        byte = (pb_byte_t)(byte | ((high & 0x07) << 4));
+        high >>= 3;
+
+        while (high)
+        {
+            byte |= 0x80;
+            buffer[i++] = byte;
+            byte = (pb_byte_t)(high & 0x7F);
+            high >>= 7;
+        }
+    }
+
+    buffer[i++] = byte;
+
+    return pb_write(stream, buffer, i);
+}
+
+bool checkreturn pb_encode_varint(pb_ostream_t *stream, pb_uint64_t value)
+{
+    if (value <= 0x7F)
+    {
+        /* Fast path: single byte */
+        pb_byte_t byte = (pb_byte_t)value;
+        return pb_write(stream, &byte, 1);
+    }
+    else
+    {
+#ifdef PB_WITHOUT_64BIT
+        return pb_encode_varint_32(stream, value, 0);
+#else
+        return pb_encode_varint_32(stream, (uint32_t)value, (uint32_t)(value >> 32));
+#endif
+    }
+}
+
+bool checkreturn pb_encode_svarint(pb_ostream_t *stream, pb_int64_t value)
+{
+    pb_uint64_t zigzagged;
+    pb_uint64_t mask = ((pb_uint64_t)-1) >> 1; /* Satisfy clang -fsanitize=integer */
+    if (value < 0)
+        zigzagged = ~(((pb_uint64_t)value & mask) << 1);
+    else
+        zigzagged = (pb_uint64_t)value << 1;
+
+    return pb_encode_varint(stream, zigzagged);
+}
+
+bool checkreturn pb_encode_fixed32(pb_ostream_t *stream, const void *value)
+{
+#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1
+    /* Fast path if we know that we're on little endian */
+    return pb_write(stream, (const pb_byte_t*)value, 4);
+#else
+    uint32_t val = *(const uint32_t*)value;
+    pb_byte_t bytes[4];
+    bytes[0] = (pb_byte_t)(val & 0xFF);
+    bytes[1] = (pb_byte_t)((val >> 8) & 0xFF);
+    bytes[2] = (pb_byte_t)((val >> 16) & 0xFF);
+    bytes[3] = (pb_byte_t)((val >> 24) & 0xFF);
+    return pb_write(stream, bytes, 4);
+#endif
+}
+
+#ifndef PB_WITHOUT_64BIT
+bool checkreturn pb_encode_fixed64(pb_ostream_t *stream, const void *value)
+{
+#if defined(PB_LITTLE_ENDIAN_8BIT) && PB_LITTLE_ENDIAN_8BIT == 1
+    /* Fast path if we know that we're on little endian */
+    return pb_write(stream, (const pb_byte_t*)value, 8);
+#else
+    uint64_t val = *(const uint64_t*)value;
+    pb_byte_t bytes[8];
+    bytes[0] = (pb_byte_t)(val & 0xFF);
+    bytes[1] = (pb_byte_t)((val >> 8) & 0xFF);
+    bytes[2] = (pb_byte_t)((val >> 16) & 0xFF);
+    bytes[3] = (pb_byte_t)((val >> 24) & 0xFF);
+    bytes[4] = (pb_byte_t)((val >> 32) & 0xFF);
+    bytes[5] = (pb_byte_t)((val >> 40) & 0xFF);
+    bytes[6] = (pb_byte_t)((val >> 48) & 0xFF);
+    bytes[7] = (pb_byte_t)((val >> 56) & 0xFF);
+    return pb_write(stream, bytes, 8);
+#endif
+}
+#endif
+
+bool checkreturn pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number)
+{
+    pb_uint64_t tag = ((pb_uint64_t)field_number << 3) | wiretype;
+    return pb_encode_varint(stream, tag);
+}
+
+bool checkreturn pb_encode_tag_for_field(pb_ostream_t *stream, const pb_field_iter_t *field)
+{
+    pb_wire_type_t wiretype;
+    switch (PB_LTYPE(field->type))
+    {
+        case PB_LTYPE_BOOL:
+        case PB_LTYPE_VARINT:
+        case PB_LTYPE_UVARINT:
+        case PB_LTYPE_SVARINT:
+            wiretype = PB_WT_VARINT;
+            break;
+
+        case PB_LTYPE_FIXED32:
+            wiretype = PB_WT_32BIT;
+            break;
+
+        case PB_LTYPE_FIXED64:
+            wiretype = PB_WT_64BIT;
+            break;
+
+        case PB_LTYPE_BYTES:
+        case PB_LTYPE_STRING:
+        case PB_LTYPE_SUBMESSAGE:
+        case
PB_LTYPE_SUBMSG_W_CB: + case PB_LTYPE_FIXED_LENGTH_BYTES: + wiretype = PB_WT_STRING; + break; + + default: + PB_RETURN_ERROR(stream, "invalid field type"); + } + + return pb_encode_tag(stream, wiretype, field->tag); +} + +bool checkreturn pb_encode_string(pb_ostream_t *stream, const pb_byte_t *buffer, size_t size) +{ + if (!pb_encode_varint(stream, (pb_uint64_t)size)) + return false; + + return pb_write(stream, buffer, size); +} + +bool checkreturn pb_encode_submessage(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct) +{ + /* First calculate the message size using a non-writing substream. */ + pb_ostream_t substream = PB_OSTREAM_SIZING; + size_t size; + bool status; + + if (!pb_encode(&substream, fields, src_struct)) + { +#ifndef PB_NO_ERRMSG + stream->errmsg = substream.errmsg; +#endif + return false; + } + + size = substream.bytes_written; + + if (!pb_encode_varint(stream, (pb_uint64_t)size)) + return false; + + if (stream->callback == NULL) + return pb_write(stream, NULL, size); /* Just sizing */ + + if (stream->bytes_written + size > stream->max_size) + PB_RETURN_ERROR(stream, "stream full"); + + /* Use a substream to verify that a callback doesn't write more than + * what it did the first time. */ + substream.callback = stream->callback; + substream.state = stream->state; + substream.max_size = size; + substream.bytes_written = 0; +#ifndef PB_NO_ERRMSG + substream.errmsg = NULL; +#endif + + status = pb_encode(&substream, fields, src_struct); + + stream->bytes_written += substream.bytes_written; + stream->state = substream.state; +#ifndef PB_NO_ERRMSG + stream->errmsg = substream.errmsg; +#endif + + if (substream.bytes_written != size) + PB_RETURN_ERROR(stream, "submsg size changed"); + + return status; +} + +/* Field encoders */ + +static bool checkreturn pb_enc_bool(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + uint32_t value = safe_read_bool(field->pData) ? 
1 : 0; + PB_UNUSED(field); + return pb_encode_varint(stream, value); +} + +static bool checkreturn pb_enc_varint(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (PB_LTYPE(field->type) == PB_LTYPE_UVARINT) + { + /* Perform unsigned integer extension */ + pb_uint64_t value = 0; + + if (field->data_size == sizeof(uint_least8_t)) + value = *(const uint_least8_t*)field->pData; + else if (field->data_size == sizeof(uint_least16_t)) + value = *(const uint_least16_t*)field->pData; + else if (field->data_size == sizeof(uint32_t)) + value = *(const uint32_t*)field->pData; + else if (field->data_size == sizeof(pb_uint64_t)) + value = *(const pb_uint64_t*)field->pData; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + return pb_encode_varint(stream, value); + } + else + { + /* Perform signed integer extension */ + pb_int64_t value = 0; + + if (field->data_size == sizeof(int_least8_t)) + value = *(const int_least8_t*)field->pData; + else if (field->data_size == sizeof(int_least16_t)) + value = *(const int_least16_t*)field->pData; + else if (field->data_size == sizeof(int32_t)) + value = *(const int32_t*)field->pData; + else if (field->data_size == sizeof(pb_int64_t)) + value = *(const pb_int64_t*)field->pData; + else + PB_RETURN_ERROR(stream, "invalid data_size"); + + if (PB_LTYPE(field->type) == PB_LTYPE_SVARINT) + return pb_encode_svarint(stream, value); +#ifdef PB_WITHOUT_64BIT + else if (value < 0) + return pb_encode_varint_32(stream, (uint32_t)value, (uint32_t)-1); +#endif + else + return pb_encode_varint(stream, (pb_uint64_t)value); + + } +} + +static bool checkreturn pb_enc_fixed(pb_ostream_t *stream, const pb_field_iter_t *field) +{ +#ifdef PB_CONVERT_DOUBLE_FLOAT + if (field->data_size == sizeof(float) && PB_LTYPE(field->type) == PB_LTYPE_FIXED64) + { + return pb_encode_float_as_double(stream, *(float*)field->pData); + } +#endif + + if (field->data_size == sizeof(uint32_t)) + { + return pb_encode_fixed32(stream, field->pData); + } +#ifndef PB_WITHOUT_64BIT + else if (field->data_size == sizeof(uint64_t)) + { + return pb_encode_fixed64(stream, field->pData); + } +#endif + else + { + PB_RETURN_ERROR(stream, "invalid data_size"); + } +} + +static bool checkreturn pb_enc_bytes(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + const pb_bytes_array_t *bytes = NULL; + + bytes = (const pb_bytes_array_t*)field->pData; + + if (bytes == NULL) + { + /* Treat null pointer as an empty bytes field */ + return pb_encode_string(stream, NULL, 0); + } + + if (PB_ATYPE(field->type) == PB_ATYPE_STATIC && + bytes->size > field->data_size - offsetof(pb_bytes_array_t, bytes)) + { + PB_RETURN_ERROR(stream, "bytes size exceeded"); + } + + return pb_encode_string(stream, bytes->bytes, (size_t)bytes->size); +} + +static bool checkreturn pb_enc_string(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + size_t size = 0; + size_t max_size = (size_t)field->data_size; + const char *str = (const char*)field->pData; + + if (PB_ATYPE(field->type) == PB_ATYPE_POINTER) + { + max_size = (size_t)-1; + } + else + { + /* pb_dec_string() assumes string fields end with a null + * terminator when the type isn't PB_ATYPE_POINTER, so we + * shouldn't allow more than max-1 bytes to be written to + * allow space for the null terminator. 
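+         * (E.g. a static char[8] string field can hold at most 7 bytes of
+         * string data plus the terminating '\0'.)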
+ */ + if (max_size == 0) + PB_RETURN_ERROR(stream, "zero-length string"); + + max_size -= 1; + } + + + if (str == NULL) + { + size = 0; /* Treat null pointer as an empty string */ + } + else + { + const char *p = str; + + /* strnlen() is not always available, so just use a loop */ + while (size < max_size && *p != '\0') + { + size++; + p++; + } + + if (*p != '\0') + { + PB_RETURN_ERROR(stream, "unterminated string"); + } + } + +#ifdef PB_VALIDATE_UTF8 + if (!pb_validate_utf8(str)) + PB_RETURN_ERROR(stream, "invalid utf8"); +#endif + + return pb_encode_string(stream, (const pb_byte_t*)str, size); +} + +static bool checkreturn pb_enc_submessage(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + if (field->submsg_desc == NULL) + PB_RETURN_ERROR(stream, "invalid field descriptor"); + + if (PB_LTYPE(field->type) == PB_LTYPE_SUBMSG_W_CB && field->pSize != NULL) + { + /* Message callback is stored right before pSize. */ + pb_callback_t *callback = (pb_callback_t*)field->pSize - 1; + if (callback->funcs.encode) + { + if (!callback->funcs.encode(stream, field, &callback->arg)) + return false; + } + } + + return pb_encode_submessage(stream, field->submsg_desc, field->pData); +} + +static bool checkreturn pb_enc_fixed_length_bytes(pb_ostream_t *stream, const pb_field_iter_t *field) +{ + return pb_encode_string(stream, (const pb_byte_t*)field->pData, (size_t)field->data_size); +} + +#ifdef PB_CONVERT_DOUBLE_FLOAT +bool pb_encode_float_as_double(pb_ostream_t *stream, float value) +{ + union { float f; uint32_t i; } in; + uint_least8_t sign; + int exponent; + uint64_t mantissa; + + in.f = value; + + /* Decompose input value */ + sign = (uint_least8_t)((in.i >> 31) & 1); + exponent = (int)((in.i >> 23) & 0xFF) - 127; + mantissa = in.i & 0x7FFFFF; + + if (exponent == 128) + { + /* Special value (NaN etc.) */ + exponent = 1024; + } + else if (exponent == -127) + { + if (!mantissa) + { + /* Zero */ + exponent = -1023; + } + else + { + /* Denormalized */ + mantissa <<= 1; + while (!(mantissa & 0x800000)) + { + mantissa <<= 1; + exponent--; + } + mantissa &= 0x7FFFFF; + } + } + + /* Combine fields */ + mantissa <<= 29; + mantissa |= (uint64_t)(exponent + 1023) << 52; + mantissa |= (uint64_t)sign << 63; + + return pb_encode_fixed64(stream, &mantissa); +} +#endif diff --git a/src/nanopb/pb_encode.h b/src/nanopb/pb_encode.h new file mode 100644 index 0000000000..f3805e711d --- /dev/null +++ b/src/nanopb/pb_encode.h @@ -0,0 +1,185 @@ +/* pb_encode.h: Functions to encode protocol buffers. Depends on pb_encode.c. + * The main function is pb_encode. You also need an output stream, and the + * field descriptions created by nanopb_generator.py. + */ + +#ifndef PB_ENCODE_H_INCLUDED +#define PB_ENCODE_H_INCLUDED + +#include "nanopb/pb.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure for defining custom output streams. You will need to provide + * a callback function to write the bytes to your storage, which can be + * for example a file or a network socket. + * + * The callback must conform to these rules: + * + * 1) Return false on IO errors. This will cause encoding to abort. + * 2) You can use state to store your own data (e.g. buffer pointer). + * 3) pb_write will update bytes_written after your callback runs. + * 4) Substreams will modify max_size and bytes_written. Don't use them + * to calculate any pointers. + */ +struct pb_ostream_s +{ +#ifdef PB_BUFFER_ONLY + /* Callback pointer is not used in buffer-only configuration. 
+ * Having an int pointer here allows binary compatibility but + * gives an error if someone tries to assign callback function. + * Also, NULL pointer marks a 'sizing stream' that does not + * write anything. + */ + const int *callback; +#else + bool (*callback)(pb_ostream_t *stream, const pb_byte_t *buf, size_t count); +#endif + void *state; /* Free field for use by callback implementation. */ + size_t max_size; /* Limit number of output bytes written (or use SIZE_MAX). */ + size_t bytes_written; /* Number of bytes written so far. */ + +#ifndef PB_NO_ERRMSG + const char *errmsg; +#endif +}; + +/*************************** + * Main encoding functions * + ***************************/ + +/* Encode a single protocol buffers message from C structure into a stream. + * Returns true on success, false on any failure. + * The actual struct pointed to by src_struct must match the description in fields. + * All required fields in the struct are assumed to have been filled in. + * + * Example usage: + * MyMessage msg = {}; + * uint8_t buffer[64]; + * pb_ostream_t stream; + * + * msg.field1 = 42; + * stream = pb_ostream_from_buffer(buffer, sizeof(buffer)); + * pb_encode(&stream, MyMessage_fields, &msg); + */ +bool pb_encode(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct); + +/* Extended version of pb_encode, with several options to control the + * encoding process: + * + * PB_ENCODE_DELIMITED: Prepend the length of message as a varint. + * Corresponds to writeDelimitedTo() in Google's + * protobuf API. + * + * PB_ENCODE_NULLTERMINATED: Append a null byte to the message for termination. + * NOTE: This behaviour is not supported in most other + * protobuf implementations, so PB_ENCODE_DELIMITED + * is a better option for compatibility. + */ +#define PB_ENCODE_DELIMITED 0x02U +#define PB_ENCODE_NULLTERMINATED 0x04U +bool pb_encode_ex(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct, unsigned int flags); + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define pb_encode_delimited(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_DELIMITED) +#define pb_encode_nullterminated(s,f,d) pb_encode_ex(s,f,d, PB_ENCODE_NULLTERMINATED) + +/* Encode the message to get the size of the encoded data, but do not store + * the data. */ +bool pb_get_encoded_size(size_t *size, const pb_msgdesc_t *fields, const void *src_struct); + +/************************************** + * Functions for manipulating streams * + **************************************/ + +/* Create an output stream for writing into a memory buffer. + * The number of bytes written can be found in stream.bytes_written after + * encoding the message. + * + * Alternatively, you can use a custom stream that writes directly to e.g. + * a file or a network socket. + */ +pb_ostream_t pb_ostream_from_buffer(pb_byte_t *buf, size_t bufsize); + +/* Pseudo-stream for measuring the size of a message without actually storing + * the encoded data. + * + * Example usage: + * MyMessage msg = {}; + * pb_ostream_t stream = PB_OSTREAM_SIZING; + * pb_encode(&stream, MyMessage_fields, &msg); + * printf("Message size is %d\n", stream.bytes_written); + */ +#ifndef PB_NO_ERRMSG +#define PB_OSTREAM_SIZING {0,0,0,0,0} +#else +#define PB_OSTREAM_SIZING {0,0,0,0} +#endif + +/* Function to write into a pb_ostream_t stream. You can use this if you need + * to append or prepend some custom headers to the message. 
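+ *
+ * With a custom stream, pb_write forwards the bytes to the stream callback.
+ * An illustrative callback (sketch only; assumes a stdio FILE handle in the
+ * state pointer, and my_file_write is a hypothetical name):
+ *
+ *    static bool my_file_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count)
+ *    {
+ *        return fwrite(buf, 1, count, (FILE*)stream->state) == count;
+ *    }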
+ */
+bool pb_write(pb_ostream_t *stream, const pb_byte_t *buf, size_t count);
+
+
+/************************************************
+ * Helper functions for writing field callbacks *
+ ************************************************/
+
+/* Encode field header based on type and field number defined in the field
+ * structure. Call this from the callback before writing out field contents. */
+bool pb_encode_tag_for_field(pb_ostream_t *stream, const pb_field_iter_t *field);
+
+/* Encode field header by manually specifying wire type. You need to use this
+ * if you want to write out packed arrays from a callback field. */
+bool pb_encode_tag(pb_ostream_t *stream, pb_wire_type_t wiretype, uint32_t field_number);
+
+/* Encode an integer in the varint format.
+ * This works for bool, enum, int32, int64, uint32 and uint64 field types. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_encode_varint(pb_ostream_t *stream, uint64_t value);
+#else
+bool pb_encode_varint(pb_ostream_t *stream, uint32_t value);
+#endif
+
+/* Encode an integer in the zig-zagged svarint format.
+ * This works for sint32 and sint64. */
+#ifndef PB_WITHOUT_64BIT
+bool pb_encode_svarint(pb_ostream_t *stream, int64_t value);
+#else
+bool pb_encode_svarint(pb_ostream_t *stream, int32_t value);
+#endif
+
+/* Encode a string or bytes type field. For strings, pass strlen(s) as size. */
+bool pb_encode_string(pb_ostream_t *stream, const pb_byte_t *buffer, size_t size);
+
+/* Encode a fixed32, sfixed32 or float value.
+ * You need to pass a pointer to a 4-byte wide C variable. */
+bool pb_encode_fixed32(pb_ostream_t *stream, const void *value);
+
+#ifndef PB_WITHOUT_64BIT
+/* Encode a fixed64, sfixed64 or double value.
+ * You need to pass a pointer to an 8-byte wide C variable. */
+bool pb_encode_fixed64(pb_ostream_t *stream, const void *value);
+#endif
+
+#ifdef PB_CONVERT_DOUBLE_FLOAT
+/* Encode a float value so that it appears like a double in the encoded
+ * message. */
+bool pb_encode_float_as_double(pb_ostream_t *stream, float value);
+#endif
+
+/* Encode a submessage field.
+ * You need to pass the message descriptor and a pointer to the struct, just like
+ * with pb_encode(). This internally encodes the submessage twice, first to
+ * calculate message size and then to actually write it out.
+ */
+bool pb_encode_submessage(pb_ostream_t *stream, const pb_msgdesc_t *fields, const void *src_struct);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif
diff --git a/src/opentelemetry/common.pb.c b/src/opentelemetry/common.pb.c
new file mode 100644
index 0000000000..e03889b577
--- /dev/null
+++ b/src/opentelemetry/common.pb.c
@@ -0,0 +1,32 @@
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.4.8-dev */
+
+#include "opentelemetry/common.pb.h"
+#if PB_PROTO_HEADER_VERSION != 40
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+PB_BIND(opentelemetry_proto_common_v1_AnyValue, opentelemetry_proto_common_v1_AnyValue, AUTO)
+
+
+PB_BIND(opentelemetry_proto_common_v1_ArrayValue, opentelemetry_proto_common_v1_ArrayValue, AUTO)
+
+
+PB_BIND(opentelemetry_proto_common_v1_KeyValueList, opentelemetry_proto_common_v1_KeyValueList, AUTO)
+
+
+PB_BIND(opentelemetry_proto_common_v1_KeyValue, opentelemetry_proto_common_v1_KeyValue, AUTO)
+
+
+PB_BIND(opentelemetry_proto_common_v1_InstrumentationScope, opentelemetry_proto_common_v1_InstrumentationScope, AUTO)
+
+
+
+#ifndef PB_CONVERT_DOUBLE_FLOAT
+/* On some platforms (such as AVR), double is really float.
+ * To be able to encode/decode double on these platforms, you need
+ * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or compiler command line.
+ */
+PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
+#endif
+
diff --git a/src/opentelemetry/common.pb.h b/src/opentelemetry/common.pb.h
new file mode 100644
index 0000000000..4a02adda66
--- /dev/null
+++ b/src/opentelemetry/common.pb.h
@@ -0,0 +1,170 @@
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.4.8-dev */
+
+#ifndef PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
+#define PB_OPENTELEMETRY_PROTO_COMMON_V1_OPENTELEMETRY_PROTO_COMMON_V1_COMMON_PB_H_INCLUDED
+#include <pb.h>
+
+#if PB_PROTO_HEADER_VERSION != 40
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+/* Struct definitions */
+/* ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+ since oneof in AnyValue does not allow repeated fields. */
+typedef struct _opentelemetry_proto_common_v1_ArrayValue {
+    /* Array of values. The array may be empty (contain 0 elements). */
+    pb_callback_t values;
+} opentelemetry_proto_common_v1_ArrayValue;
+
+/* KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+ since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+ a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+ avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+ are semantically equivalent. */
+typedef struct _opentelemetry_proto_common_v1_KeyValueList {
+    /* A collection of key/value pairs. The list may be empty (may
+ contain 0 elements).
+ The keys MUST be unique (it is not allowed to have more than one
+ value with the same key). */
+    pb_callback_t values;
+} opentelemetry_proto_common_v1_KeyValueList;
+
+/* AnyValue is used to represent any type of attribute value. AnyValue may contain a
+ primitive value such as a string or integer or it may contain an arbitrary nested
+ object containing arrays, key-value lists and primitives. */
+typedef struct _opentelemetry_proto_common_v1_AnyValue {
+    pb_size_t which_value;
+    union {
+        pb_callback_t string_value;
+        bool bool_value;
+        int64_t int_value;
+        double double_value;
+        opentelemetry_proto_common_v1_ArrayValue array_value;
+        opentelemetry_proto_common_v1_KeyValueList kvlist_value;
+        pb_callback_t bytes_value;
+    } value;
+} opentelemetry_proto_common_v1_AnyValue;
+
+/* KeyValue is a key-value pair that is used to store Span attributes, Link
+ attributes, etc. */
+typedef struct _opentelemetry_proto_common_v1_KeyValue {
+    pb_callback_t key;
+    bool has_value;
+    opentelemetry_proto_common_v1_AnyValue value;
+} opentelemetry_proto_common_v1_KeyValue;
+
+/* InstrumentationScope is a message representing the instrumentation scope information
+ such as the fully qualified name and version. */
+typedef struct _opentelemetry_proto_common_v1_InstrumentationScope {
+    /* An empty instrumentation scope name means the name is unknown. */
+    pb_callback_t name;
+    pb_callback_t version;
+    /* Additional attributes that describe the scope. [Optional].
+ Attribute keys MUST be unique (it is not allowed to have more than one
+ attribute with the same key).
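+
+ Note: the pb_callback_t fields of this struct (name, version,
+ attributes) are written through user-supplied encoder callbacks.
+ A minimal sketch (an illustrative example, not generated code;
+ includes and error handling elided):
+
+     static bool encode_string(pb_ostream_t *stream,
+                               const pb_field_t *field,
+                               void * const *arg) {
+         const char *str = (const char *)*arg;
+         if (!pb_encode_tag_for_field(stream, field))
+             return false;
+         return pb_encode_string(stream, (const pb_byte_t *)str, strlen(str));
+     }
+
+     scope.name.funcs.encode = encode_string;
+     scope.name.arg          = (void *)"librdkafka";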
*/ + pb_callback_t attributes; + uint32_t dropped_attributes_count; +} opentelemetry_proto_common_v1_InstrumentationScope; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Initializer values for message structs */ +#define opentelemetry_proto_common_v1_AnyValue_init_default {0, {{{NULL}, NULL}}} +#define opentelemetry_proto_common_v1_ArrayValue_init_default {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValueList_init_default {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValue_init_default {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_default} +#define opentelemetry_proto_common_v1_InstrumentationScope_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_common_v1_AnyValue_init_zero {0, {{{NULL}, NULL}}} +#define opentelemetry_proto_common_v1_ArrayValue_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValueList_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_common_v1_KeyValue_init_zero {{{NULL}, NULL}, false, opentelemetry_proto_common_v1_AnyValue_init_zero} +#define opentelemetry_proto_common_v1_InstrumentationScope_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0} + +/* Field tags (for use in manual encoding/decoding) */ +#define opentelemetry_proto_common_v1_ArrayValue_values_tag 1 +#define opentelemetry_proto_common_v1_KeyValueList_values_tag 1 +#define opentelemetry_proto_common_v1_AnyValue_string_value_tag 1 +#define opentelemetry_proto_common_v1_AnyValue_bool_value_tag 2 +#define opentelemetry_proto_common_v1_AnyValue_int_value_tag 3 +#define opentelemetry_proto_common_v1_AnyValue_double_value_tag 4 +#define opentelemetry_proto_common_v1_AnyValue_array_value_tag 5 +#define opentelemetry_proto_common_v1_AnyValue_kvlist_value_tag 6 +#define opentelemetry_proto_common_v1_AnyValue_bytes_value_tag 7 +#define opentelemetry_proto_common_v1_KeyValue_key_tag 1 +#define opentelemetry_proto_common_v1_KeyValue_value_tag 2 +#define opentelemetry_proto_common_v1_InstrumentationScope_name_tag 1 +#define opentelemetry_proto_common_v1_InstrumentationScope_version_tag 2 +#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_tag 3 +#define opentelemetry_proto_common_v1_InstrumentationScope_dropped_attributes_count_tag 4 + +/* Struct field encoding specification for nanopb */ +#define opentelemetry_proto_common_v1_AnyValue_FIELDLIST(X, a) \ +X(a, CALLBACK, ONEOF, STRING, (value,string_value,value.string_value), 1) \ +X(a, STATIC, ONEOF, BOOL, (value,bool_value,value.bool_value), 2) \ +X(a, STATIC, ONEOF, INT64, (value,int_value,value.int_value), 3) \ +X(a, STATIC, ONEOF, DOUBLE, (value,double_value,value.double_value), 4) \ +X(a, STATIC, ONEOF, MESSAGE, (value,array_value,value.array_value), 5) \ +X(a, STATIC, ONEOF, MESSAGE, (value,kvlist_value,value.kvlist_value), 6) \ +X(a, CALLBACK, ONEOF, BYTES, (value,bytes_value,value.bytes_value), 7) +#define opentelemetry_proto_common_v1_AnyValue_CALLBACK pb_default_field_callback +#define opentelemetry_proto_common_v1_AnyValue_DEFAULT NULL +#define opentelemetry_proto_common_v1_AnyValue_value_array_value_MSGTYPE opentelemetry_proto_common_v1_ArrayValue +#define opentelemetry_proto_common_v1_AnyValue_value_kvlist_value_MSGTYPE opentelemetry_proto_common_v1_KeyValueList + +#define opentelemetry_proto_common_v1_ArrayValue_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, values, 1) +#define opentelemetry_proto_common_v1_ArrayValue_CALLBACK pb_default_field_callback +#define 
opentelemetry_proto_common_v1_ArrayValue_DEFAULT NULL +#define opentelemetry_proto_common_v1_ArrayValue_values_MSGTYPE opentelemetry_proto_common_v1_AnyValue + +#define opentelemetry_proto_common_v1_KeyValueList_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, values, 1) +#define opentelemetry_proto_common_v1_KeyValueList_CALLBACK pb_default_field_callback +#define opentelemetry_proto_common_v1_KeyValueList_DEFAULT NULL +#define opentelemetry_proto_common_v1_KeyValueList_values_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +#define opentelemetry_proto_common_v1_KeyValue_FIELDLIST(X, a) \ +X(a, CALLBACK, SINGULAR, STRING, key, 1) \ +X(a, STATIC, OPTIONAL, MESSAGE, value, 2) +#define opentelemetry_proto_common_v1_KeyValue_CALLBACK pb_default_field_callback +#define opentelemetry_proto_common_v1_KeyValue_DEFAULT NULL +#define opentelemetry_proto_common_v1_KeyValue_value_MSGTYPE opentelemetry_proto_common_v1_AnyValue + +#define opentelemetry_proto_common_v1_InstrumentationScope_FIELDLIST(X, a) \ +X(a, CALLBACK, SINGULAR, STRING, name, 1) \ +X(a, CALLBACK, SINGULAR, STRING, version, 2) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 3) \ +X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 4) +#define opentelemetry_proto_common_v1_InstrumentationScope_CALLBACK pb_default_field_callback +#define opentelemetry_proto_common_v1_InstrumentationScope_DEFAULT NULL +#define opentelemetry_proto_common_v1_InstrumentationScope_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +extern const pb_msgdesc_t opentelemetry_proto_common_v1_AnyValue_msg; +extern const pb_msgdesc_t opentelemetry_proto_common_v1_ArrayValue_msg; +extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValueList_msg; +extern const pb_msgdesc_t opentelemetry_proto_common_v1_KeyValue_msg; +extern const pb_msgdesc_t opentelemetry_proto_common_v1_InstrumentationScope_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define opentelemetry_proto_common_v1_AnyValue_fields &opentelemetry_proto_common_v1_AnyValue_msg +#define opentelemetry_proto_common_v1_ArrayValue_fields &opentelemetry_proto_common_v1_ArrayValue_msg +#define opentelemetry_proto_common_v1_KeyValueList_fields &opentelemetry_proto_common_v1_KeyValueList_msg +#define opentelemetry_proto_common_v1_KeyValue_fields &opentelemetry_proto_common_v1_KeyValue_msg +#define opentelemetry_proto_common_v1_InstrumentationScope_fields &opentelemetry_proto_common_v1_InstrumentationScope_msg + +/* Maximum encoded size of messages (where known) */ +/* opentelemetry_proto_common_v1_AnyValue_size depends on runtime parameters */ +/* opentelemetry_proto_common_v1_ArrayValue_size depends on runtime parameters */ +/* opentelemetry_proto_common_v1_KeyValueList_size depends on runtime parameters */ +/* opentelemetry_proto_common_v1_KeyValue_size depends on runtime parameters */ +/* opentelemetry_proto_common_v1_InstrumentationScope_size depends on runtime parameters */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/opentelemetry/metrics.options b/src/opentelemetry/metrics.options new file mode 100644 index 0000000000..d5ab8d33c4 --- /dev/null +++ b/src/opentelemetry/metrics.options @@ -0,0 +1,2 @@ +# Needed to generate callback for data types within Metrics which isn't generated for oneof types by default +opentelemetry.proto.metrics.v1.Metric submsg_callback:true; diff --git a/src/opentelemetry/metrics.pb.c b/src/opentelemetry/metrics.pb.c new file mode 100644 index 0000000000..2b74de9272 --- /dev/null +++ 
b/src/opentelemetry/metrics.pb.c
@@ -0,0 +1,67 @@
+/* Automatically generated nanopb constant definitions */
+/* Generated by nanopb-0.4.8-dev */
+
+#include "opentelemetry/metrics.pb.h"
+#if PB_PROTO_HEADER_VERSION != 40
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+PB_BIND(opentelemetry_proto_metrics_v1_MetricsData, opentelemetry_proto_metrics_v1_MetricsData, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ResourceMetrics, opentelemetry_proto_metrics_v1_ResourceMetrics, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ScopeMetrics, opentelemetry_proto_metrics_v1_ScopeMetrics, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Metric, opentelemetry_proto_metrics_v1_Metric, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Gauge, opentelemetry_proto_metrics_v1_Gauge, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Sum, opentelemetry_proto_metrics_v1_Sum, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Histogram, opentelemetry_proto_metrics_v1_Histogram, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogram, opentelemetry_proto_metrics_v1_ExponentialHistogram, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Summary, opentelemetry_proto_metrics_v1_Summary, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_NumberDataPoint, opentelemetry_proto_metrics_v1_NumberDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_HistogramDataPoint, opentelemetry_proto_metrics_v1_HistogramDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint, opentelemetry_proto_metrics_v1_SummaryDataPoint, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile, AUTO)
+
+
+PB_BIND(opentelemetry_proto_metrics_v1_Exemplar, opentelemetry_proto_metrics_v1_Exemplar, AUTO)
+
+
+
+
+
+#ifndef PB_CONVERT_DOUBLE_FLOAT
+/* On some platforms (such as AVR), double is really float.
+ * To be able to encode/decode double on these platforms, you need
+ * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or compiler command line.
+ */
+PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES)
+#endif
+
diff --git a/src/opentelemetry/metrics.pb.h b/src/opentelemetry/metrics.pb.h
new file mode 100644
index 0000000000..7c812c2d45
--- /dev/null
+++ b/src/opentelemetry/metrics.pb.h
@@ -0,0 +1,966 @@
+/* Automatically generated nanopb header */
+/* Generated by nanopb-0.4.8-dev */
+
+#ifndef PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
+#define PB_OPENTELEMETRY_PROTO_METRICS_V1_OPENTELEMETRY_PROTO_METRICS_V1_METRICS_PB_H_INCLUDED
+#include <pb.h>
+#include "opentelemetry/common.pb.h"
+#include "opentelemetry/resource.pb.h"
+
+#if PB_PROTO_HEADER_VERSION != 40
+#error Regenerate this file with the current version of nanopb generator.
+#endif
+
+/* Enum definitions */
+/* AggregationTemporality defines how a metric aggregator reports aggregated
+ values. It describes how those values relate to the time interval over
+ which they are aggregated. */
+typedef enum _opentelemetry_proto_metrics_v1_AggregationTemporality {
+    /* UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
*/ + opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED = 0, + /* DELTA is an AggregationTemporality for a metric aggregator which reports + changes since last report time. Successive metrics contain aggregation of + values from continuous and non-overlapping intervals. + + The values for a DELTA metric are based only on the time interval + associated with one measurement cycle. There is no dependency on + previous measurements like is the case for CUMULATIVE metrics. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + DELTA metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0+1 to + t_0+2 with a value of 2. */ + opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA = 1, + /* CUMULATIVE is an AggregationTemporality for a metric aggregator which + reports changes since a fixed start time. This means that current values + of a CUMULATIVE metric depend on all previous measurements since the + start time. Because of this, the sender is required to retain this state + in some form. If this state is lost or invalidated, the CUMULATIVE metric + values MUST be reset and a new fixed start time following the last + reported measurement time sent MUST be used. + + For example, consider a system measuring the number of requests that + it receives and reports the sum of these requests every second as a + CUMULATIVE metric: + + 1. The system starts receiving at time=t_0. + 2. A request is received, the system measures 1 request. + 3. A request is received, the system measures 1 request. + 4. A request is received, the system measures 1 request. + 5. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+1 with a value of 3. + 6. A request is received, the system measures 1 request. + 7. A request is received, the system measures 1 request. + 8. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_0 to + t_0+2 with a value of 5. + 9. The system experiences a fault and loses state. + 10. The system recovers and resumes receiving at time=t_1. + 11. A request is received, the system measures 1 request. + 12. The 1 second collection cycle ends. A metric is exported for the + number of requests received over the interval of time t_1 to + t_0+1 with a value of 1. + + Note: Even though, when reporting changes since last report time, using + CUMULATIVE is valid, it is not recommended. This may cause problems for + systems that do not use start_time to determine when the aggregation + value was reset (e.g. Prometheus). 
*/
+    opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE = 2
+} opentelemetry_proto_metrics_v1_AggregationTemporality;
+
+/* DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
+ bit-field representing 32 distinct boolean flags. Each flag defined in this
+ enum is a bit-mask. To test the presence of a single flag in the flags of
+ a data point, for example, use an expression like:
+
+ (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK */
+typedef enum _opentelemetry_proto_metrics_v1_DataPointFlags {
+    /* The zero value for the enum. Should not be used for comparisons.
+ Instead use bitwise "and" with the appropriate mask as shown above. */
+    opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE = 0,
+    /* This DataPoint is valid but has no recorded value. This value
+ SHOULD be used to reflect explicitly missing data in a series, as
+ for an equivalent to the Prometheus "staleness marker". */
+    opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK = 1
+} opentelemetry_proto_metrics_v1_DataPointFlags;
+
+/* Struct definitions */
+/* MetricsData represents the metrics data that can be stored in a persistent
+ storage, OR can be embedded by other protocols that transfer OTLP metrics
+ data but do not implement the OTLP protocol.
+
+ The main difference between this message and collector protocol is that
+ in this message there will not be any "control" or "metadata" specific to
+ OTLP protocol.
+
+ When new fields are added into this message, the OTLP request MUST be updated
+ as well. */
+typedef struct _opentelemetry_proto_metrics_v1_MetricsData {
+    /* An array of ResourceMetrics.
+ For data coming from a single resource this array will typically contain
+ one element. Intermediary nodes that receive data from multiple origins
+ typically batch the data before forwarding further and in that case this
+ array will contain multiple elements. */
+    pb_callback_t resource_metrics;
+} opentelemetry_proto_metrics_v1_MetricsData;
+
+/* A collection of ScopeMetrics from a Resource. */
+typedef struct _opentelemetry_proto_metrics_v1_ResourceMetrics {
+    /* The resource for the metrics in this message.
+ If this field is not set then no resource info is known. */
+    bool has_resource;
+    opentelemetry_proto_resource_v1_Resource resource;
+    /* A list of metrics that originate from a resource. */
+    pb_callback_t scope_metrics;
+    /* This schema_url applies to the data in the "resource" field. It does not apply
+ to the data in the "scope_metrics" field which have their own schema_url field. */
+    pb_callback_t schema_url;
+} opentelemetry_proto_metrics_v1_ResourceMetrics;
+
+/* A collection of Metrics produced by a Scope. */
+typedef struct _opentelemetry_proto_metrics_v1_ScopeMetrics {
+    /* The instrumentation scope information for the metrics in this message.
+ Semantically when InstrumentationScope isn't set, it is equivalent to
+ an empty instrumentation scope name (unknown). */
+    bool has_scope;
+    opentelemetry_proto_common_v1_InstrumentationScope scope;
+    /* A list of metrics that originate from an instrumentation library. */
+    pb_callback_t metrics;
+    /* This schema_url applies to all metrics in the "metrics" field. */
+    pb_callback_t schema_url;
+} opentelemetry_proto_metrics_v1_ScopeMetrics;
+
+/* Gauge represents the type of a scalar metric that always exports the
+ "current value" for every data point. It should be used for an "unknown"
+ aggregation.
+
+ A Gauge does not support different aggregation temporalities. Given the
+ aggregation is unknown, points cannot be combined using the same
+ aggregation, regardless of aggregation temporalities. Therefore,
+ AggregationTemporality is not included. Consequently, this also means
+ "StartTimeUnixNano" is ignored for all data points. */
+typedef struct _opentelemetry_proto_metrics_v1_Gauge {
+    pb_callback_t data_points;
+} opentelemetry_proto_metrics_v1_Gauge;
+
+/* Sum represents the type of a scalar metric that is calculated as a sum of all
+ reported measurements over a time interval. */
+typedef struct _opentelemetry_proto_metrics_v1_Sum {
+    pb_callback_t data_points;
+    /* aggregation_temporality describes if the aggregator reports delta changes
+ since last report time, or cumulative changes since a fixed start time. */
+    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
+    /* If "true", the sum is monotonic. */
+    bool is_monotonic;
+} opentelemetry_proto_metrics_v1_Sum;
+
+/* Histogram represents the type of a metric that is calculated by aggregating
+ as a Histogram of all reported measurements over a time interval. */
+typedef struct _opentelemetry_proto_metrics_v1_Histogram {
+    pb_callback_t data_points;
+    /* aggregation_temporality describes if the aggregator reports delta changes
+ since last report time, or cumulative changes since a fixed start time. */
+    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
+} opentelemetry_proto_metrics_v1_Histogram;
+
+/* ExponentialHistogram represents the type of a metric that is calculated by aggregating
+ as an ExponentialHistogram of all reported double measurements over a time interval. */
+typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogram {
+    pb_callback_t data_points;
+    /* aggregation_temporality describes if the aggregator reports delta changes
+ since last report time, or cumulative changes since a fixed start time. */
+    opentelemetry_proto_metrics_v1_AggregationTemporality aggregation_temporality;
+} opentelemetry_proto_metrics_v1_ExponentialHistogram;
+
+/* Summary metric data are used to convey quantile summaries,
+ a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+ and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+ data type. These data points cannot always be merged in a meaningful way.
+ While they can be useful in some applications, histogram data points are
+ recommended for new applications. */
+typedef struct _opentelemetry_proto_metrics_v1_Summary {
+    pb_callback_t data_points;
+} opentelemetry_proto_metrics_v1_Summary;
+
+/* Defines a Metric which has one or more timeseries. The following is a
+ brief summary of the Metric data model. For more details, see:
+
+ https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
+
+
+ The data model and relation between entities is shown in the
+ diagram below. Here, "DataPoint" is the term used to refer to any
+ one of the specific data point value types, and "points" is the term used
+ to refer to any one of the lists of points contained in the Metric.
+
+ - Metric is composed of metadata and data.
+ - Metadata part contains a name, description, unit.
+ - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
+ - DataPoint contains timestamps, attributes, and one of the possible value type + fields. + + Metric + +------------+ + |name | + |description | + |unit | +------------------------------------+ + |data |---> |Gauge, Sum, Histogram, Summary, ... | + +------------+ +------------------------------------+ + + Data [One of Gauge, Sum, Histogram, Summary, ...] + +-----------+ + |... | // Metadata about the Data. + |points |--+ + +-----------+ | + | +---------------------------+ + | |DataPoint 1 | + v |+------+------+ +------+ | + +-----+ ||label |label |...|label | | + | 1 |-->||value1|value2|...|valueN| | + +-----+ |+------+------+ +------+ | + | . | |+-----+ | + | . | ||value| | + | . | |+-----+ | + | . | +---------------------------+ + | . | . + | . | . + | . | . + | . | +---------------------------+ + | . | |DataPoint M | + +-----+ |+------+------+ +------+ | + | M |-->||label |label |...|label | | + +-----+ ||value1|value2|...|valueN| | + |+------+------+ +------+ | + |+-----+ | + ||value| | + |+-----+ | + +---------------------------+ + + Each distinct type of DataPoint represents the output of a specific + aggregation function, the result of applying the DataPoint's + associated function of to one or more measurements. + + All DataPoint types have three common fields: + - Attributes includes key-value pairs associated with the data point + - TimeUnixNano is required, set to the end time of the aggregation + - StartTimeUnixNano is optional, but strongly encouraged for DataPoints + having an AggregationTemporality field, as discussed below. + + Both TimeUnixNano and StartTimeUnixNano values are expressed as + UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + + # TimeUnixNano + + This field is required, having consistent interpretation across + DataPoint types. TimeUnixNano is the moment corresponding to when + the data point's aggregate value was captured. + + Data points with the 0 value for TimeUnixNano SHOULD be rejected + by consumers. + + # StartTimeUnixNano + + StartTimeUnixNano in general allows detecting when a sequence of + observations is unbroken. This field indicates to consumers the + start time for points with cumulative and delta + AggregationTemporality, and it should be included whenever possible + to support correct rate calculation. Although it may be omitted + when the start time is truly unknown, setting StartTimeUnixNano is + strongly encouraged. */ +typedef struct _opentelemetry_proto_metrics_v1_Metric { + /* name of the metric, including its DNS name prefix. It must be unique. */ + pb_callback_t name; + /* description of the metric, which can be used in documentation. */ + pb_callback_t description; + /* unit in which the metric value is reported. Follows the format + described by http://unitsofmeasure.org/ucum.html. */ + pb_callback_t unit; + pb_callback_t cb_data; + pb_size_t which_data; + union { + opentelemetry_proto_metrics_v1_Gauge gauge; + opentelemetry_proto_metrics_v1_Sum sum; + opentelemetry_proto_metrics_v1_Histogram histogram; + opentelemetry_proto_metrics_v1_ExponentialHistogram exponential_histogram; + opentelemetry_proto_metrics_v1_Summary summary; + } data; +} opentelemetry_proto_metrics_v1_Metric; + +/* NumberDataPoint is a single data point in a timeseries that describes the + time-varying scalar value of a metric. */ +typedef struct _opentelemetry_proto_metrics_v1_NumberDataPoint { + /* StartTimeUnixNano is optional but strongly encouraged, see the + the detailed comments above Metric. 
+ + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t start_time_unix_nano; + /* TimeUnixNano is required, see the detailed comments above Metric. + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t time_unix_nano; + pb_size_t which_value; + union { + double as_double; + int64_t as_int; + } value; + /* (Optional) List of exemplars collected from + measurements that were used to form the data point */ + pb_callback_t exemplars; + /* The set of key/value pairs that uniquely identify the timeseries from + where this point belongs. The list may be empty (may contain 0 elements). + Attribute keys MUST be unique (it is not allowed to have more than one + attribute with the same key). */ + pb_callback_t attributes; + /* Flags that apply to this specific data point. See DataPointFlags + for the available flags and their meaning. */ + uint32_t flags; +} opentelemetry_proto_metrics_v1_NumberDataPoint; + +/* HistogramDataPoint is a single data point in a timeseries that describes the + time-varying values of a Histogram. A Histogram contains summary statistics + for a population of values, it may optionally contain the distribution of + those values across a set of buckets. + + If the histogram contains the distribution of values, then both + "explicit_bounds" and "bucket counts" fields must be defined. + If the histogram does not contain the distribution of values, then both + "explicit_bounds" and "bucket_counts" must be omitted and only "count" and + "sum" are known. */ +typedef struct _opentelemetry_proto_metrics_v1_HistogramDataPoint { + /* StartTimeUnixNano is optional but strongly encouraged, see the + the detailed comments above Metric. + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t start_time_unix_nano; + /* TimeUnixNano is required, see the detailed comments above Metric. + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t time_unix_nano; + /* count is the number of values in the population. Must be non-negative. This + value must be equal to the sum of the "count" fields in buckets if a + histogram is provided. */ + uint64_t count; + /* sum of the values in the population. If count is zero then this field + must be zero. + + Note: Sum should only be filled out when measuring non-negative discrete + events, and is assumed to be monotonic over the values of these events. + Negative events *can* be recorded, but sum should not be filled out when + doing so. This is specifically to enforce compatibility w/ OpenMetrics, + see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */ + bool has_sum; + double sum; + /* bucket_counts is an optional field contains the count values of histogram + for each bucket. + + The sum of the bucket_counts must equal the value in the count field. + + The number of elements in bucket_counts array must be by one greater than + the number of elements in explicit_bounds array. */ + pb_callback_t bucket_counts; + /* explicit_bounds specifies buckets with explicitly defined bounds for values. + + The boundaries for bucket at index i are: + + (-infinity, explicit_bounds[i]] for i == 0 + (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) + (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) + + The values in the explicit_bounds array must be strictly increasing. 
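+
+ Worked example (illustrative, not part of the upstream comment): with
+ explicit_bounds = {0.5, 1.0}, size(explicit_bounds) == 2 and the
+ buckets are
+
+     (-infinity, 0.5]   for i == 0
+     (0.5, 1.0]         for i == 1
+     (1.0, +infinity)   for i == 2
+
+ so bucket_counts must carry size(explicit_bounds) + 1 = 3 entries,
+ one per bucket.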
+ + Histogram buckets are inclusive of their upper boundary, except the last + bucket where the boundary is at infinity. This format is intentionally + compatible with the OpenMetrics histogram definition. */ + pb_callback_t explicit_bounds; + /* (Optional) List of exemplars collected from + measurements that were used to form the data point */ + pb_callback_t exemplars; + /* The set of key/value pairs that uniquely identify the timeseries from + where this point belongs. The list may be empty (may contain 0 elements). + Attribute keys MUST be unique (it is not allowed to have more than one + attribute with the same key). */ + pb_callback_t attributes; + /* Flags that apply to this specific data point. See DataPointFlags + for the available flags and their meaning. */ + uint32_t flags; + /* min is the minimum value over (start_time, end_time]. */ + bool has_min; + double min; + /* max is the maximum value over (start_time, end_time]. */ + bool has_max; + double max; +} opentelemetry_proto_metrics_v1_HistogramDataPoint; + +/* Buckets are a set of bucket counts, encoded in a contiguous array + of counts. */ +typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets { + /* Offset is the bucket index of the first entry in the bucket_counts array. + + Note: This uses a varint encoding as a simple form of compression. */ + int32_t offset; + /* bucket_counts is an array of count values, where bucket_counts[i] carries + the count of the bucket at index (offset+i). bucket_counts[i] is the count + of values greater than base^(offset+i) and less than or equal to + base^(offset+i+1). + + Note: By contrast, the explicit HistogramDataPoint uses + fixed64. This field is expected to have many buckets, + especially zeros, so uint64 has been selected to ensure + varint encoding. */ + pb_callback_t bucket_counts; +} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets; + +/* ExponentialHistogramDataPoint is a single data point in a timeseries that describes the + time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains + summary statistics for a population of values, it may optionally contain the + distribution of those values across a set of buckets. */ +typedef struct _opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint { + /* The set of key/value pairs that uniquely identify the timeseries from + where this point belongs. The list may be empty (may contain 0 elements). + Attribute keys MUST be unique (it is not allowed to have more than one + attribute with the same key). */ + pb_callback_t attributes; + /* StartTimeUnixNano is optional but strongly encouraged, see the + the detailed comments above Metric. + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t start_time_unix_nano; + /* TimeUnixNano is required, see the detailed comments above Metric. + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t time_unix_nano; + /* count is the number of values in the population. Must be + non-negative. This value must be equal to the sum of the "bucket_counts" + values in the positive and negative Buckets plus the "zero_count" field. */ + uint64_t count; + /* sum of the values in the population. If count is zero then this field + must be zero. + + Note: Sum should only be filled out when measuring non-negative discrete + events, and is assumed to be monotonic over the values of these events. 
+ Negative events *can* be recorded, but sum should not be filled out when + doing so. This is specifically to enforce compatibility w/ OpenMetrics, + see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram */ + bool has_sum; + double sum; + /* scale describes the resolution of the histogram. Boundaries are + located at powers of the base, where: + + base = (2^(2^-scale)) + + The histogram bucket identified by `index`, a signed integer, + contains values that are greater than (base^index) and + less than or equal to (base^(index+1)). + + The positive and negative ranges of the histogram are expressed + separately. Negative values are mapped by their absolute value + into the negative range using the same scale as the positive range. + + scale is not restricted by the protocol, as the permissible + values depend on the range of the data. */ + int32_t scale; + /* zero_count is the count of values that are either exactly zero or + within the region considered zero by the instrumentation at the + tolerated degree of precision. This bucket stores values that + cannot be expressed using the standard exponential formula as + well as values that have been rounded to zero. + + Implementations MAY consider the zero bucket to have probability + mass equal to (zero_count / count). */ + uint64_t zero_count; + /* positive carries the positive range of exponential bucket counts. */ + bool has_positive; + opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets positive; + /* negative carries the negative range of exponential bucket counts. */ + bool has_negative; + opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets negative; + /* Flags that apply to this specific data point. See DataPointFlags + for the available flags and their meaning. */ + uint32_t flags; + /* (Optional) List of exemplars collected from + measurements that were used to form the data point */ + pb_callback_t exemplars; + /* min is the minimum value over (start_time, end_time]. */ + bool has_min; + double min; + /* max is the maximum value over (start_time, end_time]. */ + bool has_max; + double max; + /* ZeroThreshold may be optionally set to convey the width of the zero + region. Where the zero region is defined as the closed interval + [-ZeroThreshold, ZeroThreshold]. + When ZeroThreshold is 0, zero count bucket stores values that cannot be + expressed using the standard exponential formula as well as values that + have been rounded to zero. */ + double zero_threshold; +} opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint; + +/* SummaryDataPoint is a single data point in a timeseries that describes the + time-varying values of a Summary metric. */ +typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint { + /* StartTimeUnixNano is optional but strongly encouraged, see the + the detailed comments above Metric. + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t start_time_unix_nano; + /* TimeUnixNano is required, see the detailed comments above Metric. + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t time_unix_nano; + /* count is the number of values in the population. Must be non-negative. */ + uint64_t count; + /* sum of the values in the population. If count is zero then this field + must be zero. 
+ + Note: Sum should only be filled out when measuring non-negative discrete + events, and is assumed to be monotonic over the values of these events. + Negative events *can* be recorded, but sum should not be filled out when + doing so. This is specifically to enforce compatibility w/ OpenMetrics, + see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary */ + double sum; + /* (Optional) list of values at different quantiles of the distribution calculated + from the current snapshot. The quantiles must be strictly increasing. */ + pb_callback_t quantile_values; + /* The set of key/value pairs that uniquely identify the timeseries from + where this point belongs. The list may be empty (may contain 0 elements). + Attribute keys MUST be unique (it is not allowed to have more than one + attribute with the same key). */ + pb_callback_t attributes; + /* Flags that apply to this specific data point. See DataPointFlags + for the available flags and their meaning. */ + uint32_t flags; +} opentelemetry_proto_metrics_v1_SummaryDataPoint; + +/* Represents the value at a given quantile of a distribution. + + To record Min and Max values following conventions are used: + - The 1.0 quantile is equivalent to the maximum value observed. + - The 0.0 quantile is equivalent to the minimum value observed. + + See the following issue for more context: + https://github.com/open-telemetry/opentelemetry-proto/issues/125 */ +typedef struct _opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile { + /* The quantile of a distribution. Must be in the interval + [0.0, 1.0]. */ + double quantile; + /* The value at the given quantile of a distribution. + + Quantile values must NOT be negative. */ + double value; +} opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile; + +/* A representation of an exemplar, which is a sample input measurement. + Exemplars also hold information about the environment when the measurement + was recorded, for example the span and trace ID of the active span when the + exemplar was recorded. */ +typedef struct _opentelemetry_proto_metrics_v1_Exemplar { + /* time_unix_nano is the exact time when this exemplar was recorded + + Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + 1970. */ + uint64_t time_unix_nano; + pb_size_t which_value; + union { + double as_double; + int64_t as_int; + } value; + /* (Optional) Span ID of the exemplar trace. + span_id may be missing if the measurement is not recorded inside a trace + or if the trace is not sampled. */ + pb_callback_t span_id; + /* (Optional) Trace ID of the exemplar trace. + trace_id may be missing if the measurement is not recorded inside a trace + or if the trace is not sampled. */ + pb_callback_t trace_id; + /* The set of key/value pairs that were filtered out by the aggregator, but + recorded alongside the original measurement. 
Only key/value pairs that were + filtered out by the aggregator should be included */ + pb_callback_t filtered_attributes; +} opentelemetry_proto_metrics_v1_Exemplar; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Helper constants for enums */ +#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +#define _opentelemetry_proto_metrics_v1_AggregationTemporality_MAX opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE +#define _opentelemetry_proto_metrics_v1_AggregationTemporality_ARRAYSIZE ((opentelemetry_proto_metrics_v1_AggregationTemporality)(opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE+1)) + +#define _opentelemetry_proto_metrics_v1_DataPointFlags_MIN opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_DO_NOT_USE +#define _opentelemetry_proto_metrics_v1_DataPointFlags_MAX opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK +#define _opentelemetry_proto_metrics_v1_DataPointFlags_ARRAYSIZE ((opentelemetry_proto_metrics_v1_DataPointFlags)(opentelemetry_proto_metrics_v1_DataPointFlags_DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK+1)) + + + + + + +#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality + +#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality + +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_ENUMTYPE opentelemetry_proto_metrics_v1_AggregationTemporality + + + + + + + + + + +/* Initializer values for message structs */ +#define opentelemetry_proto_metrics_v1_MetricsData_init_default {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_default {false, opentelemetry_proto_resource_v1_Resource_init_default, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_default {false, opentelemetry_proto_common_v1_InstrumentationScope_init_default, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_Metric_init_default {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_default}} +#define opentelemetry_proto_metrics_v1_Gauge_init_default {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_Sum_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0} +#define opentelemetry_proto_metrics_v1_Histogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN} +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_default {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN} +#define opentelemetry_proto_metrics_v1_Summary_init_default {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_default {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_default {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0} +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_default {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, false, 
opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default, 0, {{NULL}, NULL}, false, 0, false, 0, 0} +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_default {0, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_default {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_default {0, 0} +#define opentelemetry_proto_metrics_v1_Exemplar_init_default {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_MetricsData_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero {false, opentelemetry_proto_resource_v1_Resource_init_zero, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero {false, opentelemetry_proto_common_v1_InstrumentationScope_init_zero, {{NULL}, NULL}, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_Metric_init_zero {{{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, {opentelemetry_proto_metrics_v1_Gauge_init_zero}} +#define opentelemetry_proto_metrics_v1_Gauge_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_Sum_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN, 0} +#define opentelemetry_proto_metrics_v1_Histogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN} +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_init_zero {{{NULL}, NULL}, _opentelemetry_proto_metrics_v1_AggregationTemporality_MIN} +#define opentelemetry_proto_metrics_v1_Summary_init_zero {{{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_NumberDataPoint_init_zero {0, 0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_init_zero {0, 0, 0, false, 0, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}, 0, false, 0, false, 0} +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_init_zero {{{NULL}, NULL}, 0, 0, 0, false, 0, 0, 0, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, false, opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero, 0, {{NULL}, NULL}, false, 0, false, 0, 0} +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_init_zero {0, {{NULL}, NULL}} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_init_zero {0, 0, 0, 0, {{NULL}, NULL}, {{NULL}, NULL}, 0} +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_init_zero {0, 0} +#define opentelemetry_proto_metrics_v1_Exemplar_init_zero {0, 0, {0}, {{NULL}, NULL}, {{NULL}, NULL}, {{NULL}, NULL}} + +/* Field tags (for use in manual encoding/decoding) */ +#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_tag 1 +#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_tag 1 +#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_tag 2 +#define opentelemetry_proto_metrics_v1_ResourceMetrics_schema_url_tag 3 +#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_tag 1 +#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_tag 2 +#define opentelemetry_proto_metrics_v1_ScopeMetrics_schema_url_tag 3 +#define opentelemetry_proto_metrics_v1_Gauge_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Sum_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Sum_aggregation_temporality_tag 2 +#define 
opentelemetry_proto_metrics_v1_Sum_is_monotonic_tag 3 +#define opentelemetry_proto_metrics_v1_Histogram_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Histogram_aggregation_temporality_tag 2 +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_aggregation_temporality_tag 2 +#define opentelemetry_proto_metrics_v1_Summary_data_points_tag 1 +#define opentelemetry_proto_metrics_v1_Metric_name_tag 1 +#define opentelemetry_proto_metrics_v1_Metric_description_tag 2 +#define opentelemetry_proto_metrics_v1_Metric_unit_tag 3 +#define opentelemetry_proto_metrics_v1_Metric_gauge_tag 5 +#define opentelemetry_proto_metrics_v1_Metric_sum_tag 7 +#define opentelemetry_proto_metrics_v1_Metric_histogram_tag 9 +#define opentelemetry_proto_metrics_v1_Metric_exponential_histogram_tag 10 +#define opentelemetry_proto_metrics_v1_Metric_summary_tag 11 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag 4 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag 6 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_tag 5 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_tag 7 +#define opentelemetry_proto_metrics_v1_NumberDataPoint_flags_tag 8 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_count_tag 4 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_sum_tag 5 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_bucket_counts_tag 6 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_explicit_bounds_tag 7 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_tag 8 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_tag 9 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_flags_tag 10 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_min_tag 11 +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_max_tag 12 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_offset_tag 1 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_bucket_counts_tag 2 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_tag 1 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_count_tag 4 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_sum_tag 5 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_scale_tag 6 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_count_tag 7 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_tag 8 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_tag 9 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_flags_tag 10 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_tag 11 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_min_tag 12 +#define 
opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_max_tag 13 +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_zero_threshold_tag 14 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_start_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_time_unix_nano_tag 3 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_count_tag 4 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_sum_tag 5 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_tag 6 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_tag 7 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_flags_tag 8 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_quantile_tag 1 +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_value_tag 2 +#define opentelemetry_proto_metrics_v1_Exemplar_time_unix_nano_tag 2 +#define opentelemetry_proto_metrics_v1_Exemplar_as_double_tag 3 +#define opentelemetry_proto_metrics_v1_Exemplar_as_int_tag 6 +#define opentelemetry_proto_metrics_v1_Exemplar_span_id_tag 4 +#define opentelemetry_proto_metrics_v1_Exemplar_trace_id_tag 5 +#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_tag 7 + +/* Struct field encoding specification for nanopb */ +#define opentelemetry_proto_metrics_v1_MetricsData_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, resource_metrics, 1) +#define opentelemetry_proto_metrics_v1_MetricsData_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_MetricsData_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_MetricsData_resource_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ResourceMetrics + +#define opentelemetry_proto_metrics_v1_ResourceMetrics_FIELDLIST(X, a) \ +X(a, STATIC, OPTIONAL, MESSAGE, resource, 1) \ +X(a, CALLBACK, REPEATED, MESSAGE, scope_metrics, 2) \ +X(a, CALLBACK, SINGULAR, STRING, schema_url, 3) +#define opentelemetry_proto_metrics_v1_ResourceMetrics_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ResourceMetrics_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ResourceMetrics_resource_MSGTYPE opentelemetry_proto_resource_v1_Resource +#define opentelemetry_proto_metrics_v1_ResourceMetrics_scope_metrics_MSGTYPE opentelemetry_proto_metrics_v1_ScopeMetrics + +#define opentelemetry_proto_metrics_v1_ScopeMetrics_FIELDLIST(X, a) \ +X(a, STATIC, OPTIONAL, MESSAGE, scope, 1) \ +X(a, CALLBACK, REPEATED, MESSAGE, metrics, 2) \ +X(a, CALLBACK, SINGULAR, STRING, schema_url, 3) +#define opentelemetry_proto_metrics_v1_ScopeMetrics_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ScopeMetrics_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ScopeMetrics_scope_MSGTYPE opentelemetry_proto_common_v1_InstrumentationScope +#define opentelemetry_proto_metrics_v1_ScopeMetrics_metrics_MSGTYPE opentelemetry_proto_metrics_v1_Metric + +#define opentelemetry_proto_metrics_v1_Metric_FIELDLIST(X, a) \ +X(a, CALLBACK, SINGULAR, STRING, name, 1) \ +X(a, CALLBACK, SINGULAR, STRING, description, 2) \ +X(a, CALLBACK, SINGULAR, STRING, unit, 3) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,gauge,data.gauge), 5) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,sum,data.sum), 7) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,histogram,data.histogram), 9) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,exponential_histogram,data.exponential_histogram), 10) \ +X(a, STATIC, ONEOF, MSG_W_CB, (data,summary,data.summary), 11) +#define opentelemetry_proto_metrics_v1_Metric_CALLBACK 
pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Metric_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Metric_data_gauge_MSGTYPE opentelemetry_proto_metrics_v1_Gauge +#define opentelemetry_proto_metrics_v1_Metric_data_sum_MSGTYPE opentelemetry_proto_metrics_v1_Sum +#define opentelemetry_proto_metrics_v1_Metric_data_histogram_MSGTYPE opentelemetry_proto_metrics_v1_Histogram +#define opentelemetry_proto_metrics_v1_Metric_data_exponential_histogram_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogram +#define opentelemetry_proto_metrics_v1_Metric_data_summary_MSGTYPE opentelemetry_proto_metrics_v1_Summary + +#define opentelemetry_proto_metrics_v1_Gauge_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) +#define opentelemetry_proto_metrics_v1_Gauge_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Gauge_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Gauge_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint + +#define opentelemetry_proto_metrics_v1_Sum_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \ +X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) \ +X(a, STATIC, SINGULAR, BOOL, is_monotonic, 3) +#define opentelemetry_proto_metrics_v1_Sum_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Sum_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Sum_data_points_MSGTYPE opentelemetry_proto_metrics_v1_NumberDataPoint + +#define opentelemetry_proto_metrics_v1_Histogram_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \ +X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) +#define opentelemetry_proto_metrics_v1_Histogram_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Histogram_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Histogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_HistogramDataPoint + +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) \ +X(a, STATIC, SINGULAR, UENUM, aggregation_temporality, 2) +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ExponentialHistogram_data_points_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint + +#define opentelemetry_proto_metrics_v1_Summary_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, data_points, 1) +#define opentelemetry_proto_metrics_v1_Summary_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Summary_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Summary_data_points_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint + +#define opentelemetry_proto_metrics_v1_NumberDataPoint_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 4) \ +X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 5) \ +X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \ +X(a, STATIC, SINGULAR, UINT32, flags, 8) +#define opentelemetry_proto_metrics_v1_NumberDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_NumberDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_NumberDataPoint_exemplars_MSGTYPE 
opentelemetry_proto_metrics_v1_Exemplar +#define opentelemetry_proto_metrics_v1_NumberDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, SINGULAR, FIXED64, count, 4) \ +X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \ +X(a, CALLBACK, REPEATED, FIXED64, bucket_counts, 6) \ +X(a, CALLBACK, REPEATED, DOUBLE, explicit_bounds, 7) \ +X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 8) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 9) \ +X(a, STATIC, SINGULAR, UINT32, flags, 10) \ +X(a, STATIC, OPTIONAL, DOUBLE, min, 11) \ +X(a, STATIC, OPTIONAL, DOUBLE, max, 12) +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, SINGULAR, FIXED64, count, 4) \ +X(a, STATIC, OPTIONAL, DOUBLE, sum, 5) \ +X(a, STATIC, SINGULAR, SINT32, scale, 6) \ +X(a, STATIC, SINGULAR, FIXED64, zero_count, 7) \ +X(a, STATIC, OPTIONAL, MESSAGE, positive, 8) \ +X(a, STATIC, OPTIONAL, MESSAGE, negative, 9) \ +X(a, STATIC, SINGULAR, UINT32, flags, 10) \ +X(a, CALLBACK, REPEATED, MESSAGE, exemplars, 11) \ +X(a, STATIC, OPTIONAL, DOUBLE, min, 12) \ +X(a, STATIC, OPTIONAL, DOUBLE, max, 13) \ +X(a, STATIC, SINGULAR, DOUBLE, zero_threshold, 14) +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_positive_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_negative_MSGTYPE opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_exemplars_MSGTYPE opentelemetry_proto_metrics_v1_Exemplar + +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, SINT32, offset, 1) \ +X(a, CALLBACK, REPEATED, UINT64, bucket_counts, 2) +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_DEFAULT NULL + +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, start_time_unix_nano, 2) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 3) \ +X(a, STATIC, SINGULAR, FIXED64, count, 4) \ +X(a, STATIC, SINGULAR, DOUBLE, sum, 5) \ +X(a, CALLBACK, REPEATED, MESSAGE, quantile_values, 6) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 7) \ +X(a, STATIC, SINGULAR, UINT32, flags, 8) +#define 
opentelemetry_proto_metrics_v1_SummaryDataPoint_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_quantile_values_MSGTYPE opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, DOUBLE, quantile, 1) \ +X(a, STATIC, SINGULAR, DOUBLE, value, 2) +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_CALLBACK NULL +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_DEFAULT NULL + +#define opentelemetry_proto_metrics_v1_Exemplar_FIELDLIST(X, a) \ +X(a, STATIC, SINGULAR, FIXED64, time_unix_nano, 2) \ +X(a, STATIC, ONEOF, DOUBLE, (value,as_double,value.as_double), 3) \ +X(a, CALLBACK, SINGULAR, BYTES, span_id, 4) \ +X(a, CALLBACK, SINGULAR, BYTES, trace_id, 5) \ +X(a, STATIC, ONEOF, SFIXED64, (value,as_int,value.as_int), 6) \ +X(a, CALLBACK, REPEATED, MESSAGE, filtered_attributes, 7) +#define opentelemetry_proto_metrics_v1_Exemplar_CALLBACK pb_default_field_callback +#define opentelemetry_proto_metrics_v1_Exemplar_DEFAULT NULL +#define opentelemetry_proto_metrics_v1_Exemplar_filtered_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_MetricsData_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ResourceMetrics_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ScopeMetrics_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Metric_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Gauge_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Sum_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Histogram_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogram_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Summary_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_NumberDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_HistogramDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg; +extern const pb_msgdesc_t opentelemetry_proto_metrics_v1_Exemplar_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define opentelemetry_proto_metrics_v1_MetricsData_fields &opentelemetry_proto_metrics_v1_MetricsData_msg +#define opentelemetry_proto_metrics_v1_ResourceMetrics_fields &opentelemetry_proto_metrics_v1_ResourceMetrics_msg +#define opentelemetry_proto_metrics_v1_ScopeMetrics_fields &opentelemetry_proto_metrics_v1_ScopeMetrics_msg +#define opentelemetry_proto_metrics_v1_Metric_fields &opentelemetry_proto_metrics_v1_Metric_msg +#define opentelemetry_proto_metrics_v1_Gauge_fields &opentelemetry_proto_metrics_v1_Gauge_msg +#define opentelemetry_proto_metrics_v1_Sum_fields &opentelemetry_proto_metrics_v1_Sum_msg +#define opentelemetry_proto_metrics_v1_Histogram_fields &opentelemetry_proto_metrics_v1_Histogram_msg +#define 
opentelemetry_proto_metrics_v1_ExponentialHistogram_fields &opentelemetry_proto_metrics_v1_ExponentialHistogram_msg +#define opentelemetry_proto_metrics_v1_Summary_fields &opentelemetry_proto_metrics_v1_Summary_msg +#define opentelemetry_proto_metrics_v1_NumberDataPoint_fields &opentelemetry_proto_metrics_v1_NumberDataPoint_msg +#define opentelemetry_proto_metrics_v1_HistogramDataPoint_fields &opentelemetry_proto_metrics_v1_HistogramDataPoint_msg +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_msg +#define opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_fields &opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_msg +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_msg +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_fields &opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_msg +#define opentelemetry_proto_metrics_v1_Exemplar_fields &opentelemetry_proto_metrics_v1_Exemplar_msg + +/* Maximum encoded size of messages (where known) */ +/* opentelemetry_proto_metrics_v1_MetricsData_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ResourceMetrics_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ScopeMetrics_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Metric_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Gauge_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Sum_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Histogram_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ExponentialHistogram_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Summary_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_NumberDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_HistogramDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_ExponentialHistogramDataPoint_Buckets_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_SummaryDataPoint_size depends on runtime parameters */ +/* opentelemetry_proto_metrics_v1_Exemplar_size depends on runtime parameters */ +#define opentelemetry_proto_metrics_v1_SummaryDataPoint_ValueAtQuantile_size 18 + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/opentelemetry/resource.pb.c b/src/opentelemetry/resource.pb.c new file mode 100644 index 0000000000..39cc42767b --- /dev/null +++ b/src/opentelemetry/resource.pb.c @@ -0,0 +1,12 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-0.4.8-dev */ + +#include "opentelemetry/resource.pb.h" +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. 
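/* Editor's note: a minimal sketch of how the generated descriptors above are
 * consumed with nanopb (illustrative only, not part of this patch). Every
 * field marked CALLBACK in a FIELDLIST becomes a pb_callback_t member in the
 * generated struct and must be driven by a callback, while STATIC fields are
 * plain members; the *_init_zero initializer is generated by nanopb. The
 * function names below are hypothetical. */
#include "nanopb/pb_encode.h"
#include "opentelemetry/metrics.pb.h"

static bool
example_encode_resource_metrics(pb_ostream_t *stream, const pb_field_t *field,
                                void *const *arg) {
        const opentelemetry_proto_metrics_v1_ResourceMetrics *rm = *arg;
        /* A repeated submessage is emitted as tag + submessage, once per
         * element (a single element here). */
        return pb_encode_tag_for_field(stream, field) &&
               pb_encode_submessage(
                   stream,
                   opentelemetry_proto_metrics_v1_ResourceMetrics_fields, rm);
}

static size_t example_serialize_metrics_data(
    uint8_t *buf,
    size_t size,
    opentelemetry_proto_metrics_v1_ResourceMetrics *rm) {
        opentelemetry_proto_metrics_v1_MetricsData md =
            opentelemetry_proto_metrics_v1_MetricsData_init_zero;
        pb_ostream_t stream = pb_ostream_from_buffer(buf, size);

        md.resource_metrics.funcs.encode = example_encode_resource_metrics;
        md.resource_metrics.arg          = rm;

        if (!pb_encode(&stream,
                       opentelemetry_proto_metrics_v1_MetricsData_fields, &md))
                return 0; /* PB_GET_ERROR(&stream) holds the reason */
        return stream.bytes_written;
}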
+#endif + +PB_BIND(opentelemetry_proto_resource_v1_Resource, opentelemetry_proto_resource_v1_Resource, AUTO) + + + diff --git a/src/opentelemetry/resource.pb.h b/src/opentelemetry/resource.pb.h new file mode 100644 index 0000000000..232c0b0244 --- /dev/null +++ b/src/opentelemetry/resource.pb.h @@ -0,0 +1,58 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-0.4.8-dev */ + +#ifndef PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED +#define PB_OPENTELEMETRY_PROTO_RESOURCE_V1_OPENTELEMETRY_PROTO_RESOURCE_V1_RESOURCE_PB_H_INCLUDED +#include +#include "opentelemetry/common.pb.h" + +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. +#endif + +/* Struct definitions */ +/* Resource information. */ +typedef struct _opentelemetry_proto_resource_v1_Resource { + /* Set of attributes that describe the resource. + Attribute keys MUST be unique (it is not allowed to have more than one + attribute with the same key). */ + pb_callback_t attributes; + /* dropped_attributes_count is the number of dropped attributes. If the value is 0, then + no attributes were dropped. */ + uint32_t dropped_attributes_count; +} opentelemetry_proto_resource_v1_Resource; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Initializer values for message structs */ +#define opentelemetry_proto_resource_v1_Resource_init_default {{{NULL}, NULL}, 0} +#define opentelemetry_proto_resource_v1_Resource_init_zero {{{NULL}, NULL}, 0} + +/* Field tags (for use in manual encoding/decoding) */ +#define opentelemetry_proto_resource_v1_Resource_attributes_tag 1 +#define opentelemetry_proto_resource_v1_Resource_dropped_attributes_count_tag 2 + +/* Struct field encoding specification for nanopb */ +#define opentelemetry_proto_resource_v1_Resource_FIELDLIST(X, a) \ +X(a, CALLBACK, REPEATED, MESSAGE, attributes, 1) \ +X(a, STATIC, SINGULAR, UINT32, dropped_attributes_count, 2) +#define opentelemetry_proto_resource_v1_Resource_CALLBACK pb_default_field_callback +#define opentelemetry_proto_resource_v1_Resource_DEFAULT NULL +#define opentelemetry_proto_resource_v1_Resource_attributes_MSGTYPE opentelemetry_proto_common_v1_KeyValue + +extern const pb_msgdesc_t opentelemetry_proto_resource_v1_Resource_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define opentelemetry_proto_resource_v1_Resource_fields &opentelemetry_proto_resource_v1_Resource_msg + +/* Maximum encoded size of messages (where known) */ +/* opentelemetry_proto_resource_v1_Resource_size depends on runtime parameters */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/src/rd.h b/src/rd.h index fd6c307fd0..559f37d45e 100644 --- a/src/rd.h +++ b/src/rd.h @@ -2,6 +2,7 @@ * librd - Rapid Development C library * * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -424,6 +425,10 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) { } while (0) +#define RD_INTERFACE_CALL(i, name, ...) (i->name(i->opaque, __VA_ARGS__)) + +#define RD_CEIL_INTEGER_DIVISION(X, DEN) (((X) + ((DEN)-1)) / (DEN)) + /** * @brief Utility types to hold memory,size tuple. 
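/* Editor's note: a quick illustration of the two helper macros added to rd.h
 * above (the struct and function names here are hypothetical, not part of
 * this patch). RD_CEIL_INTEGER_DIVISION rounds an integer division upwards,
 * and RD_INTERFACE_CALL dispatches through a vtable-style interface struct
 * that carries its own opaque pointer. */
#include <stddef.h>

struct example_writer_if {
        void *opaque;
        int (*write)(void *opaque, const char *buf, size_t size);
};

static int example_interface_usage(struct example_writer_if *w) {
        int blocks = RD_CEIL_INTEGER_DIVISION(10, 3); /* (10+2)/3 = 4 */
        /* Expands to (w->write(w->opaque, "data", (size_t)blocks)) */
        return RD_INTERFACE_CALL(w, write, "data", (size_t)blocks);
}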
*/ diff --git a/src/rdkafka.c b/src/rdkafka.c index 9c2cf3ac89..901f3117db 100644 --- a/src/rdkafka.c +++ b/src/rdkafka.c @@ -46,6 +46,7 @@ #include "rdkafka_topic.h" #include "rdkafka_partition.h" #include "rdkafka_offset.h" +#include "rdkafka_telemetry.h" #include "rdkafka_transport.h" #include "rdkafka_cgrp.h" #include "rdkafka_assignor.h" @@ -395,14 +396,6 @@ void rd_kafka_set_log_level(rd_kafka_t *rk, int level) { -static const char *rd_kafka_type2str(rd_kafka_type_t type) { - static const char *types[] = { - [RD_KAFKA_PRODUCER] = "producer", - [RD_KAFKA_CONSUMER] = "consumer", - }; - return types[type]; -} - #define _ERR_DESC(ENUM, DESC) \ [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = {ENUM, &(#ENUM)[18] /*pfx*/, DESC} @@ -715,6 +708,12 @@ static const struct rd_kafka_err_desc rd_kafka_err_descs[] = { "the consumer group"), _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH, "Broker: The member epoch is stale"), + _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID, + "Broker: Client sent a push telemetry request with an invalid or " + "outdated subscription ID"), + _ERR_DESC(RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE, + "Broker: Client sent a push telemetry request larger than the " + "maximum size the broker will accept"), _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)}; @@ -941,6 +940,8 @@ void rd_kafka_destroy_final(rd_kafka_t *rk) { rd_kafka_wrlock(rk); rd_kafka_wrunlock(rk); + rd_kafka_telemetry_clear(rk, rd_true /*clear_control_flow_fields*/); + /* Terminate SASL provider */ if (rk->rk_conf.sasl.provider) rd_kafka_sasl_term(rk); @@ -1091,7 +1092,13 @@ static void rd_kafka_destroy_app(rd_kafka_t *rk, int flags) { rd_kafka_consumer_close(rk); } - /* With the consumer closed, terminate the rest of librdkafka. */ + /* Await telemetry termination. This method blocks until the last + * PushTelemetry request is sent (if possible). */ + if (!(flags & RD_KAFKA_DESTROY_F_IMMEDIATE)) + rd_kafka_telemetry_await_termination(rk); + + /* With the consumer and telemetry closed, terminate the rest of + * librdkafka. */ rd_atomic32_set(&rk->rk_terminate, flags | RD_KAFKA_DESTROY_F_TERMINATE); @@ -2266,6 +2273,9 @@ rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_interval_init(&rk->rk_suppress.sparse_connect_random); mtx_init(&rk->rk_suppress.sparse_connect_lock, mtx_plain); + mtx_init(&rk->rk_telemetry.lock, mtx_plain); + cnd_init(&rk->rk_telemetry.termination_cnd); + rd_atomic64_init(&rk->rk_ts_last_poll, rk->rk_ts_created); rd_atomic32_init(&rk->rk_flushing, 0); @@ -4078,6 +4088,15 @@ rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk, rd_kafka_purge(rk, rko->rko_u.purge.flags); break; + case RD_KAFKA_OP_SET_TELEMETRY_BROKER: + rd_kafka_set_telemetry_broker_maybe( + rk, rko->rko_u.telemetry_broker.rkb); + break; + + case RD_KAFKA_OP_TERMINATE_TELEMETRY: + rd_kafka_telemetry_schedule_termination(rko->rko_rk); + break; + case RD_KAFKA_OP_METADATA_UPDATE: res = rd_kafka_metadata_update_op(rk, rko->rko_u.metadata.mdi); break; diff --git a/src/rdkafka.h b/src/rdkafka.h index 655516d92d..7d4ae8112b 100644 --- a/src/rdkafka.h +++ b/src/rdkafka.h @@ -646,6 +646,12 @@ typedef enum { RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR = 112, /** The member epoch is stale */ RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH = 113, + /** Client sent a push telemetry request with an invalid or outdated + * subscription ID. */ + RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID = 117, + /** Client sent a push telemetry request larger than the maximum size + * the broker will accept. 
*/ + RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE = 118, RD_KAFKA_RESP_ERR_END_ALL, } rd_kafka_resp_err_t; diff --git a/src/rdkafka_broker.c b/src/rdkafka_broker.c index 685cf5bfc6..1beeece2e8 100644 --- a/src/rdkafka_broker.c +++ b/src/rdkafka_broker.c @@ -58,6 +58,7 @@ #include "rdkafka_partition.h" #include "rdkafka_broker.h" #include "rdkafka_offset.h" +#include "rdkafka_telemetry.h" #include "rdkafka_transport.h" #include "rdkafka_proto.h" #include "rdkafka_buf.h" @@ -234,31 +235,37 @@ static void rd_kafka_broker_features_set(rd_kafka_broker_t *rkb, int features) { rd_kafka_features2str(rkb->rkb_features)); } - /** * @brief Check and return supported ApiVersion for \p ApiKey. * * @returns the highest supported ApiVersion in the specified range (inclusive) * or -1 if the ApiKey is not supported or no matching ApiVersion. * The current feature set is also returned in \p featuresp - * @locks none + * + * @remark Same as rd_kafka_broker_ApiVersion_supported except for locking. + * + * @locks rd_kafka_broker_lock() if do_lock is rd_false + * @locks_acquired rd_kafka_broker_lock() if do_lock is rd_true * @locality any */ -int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, - int16_t ApiKey, - int16_t minver, - int16_t maxver, - int *featuresp) { +int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int16_t minver, + int16_t maxver, + int *featuresp, + rd_bool_t do_lock) { struct rd_kafka_ApiVersion skel = {.ApiKey = ApiKey}; struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp; - rd_kafka_broker_lock(rkb); + if (do_lock) + rd_kafka_broker_lock(rkb); if (featuresp) *featuresp = rkb->rkb_features; if (rkb->rkb_features & RD_KAFKA_FEATURE_UNITTEST) { /* For unit tests let the broker support everything. */ - rd_kafka_broker_unlock(rkb); + if (do_lock) + rd_kafka_broker_unlock(rkb); return maxver; } @@ -267,7 +274,9 @@ int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp); if (retp) ret = *retp; - rd_kafka_broker_unlock(rkb); + + if (do_lock) + rd_kafka_broker_unlock(rkb); if (!retp) return -1; @@ -283,6 +292,24 @@ int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, return maxver; } +/** + * @brief Check and return supported ApiVersion for \p ApiKey. + * + * @returns the highest supported ApiVersion in the specified range (inclusive) + * or -1 if the ApiKey is not supported or no matching ApiVersion. + * The current feature set is also returned in \p featuresp + * @locks none + * @locks_acquired rd_kafka_broker_lock() + * @locality any + */ +int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int16_t minver, + int16_t maxver, + int *featuresp) { + return rd_kafka_broker_ApiVersion_supported0( + rkb, ApiKey, minver, maxver, featuresp, rd_true /* do_lock */); +} /** * @brief Set broker state. @@ -668,6 +695,19 @@ void rd_kafka_broker_fail(rd_kafka_broker_t *rkb, } } + /* If the broker is the preferred telemetry broker, remove it. */ + /* TODO(milind): check if this right. 
*/ + mtx_lock(&rkb->rkb_rk->rk_telemetry.lock); + if (rkb->rkb_rk->rk_telemetry.preferred_broker == rkb) { + rd_kafka_dbg(rkb->rkb_rk, TELEMETRY, "TELBRKLOST", + "Lost telemetry broker %s due to state change", + rkb->rkb_name); + rd_kafka_broker_destroy( + rkb->rkb_rk->rk_telemetry.preferred_broker); + rkb->rkb_rk->rk_telemetry.preferred_broker = NULL; + } + mtx_unlock(&rkb->rkb_rk->rk_telemetry.lock); + /* Query for topic leaders to quickly pick up on failover. */ if (err != RD_KAFKA_RESP_ERR__DESTROY && old_state >= RD_KAFKA_BROKER_STATE_UP) @@ -941,11 +981,22 @@ static void rd_kafka_broker_timeout_scan(rd_kafka_broker_t *rkb, rd_ts_t now) { char rttinfo[32]; /* Print average RTT (if avail) to help diagnose. */ rd_avg_calc(&rkb->rkb_avg_rtt, now); + rd_avg_calc( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + now); if (rkb->rkb_avg_rtt.ra_v.avg) rd_snprintf(rttinfo, sizeof(rttinfo), " (average rtt %.3fms)", (float)(rkb->rkb_avg_rtt.ra_v.avg / 1000.0f)); + else if (rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt + .ra_v.avg) + rd_snprintf( + rttinfo, sizeof(rttinfo), + " (average rtt %.3fms)", + (float)(rkb->rkb_telemetry.rd_avg_current + .rkb_avg_rtt.ra_v.avg / + 1000.0f)); else rttinfo[0] = 0; rd_kafka_broker_fail(rkb, LOG_ERR, @@ -1338,15 +1389,15 @@ void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk) { * @locks rd_kafka_*lock() MUST be held * @locality any */ -static rd_kafka_broker_t * -rd_kafka_broker_random0(const char *func, - int line, - rd_kafka_t *rk, - rd_bool_t is_up, - int state, - int *filtered_cnt, - int (*filter)(rd_kafka_broker_t *rk, void *opaque), - void *opaque) { +rd_kafka_broker_t *rd_kafka_broker_random0(const char *func, + int line, + rd_kafka_t *rk, + rd_bool_t is_up, + int state, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rk, + void *opaque), + void *opaque) { rd_kafka_broker_t *rkb, *good = NULL; int cnt = 0; int fcnt = 0; @@ -1381,11 +1432,6 @@ rd_kafka_broker_random0(const char *func, return good; } -#define rd_kafka_broker_random(rk, state, filter, opaque) \ - rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state, \ - NULL, filter, opaque) - - /** * @returns the broker (with refcnt increased) with the highest weight based * based on the provided weighing function. 
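/* Editor's note: usage sketch for the new unlocked variant introduced above
 * (illustrative, not part of this patch). Callers that already hold the
 * broker lock pass do_lock=rd_false to avoid a recursive lock attempt;
 * rd_kafka_broker_ApiVersion_supported() keeps the old behaviour by passing
 * rd_true. */
static int16_t example_pushtelemetry_version(rd_kafka_broker_t *rkb) {
        int features;
        int16_t ver;

        rd_kafka_broker_lock(rkb);
        /* ... other work that required taking the lock ... */
        ver = rd_kafka_broker_ApiVersion_supported0(
            rkb, RD_KAFKAP_PushTelemetry, 0, 0, &features,
            rd_false /* lock already held */);
        rd_kafka_broker_unlock(rkb);

        return ver; /* -1 if the broker does not support PushTelemetry */
}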
@@ -1825,6 +1871,8 @@ static rd_kafka_buf_t *rd_kafka_waitresp_find(rd_kafka_broker_t *rkb, /* Convert ts_sent to RTT */ rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent; rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent); + rd_avg_add(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + rkbuf->rkbuf_ts_sent); if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 1) @@ -2245,6 +2293,7 @@ static int rd_kafka_broker_connect(rd_kafka_broker_t *rkb) { * @locality Broker thread */ void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) { + int features; rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight; rkb->rkb_reauth_in_progress = rd_false; @@ -2260,6 +2309,18 @@ void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) { NULL, rkb, rd_false /*dont force*/, "connected") == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) rd_kafka_metadata_refresh_brokers(NULL, rkb, "connected"); + + if (rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 0, 0, &features) != + -1 && + rkb->rkb_rk->rk_conf.enable_metrics_push) { + rd_kafka_t *rk = rkb->rkb_rk; + rd_kafka_op_t *rko = + rd_kafka_op_new(RD_KAFKA_OP_SET_TELEMETRY_BROKER); + rd_kafka_broker_keep(rkb); + rko->rko_u.telemetry_broker.rkb = rkb; + rd_kafka_q_enq(rk->rk_ops, rko); + } } @@ -2798,6 +2859,10 @@ int rd_kafka_send(rd_kafka_broker_t *rkb) { /* Add to outbuf_latency averager */ rd_avg_add(&rkb->rkb_avg_outbuf_latency, rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq); + rd_avg_add( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency, + rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq); + if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING && rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1) @@ -4736,6 +4801,14 @@ void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) { rd_avg_destroy(&rkb->rkb_avg_outbuf_latency); rd_avg_destroy(&rkb->rkb_avg_rtt); rd_avg_destroy(&rkb->rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency); mtx_lock(&rkb->rkb_logname_lock); rd_free(rkb->rkb_logname); @@ -4823,13 +4896,32 @@ rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk, rd_kafka_bufq_init(&rkb->rkb_retrybufs); rkb->rkb_ops = rd_kafka_q_new(rk); rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100 * 1000, 2, - rk->rk_conf.stats_interval_ms ? 1 : 0); + rk->rk_conf.stats_interval_ms); rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100 * 1000, - 2, rk->rk_conf.stats_interval_ms ? 1 : 0); + 2, rk->rk_conf.stats_interval_ms); rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500 * 1000, 2, - rk->rk_conf.stats_interval_ms ? 1 : 0); + rk->rk_conf.stats_interval_ms); rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000 * 1000, 2, - rk->rk_conf.stats_interval_ms ? 
1 : 0); + rk->rk_conf.stats_interval_ms); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 5000 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 5000 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 100 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 100 * 1000, 2, + rk->rk_conf.enable_metrics_push); + rd_refcnt_init(&rkb->rkb_refcnt, 0); rd_kafka_broker_keep(rkb); /* rk_broker's refcount */ diff --git a/src/rdkafka_broker.h b/src/rdkafka_broker.h index 41bc3d3eaf..643e51edcd 100644 --- a/src/rdkafka_broker.h +++ b/src/rdkafka_broker.h @@ -193,6 +193,29 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */ rd_atomic64_t ts_recv; /**< Timestamp of last receive */ } rkb_c; + struct { + struct { + int32_t connects; /**< Connection attempts, + * successful or not. */ + } rkb_historic_c; + struct { + rd_avg_t rkb_avg_rtt; /* Current RTT avg */ + rd_avg_t rkb_avg_throttle; /* Current throttle avg */ + rd_avg_t + rkb_avg_outbuf_latency; /**< Current latency + * between buf_enq0 + * and writing to socket + */ + } rd_avg_current; + struct { + rd_avg_t rkb_avg_rtt; /**< Rolled over RTT avg */ + rd_avg_t + rkb_avg_throttle; /**< Rolled over throttle avg */ + rd_avg_t rkb_avg_outbuf_latency; /**< Rolled over outbuf + * latency avg */ + } rd_avg_rollover; + } rkb_telemetry; + int rkb_req_timeouts; /* Current value */ thrd_t rkb_thread; @@ -411,6 +434,13 @@ int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb, int16_t maxver, int *featuresp); +int16_t rd_kafka_broker_ApiVersion_supported0(rd_kafka_broker_t *rkb, + int16_t ApiKey, + int16_t minver, + int16_t maxver, + int *featuresp, + rd_bool_t do_lock); + rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func, int line, rd_kafka_t *rk, @@ -572,6 +602,25 @@ int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk, rd_kafka_enq_once_t *eonce); void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk); +rd_kafka_broker_t *rd_kafka_broker_random0(const char *func, + int line, + rd_kafka_t *rk, + rd_bool_t is_up, + int state, + int *filtered_cnt, + int (*filter)(rd_kafka_broker_t *rk, + void *opaque), + void *opaque); + +#define rd_kafka_broker_random(rk, state, filter, opaque) \ + rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state, \ + NULL, filter, opaque) + +#define rd_kafka_broker_random_up(rk, filter, opaque) \ + rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_true, \ + RD_KAFKA_BROKER_STATE_UP, NULL, filter, \ + opaque) + /** diff --git a/src/rdkafka_conf.c b/src/rdkafka_conf.c index 00013992dc..84262d56e4 100644 --- a/src/rdkafka_conf.c +++ b/src/rdkafka_conf.c @@ -90,7 +90,7 @@ struct rd_kafka_property { const char *str; const char *unsupported; /**< Reason for value not being * supported in this build. */ - } s2i[20]; /* _RK_C_S2I and _RK_C_S2F */ + } s2i[21]; /* _RK_C_S2I and _RK_C_S2F */ const char *unsupported; /**< Reason for propery not being supported * in this build. 
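/* Editor's note: the current/rollover averager pairs added above implement a
 * two-bucket scheme: "current" accumulates samples between telemetry pushes
 * and is snapshotted into "rollover" when metrics are collected. A sketch of
 * that step using rd_avg_rollover() from rdavg.h (the full collection logic
 * lives in the new telemetry source files): */
static void example_broker_telemetry_rollover(rd_kafka_broker_t *rkb) {
        /* rd_avg_rollover(dst, src) copies src's stats into dst and
         * restarts src for the next window. */
        rd_avg_rollover(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt,
                        &rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt);
        rd_avg_rollover(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle,
                        &rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle);
        rd_avg_rollover(
            &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency,
            &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency);
}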
@@ -511,6 +511,7 @@ static const struct rd_kafka_property rd_kafka_properties[] = { {RD_KAFKA_DBG_MOCK, "mock"}, {RD_KAFKA_DBG_ASSIGNOR, "assignor"}, {RD_KAFKA_DBG_CONF, "conf"}, + {RD_KAFKA_DBG_TELEMETRY, "telemetry"}, {RD_KAFKA_DBG_ALL, "all"}}}, {_RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms), "Default timeout for network requests. " @@ -1486,6 +1487,11 @@ static const struct rd_kafka_property rd_kafka_properties[] = { .s2i = {{RD_KAFKA_USE_ALL_DNS_IPS, "use_all_dns_ips"}, {RD_KAFKA_RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY, "resolve_canonical_bootstrap_servers_only"}}}, + {_RK_GLOBAL, "enable.metrics.push", _RK_C_BOOL, _RK(enable_metrics_push), + "Whether to enable pushing of client metrics to the cluster, if the " + "cluster has a client metrics subscription which matches this client", + 0, 1, 1}, + /* diff --git a/src/rdkafka_conf.h b/src/rdkafka_conf.h index ccc95947a2..5c41513043 100644 --- a/src/rdkafka_conf.h +++ b/src/rdkafka_conf.h @@ -2,6 +2,7 @@ * librdkafka - Apache Kafka C library * * Copyright (c) 2014-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -353,6 +354,7 @@ struct rd_kafka_conf_s { /* Client group configuration */ int coord_query_intvl_ms; int max_poll_interval_ms; + int enable_metrics_push; int builtin_features; /* diff --git a/src/rdkafka_int.h b/src/rdkafka_int.h index fde85ab136..ac6bb004a5 100644 --- a/src/rdkafka_int.h +++ b/src/rdkafka_int.h @@ -234,7 +234,50 @@ rd_kafka_txn_state2str(rd_kafka_txn_state_t state) { return names[state]; } +/** + * @enum Telemetry States + */ +typedef enum { + /** Initial state, awaiting telemetry broker to be assigned */ + RD_KAFKA_TELEMETRY_AWAIT_BROKER, + /** Telemetry broker assigned and GetSubscriptions scheduled */ + RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED, + /** GetSubscriptions request sent to the assigned broker */ + RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SENT, + /** PushTelemetry scheduled to send */ + RD_KAFKA_TELEMETRY_PUSH_SCHEDULED, + /** PushTelemetry sent to the assigned broker */ + RD_KAFKA_TELEMETRY_PUSH_SENT, + /** Client is being terminated and last PushTelemetry is scheduled to + * send */ + RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SCHEDULED, + /** Client is being terminated and last PushTelemetry is sent */ + RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT, + /** Telemetry is terminated */ + RD_KAFKA_TELEMETRY_TERMINATED, +} rd_kafka_telemetry_state_t; + +static RD_UNUSED const char * +rd_kafka_telemetry_state2str(rd_kafka_telemetry_state_t state) { + static const char *names[] = {"AwaitBroker", + "GetSubscriptionsScheduled", + "GetSubscriptionsSent", + "PushScheduled", + "PushSent", + "TerminatingPushScheduled", + "TerminatingPushSent", + "Terminated"}; + return names[state]; +} + +static RD_UNUSED const char *rd_kafka_type2str(rd_kafka_type_t type) { + static const char *types[] = { + [RD_KAFKA_PRODUCER] = "producer", + [RD_KAFKA_CONSUMER] = "consumer", + }; + return types[type]; +} /** * Kafka handle, internal representation of the application's rd_kafka_t. @@ -619,6 +662,44 @@ struct rd_kafka_s { rd_kafka_q_t *callback_q; /**< SASL callback queue, if any. */ } rk_sasl; + struct { + /* Fields for the control flow - unless guarded by lock, only + * accessed from main thread. */ + /**< Current state of the telemetry state machine. */ + rd_kafka_telemetry_state_t state; + /**< Preferred broker for sending telemetry (Lock protected). 
*/ + rd_kafka_broker_t *preferred_broker; + /**< Timer for all the requests we schedule. */ + rd_kafka_timer_t request_timer; + /**< Lock for preferred telemetry broker and state. */ + mtx_t lock; + /**< Used to wait for termination (Lock protected). */ + cnd_t termination_cnd; + + /* Fields obtained from broker as a result of GetSubscriptions - + * only accessed from main thread. + */ + rd_kafka_Uuid_t client_instance_id; + int32_t subscription_id; + rd_kafka_compression_t *accepted_compression_types; + size_t accepted_compression_types_cnt; + int32_t push_interval_ms; + int32_t telemetry_max_bytes; + rd_bool_t delta_temporality; + char **requested_metrics; + size_t requested_metrics_cnt; + /* TODO: Use rd_list_t to store the metrics */ + int *matched_metrics; + size_t matched_metrics_cnt; + + struct { + rd_ts_t ts_last; /**< Timestamp of last push */ + rd_ts_t ts_start; /**< Timestamp from when collection + * started */ + } rk_historic_c; + + } rk_telemetry; + /* Test mocks */ struct { rd_kafka_mock_cluster_t *cluster; /**< Mock cluster, created @@ -860,6 +941,7 @@ const char *rd_kafka_purge_flags2str(int flags); #define RD_KAFKA_DBG_MOCK 0x10000 #define RD_KAFKA_DBG_ASSIGNOR 0x20000 #define RD_KAFKA_DBG_CONF 0x40000 +#define RD_KAFKA_DBG_TELEMETRY 0x80000 #define RD_KAFKA_DBG_ALL 0xfffff #define RD_KAFKA_DBG_NONE 0x0 diff --git a/src/rdkafka_mock.c b/src/rdkafka_mock.c index baf7a27386..48e1b03947 100644 --- a/src/rdkafka_mock.c +++ b/src/rdkafka_mock.c @@ -46,7 +46,7 @@ typedef struct rd_kafka_mock_request_s rd_kafka_mock_request_t; static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster); static rd_kafka_mock_request_t * rd_kafka_mock_request_new(int32_t id, int16_t api_key, int64_t timestamp_us); - +static void rd_kafka_mock_request_free(void *element); static rd_kafka_mock_broker_t * rd_kafka_mock_broker_find(const rd_kafka_mock_cluster_t *mcluster, @@ -2237,6 +2237,39 @@ rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster, rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); } +rd_kafka_resp_err_t +rd_kafka_mock_telemetry_set_requested_metrics(rd_kafka_mock_cluster_t *mcluster, + char **metrics, + size_t metrics_cnt) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.hi = metrics_cnt; + rko->rko_u.mock.metrics = NULL; + if (metrics_cnt) { + size_t i; + rko->rko_u.mock.metrics = + rd_calloc(metrics_cnt, sizeof(char *)); + for (i = 0; i < metrics_cnt; i++) + rko->rko_u.mock.metrics[i] = rd_strdup(metrics[i]); + } + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_REQUESTED_METRICS_SET; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + +rd_kafka_resp_err_t +rd_kafka_mock_telemetry_set_push_interval(rd_kafka_mock_cluster_t *mcluster, + int64_t push_interval_ms) { + rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK); + + rko->rko_u.mock.hi = push_interval_ms; + rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TELEMETRY_PUSH_INTERVAL_SET; + + return rd_kafka_op_err_destroy( + rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE)); +} + /** * @brief Apply command to specific broker. 
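/* Editor's summary of the telemetry state machine declared above
 * (terminology from rd_kafka_telemetry_state_t; happy path only):
 *
 *   AWAIT_BROKER
 *     -> GET_SUBSCRIPTIONS_SCHEDULED  preferred broker assigned via
 *                                     RD_KAFKA_OP_SET_TELEMETRY_BROKER
 *     -> GET_SUBSCRIPTIONS_SENT       GetTelemetrySubscriptions in flight
 *     -> PUSH_SCHEDULED               subscription stored, request_timer
 *                                     armed with push_interval_ms
 *     -> PUSH_SENT -> PUSH_SCHEDULED  steady-state push loop
 *
 * On rd_kafka_destroy() the loop is cut short:
 *
 *   ... -> TERMINATING_PUSH_SCHEDULED -> TERMINATING_PUSH_SENT -> TERMINATED
 *
 * with rd_kafka_telemetry_await_termination() waiting on termination_cnd.
 * Per KIP-714, a PushTelemetry error of UNKNOWN_SUBSCRIPTION_ID sends the
 * client back to re-fetch its subscription, and TELEMETRY_TOO_LARGE means
 * the payload exceeded the broker's telemetry_max_bytes. */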
@@ -2346,6 +2379,7 @@ rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster, rd_kafka_mock_topic_t *mtopic; rd_kafka_mock_partition_t *mpart; rd_kafka_mock_broker_t *mrkb; + size_t i; switch (rko->rko_u.mock.cmd) { case RD_KAFKA_MOCK_CMD_TOPIC_CREATE: @@ -2476,6 +2510,22 @@ rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster, .MaxVersion = (int16_t)rko->rko_u.mock.hi; break; + case RD_KAFKA_MOCK_CMD_REQUESTED_METRICS_SET: + mcluster->metrics_cnt = rko->rko_u.mock.hi; + if (!mcluster->metrics_cnt) + break; + + mcluster->metrics = + rd_calloc(mcluster->metrics_cnt, sizeof(char *)); + for (i = 0; i < mcluster->metrics_cnt; i++) + mcluster->metrics[i] = + rd_strdup(rko->rko_u.mock.metrics[i]); + break; + + case RD_KAFKA_MOCK_CMD_TELEMETRY_PUSH_INTERVAL_SET: + mcluster->telemetry_push_interval_ms = rko->rko_u.mock.hi; + break; + default: rd_assert(!*"unknown mock cmd"); break; @@ -2525,6 +2575,7 @@ static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) { rd_kafka_mock_error_stack_t *errstack; thrd_t dummy_rkb_thread; int ret; + size_t i; while ((mtopic = TAILQ_FIRST(&mcluster->topics))) rd_kafka_mock_topic_destroy(mtopic); @@ -2545,6 +2596,8 @@ static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) { rd_kafka_mock_error_stack_destroy(errstack); } + rd_list_destroy(&mcluster->request_list); + /* * Destroy dummy broker */ @@ -2574,6 +2627,13 @@ static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) { rd_socket_close(mcluster->wakeup_fds[0]); rd_socket_close(mcluster->wakeup_fds[1]); + + if (mcluster->metrics) { + for (i = 0; i < mcluster->metrics_cnt; i++) { + rd_free(mcluster->metrics[i]); + } + rd_free(mcluster->metrics); + } } @@ -2647,6 +2707,8 @@ rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk, memcpy(mcluster->api_handlers, rd_kafka_mock_api_handlers, sizeof(mcluster->api_handlers)); + rd_list_init(&mcluster->request_list, 0, rd_kafka_mock_request_free); + /* Use an op queue for controlling the cluster in * a thread-safe manner without locking. */ mcluster->ops = rd_kafka_q_new(rk); @@ -2764,7 +2826,7 @@ static void rd_kafka_mock_request_free(void *element) { void rd_kafka_mock_start_request_tracking(rd_kafka_mock_cluster_t *mcluster) { mtx_lock(&mcluster->lock); mcluster->track_requests = rd_true; - rd_list_init(&mcluster->request_list, 32, rd_kafka_mock_request_free); + rd_list_clear(&mcluster->request_list); mtx_unlock(&mcluster->lock); } diff --git a/src/rdkafka_mock.h b/src/rdkafka_mock.h index 737b768339..e13d7d5e9e 100644 --- a/src/rdkafka_mock.h +++ b/src/rdkafka_mock.h @@ -68,6 +68,7 @@ extern "C" { * - Low-level consumer * - High-level balanced consumer groups with offset commits * - Topic Metadata and auto creation + * - Telemetry (KIP-714) * * @remark This is an experimental public API that is NOT covered by the * librdkafka API or ABI stability guarantees. @@ -447,6 +448,32 @@ rd_kafka_mock_get_requests(rd_kafka_mock_cluster_t *mcluster, size_t *cntp); */ RD_EXPORT void rd_kafka_mock_clear_requests(rd_kafka_mock_cluster_t *mcluster); +/** + * @brief Set the metrics that are expected by the broker for telemetry + * collection. + * + * @param metrics List of prefixes of metric names or NULL. + * @param metrics_cnt Number of elements in \p metrics. + * + * @note If \p metrics is NULL, no metrics will be expected by the broker. If + * the first element of \p metrics is an empty string, the + * broker expects all metrics.
+ */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_mock_telemetry_set_requested_metrics(rd_kafka_mock_cluster_t *mcluster, + char **metrics, + size_t metrics_cnt); + + +/** + * @brief Set the push frequency to be sent to the client for telemetry collection + * when the broker receives GetTelemetrySubscriptions requests. + * + * @param push_interval_ms Push interval in milliseconds. Must be greater than 0. + */ +RD_EXPORT rd_kafka_resp_err_t +rd_kafka_mock_telemetry_set_push_interval(rd_kafka_mock_cluster_t *mcluster, + int64_t push_interval_ms); /**@}*/ #ifdef __cplusplus diff --git a/src/rdkafka_mock_handlers.c b/src/rdkafka_mock_handlers.c index 2f75eb50f2..45626b5381 100644 --- a/src/rdkafka_mock_handlers.c +++ b/src/rdkafka_mock_handlers.c @@ -39,6 +39,7 @@ #include "rdkafka_mock_int.h" #include "rdkafka_transport_int.h" #include "rdkafka_offset.h" +#include "rdkafka_telemetry_decode.h" @@ -2488,6 +2489,215 @@ rd_kafka_mock_handle_OffsetForLeaderEpoch(rd_kafka_mock_connection_t *mconn, return -1; } +/** + * @brief Handle GetTelemetrySubscriptions + */ +static int rd_kafka_mock_handle_GetTelemetrySubscriptions( + rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_resp_err_t err; + size_t i; + rd_kafka_Uuid_t ClientInstanceId; + rd_kafka_Uuid_t zero_uuid = RD_KAFKA_UUID_ZERO; + + /* Request: ClientInstanceId */ + rd_kafka_buf_read_uuid(rkbuf, &ClientInstanceId); + if (ClientInstanceId.least_significant_bits == + zero_uuid.least_significant_bits && + ClientInstanceId.most_significant_bits == + zero_uuid.most_significant_bits) { + /* Some random numbers */ + ClientInstanceId.least_significant_bits = 129; + ClientInstanceId.most_significant_bits = 298; + } + + /* Response: ThrottleTimeMs */ + rd_kafka_buf_write_i32(resp, 0); + + /* Inject error */ + err = rd_kafka_mock_next_request_error(mconn, resp); + + /* Response: ErrorCode */ + rd_kafka_buf_write_i16(resp, err); + + /* Response: ClientInstanceId */ + rd_kafka_buf_write_uuid(resp, &ClientInstanceId); + + /* Response: SubscriptionId */ + // TODO: Calculate subscription ID. + rd_kafka_buf_write_i32(resp, 0); + + /* Response: #AcceptedCompressionTypes */ + rd_kafka_buf_write_arraycnt(resp, 4); + + /* Response: AcceptedCompressionTypes */ + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_ZSTD); + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_LZ4); + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_GZIP); + rd_kafka_buf_write_i8(resp, RD_KAFKA_COMPRESSION_SNAPPY); + + /* Response: PushIntervalMs */ + /* We use the value in telemetry_push_interval_ms, and if not set, the + * default of 5 minutes. */ + rd_kafka_buf_write_i32(resp, mcluster->telemetry_push_interval_ms > 0 + ?
mcluster->telemetry_push_interval_ms + : (5 * 60 * 1000)); + + /* Response: TelemetryMaxBytes */ + rd_kafka_buf_write_i32(resp, 10000); + + /* Response: DeltaTemporality */ + rd_kafka_buf_write_bool(resp, rd_true); + + /* Response: #RequestedMetrics */ + rd_kafka_buf_write_arraycnt(resp, mcluster->metrics_cnt); + + for (i = 0; i < mcluster->metrics_cnt; i++) + rd_kafka_buf_write_str(resp, mcluster->metrics[i], -1); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} + +/** + * @brief Handle PushTelemetry + */ + +static void rd_kafka_mock_handle_PushTelemetry_decoded_NumberDataPoint( + void *opaque, + const opentelemetry_proto_metrics_v1_NumberDataPoint *decoded) { + rd_kafka_broker_t *rkb = opaque; + if (decoded->which_value == + opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag) + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", + "NumberDataPoint int value: %" PRId64 + " time: %" PRIu64, + decoded->value.as_int, decoded->time_unix_nano); + else if (decoded->which_value == + opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag) + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", + "NumberDataPoint double value: %f time: %" PRIu64, + decoded->value.as_double, decoded->time_unix_nano); +} + +static void +rd_kafka_mock_handle_PushTelemetry_decoded_int64(void *opaque, + int64_t int64_value) { + rd_kafka_broker_t *rkb = opaque; + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", "int64 value: %" PRId64, + int64_value); +} + +static void +rd_kafka_mock_handle_PushTelemetry_decoded_string(void *opaque, + const uint8_t *decoded) { + rd_kafka_broker_t *rkb = opaque; + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", "string value: %s", decoded); +} + +static void rd_kafka_mock_handle_PushTelemetry_decoded_type( + void *opaque, + rd_kafka_telemetry_metric_type_t type) { + rd_kafka_broker_t *rkb = opaque; + rd_rkb_log(rkb, LOG_INFO, "MOCKTELEMETRY", "Metric type: %d", type); +} + +static void rd_kafka_mock_handle_PushTelemetry_decode_error(void *opaque, + const char *error, + ...) 
{ + rd_kafka_broker_t *rkb = opaque; + char buf[512]; + va_list ap; + va_start(ap, error); + rd_vsnprintf(buf, sizeof(buf), error, ap); + va_end(ap); + rd_rkb_log(rkb, LOG_ERR, "MOCKTELEMETRY", "%s", buf); + rd_assert(!*"Failure while decoding telemetry data"); +} + +void rd_kafka_mock_handle_PushTelemetry_payload(rd_kafka_broker_t *rkb, + void *payload, + size_t size) { + rd_kafka_telemetry_decode_interface_t decode_interface = { + .decoded_string = rd_kafka_mock_handle_PushTelemetry_decoded_string, + .decoded_NumberDataPoint = + rd_kafka_mock_handle_PushTelemetry_decoded_NumberDataPoint, + .decoded_int64 = rd_kafka_mock_handle_PushTelemetry_decoded_int64, + .decoded_type = rd_kafka_mock_handle_PushTelemetry_decoded_type, + .decode_error = rd_kafka_mock_handle_PushTelemetry_decode_error, + .opaque = rkb, + }; + rd_kafka_telemetry_decode_metrics(&decode_interface, payload, size); +} + +static int rd_kafka_mock_handle_PushTelemetry(rd_kafka_mock_connection_t *mconn, + rd_kafka_buf_t *rkbuf) { + rd_kafka_broker_t *rkb = mconn->broker->cluster->dummy_rkb; + const rd_bool_t log_decode_errors = rd_true; + rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster; + rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf); + rd_kafka_Uuid_t ClientInstanceId; + int32_t SubscriptionId; + rd_bool_t terminating; + rd_kafka_compression_t compression_type = RD_KAFKA_COMPRESSION_NONE; + rd_kafkap_bytes_t metrics; + rd_kafka_resp_err_t err; + + rd_kafka_buf_read_uuid(rkbuf, &ClientInstanceId); + rd_kafka_buf_read_i32(rkbuf, &SubscriptionId); + rd_kafka_buf_read_bool(rkbuf, &terminating); + rd_kafka_buf_read_i8(rkbuf, &compression_type); + rd_kafka_buf_read_kbytes(rkbuf, &metrics); + + void *uncompressed_payload = NULL; + size_t uncompressed_payload_len = 0; + + if (compression_type != RD_KAFKA_COMPRESSION_NONE) { + rd_rkb_log(rkb, LOG_DEBUG, "MOCKTELEMETRY", + "Compression type %s", + rd_kafka_compression2str(compression_type)); + int err_uncompress = + rd_kafka_telemetry_uncompress_metrics_payload( + rkb, compression_type, (void *)metrics.data, + metrics.len, &uncompressed_payload, + &uncompressed_payload_len); + if (err_uncompress) { + rd_kafka_dbg(mcluster->rk, MOCK, "MOCKTELEMETRY", + "Failed to uncompress " "telemetry payload."); + goto err_parse; + } + } else { + uncompressed_payload = (void *)metrics.data; + uncompressed_payload_len = metrics.len; + } + + rd_kafka_mock_handle_PushTelemetry_payload(rkb, uncompressed_payload, + uncompressed_payload_len); + if (compression_type != RD_KAFKA_COMPRESSION_NONE) + rd_free(uncompressed_payload); + + /* ThrottleTime */ + rd_kafka_buf_write_i32(resp, 0); + + /* ErrorCode */ + err = rd_kafka_mock_next_request_error(mconn, resp); + rd_kafka_buf_write_i16(resp, err); + + rd_kafka_mock_connection_send_response(mconn, resp); + + return 0; + +err_parse: + rd_kafka_buf_destroy(resp); + return -1; +} /** * @brief Default request handlers @@ -2519,6 +2729,10 @@ const struct rd_kafka_mock_api_handler [RD_KAFKAP_EndTxn] = {0, 1, -1, rd_kafka_mock_handle_EndTxn}, [RD_KAFKAP_OffsetForLeaderEpoch] = {2, 2, -1, rd_kafka_mock_handle_OffsetForLeaderEpoch}, + [RD_KAFKAP_GetTelemetrySubscriptions] = + {0, 0, 0, rd_kafka_mock_handle_GetTelemetrySubscriptions}, + [RD_KAFKAP_PushTelemetry] = {0, 0, 0, + rd_kafka_mock_handle_PushTelemetry}, }; diff --git a/src/rdkafka_mock_int.h b/src/rdkafka_mock_int.h index d8c776d55a..b1560f4214 100644 --- a/src/rdkafka_mock_int.h +++ b/src/rdkafka_mock_int.h @@ -408,6 +408,15 @@ struct rd_kafka_mock_cluster_s { /**< Request handlers */ struct rd_kafka_mock_api_handler
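/* Editor's note: sketch of how a test can drive the two handlers above
 * (cf. the new tests/0150-telemetry_mock.c; the function name and metric
 * prefix here are illustrative). rd_kafka_mock_push_request_errors() is the
 * pre-existing error injection API. */
static void example_telemetry_mock_setup(rd_kafka_mock_cluster_t *mcluster) {
        char *expected_metrics[] = {"producer.connection.creation."};

        /* GetTelemetrySubscriptions responses will request metrics
         * matching this prefix ... */
        rd_kafka_mock_telemetry_set_requested_metrics(mcluster,
                                                      expected_metrics, 1);
        /* ... and tell the client to push every 500 ms. */
        rd_kafka_mock_telemetry_set_push_interval(mcluster, 500);

        /* Fail the next PushTelemetry with a stale subscription id, which
         * per KIP-714 makes the client re-fetch its subscription. */
        rd_kafka_mock_push_request_errors(
            mcluster, RD_KAFKAP_PushTelemetry, 1,
            RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID);
}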
api_handlers[RD_KAFKAP__NUM]; + /** Requested metrics. */ + char **metrics; + + /** Requested metric count. */ + size_t metrics_cnt; + + /** Telemetry push interval ms. Default is 5 min */ + int64_t telemetry_push_interval_ms; + /**< Appends the requests received to mock cluster if set to true, * defaulted to false for less memory usage. */ rd_bool_t track_requests; @@ -570,8 +579,6 @@ rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster, const rd_kafkap_str_t *ProtocolType); void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster, rd_kafka_mock_connection_t *mconn); - - /** *@} */ diff --git a/src/rdkafka_msgset.h b/src/rdkafka_msgset.h index 9336e0c6b3..ee897b35bd 100644 --- a/src/rdkafka_msgset.h +++ b/src/rdkafka_msgset.h @@ -2,6 +2,7 @@ * librdkafka - Apache Kafka C library * * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -77,6 +78,21 @@ rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf, rd_kafka_aborted_txns_t *aborted_txns, const struct rd_kafka_toppar_ver *tver); +#if WITH_ZLIB +rd_kafka_resp_err_t rd_kafka_gzip_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); +#endif + +#if WITH_SNAPPY +rd_kafka_resp_err_t rd_kafka_snappy_compress_slice(rd_kafka_broker_t *rkb, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp); +#endif + int unittest_aborted_txns(void); #endif /* _RDKAFKA_MSGSET_H_ */ diff --git a/src/rdkafka_msgset_writer.c b/src/rdkafka_msgset_writer.c index fbe16a3240..6f71d827f2 100644 --- a/src/rdkafka_msgset_writer.c +++ b/src/rdkafka_msgset_writer.c @@ -2,6 +2,7 @@ * librdkafka - Apache Kafka C library * * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -945,21 +946,18 @@ static int rd_kafka_msgset_writer_write_msgq(rd_kafka_msgset_writer_t *msetw, #if WITH_ZLIB /** - * @brief Compress messageset using gzip/zlib + * @brief Compress slice using gzip/zlib */ -static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, - struct iovec *ciov) { - - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - rd_kafka_toppar_t *rktp = msetw->msetw_rktp; +rd_kafka_resp_err_t rd_kafka_gzip_compress(rd_kafka_broker_t *rkb, + int comp_level, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { z_stream strm; size_t len = rd_slice_remains(slice); const void *p; size_t rlen; int r; - int comp_level = - msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; memset(&strm, 0, sizeof(strm)); r = deflateInit2(&strm, comp_level, Z_DEFLATED, 15 + 16, 8, @@ -968,23 +966,21 @@ static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw, rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to initialize gzip for " "compressing %" PRIusz - " bytes in " - "topic %.*s [%" PRId32 - "]: %s (%i): " + " bytes: " + "%s (%i): " "sending uncompressed", - len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, strm.msg ? strm.msg : "", r); - return -1; + len, strm.msg ? strm.msg : "", r); + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } /* Calculate maximum compressed size and * allocate an output buffer accordingly, being * prefixed with the Message header. 
*/ - ciov->iov_len = deflateBound(&strm, (uLong)rd_slice_remains(slice)); - ciov->iov_base = rd_malloc(ciov->iov_len); + *outlenp = deflateBound(&strm, (uLong)rd_slice_remains(slice)); + *outbuf = rd_malloc(*outlenp); - strm.next_out = (void *)ciov->iov_base; - strm.avail_out = (uInt)ciov->iov_len; + strm.next_out = *outbuf; + strm.avail_out = (uInt)*outlenp; /* Iterate through each segment and compress it. */ while ((rlen = rd_slice_reader(slice, &p))) { @@ -997,18 +993,14 @@ static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw, rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to gzip-compress " "%" PRIusz " bytes (%" PRIusz - " total) for " - "topic %.*s [%" PRId32 - "]: " + " total): " "%s (%i): " "sending uncompressed", - rlen, len, - RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, - strm.msg ? strm.msg : "", r); + rlen, len, strm.msg ? strm.msg : "", r); deflateEnd(&strm); - rd_free(ciov->iov_base); - return -1; + rd_free(*outbuf); + *outbuf = NULL; + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } rd_kafka_assert(rkb->rkb_rk, strm.avail_in == 0); @@ -1019,51 +1011,62 @@ static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw, rd_rkb_log(rkb, LOG_ERR, "GZIP", "Failed to finish gzip compression " " of %" PRIusz - " bytes for " - "topic %.*s [%" PRId32 - "]: " + " bytes: " "%s (%i): " "sending uncompressed", - len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, strm.msg ? strm.msg : "", r); + len, strm.msg ? strm.msg : "", r); deflateEnd(&strm); - rd_free(ciov->iov_base); - return -1; + rd_free(*outbuf); + *outbuf = NULL; + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } - ciov->iov_len = strm.total_out; + *outlenp = strm.total_out; /* Deinitialize compression */ deflateEnd(&strm); - return 0; + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Compress messageset using gzip/zlib + */ +static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { + rd_kafka_resp_err_t err; + int comp_level = + msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level; + err = rd_kafka_gzip_compress(msetw->msetw_rkb, comp_level, slice, + &ciov->iov_base, &ciov->iov_len); + return (err ? -1 : 0); } #endif #if WITH_SNAPPY /** - * @brief Compress messageset using Snappy + * @brief Compress slice using Snappy */ -static int -rd_kafka_msgset_writer_compress_snappy(rd_kafka_msgset_writer_t *msetw, - rd_slice_t *slice, - struct iovec *ciov) { - rd_kafka_broker_t *rkb = msetw->msetw_rkb; - rd_kafka_toppar_t *rktp = msetw->msetw_rktp; +rd_kafka_resp_err_t rd_kafka_snappy_compress_slice(rd_kafka_broker_t *rkb, + rd_slice_t *slice, + void **outbuf, + size_t *outlenp) { struct iovec *iov; size_t iov_max, iov_cnt; struct snappy_env senv; size_t len = rd_slice_remains(slice); int r; + struct iovec ciov; /* Initialize snappy compression environment */ rd_kafka_snappy_init_env_sg(&senv, 1 /*iov enable*/); /* Calculate maximum compressed size and * allocate an output buffer accordingly. 
*/ - ciov->iov_len = rd_kafka_snappy_max_compressed_length(len); - ciov->iov_base = rd_malloc(ciov->iov_len); + ciov.iov_len = rd_kafka_snappy_max_compressed_length(len); + ciov.iov_base = rd_malloc(ciov.iov_len); iov_max = slice->buf->rbuf_segment_cnt; iov = rd_alloca(sizeof(*iov) * iov_max); @@ -1072,24 +1075,37 @@ rd_kafka_msgset_writer_compress_snappy(rd_kafka_msgset_writer_t *msetw, /* Compress each message */ if ((r = rd_kafka_snappy_compress_iov(&senv, iov, iov_cnt, len, - ciov)) != 0) { + &ciov)) != 0) { rd_rkb_log(rkb, LOG_ERR, "SNAPPY", "Failed to snappy-compress " "%" PRIusz - " bytes for " - "topic %.*s [%" PRId32 - "]: %s: " + " bytes: %s:" "sending uncompressed", - len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), - rktp->rktp_partition, rd_strerror(-r)); - rd_free(ciov->iov_base); - return -1; + len, rd_strerror(-r)); + rd_free(ciov.iov_base); + return RD_KAFKA_RESP_ERR__BAD_COMPRESSION; } /* rd_free snappy environment */ rd_kafka_snappy_free_env(&senv); - return 0; + *outbuf = ciov.iov_base; + *outlenp = ciov.iov_len; + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +/** + * @brief Compress messageset using Snappy + */ +static int +rd_kafka_msgset_writer_compress_snappy(rd_kafka_msgset_writer_t *msetw, + rd_slice_t *slice, + struct iovec *ciov) { + rd_kafka_resp_err_t err; + err = rd_kafka_snappy_compress_slice(msetw->msetw_rkb, slice, + &ciov->iov_base, &ciov->iov_len); + return (err ? -1 : 0); } #endif diff --git a/src/rdkafka_op.c b/src/rdkafka_op.c index 0955f9175c..5c2e3023f1 100644 --- a/src/rdkafka_op.c +++ b/src/rdkafka_op.c @@ -118,6 +118,11 @@ const char *rd_kafka_op2str(rd_kafka_op_type_t type) { "REPLY:DESCRIBEUSERSCRAMCREDENTIALS", [RD_KAFKA_OP_LISTOFFSETS] = "REPLY:LISTOFFSETS", [RD_KAFKA_OP_METADATA_UPDATE] = "REPLY:METADATA_UPDATE", + [RD_KAFKA_OP_SET_TELEMETRY_BROKER] = + "REPLY:RD_KAFKA_OP_SET_TELEMETRY_BROKER", + [RD_KAFKA_OP_TERMINATE_TELEMETRY] = + "REPLY:RD_KAFKA_OP_TERMINATE_TELEMETRY", + }; if (type & RD_KAFKA_OP_REPLY) @@ -278,6 +283,9 @@ rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type) { sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_LISTOFFSETS] = sizeof(rko->rko_u.admin_request), [RD_KAFKA_OP_METADATA_UPDATE] = sizeof(rko->rko_u.metadata), + [RD_KAFKA_OP_SET_TELEMETRY_BROKER] = + sizeof(rko->rko_u.telemetry_broker), + [RD_KAFKA_OP_TERMINATE_TELEMETRY] = _RD_KAFKA_OP_EMPTY, }; size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK]; @@ -453,6 +461,12 @@ void rd_kafka_op_destroy(rd_kafka_op_t *rko) { case RD_KAFKA_OP_MOCK: RD_IF_FREE(rko->rko_u.mock.name, rd_free); RD_IF_FREE(rko->rko_u.mock.str, rd_free); + if (rko->rko_u.mock.metrics) { + int64_t i; + for (i = 0; i < rko->rko_u.mock.hi; i++) + rd_free(rko->rko_u.mock.metrics[i]); + rd_free(rko->rko_u.mock.metrics); + } break; case RD_KAFKA_OP_BROKER_MONITOR: @@ -481,6 +495,11 @@ void rd_kafka_op_destroy(rd_kafka_op_t *rko) { are the in the same memory allocation. 
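/* Editor's note: the two compression helpers exported from the msgset writer
 * above let telemetry code compress a serialized metrics blob with whatever
 * codec the broker accepts (zstd and lz4 are handled elsewhere in the
 * codebase). A hedged sketch, assuming the rdbuf slice API from rdbuf.h; the
 * real codec selection lives in the new telemetry source files: */
static rd_kafka_resp_err_t
example_compress_metrics_payload(rd_kafka_broker_t *rkb,
                                 rd_kafka_compression_t ctype,
                                 const void *payload, size_t size,
                                 void **outbuf, size_t *outlenp) {
        rd_buf_t rbuf;
        rd_slice_t slice;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;

        /* Wrap the contiguous payload in a buffer + read slice. */
        rd_buf_init(&rbuf, 1, size);
        rd_buf_write(&rbuf, payload, size);
        rd_slice_init_full(&slice, &rbuf);

        switch (ctype) {
#if WITH_ZLIB
        case RD_KAFKA_COMPRESSION_GZIP:
                /* -1 = Z_DEFAULT_COMPRESSION */
                err = rd_kafka_gzip_compress(rkb, -1, &slice, outbuf, outlenp);
                break;
#endif
#if WITH_SNAPPY
        case RD_KAFKA_COMPRESSION_SNAPPY:
                err = rd_kafka_snappy_compress_slice(rkb, &slice, outbuf,
                                                     outlenp);
                break;
#endif
        default:
                /* Caller should fall back to an uncompressed payload. */
                break;
        }

        rd_buf_destroy(&rbuf);
        return err;
}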
*/ break; + case RD_KAFKA_OP_SET_TELEMETRY_BROKER: + RD_IF_FREE(rko->rko_u.telemetry_broker.rkb, + rd_kafka_broker_destroy); + break; + default: break; } @@ -839,8 +858,11 @@ void rd_kafka_op_throttle_time(rd_kafka_broker_t *rkb, int throttle_time) { rd_kafka_op_t *rko; - if (unlikely(throttle_time > 0)) + if (unlikely(throttle_time > 0)) { rd_avg_add(&rkb->rkb_avg_throttle, throttle_time); + rd_avg_add(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle, + throttle_time); + } /* We send throttle events when: * - throttle_time > 0 diff --git a/src/rdkafka_op.h b/src/rdkafka_op.h index 135c77e058..d79121e57b 100644 --- a/src/rdkafka_op.h +++ b/src/rdkafka_op.h @@ -181,6 +181,10 @@ typedef enum { u.admin_request >*/ RD_KAFKA_OP_LISTOFFSETS, /**< Admin: ListOffsets u.admin_request >*/ RD_KAFKA_OP_METADATA_UPDATE, /**< Metadata update (KIP 951) **/ + RD_KAFKA_OP_SET_TELEMETRY_BROKER, /**< Set preferred broker for + telemetry. */ + RD_KAFKA_OP_TERMINATE_TELEMETRY, /**< Start termination sequence for + telemetry. */ RD_KAFKA_OP__END } rd_kafka_op_type_t; @@ -573,6 +577,8 @@ struct rd_kafka_op_s { RD_KAFKA_MOCK_CMD_BROKER_SET_RACK, RD_KAFKA_MOCK_CMD_COORD_SET, RD_KAFKA_MOCK_CMD_APIVERSION_SET, + RD_KAFKA_MOCK_CMD_REQUESTED_METRICS_SET, + RD_KAFKA_MOCK_CMD_TELEMETRY_PUSH_INTERVAL_SET, } cmd; rd_kafka_resp_err_t err; /**< Error for: @@ -612,6 +618,8 @@ struct rd_kafka_op_s { * TOPIC_CREATE (repl fact) * PART_SET_FOLLOWER_WMARKS * APIVERSION_SET (maxver) + * REQUESTED_METRICS_SET (metrics_cnt) + * TELEMETRY_PUSH_INTERVAL_SET (interval) */ int32_t leader_id; /**< Leader id, for: * PART_PUSH_LEADER_RESPONSE @@ -619,6 +627,8 @@ struct rd_kafka_op_s { int32_t leader_epoch; /**< Leader epoch, for: * PART_PUSH_LEADER_RESPONSE */ + char **metrics; /**< Metrics requested, for: + * REQUESTED_METRICS_SET */ } mock; struct { @@ -681,6 +691,11 @@ struct rd_kafka_op_s { } leaders; + struct { + /** Preferred broker for telemetry. 
*/ + rd_kafka_broker_t *rkb; + } telemetry_broker; + } rko_u; }; diff --git a/src/rdkafka_proto.h b/src/rdkafka_proto.h index 686e9c7b62..895e338c83 100644 --- a/src/rdkafka_proto.h +++ b/src/rdkafka_proto.h @@ -156,22 +156,25 @@ static RD_UNUSED const char *rd_kafka_ApiKey2str(int16_t ApiKey) { "DescribeUserScramCredentialsRequest", [RD_KAFKAP_AlterUserScramCredentials] = "AlterUserScramCredentialsRequest", - [RD_KAFKAP_Vote] = "VoteRequest", - [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest", - [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest", - [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest", - [RD_KAFKAP_AlterIsr] = "AlterIsrRequest", - [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest", - [RD_KAFKAP_Envelope] = "EnvelopeRequest", - [RD_KAFKAP_FetchSnapshot] = "FetchSnapshot", - [RD_KAFKAP_DescribeCluster] = "DescribeCluster", - [RD_KAFKAP_DescribeProducers] = "DescribeProducers", - [RD_KAFKAP_BrokerHeartbeat] = "BrokerHeartbeat", - [RD_KAFKAP_UnregisterBroker] = "UnregisterBroker", - [RD_KAFKAP_DescribeTransactions] = "DescribeTransactions", - [RD_KAFKAP_ListTransactions] = "ListTransactions", - [RD_KAFKAP_AllocateProducerIds] = "AllocateProducerIds", - [RD_KAFKAP_ConsumerGroupHeartbeat] = "ConsumerGroupHeartbeat", + [RD_KAFKAP_Vote] = "VoteRequest", + [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest", + [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest", + [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest", + [RD_KAFKAP_AlterIsr] = "AlterIsrRequest", + [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest", + [RD_KAFKAP_Envelope] = "EnvelopeRequest", + [RD_KAFKAP_FetchSnapshot] = "FetchSnapshot", + [RD_KAFKAP_DescribeCluster] = "DescribeCluster", + [RD_KAFKAP_DescribeProducers] = "DescribeProducers", + [RD_KAFKAP_BrokerHeartbeat] = "BrokerHeartbeat", + [RD_KAFKAP_UnregisterBroker] = "UnregisterBroker", + [RD_KAFKAP_DescribeTransactions] = "DescribeTransactions", + [RD_KAFKAP_ListTransactions] = "ListTransactions", + [RD_KAFKAP_AllocateProducerIds] = "AllocateProducerIds", + [RD_KAFKAP_ConsumerGroupHeartbeat] = "ConsumerGroupHeartbeat", + [RD_KAFKAP_GetTelemetrySubscriptions] = "GetTelemetrySubscriptions", + [RD_KAFKAP_PushTelemetry] = "PushTelemetry", + }; static RD_TLS char ret[64]; diff --git a/src/rdkafka_protocol.h b/src/rdkafka_protocol.h index 5ca902ddaa..4755494d0b 100644 --- a/src/rdkafka_protocol.h +++ b/src/rdkafka_protocol.h @@ -2,6 +2,7 @@ * librdkafka - The Apache Kafka C/C++ library * * Copyright (c) 2020-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -114,8 +115,13 @@ #define RD_KAFKAP_ListTransactions 66 #define RD_KAFKAP_AllocateProducerIds 67 #define RD_KAFKAP_ConsumerGroupHeartbeat 68 +#define RD_KAFKAP_ConsumerGroupDescribe 69 +#define RD_KAFKAP_ControllerRegistration 70 +#define RD_KAFKAP_GetTelemetrySubscriptions 71 +#define RD_KAFKAP_PushTelemetry 72 +#define RD_KAFKAP_AssignReplicasToDirs 73 -#define RD_KAFKAP__NUM 69 +#define RD_KAFKAP__NUM 74 #endif /* _RDKAFKA_PROTOCOL_H_ */ diff --git a/src/rdkafka_request.c b/src/rdkafka_request.c index 710cc727de..8623be97d3 100644 --- a/src/rdkafka_request.c +++ b/src/rdkafka_request.c @@ -36,6 +36,7 @@ #include "rdkafka_topic.h" #include "rdkafka_partition.h" #include "rdkafka_metadata.h" +#include "rdkafka_telemetry.h" #include "rdkafka_msgset.h" #include "rdkafka_idempotence.h" #include "rdkafka_txnmgr.h" @@ -6202,6 +6203,237 @@ rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb, return RD_KAFKA_RESP_ERR_NO_ERROR; } +rd_kafka_resp_err_t +rd_kafka_GetTelemetrySubscriptionsRequest(rd_kafka_broker_t *rkb, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 0, 0, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "GetTelemetrySubscriptions (KIP-714) not supported " + "by broker, requires broker version >= 3.X.Y"); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + rkbuf = rd_kafka_buf_new_flexver_request( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 1, + 16 /* client_instance_id */, rd_true); + + rd_kafka_buf_write_uuid(rkbuf, + &rkb->rkb_rk->rk_telemetry.client_instance_id); + + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +rd_kafka_resp_err_t +rd_kafka_PushTelemetryRequest(rd_kafka_broker_t *rkb, + rd_kafka_Uuid_t *client_instance_id, + int32_t subscription_id, + rd_bool_t terminating, + const rd_kafka_compression_t compression_type, + const void *metrics, + size_t metrics_size, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque) { + rd_kafka_buf_t *rkbuf; + int16_t ApiVersion = 0; + + ApiVersion = rd_kafka_broker_ApiVersion_supported( + rkb, RD_KAFKAP_PushTelemetry, 0, 0, NULL); + if (ApiVersion == -1) { + rd_snprintf(errstr, errstr_size, + "PushTelemetryRequest (KIP-714) not supported "); + rd_kafka_replyq_destroy(&replyq); + return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE; + } + + size_t len = sizeof(rd_kafka_Uuid_t) + sizeof(int32_t) + + sizeof(rd_bool_t) + sizeof(compression_type) + + metrics_size; + rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_PushTelemetry, + 1, len, rd_true); + + rd_kafka_buf_write_uuid(rkbuf, client_instance_id); + rd_kafka_buf_write_i32(rkbuf, subscription_id); + rd_kafka_buf_write_bool(rkbuf, terminating); + rd_kafka_buf_write_i8(rkbuf, compression_type); + + rd_kafkap_bytes_t *metric_bytes = + rd_kafkap_bytes_new(metrics, metrics_size); + rd_kafka_buf_write_kbytes(rkbuf, metric_bytes); + rd_free(metric_bytes); + + rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES; + + + /* Processing... 
*/ + rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque); + + return RD_KAFKA_RESP_ERR_NO_ERROR; +} + +void rd_kafka_handle_GetTelemetrySubscriptions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + int16_t ErrorCode = 0; + const int log_decode_errors = LOG_ERR; + int32_t arraycnt; + size_t i; + rd_kafka_Uuid_t prev_client_instance_id = + rk->rk_telemetry.client_instance_id; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + /* Termination */ + return; + } + + if (err) + goto err; + + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + if (ErrorCode) { + err = ErrorCode; + goto err; + } + + rd_kafka_buf_read_uuid(rkbuf, &rk->rk_telemetry.client_instance_id); + rd_kafka_buf_read_i32(rkbuf, &rk->rk_telemetry.subscription_id); + + rd_kafka_dbg( + rk, TELEMETRY, "GETSUBSCRIPTIONS", "Parsing: client instance id %s", + rd_kafka_Uuid_base64str(&rk->rk_telemetry.client_instance_id)); + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Parsing: subscription id %" PRId32, + rk->rk_telemetry.subscription_id); + + rd_kafka_buf_read_arraycnt(rkbuf, &arraycnt, -1); + + if (arraycnt) { + rk->rk_telemetry.accepted_compression_types_cnt = arraycnt; + rk->rk_telemetry.accepted_compression_types = + rd_calloc(arraycnt, sizeof(rd_kafka_compression_t)); + + for (i = 0; i < (size_t)arraycnt; i++) + rd_kafka_buf_read_i8( + rkbuf, + &rk->rk_telemetry.accepted_compression_types[i]); + } else { + rk->rk_telemetry.accepted_compression_types_cnt = 1; + rk->rk_telemetry.accepted_compression_types = + rd_calloc(1, sizeof(rd_kafka_compression_t)); + rk->rk_telemetry.accepted_compression_types[0] = + RD_KAFKA_COMPRESSION_NONE; + } + + rd_kafka_buf_read_i32(rkbuf, &rk->rk_telemetry.push_interval_ms); + rd_kafka_buf_read_i32(rkbuf, &rk->rk_telemetry.telemetry_max_bytes); + rd_kafka_buf_read_bool(rkbuf, &rk->rk_telemetry.delta_temporality); + + + if (rk->rk_telemetry.subscription_id && + rd_kafka_Uuid_cmp(prev_client_instance_id, + rk->rk_telemetry.client_instance_id)) { + rd_kafka_log( + rk, LOG_INFO, "GETSUBSCRIPTIONS", + "Telemetry client instance id changed from %s to %s", + rd_kafka_Uuid_base64str(&prev_client_instance_id), + rd_kafka_Uuid_base64str( + &rk->rk_telemetry.client_instance_id)); + } + + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Parsing: push interval %" PRId32, + rk->rk_telemetry.push_interval_ms); + + rd_kafka_buf_read_arraycnt(rkbuf, &arraycnt, 1000); + + if (arraycnt) { + rk->rk_telemetry.requested_metrics_cnt = arraycnt; + rk->rk_telemetry.requested_metrics = + rd_calloc(arraycnt, sizeof(char *)); + + for (i = 0; i < (size_t)arraycnt; i++) { + rd_kafkap_str_t Metric; + rd_kafka_buf_read_str(rkbuf, &Metric); + rk->rk_telemetry.requested_metrics[i] = + rd_strdup(Metric.str); + } + } + + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Parsing: requested metrics count %" PRIusz, + rk->rk_telemetry.requested_metrics_cnt); + + rd_kafka_handle_get_telemetry_subscriptions(rk, err); + return; + +err_parse: + err = rkbuf->rkbuf_err; + goto err; + +err: + /* TODO: Add error handling actions, possibly call + * rd_kafka_handle_get_telemetry_subscriptions with error. 
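+         * As written, every failure (including buffer parse errors)
+         * falls through to rd_kafka_handle_get_telemetry_subscriptions(),
+         * which reschedules the next subscription attempt after the
+         * push interval.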
*/ + rd_kafka_handle_get_telemetry_subscriptions(rk, err); +} + +void rd_kafka_handle_PushTelemetry(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque) { + const int log_decode_errors = LOG_ERR; + int16_t ErrorCode; + + if (err == RD_KAFKA_RESP_ERR__DESTROY) { + /* Termination */ + return; + } + + if (err) + goto err; + + + rd_kafka_buf_read_throttle_time(rkbuf); + + rd_kafka_buf_read_i16(rkbuf, &ErrorCode); + + if (ErrorCode) { + err = ErrorCode; + goto err; + } + rd_kafka_handle_push_telemetry(rk, err); + return; +err_parse: + err = rkbuf->rkbuf_err; + goto err; + +err: + /* TODO: Add error handling actions, possibly call + * rd_kafka_handle_push_telemetry with error. */ + rd_kafka_handle_push_telemetry(rk, err); +} + /** @@ -6395,7 +6627,8 @@ static int unittest_idempotent_producer(void) { "Expected %d messages in retry queue, not %d", retry_msg_cnt, rd_kafka_msgq_len(&rkmq)); - /* Sleep a short while to make sure the retry backoff expires. */ + /* Sleep a short while to make sure the retry backoff expires. + */ rd_usleep(5 * 1000, NULL); /* 5ms */ /* @@ -6453,7 +6686,8 @@ static int unittest_idempotent_producer(void) { r = rd_kafka_outq_len(rk); RD_UT_ASSERT(r == 0, "expected outq to return 0, not %d", r); - /* Verify the expected number of good delivery reports were seen */ + /* Verify the expected number of good delivery reports were seen + */ RD_UT_ASSERT(drcnt == msgcnt, "expected %d DRs, not %d", msgcnt, drcnt); rd_kafka_Produce_result_destroy(result); diff --git a/src/rdkafka_request.h b/src/rdkafka_request.h index 4da4979816..b291a324a3 100644 --- a/src/rdkafka_request.h +++ b/src/rdkafka_request.h @@ -657,4 +657,41 @@ void rd_kafkap_leader_discovery_set_CurrentLeader( int32_t partition_id, rd_kafkap_CurrentLeader_t *CurrentLeader); +rd_kafka_resp_err_t +rd_kafka_GetTelemetrySubscriptionsRequest(rd_kafka_broker_t *rkb, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +rd_kafka_resp_err_t +rd_kafka_PushTelemetryRequest(rd_kafka_broker_t *rkb, + rd_kafka_Uuid_t *client_instance_id, + int32_t subscription_id, + rd_bool_t terminating, + rd_kafka_compression_t compression_type, + const void *metrics, + size_t metrics_size, + char *errstr, + size_t errstr_size, + rd_kafka_replyq_t replyq, + rd_kafka_resp_cb_t *resp_cb, + void *opaque); + +void rd_kafka_handle_GetTelemetrySubscriptions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); + +void rd_kafka_handle_PushTelemetry(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_kafka_resp_err_t err, + rd_kafka_buf_t *rkbuf, + rd_kafka_buf_t *request, + void *opaque); + + #endif /* _RDKAFKA_REQUEST_H_ */ diff --git a/src/rdkafka_telemetry.c b/src/rdkafka_telemetry.c new file mode 100644 index 0000000000..3f2fece177 --- /dev/null +++ b/src/rdkafka_telemetry.c @@ -0,0 +1,697 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "rdkafka_telemetry.h" +#include "rdkafka_msgset.h" +#include "rdkafka_telemetry_encode.h" +#include "rdkafka_request.h" +#include "nanopb/pb.h" +#include "rdkafka_lz4.h" +#include "snappy.h" + +#if WITH_ZSTD +#include "rdkafka_zstd.h" +#endif + + +#define RD_KAFKA_TELEMETRY_PUSH_JITTER 20 + +/** + * @brief Filters broker by availability of GetTelemetrySubscription. + * + * @return 0 if GetTelemetrySubscription is supported, 1 otherwise. + * + * @locks rd_kafka_broker_lock() + */ +static int +rd_kafka_filter_broker_by_GetTelemetrySubscription(rd_kafka_broker_t *rkb, + void *opaque) { + int features; + if (rd_kafka_broker_ApiVersion_supported0( + rkb, RD_KAFKAP_GetTelemetrySubscriptions, 0, 0, &features, + rd_false) != -1) + return 0; + return 1; +} + +/** + * @brief Returns the preferred metrics broker or NULL if unavailable. + * + * @locks none + * @locks_acquired rk_telemetry.lock, rd_kafka_wrlock() + * @locality main thread + */ +static rd_kafka_broker_t *rd_kafka_get_preferred_broker(rd_kafka_t *rk) { + rd_kafka_broker_t *rkb = NULL; + + mtx_lock(&rk->rk_telemetry.lock); + if (rk->rk_telemetry.preferred_broker) + rkb = rk->rk_telemetry.preferred_broker; + else { + /* If there is no preferred broker, that means that our previous + * one failed. Iterate through all available brokers to find + * one. */ + rd_kafka_wrlock(rk); + rkb = rd_kafka_broker_random_up( + rk, rd_kafka_filter_broker_by_GetTelemetrySubscription, + NULL); + rd_kafka_wrunlock(rk); + + /* No need to increase refcnt as broker_random_up does it + * already. */ + rk->rk_telemetry.preferred_broker = rkb; + + rd_kafka_dbg(rk, TELEMETRY, "SETBROKER", + "Lost preferred broker, switching to new " + "preferred broker %" PRId32 "\n", + rkb ? rd_kafka_broker_id(rkb) : -1); + } + mtx_unlock(&rk->rk_telemetry.lock); + + return rkb; +} + +/** + * @brief Cleans up the rk.rk_telemetry struct and frees any allocations. + * + * @param clear_control_flow_fields This determines if the control flow fields + * need to be cleared. This should only be set + * to true if the rk is terminating. 
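+ *
+ * A usage sketch (hypothetical call sites):
+ * @code
+ *     // Between subscriptions: drop per-subscription state only.
+ *     rd_kafka_telemetry_clear(rk, rd_false);
+ *     // While tearing down the whole client instance: also destroy
+ *     // the lock, condition variable and preferred broker reference.
+ *     rd_kafka_telemetry_clear(rk, rd_true);
+ * @endcode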
+ * @locality main thread + * @locks none + * @locks_acquired rk_telemetry.lock + */ +void rd_kafka_telemetry_clear(rd_kafka_t *rk, + rd_bool_t clear_control_flow_fields) { + if (clear_control_flow_fields) { + mtx_lock(&rk->rk_telemetry.lock); + if (rk->rk_telemetry.preferred_broker) { + rd_kafka_broker_destroy( + rk->rk_telemetry.preferred_broker); + rk->rk_telemetry.preferred_broker = NULL; + } + mtx_unlock(&rk->rk_telemetry.lock); + mtx_destroy(&rk->rk_telemetry.lock); + cnd_destroy(&rk->rk_telemetry.termination_cnd); + } + + if (rk->rk_telemetry.accepted_compression_types_cnt) { + rd_free(rk->rk_telemetry.accepted_compression_types); + rk->rk_telemetry.accepted_compression_types = NULL; + rk->rk_telemetry.accepted_compression_types_cnt = 0; + } + + if (rk->rk_telemetry.requested_metrics_cnt) { + size_t i; + for (i = 0; i < rk->rk_telemetry.requested_metrics_cnt; i++) + rd_free(rk->rk_telemetry.requested_metrics[i]); + rd_free(rk->rk_telemetry.requested_metrics); + rd_free(rk->rk_telemetry.matched_metrics); + rk->rk_telemetry.requested_metrics = NULL; + rk->rk_telemetry.requested_metrics_cnt = 0; + rk->rk_telemetry.matched_metrics = NULL; + rk->rk_telemetry.matched_metrics_cnt = 0; + } + rk->rk_telemetry.telemetry_max_bytes = 0; +} + +/** + * @brief Sets the telemetry state to TERMINATED and signals the conditional + * variable + * + * @locality main thread + * @locks none + * @locks_acquired rk_telemetry.lock + */ +static void rd_kafka_telemetry_set_terminated(rd_kafka_t *rk) { + rd_dassert(thrd_is_current(rk->rk_thread)); + + rd_kafka_dbg(rk, TELEMETRY, "TERM", + "Setting state to TERMINATED and signalling"); + + rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_TERMINATED; + rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_telemetry.request_timer, + 1 /*lock*/); + mtx_lock(&rk->rk_telemetry.lock); + cnd_signal(&rk->rk_telemetry.termination_cnd); + mtx_unlock(&rk->rk_telemetry.lock); +} + +static void update_matched_metrics(rd_kafka_t *rk, size_t j) { + rk->rk_telemetry.matched_metrics_cnt++; + rk->rk_telemetry.matched_metrics = + rd_realloc(rk->rk_telemetry.matched_metrics, + sizeof(int) * rk->rk_telemetry.matched_metrics_cnt); + rk->rk_telemetry + .matched_metrics[rk->rk_telemetry.matched_metrics_cnt - 1] = j; +} + +static void rd_kafka_match_requested_metrics(rd_kafka_t *rk) { + size_t metrics_cnt = RD_KAFKA_TELEMETRY_METRIC_CNT(rk), i; + const rd_kafka_telemetry_metric_info_t *info = + RD_KAFKA_TELEMETRY_METRIC_INFO(rk); + + if (rk->rk_telemetry.requested_metrics_cnt == 1 && + !strcmp(rk->rk_telemetry.requested_metrics[0], + RD_KAFKA_TELEMETRY_METRICS_ALL_METRICS_SUBSCRIPTION)) { + size_t j; + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "All metrics subscribed"); + + for (j = 0; j < metrics_cnt; j++) + update_matched_metrics(rk, j); + return; + } + + for (i = 0; i < rk->rk_telemetry.requested_metrics_cnt; i++) { + size_t name_len = strlen(rk->rk_telemetry.requested_metrics[i]), + j; + + for (j = 0; j < metrics_cnt; j++) { + /* Prefix matching the requested metrics with the + * available metrics. 
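+                         * For example, assuming the "org.apache.kafka."
+                         * prefix, a requested metric
+                         * "org.apache.kafka.producer.node.request.latency"
+                         * matches both producer.node.request.latency.avg
+                         * and producer.node.request.latency.max, while the
+                         * full name
+                         * "org.apache.kafka.producer.node.request.latency.avg"
+                         * matches only that one metric.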
*/ + char full_metric_name + [RD_KAFKA_TELEMETRY_METRIC_NAME_MAX_LEN]; + rd_snprintf(full_metric_name, sizeof(full_metric_name), + "%s%s", RD_KAFKA_TELEMETRY_METRIC_PREFIX, + info[j].name); + bool name_matches = + strncmp(full_metric_name, + rk->rk_telemetry.requested_metrics[i], + name_len) == 0; + + if (name_matches) + update_matched_metrics(rk, j); + } + } + + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Matched metrics: %" PRIusz, + rk->rk_telemetry.matched_metrics_cnt); +} + +/** + * @brief Enqueues a GetTelemetrySubscriptionsRequest. + * + * @locks none + * @locks_acquired none + * @locality main thread + */ +static void rd_kafka_send_get_telemetry_subscriptions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb) { + /* Clear out the telemetry struct, free anything that is malloc'd. */ + rd_kafka_telemetry_clear(rk, rd_false /* clear_control_flow_fields */); + + /* Enqueue on broker transmit queue. + * The preferred broker might change in the meanwhile but let it fail. + */ + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Sending GetTelemetryRequest"); + rd_kafka_GetTelemetrySubscriptionsRequest( + rkb, NULL, 0, RD_KAFKA_REPLYQ(rk->rk_ops, 0), + rd_kafka_handle_GetTelemetrySubscriptions, NULL); + + /* Change state */ + rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SENT; +} + +/** + * @brief Compresses the telemetry payload using the available compression + * types. + * + * @param rk The rdkafka instance. + * @param rkb The broker to which the payload is being sent. + * @param payload The payload to be compressed. + * @param compressed_payload The compressed payload. + * @param compressed_payload_size The size of the compressed payload. + * + * @return The compression type used. + * + * @locks none + * @locks_acquired none + * @locality main thread + */ +static rd_kafka_compression_t +rd_kafka_push_telemetry_payload_compress(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_buf_t *payload, + void **compressed_payload, + size_t *compressed_payload_size) { + rd_kafka_compression_t compression_used = RD_KAFKA_COMPRESSION_NONE; + rd_slice_t payload_slice; + size_t i; + rd_kafka_resp_err_t r = RD_KAFKA_RESP_ERR_NO_ERROR; + rd_slice_init_full(&payload_slice, payload); + for (i = 0; i < rk->rk_telemetry.accepted_compression_types_cnt; i++) { + rd_kafka_compression_t compression_type = + rk->rk_telemetry.accepted_compression_types[i]; + switch (compression_type) { +#if WITH_ZLIB + case RD_KAFKA_COMPRESSION_GZIP: + /* TODO: Using 0 for compression level for now. */ + r = rd_kafka_gzip_compress(rkb, 0, &payload_slice, + compressed_payload, + compressed_payload_size); + compression_used = RD_KAFKA_COMPRESSION_GZIP; + break; +#endif + case RD_KAFKA_COMPRESSION_LZ4: + /* TODO: Using 0 for compression level for now. */ + r = rd_kafka_lz4_compress( + rkb, rd_true, 0, &payload_slice, compressed_payload, + compressed_payload_size); + compression_used = RD_KAFKA_COMPRESSION_LZ4; + break; +#if WITH_ZSTD + case RD_KAFKA_COMPRESSION_ZSTD: + /* TODO: Using 0 for compression level for now. 
*/ + r = rd_kafka_zstd_compress(rkb, 0, &payload_slice, + compressed_payload, + compressed_payload_size); + compression_used = RD_KAFKA_COMPRESSION_ZSTD; + break; +#endif +#if WITH_SNAPPY + case RD_KAFKA_COMPRESSION_SNAPPY: + r = rd_kafka_snappy_compress_slice( + rkb, &payload_slice, compressed_payload, + compressed_payload_size); + compression_used = RD_KAFKA_COMPRESSION_SNAPPY; + break; +#endif + default: + break; + } + if (compression_used != RD_KAFKA_COMPRESSION_NONE && + r == RD_KAFKA_RESP_ERR_NO_ERROR) { + rd_kafka_dbg( + rk, TELEMETRY, "PUSH", + "Compressed payload of size %" PRIusz " to %" PRIusz + " using compression type " + "%s", + payload->rbuf_size, *compressed_payload_size, + rd_kafka_compression2str(compression_used)); + return compression_used; + } + } + if (compression_used != RD_KAFKA_COMPRESSION_NONE && + r != RD_KAFKA_RESP_ERR_NO_ERROR) { + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Failed to compress payload with available " + "compression types"); + } + rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Sending uncompressed payload"); + *compressed_payload = payload->rbuf_wpos->seg_p; + *compressed_payload_size = payload->rbuf_wpos->seg_of; + return RD_KAFKA_COMPRESSION_NONE; +} + +/** + * @brief Enqueues a PushTelemetryRequest. + * + * @locks none + * @locks_acquired none + * @locality main thread + */ +static void rd_kafka_send_push_telemetry(rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + rd_bool_t terminating) { + + rd_buf_t *metrics_payload = rd_kafka_telemetry_encode_metrics(rk); + size_t compressed_metrics_payload_size = 0; + void *compressed_metrics_payload = NULL; + rd_kafka_compression_t compression_used = + rd_kafka_push_telemetry_payload_compress( + rk, rkb, metrics_payload, &compressed_metrics_payload, + &compressed_metrics_payload_size); + if (compressed_metrics_payload_size > + (size_t)rk->rk_telemetry.telemetry_max_bytes) { + rd_kafka_log(rk, LOG_WARNING, "TELEMETRY", + "Metrics payload size %" PRIusz + " exceeds telemetry_max_bytes %" PRId32 + "specified by the broker.", + compressed_metrics_payload_size, + rk->rk_telemetry.telemetry_max_bytes); + } + + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Sending PushTelemetryRequest with terminating = %s", + RD_STR_ToF(terminating)); + rd_kafka_PushTelemetryRequest( + rkb, &rk->rk_telemetry.client_instance_id, + rk->rk_telemetry.subscription_id, terminating, compression_used, + compressed_metrics_payload, compressed_metrics_payload_size, NULL, + 0, RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_handle_PushTelemetry, + NULL); + + rd_buf_destroy_free(metrics_payload); + if (compression_used != RD_KAFKA_COMPRESSION_NONE) + rd_free(compressed_metrics_payload); + + rk->rk_telemetry.state = terminating + ? RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT + : RD_KAFKA_TELEMETRY_PUSH_SENT; +} + +/** + * @brief Progress the telemetry state machine. 
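+ *
+ * Transitions implemented below (sketch):
+ *   AWAIT_BROKER -> GET_SUBSCRIPTIONS_SCHEDULED  (broker selected)
+ *   GET_SUBSCRIPTIONS_SCHEDULED -> GET_SUBSCRIPTIONS_SENT
+ *   GET_SUBSCRIPTIONS_SENT -> PUSH_SCHEDULED     (metrics subscribed)
+ *                           | GET_SUBSCRIPTIONS_SCHEDULED (none)
+ *   PUSH_SCHEDULED -> PUSH_SENT -> PUSH_SCHEDULED (push succeeded)
+ *   PUSH_SCHEDULED -> TERMINATING_PUSH_SCHEDULED ->
+ *       TERMINATING_PUSH_SENT -> TERMINATED
+ *   GET_SUBSCRIPTIONS_SCHEDULED | PUSH_SCHEDULED -> AWAIT_BROKER
+ *       (preferred broker lost)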
+ * + * @locks none + * @locks_acquired none + * @locality main thread + */ +static void rd_kafka_telemetry_fsm(rd_kafka_t *rk) { + rd_kafka_broker_t *preferred_broker = NULL; + + rd_dassert(rk); + rd_dassert(thrd_is_current(rk->rk_thread)); + + switch (rk->rk_telemetry.state) { + case RD_KAFKA_TELEMETRY_AWAIT_BROKER: + rd_dassert(!*"Should never be awaiting a broker when the telemetry fsm is called."); + break; + + case RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED: + preferred_broker = rd_kafka_get_preferred_broker(rk); + if (!preferred_broker) { + rk->rk_telemetry.state = + RD_KAFKA_TELEMETRY_AWAIT_BROKER; + break; + } + rd_kafka_send_get_telemetry_subscriptions(rk, preferred_broker); + break; + + case RD_KAFKA_TELEMETRY_PUSH_SCHEDULED: + preferred_broker = rd_kafka_get_preferred_broker(rk); + if (!preferred_broker) { + rk->rk_telemetry.state = + RD_KAFKA_TELEMETRY_AWAIT_BROKER; + break; + } + rd_kafka_send_push_telemetry(rk, preferred_broker, rd_false); + break; + + case RD_KAFKA_TELEMETRY_PUSH_SENT: + case RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SENT: + case RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT: + rd_dassert(!*"Should never be awaiting response when the telemetry fsm is called."); + break; + + case RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SCHEDULED: + preferred_broker = rd_kafka_get_preferred_broker(rk); + if (!preferred_broker) { + /* If there's no preferred broker, set state to + * terminated immediately to stop the app thread from + * waiting indefinitely. */ + rd_kafka_telemetry_set_terminated(rk); + break; + } + rd_kafka_send_push_telemetry(rk, preferred_broker, rd_true); + break; + + case RD_KAFKA_TELEMETRY_TERMINATED: + rd_dassert(!*"Should not be terminated when the telemetry fsm is called."); + break; + + default: + rd_assert(!*"Unknown state"); + } +} + +/** + * @brief Callback for FSM timer. + * + * @locks none + * @locks_acquired none + * @locality main thread + */ +void rd_kafka_telemetry_fsm_tmr_cb(rd_kafka_timers_t *rkts, void *rk) { + rd_kafka_telemetry_fsm(rk); +} + +/** + * @brief Handles parsed GetTelemetrySubscriptions response. + * + * @locks none + * @locks_acquired none + * @locality main thread + */ +void rd_kafka_handle_get_telemetry_subscriptions(rd_kafka_t *rk, + rd_kafka_resp_err_t err) { + rd_ts_t next_scheduled; + double jitter_multiplier = + rd_jitter(100 - RD_KAFKA_TELEMETRY_PUSH_JITTER, + 100 + RD_KAFKA_TELEMETRY_PUSH_JITTER) / + 100.0; + rd_ts_t now_ns = rd_uclock() * 1000; + rd_kafka_broker_t *rkb = NULL; + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "GetTelemetrySubscriptionsRequest failed: %s", + rd_kafka_err2str(err)); + if (rk->rk_telemetry.push_interval_ms == 0) { + rk->rk_telemetry.push_interval_ms = + 30000; /* Default: 5min */ + } + } + + if (err == RD_KAFKA_RESP_ERR_NO_ERROR && + rk->rk_telemetry.requested_metrics_cnt) { + rd_kafka_match_requested_metrics(rk); + + /* Some metrics are requested. Start the timer accordingly */ + next_scheduled = (int)(jitter_multiplier * 1000 * + rk->rk_telemetry.push_interval_ms); + + rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_PUSH_SCHEDULED; + + /* Set for the first push */ + if (rk->rk_telemetry.rk_historic_c.ts_start == 0) { + rk->rk_telemetry.rk_historic_c.ts_start = now_ns; + rk->rk_telemetry.rk_historic_c.ts_last = now_ns; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rkb->rkb_telemetry.rkb_historic_c.connects = + rd_atomic32_get(&rkb->rkb_c.connects); + } + } + + } else { + /* No metrics requested, or we're in error. 
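+                 * Fall back to re-requesting the subscription after
+                 * the push interval; next_scheduled is in
+                 * microseconds, as expected by the timer API.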
*/ + next_scheduled = rk->rk_telemetry.push_interval_ms * 1000; + rk->rk_telemetry.state = + RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED; + } + + rd_kafka_dbg(rk, TELEMETRY, "GETSUBSCRIPTIONS", + "Handled GetTelemetrySubscriptions, scheduling FSM after " + "%" PRId64 + " microseconds, state = %s, err = %s, metrics = %" PRIusz, + next_scheduled, + rd_kafka_telemetry_state2str(rk->rk_telemetry.state), + rd_kafka_err2str(err), + rk->rk_telemetry.requested_metrics_cnt); + + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false, + next_scheduled, rd_kafka_telemetry_fsm_tmr_cb, rk); +} + +void rd_kafka_handle_push_telemetry(rd_kafka_t *rk, rd_kafka_resp_err_t err) { + + /* We only make a best-effort attempt to push telemetry while + * terminating, and don't care about any errors. */ + if (rk->rk_telemetry.state == + RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SENT) { + rd_kafka_telemetry_set_terminated(rk); + return; + } + + /* There's a possiblity that we sent a PushTelemetryRequest, and + * scheduled a termination before getting the response. In that case, we + * will enter this method in the TERMINATED state when/if we get a + * response, and we should not take any action. */ + if (rk->rk_telemetry.state != RD_KAFKA_TELEMETRY_PUSH_SENT) + return; + + if (err == RD_KAFKA_RESP_ERR_NO_ERROR) { + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "PushTelemetryRequest succeeded"); + rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_PUSH_SCHEDULED; + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false, + rk->rk_telemetry.push_interval_ms * 1000, + rd_kafka_telemetry_fsm_tmr_cb, (void *)rk); + } else { /* error */ + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "PushTelemetryRequest failed: %s", + rd_kafka_err2str(err)); + /* Non-retriable errors */ + if (err == RD_KAFKA_RESP_ERR_INVALID_REQUEST || + err == RD_KAFKA_RESP_ERR_INVALID_RECORD) { + rd_kafka_log( + rk, LOG_WARNING, "TELEMETRY", + "PushTelemetryRequest failed with non-retriable " + "error: %s. Stopping telemetry.", + rd_kafka_err2str(err)); + rd_kafka_telemetry_set_terminated(rk); + return; + } + + if (err == RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE) { + rd_kafka_log( + rk, LOG_WARNING, "TELEMETRY", + "PushTelemetryRequest failed because of payload " + "size too large: %s. Continuing telemetry.", + rd_kafka_err2str(err)); + rk->rk_telemetry.state = + RD_KAFKA_TELEMETRY_PUSH_SCHEDULED; + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rk->rk_telemetry.request_timer, + rd_false, rk->rk_telemetry.push_interval_ms * 1000, + rd_kafka_telemetry_fsm_tmr_cb, (void *)rk); + return; + } + + rd_ts_t next_scheduled = + err == RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID + ? 0 + : rk->rk_telemetry.push_interval_ms * 1000; + + rk->rk_telemetry.state = + RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED; + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false, + next_scheduled, rd_kafka_telemetry_fsm_tmr_cb, (void *)rk); + } +} + +/** + * @brief This method starts the termination for telemetry and awaits + * completion. + * + * @locks none + * @locks_acquired rk_telemetry.lock + * @locality app thread (normal case) or the main thread (when terminated + * during creation). + */ +void rd_kafka_telemetry_await_termination(rd_kafka_t *rk) { + rd_kafka_op_t *rko; + + /* In the case where we have a termination during creation, we can't + * send any telemetry. 
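+         * The same applies when metrics have been disabled with
+         * enable.metrics.push=false. Waiting from the main thread
+         * itself would deadlock, since that thread is the one serving
+         * rk_ops, hence both cases below short-circuit straight to
+         * TERMINATED.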
*/ + if (thrd_is_current(rk->rk_thread) || + !rk->rk_conf.enable_metrics_push) { + rd_kafka_telemetry_set_terminated(rk); + return; + } + + rko = rd_kafka_op_new(RD_KAFKA_OP_TERMINATE_TELEMETRY); + rko->rko_rk = rk; + rd_kafka_q_enq(rk->rk_ops, rko); + + /* Await termination sequence completion. */ + rd_kafka_dbg(rk, TELEMETRY, "TERM", + "Awaiting termination of telemetry."); + mtx_lock(&rk->rk_telemetry.lock); + cnd_timedwait_ms(&rk->rk_telemetry.termination_cnd, + &rk->rk_telemetry.lock, + /* TODO(milind): Evaluate this timeout after completion + of all metrics push, is it too much, or too less if + we include serialization? */ + 1000 /* timeout for waiting */); + mtx_unlock(&rk->rk_telemetry.lock); + rd_kafka_dbg(rk, TELEMETRY, "TERM", + "Ended waiting for termination of telemetry."); +} + +/** + * @brief Send a final push request before terminating. + * + * @locks none + * @locks_acquired none + * @locality main thread + * @note This method is on a best-effort basis. + */ +void rd_kafka_telemetry_schedule_termination(rd_kafka_t *rk) { + rd_kafka_dbg( + rk, TELEMETRY, "TERM", + "Starting rd_kafka_telemetry_schedule_termination in state %s", + rd_kafka_telemetry_state2str(rk->rk_telemetry.state)); + + if (rk->rk_telemetry.state != RD_KAFKA_TELEMETRY_PUSH_SCHEDULED) { + rd_kafka_telemetry_set_terminated(rk); + return; + } + + rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_TERMINATING_PUSH_SCHEDULED; + + rd_kafka_dbg(rk, TELEMETRY, "TERM", "Sending final request for Push"); + rd_kafka_timer_override_once( + &rk->rk_timers, &rk->rk_telemetry.request_timer, 0 /* immediate */); +} + + +/** + * @brief Sets telemetry broker if we are in AWAIT_BROKER state. + * + * @locks none + * @locks_acquired rk_telemetry.lock + * @locality main thread + */ +void rd_kafka_set_telemetry_broker_maybe(rd_kafka_t *rk, + rd_kafka_broker_t *rkb) { + rd_dassert(thrd_is_current(rk->rk_thread)); + + /* The op triggering this method is scheduled by brokers without knowing + * if a preferred broker is already set. If it is set, this method is a + * no-op. */ + if (rk->rk_telemetry.state != RD_KAFKA_TELEMETRY_AWAIT_BROKER) + return; + + mtx_lock(&rk->rk_telemetry.lock); + + if (rk->rk_telemetry.preferred_broker) { + mtx_unlock(&rk->rk_telemetry.lock); + return; + } + + rd_kafka_broker_keep(rkb); + rk->rk_telemetry.preferred_broker = rkb; + + mtx_unlock(&rk->rk_telemetry.lock); + + rd_kafka_dbg(rk, TELEMETRY, "SETBROKER", + "Setting telemetry broker to %s\n", rkb->rkb_name); + + rk->rk_telemetry.state = RD_KAFKA_TELEMETRY_GET_SUBSCRIPTIONS_SCHEDULED; + + rd_kafka_timer_start_oneshot( + &rk->rk_timers, &rk->rk_telemetry.request_timer, rd_false, + 0 /* immediate */, rd_kafka_telemetry_fsm_tmr_cb, (void *)rk); +} diff --git a/src/rdkafka_telemetry.h b/src/rdkafka_telemetry.h new file mode 100644 index 0000000000..e7ab0b7eb3 --- /dev/null +++ b/src/rdkafka_telemetry.h @@ -0,0 +1,52 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _RD_KAFKA_TELEMETRY_H_ +#define _RD_KAFKA_TELEMETRY_H_ + +#include "rdkafka_int.h" + +#define RD_KAFKA_TELEMETRY_METRICS_ALL_METRICS_SUBSCRIPTION "*" +#define RD_KAFKA_TELEMETRY_METRIC_NAME_MAX_LEN 128 + +void rd_kafka_handle_get_telemetry_subscriptions(rd_kafka_t *rk, + rd_kafka_resp_err_t err); + +void rd_kafka_handle_push_telemetry(rd_kafka_t *rk, rd_kafka_resp_err_t err); + +void rd_kafka_telemetry_clear(rd_kafka_t *rk, + rd_bool_t clear_control_flow_fields); + +void rd_kafka_telemetry_await_termination(rd_kafka_t *rk); + +void rd_kafka_telemetry_schedule_termination(rd_kafka_t *rk); + +void rd_kafka_set_telemetry_broker_maybe(rd_kafka_t *rk, + rd_kafka_broker_t *rkb); +#endif /* _RD_KAFKA_TELEMETRY_H_ */ diff --git a/src/rdkafka_telemetry_decode.c b/src/rdkafka_telemetry_decode.c new file mode 100644 index 0000000000..e380b964ff --- /dev/null +++ b/src/rdkafka_telemetry_decode.c @@ -0,0 +1,559 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "rdkafka_telemetry_decode.h" +#include "nanopb/pb_decode.h" +#include "rdunittest.h" +#include "rdkafka_lz4.h" +#include "rdgz.h" +#include "rdkafka_zstd.h" +#include "snappy.h" +#include "rdfloat.h" + + +#define _NANOPB_STRING_DECODE_MAX_BUFFER_SIZE 1024 + +struct metric_unit_test_data { + rd_kafka_telemetry_metric_type_t type; + int32_t current_field; + char metric_name[_NANOPB_STRING_DECODE_MAX_BUFFER_SIZE]; + char metric_description[_NANOPB_STRING_DECODE_MAX_BUFFER_SIZE]; + int64_t metric_value_int; + double metric_value_double; + uint64_t metric_time; +}; + +static struct metric_unit_test_data unit_test_data; + +static void clear_unit_test_data(void) { + unit_test_data.type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE; + unit_test_data.current_field = 0; + unit_test_data.metric_name[0] = '\0'; + unit_test_data.metric_description[0] = '\0'; + unit_test_data.metric_value_int = 0; + unit_test_data.metric_time = 0; +} + +static bool +decode_string(pb_istream_t *stream, const pb_field_t *field, void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + uint8_t buffer[_NANOPB_STRING_DECODE_MAX_BUFFER_SIZE] = {0}; + + if (stream->bytes_left > sizeof(buffer) - 1) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "String too long for buffer"); + return false; + } + + if (!pb_read(stream, buffer, stream->bytes_left)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to read string"); + return false; + } + + RD_INTERFACE_CALL(decode_interface, decoded_string, buffer); + return true; +} + +static bool +decode_key_value(pb_istream_t *stream, const pb_field_t *field, void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_common_v1_KeyValue key_value = + opentelemetry_proto_common_v1_KeyValue_init_zero; + key_value.key.funcs.decode = &decode_string; + key_value.key.arg = decode_interface; + key_value.value.value.string_value.funcs.decode = &decode_string; + key_value.value.value.string_value.arg = decode_interface; + if (!pb_decode(stream, opentelemetry_proto_common_v1_KeyValue_fields, + &key_value)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode KeyValue: %s", + PB_GET_ERROR(stream)); + return false; + } + + if (key_value.value.which_value == + opentelemetry_proto_common_v1_AnyValue_int_value_tag) { + RD_INTERFACE_CALL(decode_interface, decoded_int64, + key_value.value.value.int_value); + } + + return true; +} + +static bool decode_number_data_point(pb_istream_t *stream, + const pb_field_t *field, + void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_metrics_v1_NumberDataPoint data_point = + opentelemetry_proto_metrics_v1_NumberDataPoint_init_zero; + data_point.attributes.funcs.decode = &decode_key_value; + data_point.attributes.arg = decode_interface; + if (!pb_decode(stream, + opentelemetry_proto_metrics_v1_NumberDataPoint_fields, + &data_point)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode NumberDataPoint: %s", + PB_GET_ERROR(stream)); + return false; + } + + RD_INTERFACE_CALL(decode_interface, decoded_NumberDataPoint, + &data_point); + return true; +} + +// TODO: add support for other data types +static bool +data_msg_callback(pb_istream_t *stream, const pb_field_t *field, void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + if (field->tag == opentelemetry_proto_metrics_v1_Metric_sum_tag) { + opentelemetry_proto_metrics_v1_Sum *sum = field->pData; + 
sum->data_points.funcs.decode = &decode_number_data_point; + sum->data_points.arg = decode_interface; + if (decode_interface->decoded_type) { + RD_INTERFACE_CALL(decode_interface, decoded_type, + RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM); + } + } else if (field->tag == + opentelemetry_proto_metrics_v1_Metric_gauge_tag) { + opentelemetry_proto_metrics_v1_Gauge *gauge = field->pData; + gauge->data_points.funcs.decode = &decode_number_data_point; + gauge->data_points.arg = decode_interface; + if (decode_interface->decoded_type) { + RD_INTERFACE_CALL(decode_interface, decoded_type, + RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE); + } + } + return true; +} + + +static bool +decode_metric(pb_istream_t *stream, const pb_field_t *field, void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_metrics_v1_Metric metric = + opentelemetry_proto_metrics_v1_Metric_init_zero; + metric.name.funcs.decode = &decode_string; + metric.name.arg = decode_interface; + metric.description.funcs.decode = &decode_string; + metric.description.arg = decode_interface; + metric.cb_data.funcs.decode = &data_msg_callback; + metric.cb_data.arg = decode_interface; + + if (!pb_decode(stream, opentelemetry_proto_metrics_v1_Metric_fields, + &metric)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode Metric: %s", + PB_GET_ERROR(stream)); + return false; + } + + return true; +} + +static bool decode_scope_metrics(pb_istream_t *stream, + const pb_field_t *field, + void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_metrics_v1_ScopeMetrics scope_metrics = + opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero; + scope_metrics.scope.name.funcs.decode = &decode_string; + scope_metrics.scope.name.arg = decode_interface; + scope_metrics.scope.version.funcs.decode = &decode_string; + scope_metrics.scope.version.arg = decode_interface; + scope_metrics.metrics.funcs.decode = &decode_metric; + scope_metrics.metrics.arg = decode_interface; + + if (!pb_decode(stream, + opentelemetry_proto_metrics_v1_ScopeMetrics_fields, + &scope_metrics)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode ScopeMetrics: %s", + PB_GET_ERROR(stream)); + return false; + } + return true; +} + +static bool decode_resource_metrics(pb_istream_t *stream, + const pb_field_t *field, + void **arg) { + rd_kafka_telemetry_decode_interface_t *decode_interface = *arg; + opentelemetry_proto_metrics_v1_ResourceMetrics resource_metrics = + opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero; + resource_metrics.resource.attributes.funcs.decode = &decode_key_value; + resource_metrics.resource.attributes.arg = decode_interface; + resource_metrics.scope_metrics.funcs.decode = &decode_scope_metrics; + resource_metrics.scope_metrics.arg = decode_interface; + if (!pb_decode(stream, + opentelemetry_proto_metrics_v1_ResourceMetrics_fields, + &resource_metrics)) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode ResourceMetrics: %s", + PB_GET_ERROR(stream)); + return false; + } + return true; +} + +#if WITH_SNAPPY + +static int rd_kafka_snappy_decompress(rd_kafka_broker_t *rkb, + const char *compressed, + size_t compressed_size, + void **outbuf, + size_t *outbuf_len) { + struct iovec iov = {.iov_base = NULL, .iov_len = 0}; + + const char *inbuf = compressed; + size_t inlen = compressed_size; + int r; + static const unsigned char snappy_java_magic[] = {0x82, 'S', 'N', 'A', + 'P', 'P', 'Y', 0}; + static const size_t snappy_java_hdrlen 
= 8 + 4 + 4; + + /* snappy-java adds its own header (SnappyCodec) + * which is not compatible with the official Snappy + * implementation. + * 8: magic, 4: version, 4: compatible + * followed by any number of chunks: + * 4: length + * ...: snappy-compressed data. */ + if (likely(inlen > snappy_java_hdrlen + 4 && + !memcmp(inbuf, snappy_java_magic, 8))) { + /* snappy-java framing */ + char errstr[128]; + + inbuf = inbuf + snappy_java_hdrlen; + inlen -= snappy_java_hdrlen; + iov.iov_base = rd_kafka_snappy_java_uncompress( + inbuf, inlen, &iov.iov_len, errstr, sizeof(errstr)); + + if (unlikely(!iov.iov_base)) { + rd_rkb_dbg( + rkb, MSG, "SNAPPY", + "Snappy decompression for message failed: %s: " + "ignoring message", + errstr); + return -1; // Indicates decompression error + } + + + } else { + /* No framing */ + + /* Acquire uncompressed length */ + if (unlikely(!rd_kafka_snappy_uncompressed_length( + inbuf, inlen, &iov.iov_len))) { + rd_rkb_dbg( + rkb, MSG, "SNAPPY", + "Failed to get length of Snappy compressed payload " + "for message (%" PRIusz + " bytes): " + "ignoring message", + inlen); + return -1; // Indicates decompression error + } + + /* Allocate output buffer for uncompressed data */ + iov.iov_base = rd_malloc(iov.iov_len); + if (unlikely(!iov.iov_base)) { + rd_rkb_dbg(rkb, MSG, "SNAPPY", + "Failed to allocate Snappy decompress " + "buffer of size %" PRIusz + " for message (%" PRIusz + " bytes): %s: " + "ignoring message", + *outbuf_len, inlen, rd_strerror(errno)); + return -1; // Indicates memory allocation error + } + + /* Uncompress to outbuf */ + if (unlikely((r = rd_kafka_snappy_uncompress(inbuf, inlen, + iov.iov_base)))) { + rd_rkb_dbg( + rkb, MSG, "SNAPPY", + "Failed to decompress Snappy payload for message " + "(%" PRIusz + " bytes): %s: " + "ignoring message", + inlen, rd_strerror(errno)); + rd_free(iov.iov_base); + return -1; // Indicates decompression error + } + } + *outbuf = iov.iov_base; + *outbuf_len = iov.iov_len; + return 0; +} +#endif + +/* + * Decompress a payload using the specified compression type. Allocates memory + * for uncompressed payload. + * @returns 0 on success, -1 on failure. Allocated memory in + * uncompressed_payload and its size in uncompressed_payload_size. 
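+ *
+ * Usage sketch (given an initialized
+ * rd_kafka_telemetry_decode_interface_t `di`; error handling elided):
+ * @code
+ *     void *buf;
+ *     size_t size;
+ *     if (!rd_kafka_telemetry_uncompress_metrics_payload(
+ *             rkb, compression_type, payload, payload_size, &buf,
+ *             &size)) {
+ *             rd_kafka_telemetry_decode_metrics(&di, buf, size);
+ *             rd_free(buf);
+ *     }
+ * @endcode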
+ */ +int rd_kafka_telemetry_uncompress_metrics_payload( + rd_kafka_broker_t *rkb, + rd_kafka_compression_t compression_type, + void *compressed_payload, + size_t compressed_payload_size, + void **uncompressed_payload, + size_t *uncompressed_payload_size) { + int r = -1; + switch (compression_type) { +#if WITH_ZLIB + case RD_KAFKA_COMPRESSION_GZIP: + *uncompressed_payload = rd_gz_decompress( + compressed_payload, (int)compressed_payload_size, + (uint64_t *)uncompressed_payload_size); + if (*uncompressed_payload == NULL) + r = -1; + else + r = 0; + break; +#endif + case RD_KAFKA_COMPRESSION_LZ4: + r = rd_kafka_lz4_decompress( + rkb, 0, 0, compressed_payload, compressed_payload_size, + uncompressed_payload, uncompressed_payload_size); + break; +#if WITH_ZSTD + case RD_KAFKA_COMPRESSION_ZSTD: + r = rd_kafka_zstd_decompress( + rkb, compressed_payload, compressed_payload_size, + uncompressed_payload, uncompressed_payload_size); + break; +#endif +#if WITH_SNAPPY + case RD_KAFKA_COMPRESSION_SNAPPY: + r = rd_kafka_snappy_decompress( + rkb, compressed_payload, compressed_payload_size, + uncompressed_payload, uncompressed_payload_size); + break; +#endif + default: + rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "TELEMETRY", + "Unknown compression type: %d", compression_type); + break; + } + return r; +} + +/** + * Decode a metric from a buffer encoded with + * opentelemetry_proto_metrics_v1_MetricsData datatype. Used for testing and + * debugging. + * + * @param decode_interface The decode_interface to pass as arg when decoding the + * buffer. + * @param buffer The buffer to decode. + * @param size The size of the buffer. + */ +int rd_kafka_telemetry_decode_metrics( + rd_kafka_telemetry_decode_interface_t *decode_interface, + void *buffer, + size_t size) { + opentelemetry_proto_metrics_v1_MetricsData metricsData = + opentelemetry_proto_metrics_v1_MetricsData_init_zero; + + pb_istream_t stream = pb_istream_from_buffer(buffer, size); + metricsData.resource_metrics.arg = decode_interface; + metricsData.resource_metrics.funcs.decode = &decode_resource_metrics; + + bool status = pb_decode( + &stream, opentelemetry_proto_metrics_v1_MetricsData_fields, + &metricsData); + if (!status) { + RD_INTERFACE_CALL(decode_interface, decode_error, + "Failed to decode MetricsData: %s", + PB_GET_ERROR(&stream)); + } + return status; +} + +static void unit_test_telemetry_decoded_string(void *opaque, + const uint8_t *decoded) { + switch (unit_test_data.current_field) { + case 2: + rd_snprintf(unit_test_data.metric_name, + sizeof(unit_test_data.metric_name), "%s", decoded); + break; + case 3: + rd_snprintf(unit_test_data.metric_description, + sizeof(unit_test_data.metric_description), "%s", + decoded); + break; + default: + break; + } + unit_test_data.current_field++; +} + +static void unit_test_telemetry_decoded_NumberDataPoint( + void *opaque, + const opentelemetry_proto_metrics_v1_NumberDataPoint *decoded) { + unit_test_data.metric_value_int = decoded->value.as_int; + unit_test_data.metric_value_double = decoded->value.as_double; + unit_test_data.metric_time = decoded->time_unix_nano; + unit_test_data.current_field++; +} + +static void +unit_test_telemetry_decoded_type(void *opaque, + rd_kafka_telemetry_metric_type_t type) { + unit_test_data.type = type; + unit_test_data.current_field++; +} + +static void +unit_test_telemetry_decode_error(void *opaque, const char *error, ...) 
{ + char buffer[1024]; + va_list ap; + va_start(ap, error); + rd_vsnprintf(buffer, sizeof(buffer), error, ap); + va_end(ap); + RD_UT_SAY("%s", buffer); + rd_assert(!*"Failure while decoding telemetry data"); +} + +bool unit_test_telemetry(rd_kafka_telemetry_producer_metric_name_t metric_name, + const char *expected_name, + const char *expected_description, + rd_kafka_telemetry_metric_type_t expected_type, + rd_bool_t is_double) { + rd_kafka_t *rk = rd_calloc(1, sizeof(*rk)); + rwlock_init(&rk->rk_lock); + rk->rk_type = RD_KAFKA_PRODUCER; + rk->rk_telemetry.matched_metrics_cnt = 1; + rk->rk_telemetry.matched_metrics = + rd_malloc(sizeof(rd_kafka_telemetry_producer_metric_name_t) * + rk->rk_telemetry.matched_metrics_cnt); + rk->rk_telemetry.matched_metrics[0] = metric_name; + rk->rk_telemetry.rk_historic_c.ts_start = + (rd_uclock() - 1000 * 1000) * 1000; + rk->rk_telemetry.rk_historic_c.ts_last = + (rd_uclock() - 1000 * 1000) * 1000; + rd_strlcpy(rk->rk_name, "unittest", sizeof(rk->rk_name)); + clear_unit_test_data(); + + rd_kafka_telemetry_decode_interface_t decode_interface = { + .decoded_string = unit_test_telemetry_decoded_string, + .decoded_NumberDataPoint = + unit_test_telemetry_decoded_NumberDataPoint, + .decoded_type = unit_test_telemetry_decoded_type, + .decode_error = unit_test_telemetry_decode_error, + .opaque = &unit_test_data, + }; + + TAILQ_INIT(&rk->rk_brokers); + + rd_kafka_broker_t *rkb = rd_calloc(1, sizeof(*rkb)); + rkb->rkb_c.connects.val = 1; + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + rd_avg_init(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle, + RD_AVG_GAUGE, 0, 500 * 1000, 2, rd_true); + TAILQ_INSERT_HEAD(&rk->rk_brokers, rkb, rkb_link); + rd_buf_t *rbuf = rd_kafka_telemetry_encode_metrics(rk); + void *metrics_payload = rbuf->rbuf_wpos->seg_p; + size_t metrics_payload_size = rbuf->rbuf_wpos->seg_of; + RD_UT_SAY("metrics_payload_size: %" PRIusz, metrics_payload_size); + + RD_UT_ASSERT(metrics_payload_size != 0, "Metrics payload zero"); + + bool decode_status = rd_kafka_telemetry_decode_metrics( + &decode_interface, metrics_payload, metrics_payload_size); + + RD_UT_ASSERT(decode_status == 1, "Decoding failed"); + RD_UT_ASSERT(unit_test_data.type == expected_type, + "Metric type mismatch"); + RD_UT_ASSERT(strcmp(unit_test_data.metric_name, expected_name) == 0, + "Metric name mismatch"); + RD_UT_ASSERT(strcmp(unit_test_data.metric_description, + expected_description) == 0, + "Metric description mismatch"); + if (is_double) + RD_UT_ASSERT( + rd_dbl_eq0(unit_test_data.metric_value_double, 1.0, 0.01), + "Metric value mismatch"); + else + RD_UT_ASSERT(unit_test_data.metric_value_int == 1, + "Metric value mismatch"); + RD_UT_ASSERT(unit_test_data.metric_time != 0, "Metric time mismatch"); + + rd_free(rk->rk_telemetry.matched_metrics); + rd_buf_destroy_free(rbuf); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency); + 
rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency); + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle); + rd_free(rkb); + rwlock_destroy(&rk->rk_lock); + rd_free(rk); + RD_UT_PASS(); +} + +bool unit_test_telemetry_gauge(void) { + return unit_test_telemetry( + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE, + RD_KAFKA_TELEMETRY_METRIC_PREFIX + "producer.connection.creation.rate", + "The rate of connections established per second.", + RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE, rd_true); +} + +bool unit_test_telemetry_sum(void) { + return unit_test_telemetry( + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL, + RD_KAFKA_TELEMETRY_METRIC_PREFIX + "producer.connection.creation.total", + "The total number of connections established.", + RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM, rd_false); +} + +int unittest_telemetry_decode(void) { + int fails = 0; + fails += unit_test_telemetry_gauge(); + fails += unit_test_telemetry_sum(); + return fails; +} diff --git a/src/rdkafka_telemetry_decode.h b/src/rdkafka_telemetry_decode.h new file mode 100644 index 0000000000..25f25a7d4f --- /dev/null +++ b/src/rdkafka_telemetry_decode.h @@ -0,0 +1,59 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _RDKAFKA_RDKAFKA_TELEMETRY_DECODE_H +#define _RDKAFKA_RDKAFKA_TELEMETRY_DECODE_H +#include "rd.h" +#include "opentelemetry/metrics.pb.h" +#include "rdkafka_telemetry_encode.h" + +typedef struct rd_kafka_telemetry_decode_interface_s { + void (*decoded_string)(void *opaque, const uint8_t *decoded); + void (*decoded_NumberDataPoint)( + void *opaque, + const opentelemetry_proto_metrics_v1_NumberDataPoint *decoded); + void (*decoded_int64)(void *opaque, int64_t decoded); + void (*decoded_type)(void *opaque, + rd_kafka_telemetry_metric_type_t type); + void (*decode_error)(void *opaque, const char *error, ...); + void *opaque; +} rd_kafka_telemetry_decode_interface_t; + +int rd_kafka_telemetry_uncompress_metrics_payload( + rd_kafka_broker_t *rkb, + rd_kafka_compression_t compression_type, + void *compressed_payload, + size_t compressed_payload_size, + void **uncompressed_payload, + size_t *uncompressed_payload_size); +int rd_kafka_telemetry_decode_metrics( + rd_kafka_telemetry_decode_interface_t *decode_interface, + void *buffer, + size_t size); + +#endif /* _RDKAFKA_RDKAFKA_TELEMETRY_DECODE_H */ diff --git a/src/rdkafka_telemetry_encode.c b/src/rdkafka_telemetry_encode.c new file mode 100644 index 0000000000..5e5a5a3dc1 --- /dev/null +++ b/src/rdkafka_telemetry_encode.c @@ -0,0 +1,833 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "rdkafka_telemetry_encode.h" +#include "nanopb/pb_encode.h" +#include "opentelemetry/metrics.pb.h" + +#define THREE_ORDERS_MAGNITUDE 1000 + +typedef struct { + opentelemetry_proto_metrics_v1_Metric **metrics; + size_t count; +} rd_kafka_telemetry_metrics_repeated_t; + +typedef struct { + opentelemetry_proto_common_v1_KeyValue **key_values; + size_t count; +} rd_kafka_telemetry_key_values_repeated_t; + + +static rd_kafka_telemetry_metric_value_t +calculate_connection_creation_total(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t total; + rd_kafka_broker_t *rkb; + + total.int_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + const int32_t connects = rd_atomic32_get(&rkb->rkb_c.connects); + if (!rk->rk_telemetry.delta_temporality) + total.int_value += connects; + else + total.int_value += + connects - + rkb->rkb_telemetry.rkb_historic_c.connects; + } + + return total; +} + +static rd_kafka_telemetry_metric_value_t +calculate_connection_creation_rate(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t total; + rd_kafka_broker_t *rkb; + rd_ts_t ts_last = rk->rk_telemetry.rk_historic_c.ts_last; + + total.double_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + total.double_value += + rd_atomic32_get(&rkb->rkb_c.connects) - + rkb->rkb_telemetry.rkb_historic_c.connects; + } + double seconds = (now_ns - ts_last) / 1e9; + if (seconds > 1.0) + total.double_value /= seconds; + return total; +} + +static rd_kafka_telemetry_metric_value_t +calculate_broker_avg_rtt(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t avg_rtt = RD_ZERO_INIT; + + rd_avg_t *rkb_avg_rtt_rollover = + &rkb_selected->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt; + + if (rkb_avg_rtt_rollover->ra_v.cnt) { + avg_rtt.double_value = rkb_avg_rtt_rollover->ra_v.sum / + (double)(rkb_avg_rtt_rollover->ra_v.cnt * + THREE_ORDERS_MAGNITUDE); + } + + return avg_rtt; +} + +static rd_kafka_telemetry_metric_value_t +calculate_broker_max_rtt(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t max_rtt; + + max_rtt.int_value = RD_CEIL_INTEGER_DIVISION( + rkb_selected->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt.ra_v.maxv, + THREE_ORDERS_MAGNITUDE); + return max_rtt; +} + +static rd_kafka_telemetry_metric_value_t +calculate_throttle_avg(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t avg_throttle; + rd_kafka_broker_t *rkb; + double avg = 0; + int count = 0; + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_avg_t *rkb_avg_throttle_rollover = + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle; + if (rkb_avg_throttle_rollover->ra_v.cnt) { + avg = (avg * count + + rkb_avg_throttle_rollover->ra_v.sum) / + (double)(count + + rkb_avg_throttle_rollover->ra_v.cnt); + count += rkb_avg_throttle_rollover->ra_v.cnt; + } + } + avg_throttle.double_value = avg; + return avg_throttle; +} + + +static rd_kafka_telemetry_metric_value_t +calculate_throttle_max(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t max_throttle; + rd_kafka_broker_t *rkb; + + max_throttle.int_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + max_throttle.int_value = RD_MAX( + max_throttle.int_value, rkb->rkb_telemetry.rd_avg_rollover + .rkb_avg_throttle.ra_v.maxv); + } + return max_throttle; +} + 
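+/* Note on the incremental averaging used by calculate_throttle_avg() above + * and calculate_queue_time_avg() below: given a running mean `avg` over + * `count` samples and a broker window holding `sum` over `cnt` samples, the + * merged mean is (avg * count + sum) / (count + cnt). For example, a mean + * of 10 over 4 samples merged with a window sum of 60 over 2 samples gives + * 100 / 6 = ~16.7. */ +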
+static rd_kafka_telemetry_metric_value_t +calculate_queue_time_avg(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t avg_queue_time; + rd_kafka_broker_t *rkb; + double avg = 0; + int count = 0; + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_avg_t *rkb_avg_outbuf_latency_rollover = + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency; + if (rkb_avg_outbuf_latency_rollover->ra_v.cnt) { + avg = + (avg * count + + rkb_avg_outbuf_latency_rollover->ra_v.sum) / + (double)(count + + rkb_avg_outbuf_latency_rollover->ra_v.cnt); + count += rkb_avg_outbuf_latency_rollover->ra_v.cnt; + } + } + + avg_queue_time.double_value = avg / THREE_ORDERS_MAGNITUDE; + return avg_queue_time; +} + +static rd_kafka_telemetry_metric_value_t +calculate_queue_time_max(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t max_queue_time; + rd_kafka_broker_t *rkb; + + max_queue_time.int_value = 0; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + max_queue_time.int_value = + RD_MAX(max_queue_time.int_value, + rkb->rkb_telemetry.rd_avg_rollover + .rkb_avg_outbuf_latency.ra_v.maxv); + } + max_queue_time.int_value = RD_CEIL_INTEGER_DIVISION( + max_queue_time.int_value, THREE_ORDERS_MAGNITUDE); + return max_queue_time; +} + +static rd_kafka_telemetry_metric_value_t +calculate_consumer_assigned_partitions(rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_ns) { + rd_kafka_telemetry_metric_value_t assigned_partitions; + + assigned_partitions.int_value = + rk->rk_cgrp ? rk->rk_cgrp->rkcg_c.assignment_size : 0; + return assigned_partitions; +} + + +static void reset_historical_metrics(rd_kafka_t *rk, rd_ts_t now_ns) { + rd_kafka_broker_t *rkb; + + rk->rk_telemetry.rk_historic_c.ts_last = now_ns; + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rkb->rkb_telemetry.rkb_historic_c.connects = + rd_atomic32_get(&rkb->rkb_c.connects); + } +} + +static const rd_kafka_telemetry_metric_value_calculator_t + PRODUCER_METRIC_VALUE_CALCULATORS[RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT] = + { + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE] = + &calculate_connection_creation_rate, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL] = + &calculate_connection_creation_total, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_AVG] = + &calculate_broker_avg_rtt, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_MAX] = + &calculate_broker_max_rtt, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_AVG] = + &calculate_throttle_avg, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_MAX] = + &calculate_throttle_max, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_AVG] = + &calculate_queue_time_avg, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_MAX] = + &calculate_queue_time_max, +}; + +static const rd_kafka_telemetry_metric_value_calculator_t + CONSUMER_METRIC_VALUE_CALCULATORS[RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT] = { + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_RATE] = + &calculate_connection_creation_rate, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_TOTAL] = + &calculate_connection_creation_total, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_AVG] = + &calculate_broker_avg_rtt, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_MAX] = + &calculate_broker_max_rtt, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_COORDINATOR_ASSIGNED_PARTITIONS] = + 
&calculate_consumer_assigned_partitions, +}; + +static const char *get_client_rack(const rd_kafka_t *rk) { + return rk->rk_conf.client_rack && + RD_KAFKAP_STR_LEN(rk->rk_conf.client_rack) + ? (const char *)rk->rk_conf.client_rack->str + : NULL; +} + +static const char *get_group_id(const rd_kafka_t *rk) { + return rk->rk_conf.group_id_str ? (const char *)rk->rk_conf.group_id_str + : NULL; +} + +static const char *get_group_instance_id(const rd_kafka_t *rk) { + return rk->rk_conf.group_instance_id + ? (const char *)rk->rk_conf.group_instance_id + : NULL; +} + +static const char *get_member_id(const rd_kafka_t *rk) { + return rk->rk_cgrp && rk->rk_cgrp->rkcg_member_id && + rk->rk_cgrp->rkcg_member_id->len > 0 + ? (const char *)rk->rk_cgrp->rkcg_member_id->str + : NULL; +} + +static const char *get_transactional_id(const rd_kafka_t *rk) { + return rk->rk_conf.eos.transactional_id + ? (const char *)rk->rk_conf.eos.transactional_id + : NULL; +} + +static const rd_kafka_telemetry_attribute_config_t producer_attributes[] = { + {"client_rack", get_client_rack}, + {"transactional_id", get_transactional_id}, +}; + +static const rd_kafka_telemetry_attribute_config_t consumer_attributes[] = { + {"client_rack", get_client_rack}, + {"group_id", get_group_id}, + {"group_instance_id", get_group_instance_id}, + {"member_id", get_member_id}, +}; + +static int +count_attributes(rd_kafka_t *rk, + const rd_kafka_telemetry_attribute_config_t *configs, + int config_count) { + int count = 0, i; + for (i = 0; i < config_count; ++i) { + if (configs[i].getValue(rk)) { + count++; + } + } + return count; +} + +static void set_attributes(rd_kafka_t *rk, + rd_kafka_telemetry_resource_attribute_t *attributes, + const rd_kafka_telemetry_attribute_config_t *configs, + int config_count) { + int attr_idx = 0, i; + for (i = 0; i < config_count; ++i) { + const char *value = configs[i].getValue(rk); + if (value) { + attributes[attr_idx].name = configs[i].name; + attributes[attr_idx].value = value; + attr_idx++; + } + } +} + +static int +resource_attributes(rd_kafka_t *rk, + rd_kafka_telemetry_resource_attribute_t **attributes) { + int count = 0; + const rd_kafka_telemetry_attribute_config_t *configs; + int config_count; + + if (rk->rk_type == RD_KAFKA_PRODUCER) { + configs = producer_attributes; + config_count = RD_ARRAY_SIZE(producer_attributes); + } else if (rk->rk_type == RD_KAFKA_CONSUMER) { + configs = consumer_attributes; + config_count = RD_ARRAY_SIZE(consumer_attributes); + } else { + *attributes = NULL; + return 0; + } + + count = count_attributes(rk, configs, config_count); + + if (count == 0) { + *attributes = NULL; + return 0; + } + + *attributes = + rd_malloc(sizeof(rd_kafka_telemetry_resource_attribute_t) * count); + + set_attributes(rk, *attributes, configs, config_count); + + return count; +} + +static bool +encode_string(pb_ostream_t *stream, const pb_field_t *field, void *const *arg) { + if (!pb_encode_tag_for_field(stream, field)) + return false; + return pb_encode_string(stream, (uint8_t *)(*arg), strlen(*arg)); +} + +// TODO: Update to handle multiple data points. 
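+/* The encode_* callbacks below follow nanopb's callback-field contract: + * nanopb invokes the callback while serializing the parent message, and the + * callback writes the field tag with pb_encode_tag_for_field() followed by + * the payload (pb_encode_string() or pb_encode_submessage()). For repeated + * fields a single invocation writes one tag+payload pair per element, as in + * encode_metric() and encode_key_values(). */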
+static bool encode_number_data_point(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + opentelemetry_proto_metrics_v1_NumberDataPoint *data_point = + (opentelemetry_proto_metrics_v1_NumberDataPoint *)*arg; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + return pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_NumberDataPoint_fields, + data_point); +} + +static bool +encode_metric(pb_ostream_t *stream, const pb_field_t *field, void *const *arg) { + rd_kafka_telemetry_metrics_repeated_t *metricArr = + (rd_kafka_telemetry_metrics_repeated_t *)*arg; + size_t i; + + for (i = 0; i < metricArr->count; i++) { + + opentelemetry_proto_metrics_v1_Metric *metric = + metricArr->metrics[i]; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + if (!pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_Metric_fields, + metric)) + return false; + } + return true; +} + +static bool encode_scope_metrics(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + opentelemetry_proto_metrics_v1_ScopeMetrics *scope_metrics = + (opentelemetry_proto_metrics_v1_ScopeMetrics *)*arg; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + return pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_ScopeMetrics_fields, + scope_metrics); +} + +static bool encode_resource_metrics(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + opentelemetry_proto_metrics_v1_ResourceMetrics *resource_metrics = + (opentelemetry_proto_metrics_v1_ResourceMetrics *)*arg; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + return pb_encode_submessage( + stream, opentelemetry_proto_metrics_v1_ResourceMetrics_fields, + resource_metrics); +} + +static bool encode_key_value(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + if (!pb_encode_tag_for_field(stream, field)) + return false; + opentelemetry_proto_common_v1_KeyValue *key_value = + (opentelemetry_proto_common_v1_KeyValue *)*arg; + return pb_encode_submessage( + stream, opentelemetry_proto_common_v1_KeyValue_fields, key_value); +} + +static bool encode_key_values(pb_ostream_t *stream, + const pb_field_t *field, + void *const *arg) { + rd_kafka_telemetry_key_values_repeated_t *kv_arr = + (rd_kafka_telemetry_key_values_repeated_t *)*arg; + size_t i; + + for (i = 0; i < kv_arr->count; i++) { + + opentelemetry_proto_common_v1_KeyValue *kv = + kv_arr->key_values[i]; + if (!pb_encode_tag_for_field(stream, field)) + return false; + + if (!pb_encode_submessage( + stream, opentelemetry_proto_common_v1_KeyValue_fields, + kv)) + return false; + } + return true; +} + +static void free_metrics( + opentelemetry_proto_metrics_v1_Metric **metrics, + char **metric_names, + opentelemetry_proto_metrics_v1_NumberDataPoint **data_points, + opentelemetry_proto_common_v1_KeyValue *datapoint_attributes_key_values, + size_t count) { + size_t i; + for (i = 0; i < count; i++) { + rd_free(data_points[i]); + rd_free(metric_names[i]); + rd_free(metrics[i]); + } + rd_free(data_points); + rd_free(metric_names); + rd_free(metrics); + rd_free(datapoint_attributes_key_values); +} + +static void free_resource_attributes( + opentelemetry_proto_common_v1_KeyValue **resource_attributes_key_values, + rd_kafka_telemetry_resource_attribute_t *resource_attributes_struct, + size_t count) { + size_t i; + if (count == 0) + return; + for (i = 0; i < count; i++) + rd_free(resource_attributes_key_values[i]); + rd_free(resource_attributes_struct); + 
rd_free(resource_attributes_key_values); +} + +static void serialize_Metric( + rd_kafka_t *rk, + rd_kafka_broker_t *rkb, + const rd_kafka_telemetry_metric_info_t *info, + opentelemetry_proto_metrics_v1_Metric **metric, + opentelemetry_proto_metrics_v1_NumberDataPoint **data_point, + opentelemetry_proto_common_v1_KeyValue *data_point_attribute, + rd_kafka_telemetry_metric_value_calculator_t metric_value_calculator, + char **metric_name, + bool is_per_broker, + rd_ts_t now_ns) { + rd_ts_t ts_last = rk->rk_telemetry.rk_historic_c.ts_last, + ts_start = rk->rk_telemetry.rk_historic_c.ts_start; + size_t metric_name_len; + if (info->is_int) { + (*data_point)->which_value = + opentelemetry_proto_metrics_v1_NumberDataPoint_as_int_tag; + (*data_point)->value.as_int = + metric_value_calculator(rk, rkb, now_ns).int_value; + } else { + (*data_point)->which_value = + opentelemetry_proto_metrics_v1_NumberDataPoint_as_double_tag; + (*data_point)->value.as_double = + metric_value_calculator(rk, rkb, now_ns).double_value; + } + + + (*data_point)->time_unix_nano = now_ns; + if (info->type == RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE || + (info->type == RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM && + rk->rk_telemetry.delta_temporality)) + (*data_point)->start_time_unix_nano = ts_last; + else + (*data_point)->start_time_unix_nano = ts_start; + + if (is_per_broker) { + data_point_attribute->key.funcs.encode = &encode_string; + data_point_attribute->key.arg = + RD_KAFKA_TELEMETRY_METRIC_NODE_ID_ATTRIBUTE; + data_point_attribute->has_value = true; + data_point_attribute->value.which_value = + opentelemetry_proto_common_v1_AnyValue_int_value_tag; + + rd_kafka_broker_lock(rkb); + data_point_attribute->value.value.int_value = rkb->rkb_nodeid; + rd_kafka_broker_unlock(rkb); + + (*data_point)->attributes.funcs.encode = &encode_key_value; + (*data_point)->attributes.arg = data_point_attribute; + } + + + switch (info->type) { + + case RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM: { + (*metric)->which_data = + opentelemetry_proto_metrics_v1_Metric_sum_tag; + (*metric)->data.sum.data_points.funcs.encode = + &encode_number_data_point; + (*metric)->data.sum.data_points.arg = *data_point; + (*metric)->data.sum.aggregation_temporality = + rk->rk_telemetry.delta_temporality + ? opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA + : opentelemetry_proto_metrics_v1_AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE; + (*metric)->data.sum.is_monotonic = true; + break; + } + case RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE: { + (*metric)->which_data = + opentelemetry_proto_metrics_v1_Metric_gauge_tag; + (*metric)->data.gauge.data_points.funcs.encode = + &encode_number_data_point; + (*metric)->data.gauge.data_points.arg = *data_point; + break; + } + default: + rd_assert(!"Unknown metric type"); + break; + } + + (*metric)->description.funcs.encode = &encode_string; + (*metric)->description.arg = (void *)info->description; + + metric_name_len = + strlen(RD_KAFKA_TELEMETRY_METRIC_PREFIX) + strlen(info->name) + 1; + *metric_name = rd_calloc(1, metric_name_len); + rd_snprintf(*metric_name, metric_name_len, "%s%s", + RD_KAFKA_TELEMETRY_METRIC_PREFIX, info->name); + + + (*metric)->name.funcs.encode = &encode_string; + (*metric)->name.arg = *metric_name; + + /* Skipping unit as Java client does the same */ +} + +/** + * @brief Encodes the metrics to opentelemetry_proto_metrics_v1_MetricsData and + * returns the serialized data. 
All metrics matched by the + * current subscription are encoded, including per-broker metrics. + */ +rd_buf_t *rd_kafka_telemetry_encode_metrics(rd_kafka_t *rk) { + rd_buf_t *rbuf = NULL; + rd_kafka_broker_t *rkb; + size_t message_size; + void *buffer = NULL; + pb_ostream_t stream; + bool status; + char **metric_names; + const int *metrics_to_encode = rk->rk_telemetry.matched_metrics; + const size_t metrics_to_encode_count = + rk->rk_telemetry.matched_metrics_cnt; + const rd_kafka_telemetry_metric_info_t *info = + RD_KAFKA_TELEMETRY_METRIC_INFO(rk); + size_t total_metrics_count = metrics_to_encode_count; + size_t i, metric_idx = 0; + opentelemetry_proto_metrics_v1_MetricsData metrics_data = + opentelemetry_proto_metrics_v1_MetricsData_init_zero; + + opentelemetry_proto_metrics_v1_ResourceMetrics resource_metrics = + opentelemetry_proto_metrics_v1_ResourceMetrics_init_zero; + + opentelemetry_proto_metrics_v1_Metric **metrics; + opentelemetry_proto_common_v1_KeyValue * + *resource_attributes_key_values = NULL; + opentelemetry_proto_common_v1_KeyValue + *datapoint_attributes_key_values = NULL; + opentelemetry_proto_metrics_v1_NumberDataPoint **data_points; + rd_kafka_telemetry_metrics_repeated_t metrics_repeated; + rd_kafka_telemetry_key_values_repeated_t resource_attributes_repeated; + rd_kafka_telemetry_resource_attribute_t *resource_attributes_struct = + NULL; + rd_ts_t now_ns = rd_uclock() * 1000; + rd_kafka_rdlock(rk); + + for (i = 0; i < metrics_to_encode_count; i++) { + if (info[metrics_to_encode[i]].is_per_broker) { + total_metrics_count += rk->rk_broker_cnt.val - 1; + } + } + + rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Serializing metrics"); + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + rd_avg_destroy(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt); + rd_avg_rollover(&rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_rtt, + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_rtt); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency); + rd_avg_rollover( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_outbuf_latency, + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_outbuf_latency); + rd_avg_destroy( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle); + rd_avg_rollover( + &rkb->rkb_telemetry.rd_avg_rollover.rkb_avg_throttle, + &rkb->rkb_telemetry.rd_avg_current.rkb_avg_throttle); + } + + int resource_attributes_count = + resource_attributes(rk, &resource_attributes_struct); + rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Resource attributes count: %d", + resource_attributes_count); + if (resource_attributes_count > 0) { + resource_attributes_key_values = + rd_malloc(sizeof(opentelemetry_proto_common_v1_KeyValue *) * + resource_attributes_count); + int ind; + for (ind = 0; ind < resource_attributes_count; ++ind) { + resource_attributes_key_values[ind] = rd_calloc( + 1, sizeof(opentelemetry_proto_common_v1_KeyValue)); + resource_attributes_key_values[ind]->key.funcs.encode = + &encode_string; + resource_attributes_key_values[ind]->key.arg = + (void *)resource_attributes_struct[ind].name; + + resource_attributes_key_values[ind]->has_value = true; + resource_attributes_key_values[ind]->value.which_value = + opentelemetry_proto_common_v1_AnyValue_string_value_tag; + resource_attributes_key_values[ind] + ->value.value.string_value.funcs.encode = + &encode_string; + resource_attributes_key_values[ind] + ->value.value.string_value.arg = + (void *)resource_attributes_struct[ind].value; + } + resource_attributes_repeated.key_values = + resource_attributes_key_values; + resource_attributes_repeated.count =
resource_attributes_count; + resource_metrics.has_resource = true; + resource_metrics.resource.attributes.funcs.encode = + &encode_key_values; + resource_metrics.resource.attributes.arg = + &resource_attributes_repeated; + } + + opentelemetry_proto_metrics_v1_ScopeMetrics scope_metrics = + opentelemetry_proto_metrics_v1_ScopeMetrics_init_zero; + + opentelemetry_proto_common_v1_InstrumentationScope + instrumentation_scope = + opentelemetry_proto_common_v1_InstrumentationScope_init_zero; + instrumentation_scope.name.funcs.encode = &encode_string; + instrumentation_scope.name.arg = (void *)rd_kafka_name(rk); + instrumentation_scope.version.funcs.encode = &encode_string; + instrumentation_scope.version.arg = (void *)rd_kafka_version_str(); + + scope_metrics.has_scope = true; + scope_metrics.scope = instrumentation_scope; + + metrics = rd_malloc(sizeof(opentelemetry_proto_metrics_v1_Metric *) * + total_metrics_count); + data_points = + rd_malloc(sizeof(opentelemetry_proto_metrics_v1_NumberDataPoint *) * + total_metrics_count); + datapoint_attributes_key_values = + rd_malloc(sizeof(opentelemetry_proto_common_v1_KeyValue) * + total_metrics_count); + metric_names = rd_malloc(sizeof(char *) * total_metrics_count); + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Total metrics to be encoded count: %" PRIusz, + total_metrics_count); + + + for (i = 0; i < metrics_to_encode_count; i++) { + + rd_kafka_telemetry_metric_value_calculator_t + metric_value_calculator = + (rk->rk_type == RD_KAFKA_PRODUCER) + ? PRODUCER_METRIC_VALUE_CALCULATORS + [metrics_to_encode[i]] + : CONSUMER_METRIC_VALUE_CALCULATORS + [metrics_to_encode[i]]; + if (info[metrics_to_encode[i]].is_per_broker) { + rd_kafka_broker_t *rkb; + + TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) { + metrics[metric_idx] = rd_calloc( + 1, + sizeof( + opentelemetry_proto_metrics_v1_Metric)); + data_points[metric_idx] = rd_calloc( + 1, + sizeof( + opentelemetry_proto_metrics_v1_NumberDataPoint)); + serialize_Metric( + rk, rkb, &info[metrics_to_encode[i]], + &metrics[metric_idx], + &data_points[metric_idx], + &datapoint_attributes_key_values + [metric_idx], + metric_value_calculator, + &metric_names[metric_idx], true, now_ns); + metric_idx++; + } + continue; + } + + metrics[metric_idx] = + rd_calloc(1, sizeof(opentelemetry_proto_metrics_v1_Metric)); + data_points[metric_idx] = rd_calloc( + 1, sizeof(opentelemetry_proto_metrics_v1_NumberDataPoint)); + + serialize_Metric(rk, NULL, &info[metrics_to_encode[i]], + &metrics[metric_idx], &data_points[metric_idx], + &datapoint_attributes_key_values[metric_idx], + metric_value_calculator, + &metric_names[metric_idx], false, now_ns); + metric_idx++; + } + + /* Send empty metrics blob if no metrics are matched */ + if (total_metrics_count > 0) { + metrics_repeated.metrics = metrics; + metrics_repeated.count = total_metrics_count; + + scope_metrics.metrics.funcs.encode = &encode_metric; + scope_metrics.metrics.arg = &metrics_repeated; + + + resource_metrics.scope_metrics.funcs.encode = + &encode_scope_metrics; + resource_metrics.scope_metrics.arg = &scope_metrics; + + metrics_data.resource_metrics.funcs.encode = + &encode_resource_metrics; + metrics_data.resource_metrics.arg = &resource_metrics; + } + + status = pb_get_encoded_size( + &message_size, opentelemetry_proto_metrics_v1_MetricsData_fields, + &metrics_data); + if (!status) { + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Failed to get encoded size"); + goto fail; + } + + rbuf = rd_buf_new(1, message_size); + rd_buf_write_ensure(rbuf, message_size, message_size); + 
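/* nanopb encoding is done in two passes: pb_get_encoded_size() above + * computed the exact serialized size, so the stream below can write + * straight into the buffer's writable region; rd_buf_write(rbuf, NULL, n) + * afterwards only advances the write position over the bytes already + * produced by pb_encode(). */ +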
message_size = rd_buf_get_writable(rbuf, &buffer); + + stream = pb_ostream_from_buffer(buffer, message_size); + status = pb_encode(&stream, + opentelemetry_proto_metrics_v1_MetricsData_fields, + &metrics_data); + + if (!status) { + rd_kafka_dbg(rk, TELEMETRY, "PUSH", "Encoding failed: %s", + PB_GET_ERROR(&stream)); + rd_buf_destroy_free(rbuf); + goto fail; + } + rd_kafka_dbg(rk, TELEMETRY, "PUSH", + "Push Telemetry metrics encoded, size: %" PRIusz, + stream.bytes_written); + rd_buf_write(rbuf, NULL, stream.bytes_written); + + reset_historical_metrics(rk, now_ns); + + free_metrics(metrics, metric_names, data_points, + datapoint_attributes_key_values, total_metrics_count); + free_resource_attributes(resource_attributes_key_values, + resource_attributes_struct, + resource_attributes_count); + rd_kafka_rdunlock(rk); + + return rbuf; + +fail: + free_metrics(metrics, metric_names, data_points, + datapoint_attributes_key_values, total_metrics_count); + free_resource_attributes(resource_attributes_key_values, + resource_attributes_struct, + resource_attributes_count); + rd_kafka_rdunlock(rk); + + return NULL; +} diff --git a/src/rdkafka_telemetry_encode.h b/src/rdkafka_telemetry_encode.h new file mode 100644 index 0000000000..44445ea2bb --- /dev/null +++ b/src/rdkafka_telemetry_encode.h @@ -0,0 +1,214 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDKAFKA_RDKAFKA_TELEMETRY_ENCODE_H +#define _RDKAFKA_RDKAFKA_TELEMETRY_ENCODE_H + +#include "rdkafka_int.h" +#include "rdtypes.h" + +#define RD_KAFKA_TELEMETRY_METRIC_PREFIX "org.apache.kafka." +#define RD_KAFKA_TELEMETRY_METRIC_NODE_ID_ATTRIBUTE "node.id" + +#define RD_KAFKA_TELEMETRY_METRIC_INFO(rk) \ + (rk->rk_type == RD_KAFKA_PRODUCER \ + ? RD_KAFKA_TELEMETRY_PRODUCER_METRICS_INFO \ + : RD_KAFKA_TELEMETRY_CONSUMER_METRICS_INFO) + +#define RD_KAFKA_TELEMETRY_METRIC_CNT(rk) \ + (rk->rk_type == RD_KAFKA_PRODUCER \ + ? 
RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT \ + : RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT) + + +typedef enum { + RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM, + RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE, +} rd_kafka_telemetry_metric_type_t; + +typedef enum { + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_AVG, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_MAX, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_AVG, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_MAX, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_AVG, + RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_MAX, + RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT +} rd_kafka_telemetry_producer_metric_name_t; + +typedef enum { + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_RATE, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_TOTAL, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_AVG, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_MAX, + RD_KAFKA_TELEMETRY_METRIC_CONSUMER_COORDINATOR_ASSIGNED_PARTITIONS, + RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT +} rd_kafka_telemetry_consumer_metric_name_t; + +typedef union { + int64_t int_value; + double double_value; +} rd_kafka_telemetry_metric_value_t; + +typedef rd_kafka_telemetry_metric_value_t ( + *rd_kafka_telemetry_metric_value_calculator_t)( + rd_kafka_t *rk, + rd_kafka_broker_t *rkb_selected, + rd_ts_t now_nanos); + +typedef struct { + const char *name; + const char *value; +} rd_kafka_telemetry_resource_attribute_t; + +typedef struct { + const char *name; + const char *description; + const char *unit; + const rd_bool_t is_int; + const rd_bool_t is_per_broker; + rd_kafka_telemetry_metric_type_t type; + rd_kafka_telemetry_metric_value_calculator_t calculate_value; +} rd_kafka_telemetry_metric_info_t; + +typedef struct { + const char *name; + const char *(*getValue)(const rd_kafka_t *rk); +} rd_kafka_telemetry_attribute_config_t; + +static const rd_kafka_telemetry_metric_info_t + RD_KAFKA_TELEMETRY_PRODUCER_METRICS_INFO + [RD_KAFKA_TELEMETRY_PRODUCER_METRIC__CNT] = { + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_RATE] = + {.name = "producer.connection.creation.rate", + .description = + "The rate of connections established per second.", + .unit = "1", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_CONNECTION_CREATION_TOTAL] = + {.name = "producer.connection.creation.total", + .description = "The total number of connections established.", + .unit = "1", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_AVG] = + {.name = "producer.node.request.latency.avg", + .description = "The average request latency in ms for a node.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_NODE_REQUEST_LATENCY_MAX] = + {.name = "producer.node.request.latency.max", + .description = "The maximum request latency in ms for a node.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_AVG] = + {.name = "producer.produce.throttle.time.avg", + .description = "The average throttle time in ms for a 
node.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_PRODUCE_THROTTLE_TIME_MAX] = + {.name = "producer.produce.throttle.time.max", + .description = "The maximum throttle time in ms for a node.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_AVG] = + {.name = "producer.record.queue.time.avg", + .description = "The average time in ms a record spends in the " + "producer queue.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_PRODUCER_RECORD_QUEUE_TIME_MAX] = + {.name = "producer.record.queue.time.max", + .description = "The maximum time in ms a record spends in the " + "producer queue.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, +}; + +static const rd_kafka_telemetry_metric_info_t + RD_KAFKA_TELEMETRY_CONSUMER_METRICS_INFO + [RD_KAFKA_TELEMETRY_CONSUMER_METRIC__CNT] = { + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_RATE] = + {.name = "consumer.connection.creation.rate", + .description = + "The rate of connections established per second.", + .unit = "1", + .is_int = rd_false, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_CONNECTION_CREATION_TOTAL] = + {.name = "consumer.connection.creation.total", + .description = "The total number of connections established.", + .unit = "1", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_SUM}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_AVG] = + {.name = "consumer.node.request.latency.avg", + .description = "The average request latency in ms for a node.", + .unit = "ms", + .is_int = rd_false, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_NODE_REQUEST_LATENCY_MAX] = + {.name = "consumer.node.request.latency.max", + .description = "The maximum request latency in ms for a node.", + .unit = "ms", + .is_int = rd_true, + .is_per_broker = rd_true, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, + [RD_KAFKA_TELEMETRY_METRIC_CONSUMER_COORDINATOR_ASSIGNED_PARTITIONS] = + {.name = "consumer.coordinator.assigned.partitions", + .description = "The number of partitions currently assigned " + "to this consumer.", + .unit = "1", + .is_int = rd_true, + .is_per_broker = rd_false, + .type = RD_KAFKA_TELEMETRY_METRIC_TYPE_GAUGE}, +}; + +rd_buf_t *rd_kafka_telemetry_encode_metrics(rd_kafka_t *rk); + +#endif /* _RDKAFKA_RDKAFKA_TELEMETRY_ENCODE_H */ diff --git a/src/rdunittest.c b/src/rdunittest.c index 18236ca9ec..fc82c242cd 100644 --- a/src/rdunittest.c +++ b/src/rdunittest.c @@ -2,6 +2,7 @@ * librdkafka - Apache Kafka C library * * Copyright (c) 2017-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -426,6 +427,7 @@ extern int unittest_http(void); #if WITH_OAUTHBEARER_OIDC extern int unittest_sasl_oauthbearer_oidc(void); #endif +extern int unittest_telemetry_decode(void); int rd_unittest(void) { int fails = 0; @@ -466,6 +468,7 @@ int rd_unittest(void) { #if WITH_OAUTHBEARER_OIDC {"sasl_oauthbearer_oidc", unittest_sasl_oauthbearer_oidc}, #endif + {"telemetry", unittest_telemetry_decode}, {NULL} }; int i; diff --git a/tests/0150-telemetry_mock.c b/tests/0150-telemetry_mock.c new file mode 100644 index 0000000000..52fb76032f --- /dev/null +++ b/tests/0150-telemetry_mock.c @@ -0,0 +1,546 @@ +/* + * librdkafka - Apache Kafka C library + * + * Copyright (c) 2023, Confluent Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include "test.h" + +#include "../src/rdkafka_proto.h" + +typedef struct { + int16_t ApiKey; + int64_t + expected_diff_ms /* Expected time difference from last request */; + int64_t jitter_percent; /* Jitter to be accounted for while checking + expected diff*/ + int broker_id; /* Broker id of request. */ +} rd_kafka_telemetry_expected_request_t; + +static void test_telemetry_check_protocol_request_times( + rd_kafka_mock_request_t **requests_actual, + size_t actual_cnt, + rd_kafka_telemetry_expected_request_t *requests_expected, + size_t expected_cnt) { + int64_t prev_timestamp = -1; + int64_t curr_timestamp = -1; + size_t expected_idx = 0; + size_t actual_idx = 0; + const int buffer = 200 /* constant buffer time. 
*/; + + if (expected_cnt < 1) + return; + + TEST_ASSERT(actual_cnt >= expected_cnt, + "Expected at least %" PRIusz " requests, have %" PRIusz, + expected_cnt, actual_cnt); + + for (expected_idx = 0, actual_idx = 0; + expected_idx < expected_cnt && actual_idx < actual_cnt; + actual_idx++) { + rd_kafka_mock_request_t *request_actual = + requests_actual[actual_idx]; + int16_t actual_ApiKey = + rd_kafka_mock_request_api_key(request_actual); + int actual_broker_id = rd_kafka_mock_request_id(request_actual); + rd_kafka_telemetry_expected_request_t request_expected = + requests_expected[expected_idx]; + + if (actual_ApiKey != RD_KAFKAP_GetTelemetrySubscriptions && + actual_ApiKey != RD_KAFKAP_PushTelemetry) + continue; + + TEST_ASSERT(actual_ApiKey == request_expected.ApiKey, + "Expected ApiKey %s, got ApiKey %s", + rd_kafka_ApiKey2str(request_expected.ApiKey), + rd_kafka_ApiKey2str(actual_ApiKey)); + + if (request_expected.broker_id != -1) + TEST_ASSERT( + request_expected.broker_id == actual_broker_id, + "Expected request to be sent to broker %d, " + "was sent to %d", + request_expected.broker_id, actual_broker_id); + + prev_timestamp = curr_timestamp; + curr_timestamp = + rd_kafka_mock_request_timestamp(request_actual); + if (prev_timestamp != -1 && + request_expected.expected_diff_ms != -1) { + int64_t diff_ms = + (curr_timestamp - prev_timestamp) / 1000; + int64_t expected_diff_low = + request_expected.expected_diff_ms * + (100 - request_expected.jitter_percent) / 100 - + buffer; + int64_t expected_diff_hi = + request_expected.expected_diff_ms * + (100 + request_expected.jitter_percent) / 100 + + buffer; + + TEST_ASSERT( + diff_ms > expected_diff_low, + "Expected difference to be more than %" PRId64 + ", was " + "%" PRId64, + expected_diff_low, diff_ms); + TEST_ASSERT( + diff_ms < expected_diff_hi, + "Expected difference to be less than %" PRId64 + ", was " + "%" PRId64, + expected_diff_hi, diff_ms); + } + expected_idx++; + } +} + +static void test_clear_request_list(rd_kafka_mock_request_t **requests, + size_t request_cnt) { + size_t i; + for (i = 0; i < request_cnt; i++) { + rd_kafka_mock_request_destroy(requests[i]); + } + rd_free(requests); +} + +static void test_poll_timeout(rd_kafka_t *rk, int64_t duration_ms) { + int64_t start_time = test_clock(); + while ((test_clock() - start_time) / 1000 < duration_ms) + rd_kafka_poll(rk, 500); +} + +/** + * @brief Tests the 'happy path' of GetTelemetrySubscriptions, followed by + * successful PushTelemetry requests. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_get_subscription_push_telemetry(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t push_interval = 5000; + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request. */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = push_interval + jitter : The first PushTelemetry request */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + /* T = push_interval*2 + jitter : The second PushTelemetry request. 
+ */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + }; + + SUB_TEST(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "debug", "telemetry"); + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time for two pushes to be triggered, and a little + * extra, so 2.5 x push interval. */ + test_poll_timeout(producer, push_interval * 2.5); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + + test_telemetry_check_protocol_request_times( + requests, request_cnt, requests_expected, + RD_ARRAY_SIZE(requests_expected)); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + rd_kafka_destroy(producer); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief When there are no subscriptions, GetTelemetrySubscriptions should be + * resent after the push interval until there are subscriptions. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_empty_subscriptions_list(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t push_interval = 5000; + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request, returns + * empty subscription. */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = push_interval : The second GetTelemetrySubscriptions request, + * returns non-empty subscription */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + /* T = push_interval*2 + jitter : The first PushTelemetry request. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + }; + + + SUB_TEST(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, NULL, 0); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time so that the first GetTelemetrySubscription + * request is triggered. */ + test_poll_timeout(producer, (push_interval * 0.5)); + + /* Set expected_metrics before the second GetTelemetrySubscription is + * triggered. */ + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + + /* Poll for enough time so that the second GetTelemetrySubscriptions and + * subsequent PushTelemetry request is triggered. */ + test_poll_timeout(producer, (push_interval * 2)); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + test_telemetry_check_protocol_request_times(requests, request_cnt, + requests_expected, 3); + + /* Clean up. 
*/ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + rd_kafka_destroy(producer); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief When a client is terminating, PushIntervalMs is overridden and a final + * push telemetry request should be sent immediately. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_terminating_push(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t wait_before_termination = 2000; + const int64_t push_interval = 5000; /* Needs to be comfortably larger + than wait_before_termination. */ + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request. */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = wait_before_termination : The final PushTelemetry request is + * sent immediately (terminating). + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = wait_before_termination, + .jitter_percent = 0}, + }; + SUB_TEST(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time so that the initial GetTelemetrySubscriptions + * can be sent and handled, and keep polling till it's time to + * terminate. */ + test_poll_timeout(producer, wait_before_termination); + + /* Destroy the client to trigger a terminating push request + * immediately. */ + rd_kafka_destroy(producer); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + test_telemetry_check_protocol_request_times(requests, request_cnt, + requests_expected, 2); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief Preferred broker should be 'sticky' and should not change unless the + * old preferred broker goes down. + * See `requests_expected` for detailed expected flow. + */ +void do_test_telemetry_preferred_broker_change(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t push_interval = 5000; + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request. */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = 1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = push_interval + jitter : The first PushTelemetry request, + * sent to the preferred broker 1. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + /* T = 2*push_interval + jitter : The second PushTelemetry request, + * sent to the preferred broker 1.
+ */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 1, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + /* T = 3*push_interval + jitter : The old preferred broker is set + * down, and this is the first PushTelemetry request to the new + * preferred broker. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 2, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + /* T = 4*push_interval + jitter + arbitraryT + jitter2 : The second + * PushTelemetry request to the new preferred broker. The old + * broker will be up, but the preferred broker will not change. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = 2, + .expected_diff_ms = push_interval, + .jitter_percent = 0}, + }; + SUB_TEST(); + + mcluster = test_mock_cluster_new(2, &bootstraps); + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + /* Set broker 2 down to make sure broker 1 is the first preferred + * broker. */ + rd_kafka_mock_broker_set_down(mcluster, 2); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "debug", "telemetry"); + test_curr->is_fatal_cb = test_error_is_not_fatal_cb; + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + + /* Poll for enough time that the initial GetTelemetrySubscriptions + * request can be sent and the first PushTelemetry request can be + * scheduled. */ + test_poll_timeout(producer, 0.5 * push_interval); + + /* Poll for enough time that 2 PushTelemetry requests can be sent. Set + * all brokers up during this time, but the preferred broker (1) + * should remain sticky. */ + rd_kafka_mock_broker_set_up(mcluster, 2); + test_poll_timeout(producer, 2 * push_interval); + + /* Set the preferred broker (1) down. */ + rd_kafka_mock_broker_set_down(mcluster, 1); + + /* Poll for enough time that 1 PushTelemetry request can be sent. */ + test_poll_timeout(producer, 1.25 * push_interval); + + /* Poll for enough time that 1 PushTelemetry request can be sent. Set + * all brokers up during this time, but the preferred broker (2) + * should remain sticky. */ + rd_kafka_mock_broker_set_up(mcluster, 1); + test_poll_timeout(producer, 1.25 * push_interval); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + test_telemetry_check_protocol_request_times(requests, request_cnt, + requests_expected, 5); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + rd_kafka_destroy(producer); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +/** + * @brief Subscription Id change at the broker should trigger a new + * GetTelemetrySubscriptions request. + */ +void do_test_subscription_id_change(void) { + rd_kafka_conf_t *conf; + const char *bootstraps; + rd_kafka_mock_cluster_t *mcluster; + char *expected_metrics[] = {"*"}; + rd_kafka_t *producer = NULL; + rd_kafka_mock_request_t **requests = NULL; + size_t request_cnt; + const int64_t push_interval = 1000; + + rd_kafka_telemetry_expected_request_t requests_expected[] = { + /* T= 0 : The initial GetTelemetrySubscriptions request. */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = -1, + .jitter_percent = 0}, + /* T = push_interval + jitter : The first PushTelemetry request, + * sent to the preferred broker 1.
+ */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + /* T = 2*push_interval + jitter : The second PushTelemetry request, + * which will fail with unknown subscription id. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + /* New GetTelemetrySubscriptions request will be sent immediately. + */ + {.ApiKey = RD_KAFKAP_GetTelemetrySubscriptions, + .broker_id = -1, + .expected_diff_ms = 0, + .jitter_percent = 0}, + /* T = 3*push_interval + jitter : The third PushTelemetry request, + * sent to the preferred broker 1 with new subscription id. + */ + {.ApiKey = RD_KAFKAP_PushTelemetry, + .broker_id = -1, + .expected_diff_ms = push_interval, + .jitter_percent = 20}, + }; + SUB_TEST(); + + mcluster = test_mock_cluster_new(1, &bootstraps); + + rd_kafka_mock_telemetry_set_requested_metrics(mcluster, + expected_metrics, 1); + rd_kafka_mock_telemetry_set_push_interval(mcluster, push_interval); + rd_kafka_mock_start_request_tracking(mcluster); + + test_conf_init(&conf, NULL, 30); + test_conf_set(conf, "bootstrap.servers", bootstraps); + test_conf_set(conf, "debug", "telemetry"); + producer = test_create_handle(RD_KAFKA_PRODUCER, conf); + test_poll_timeout(producer, push_interval * 1.2); + + rd_kafka_mock_push_request_errors( + mcluster, RD_KAFKAP_PushTelemetry, 1, + RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID); + + test_poll_timeout(producer, push_interval * 2.5); + + requests = rd_kafka_mock_get_requests(mcluster, &request_cnt); + + test_telemetry_check_protocol_request_times( + requests, request_cnt, requests_expected, + RD_ARRAY_SIZE(requests_expected)); + + /* Clean up. */ + rd_kafka_mock_stop_request_tracking(mcluster); + test_clear_request_list(requests, request_cnt); + rd_kafka_destroy(producer); + test_mock_cluster_destroy(mcluster); + + SUB_TEST_PASS(); +} + +int main_0150_telemetry_mock(int argc, char **argv) { + + if (test_needs_auth()) { + TEST_SKIP("Mock cluster does not support SSL/SASL\n"); + return 0; + } + + do_test_telemetry_get_subscription_push_telemetry(); + + do_test_telemetry_empty_subscriptions_list(); + + do_test_telemetry_terminating_push(); + + do_test_telemetry_preferred_broker_change(); + + do_test_subscription_id_change(); + + return 0; +} diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 62ce0deb02..93ec0d57d8 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -137,6 +137,7 @@ set( 0144-idempotence_mock.c 0145-pause_resume_mock.c 0146-metadata_mock.c + 0150-telemetry_mock.c 8000-idle.cpp 8001-fetch_from_follower_mock_manual.c test.c diff --git a/tests/test.c b/tests/test.c index 83487f5e5c..8a4a6806c3 100644 --- a/tests/test.c +++ b/tests/test.c @@ -261,6 +261,7 @@ _TEST_DECL(0143_exponential_backoff_mock); _TEST_DECL(0144_idempotence_mock); _TEST_DECL(0145_pause_resume_mock); _TEST_DECL(0146_metadata_mock); +_TEST_DECL(0150_telemetry_mock); /* Manual tests */ _TEST_DECL(8000_idle); @@ -518,6 +519,7 @@ struct test tests[] = { _TEST(0144_idempotence_mock, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)), _TEST(0145_pause_resume_mock, TEST_F_LOCAL), _TEST(0146_metadata_mock, TEST_F_LOCAL), + _TEST(0150_telemetry_mock, 0), /* Manual tests */ diff --git a/win32/librdkafka.vcxproj b/win32/librdkafka.vcxproj index a7f267e89e..b31f895d62 100644 --- a/win32/librdkafka.vcxproj +++ b/win32/librdkafka.vcxproj @@ -12,11 +12,11 @@ - $(VC_IncludePath);$(WindowsSDK_IncludePath) + 
$(VC_IncludePath);$(WindowsSDK_IncludePath);../src $(VC_LibraryPath_x86);$(WindowsSDK_LibraryPath_x86) - $(VC_IncludePath);$(WindowsSDK_IncludePath) + $(VC_IncludePath);$(WindowsSDK_IncludePath);../src $(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64) @@ -169,6 +169,13 @@ + + + + + + + @@ -232,6 +239,9 @@ + + + @@ -252,6 +262,12 @@ + + + + + + diff --git a/win32/tests/tests.vcxproj b/win32/tests/tests.vcxproj index a354f278f8..b11bfdab75 100644 --- a/win32/tests/tests.vcxproj +++ b/win32/tests/tests.vcxproj @@ -227,6 +227,7 @@ +