From a24f2235f69ac9248622bc6520e2a4e3d7ea2861 Mon Sep 17 00:00:00 2001 From: fpetrini15 Date: Thu, 11 Aug 2022 14:42:17 -0700 Subject: [PATCH 01/34] Initial Commit --- CMakeLists.txt | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6878a11c..38bf21a4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -473,3 +473,22 @@ ExternalProject_Add(aws-sdk-cpp -DCMAKE_INSTALL_PREFIX:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/aws-sdk-cpp PATCH_COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/tools/install_src.py --src ${INSTALL_SRC_DEST_ARG} ) + +# +# Build modern-cpp-kafka +# +ExternalProject_Add(modern-cpp-kafka + PREFIX modern-cpp-kafka + GIT_REPOSITORY "https://github.com/morganstanley/modern-cpp-kafka.git" + GIT_TAG "v2022.08.01" + SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/modern-cpp-kafka/src/modern-cpp-kafka" + EXCLUDE_FROM_ALL ON + CMAKE_CACHE_ARGS + ${_CMAKE_ARGS_CMAKE_TOOLCHAIN_FILE} + ${_CMAKE_ARGS_VCPKG_TARGET_TRIPLET} + -DBUILD_SHARED_LIBS:BOOL=OFF + -DENABLE_TESTING:BOOL=OFF + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_INSTALL_PREFIX:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/modern-cpp-kafka + PATCH_COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/tools/install_src.py --src ${INSTALL_SRC_DEST_ARG} +) From 771ac5b00bf61a3026d1561f5f9ba1780ba0704b Mon Sep 17 00:00:00 2001 From: fpetrini15 Date: Thu, 11 Aug 2022 17:05:01 -0700 Subject: [PATCH 02/34] Build type --- CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 38bf21a4..c69d2886 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -489,6 +489,7 @@ ExternalProject_Add(modern-cpp-kafka -DBUILD_SHARED_LIBS:BOOL=OFF -DENABLE_TESTING:BOOL=OFF -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/modern-cpp-kafka PATCH_COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/tools/install_src.py --src ${INSTALL_SRC_DEST_ARG} -) +) \ No newline at end of file From 62701d8c7db2bb814a6d8bf0e2ab372d10b3f28f Mon Sep 17 00:00:00 2001 From: fpetrini15 Date: Thu, 11 Aug 2022 17:14:27 -0700 Subject: [PATCH 03/34] Library path --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index c69d2886..337332ff 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -491,5 +491,6 @@ ExternalProject_Add(modern-cpp-kafka -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/modern-cpp-kafka + -Dmodern_cpp_kafka_DIR:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/modern-cpp-kafka/lib/cmake/modern-cpp-kafka PATCH_COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/tools/install_src.py --src ${INSTALL_SRC_DEST_ARG} ) \ No newline at end of file From 140827cdad67662ddc7082df9036a54ae75a94de Mon Sep 17 00:00:00 2001 From: fpetrini15 Date: Tue, 16 Aug 2022 14:20:19 -0700 Subject: [PATCH 04/34] Cannot pull from git, need to edit --- CMakeLists.txt | 9 ++------- modern-cpp-kafka | 1 + 2 files changed, 3 insertions(+), 7 deletions(-) create mode 160000 modern-cpp-kafka diff --git a/CMakeLists.txt b/CMakeLists.txt index 337332ff..7291fb53 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -479,18 +479,13 @@ ExternalProject_Add(aws-sdk-cpp # ExternalProject_Add(modern-cpp-kafka PREFIX modern-cpp-kafka - GIT_REPOSITORY "https://github.com/morganstanley/modern-cpp-kafka.git" - GIT_TAG "v2022.08.01" - SOURCE_DIR 
"${CMAKE_CURRENT_BINARY_DIR}/modern-cpp-kafka/src/modern-cpp-kafka" + SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/modern-cpp-kafka" EXCLUDE_FROM_ALL ON + DOWNLOAD_COMMAND "" CMAKE_CACHE_ARGS ${_CMAKE_ARGS_CMAKE_TOOLCHAIN_FILE} ${_CMAKE_ARGS_VCPKG_TARGET_TRIPLET} - -DBUILD_SHARED_LIBS:BOOL=OFF - -DENABLE_TESTING:BOOL=OFF -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON - -DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/modern-cpp-kafka - -Dmodern_cpp_kafka_DIR:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/modern-cpp-kafka/lib/cmake/modern-cpp-kafka PATCH_COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/tools/install_src.py --src ${INSTALL_SRC_DEST_ARG} ) \ No newline at end of file diff --git a/modern-cpp-kafka b/modern-cpp-kafka new file mode 160000 index 00000000..f7d0e116 --- /dev/null +++ b/modern-cpp-kafka @@ -0,0 +1 @@ +Subproject commit f7d0e116685f918064fac309a0ede7d889c96b71 From ebe56b7c8b5ac663a7b60303df42d89d65515913 Mon Sep 17 00:00:00 2001 From: fpetrini15 Date: Tue, 16 Aug 2022 14:29:21 -0700 Subject: [PATCH 05/34] Removing --- modern-cpp-kafka | 1 - 1 file changed, 1 deletion(-) delete mode 160000 modern-cpp-kafka diff --git a/modern-cpp-kafka b/modern-cpp-kafka deleted file mode 160000 index f7d0e116..00000000 --- a/modern-cpp-kafka +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f7d0e116685f918064fac309a0ede7d889c96b71 From 0020c4494f462a737374b65b6b086e1dc423987f Mon Sep 17 00:00:00 2001 From: fpetrini15 Date: Tue, 16 Aug 2022 14:35:14 -0700 Subject: [PATCH 06/34] Untracked modern-cpp-kafka repo --- CMakeLists.txt | 2 +- modern-cpp-kafka/.bazelrc | 2 + modern-cpp-kafka/.clang-tidy | 46 + modern-cpp-kafka/.gitignore | 6 + modern-cpp-kafka/BUILD.bazel | 12 + modern-cpp-kafka/CMakeLists.txt | 278 ++ modern-cpp-kafka/LICENSE | 202 ++ modern-cpp-kafka/NOTICE | 3 + modern-cpp-kafka/README.md | 228 ++ modern-cpp-kafka/WORKSPACE | 15 + modern-cpp-kafka/_config.yml | 1 + modern-cpp-kafka/builddir/CMakeCache.txt | 394 +++ .../3.24.0-rc2/CMakeCCompiler.cmake | 72 + .../3.24.0-rc2/CMakeCXXCompiler.cmake | 83 + .../CMakeDetermineCompilerABI_C.bin | Bin 0 -> 16656 bytes .../CMakeDetermineCompilerABI_CXX.bin | Bin 0 -> 16680 bytes .../CMakeFiles/3.24.0-rc2/CMakeSystem.cmake | 15 + .../3.24.0-rc2/CompilerIdC/CMakeCCompilerId.c | 838 ++++++ .../CMakeFiles/3.24.0-rc2/CompilerIdC/a.out | Bin 0 -> 16776 bytes .../CompilerIdCXX/CMakeCXXCompilerId.cpp | 826 ++++++ .../CMakeFiles/3.24.0-rc2/CompilerIdCXX/a.out | Bin 0 -> 16784 bytes .../builddir/CMakeFiles/CMakeOutput.log | 441 +++ .../builddir/CMakeFiles/cmake.check_cache | 1 + modern-cpp-kafka/customrules/BUILD.bazel | 0 modern-cpp-kafka/customrules/rapidjson.BUILD | 10 + .../conan_build/CMakeLists.txt | 28 + .../conan_build/conanfile.txt | 5 + modern-cpp-kafka/doc/CMakeLists.txt | 50 + .../doc/GoodPracticesToUseKafkaConsumer.md | 22 + .../doc/GoodPracticesToUseKafkaProducer.md | 48 + .../doc/HowToMakeKafkaProducerReliable.md | 185 ++ .../doc/KafkaBrokerConfiguration.md | 155 + .../doc/KafkaConsumerQuickStart.md | 210 ++ .../doc/KafkaProducerQuickStart.md | 221 ++ modern-cpp-kafka/examples/BUILD.bazel | 50 + modern-cpp-kafka/examples/CMakeLists.txt | 26 + .../kafka_async_producer_copy_payload.cc | 60 + .../kafka_async_producer_not_copy_payload.cc | 64 + .../examples/kafka_auto_commit_consumer.cc | 59 + .../examples/kafka_manual_commit_consumer.cc | 79 + .../examples/kafka_sync_producer.cc | 55 + modern-cpp-kafka/include/CMakeLists.txt | 30 + modern-cpp-kafka/include/kafka/AdminClient.h | 345 +++ 
.../include/kafka/AdminClientConfig.h | 44 + modern-cpp-kafka/include/kafka/AdminCommon.h | 70 + .../include/kafka/BrokerMetadata.h | 187 ++ .../include/kafka/ConsumerCommon.h | 65 + .../include/kafka/ConsumerConfig.h | 115 + .../include/kafka/ConsumerRecord.h | 154 + modern-cpp-kafka/include/kafka/Error.h | 148 + modern-cpp-kafka/include/kafka/Header.h | 65 + modern-cpp-kafka/include/kafka/KafkaClient.h | 626 ++++ .../include/kafka/KafkaConsumer.h | 1051 +++++++ .../include/kafka/KafkaException.h | 60 + .../include/kafka/KafkaProducer.h | 516 ++++ modern-cpp-kafka/include/kafka/Log.h | 88 + .../include/kafka/ProducerCommon.h | 185 ++ .../include/kafka/ProducerConfig.h | 150 + .../include/kafka/ProducerRecord.h | 109 + modern-cpp-kafka/include/kafka/Project.h | 22 + modern-cpp-kafka/include/kafka/Properties.h | 100 + .../include/kafka/RdKafkaHelper.h | 122 + modern-cpp-kafka/include/kafka/Timestamp.h | 92 + modern-cpp-kafka/include/kafka/Types.h | 192 ++ modern-cpp-kafka/include/kafka/Utility.h | 81 + .../include/kafka/addons/KafkaMetrics.h | 208 ++ .../kafka/addons/KafkaRecoverableProducer.h | 360 +++ .../kafka/addons/UnorderedOffsetCommitQueue.h | 178 ++ modern-cpp-kafka/scripts/doxyfile.cfg | 2511 +++++++++++++++++ modern-cpp-kafka/scripts/markdown2html.py | 87 + .../scripts/start-local-kafka-cluster.py | 187 ++ modern-cpp-kafka/test.cc | 6 + modern-cpp-kafka/tests/BUILD.bazel | 52 + modern-cpp-kafka/tests/CMakeLists.txt | 43 + .../tests/integration/CMakeLists.txt | 19 + .../tests/integration/TestAdminClient.cc | 203 ++ .../tests/integration/TestKafkaConsumer.cc | 2049 ++++++++++++++ .../tests/integration/TestKafkaEnv.cc | 18 + .../tests/integration/TestKafkaProducer.cc | 606 ++++ .../TestKafkaRecoverableProducer.cc | 175 ++ .../tests/integration/TestTransaction.cc | 321 +++ .../tests/robustness/CMakeLists.txt | 18 + .../tests/robustness/TestAdminClient.cc | 150 + .../tests/robustness/TestKafkaConsumer.cc | 358 +++ .../tests/robustness/TestKafkaProducer.cc | 392 +++ .../tests/robustness/TestTransaction.cc | 78 + modern-cpp-kafka/tests/unit/CMakeLists.txt | 36 + .../tests/unit/TestBrokerMetadata.cc | 99 + .../tests/unit/TestConsumerRecord.cc | 91 + modern-cpp-kafka/tests/unit/TestError.cc | 73 + modern-cpp-kafka/tests/unit/TestHeader.cc | 36 + .../unit/TestKafkaClientDefaultProperties.cc | 131 + .../tests/unit/TestKafkaException.cc | 92 + .../tests/unit/TestKafkaMetrics.cc | 533 ++++ .../tests/unit/TestProducerRecord.cc | 43 + modern-cpp-kafka/tests/unit/TestProperties.cc | 98 + modern-cpp-kafka/tests/unit/TestTimestamp.cc | 60 + modern-cpp-kafka/tests/unit/TestTypes.cc | 48 + .../unit/TestUnorderedOffsetCommitQueue.cc | 187 ++ modern-cpp-kafka/tests/unit/TestUtility.cc | 10 + modern-cpp-kafka/tests/utils/TestUtility.h | 290 ++ modern-cpp-kafka/tools/BUILD.bazel | 30 + modern-cpp-kafka/tools/CMakeLists.txt | 48 + .../tools/KafkaConsoleConsumer.cc | 172 ++ .../tools/KafkaConsoleProducer.cc | 138 + modern-cpp-kafka/tools/KafkaTopics.cc | 197 ++ 106 files changed, 19839 insertions(+), 1 deletion(-) create mode 100644 modern-cpp-kafka/.bazelrc create mode 100644 modern-cpp-kafka/.clang-tidy create mode 100755 modern-cpp-kafka/.gitignore create mode 100644 modern-cpp-kafka/BUILD.bazel create mode 100644 modern-cpp-kafka/CMakeLists.txt create mode 100644 modern-cpp-kafka/LICENSE create mode 100644 modern-cpp-kafka/NOTICE create mode 100644 modern-cpp-kafka/README.md create mode 100644 modern-cpp-kafka/WORKSPACE create mode 100644 modern-cpp-kafka/_config.yml create mode 100644 
modern-cpp-kafka/builddir/CMakeCache.txt create mode 100644 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCCompiler.cmake create mode 100644 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCXXCompiler.cmake create mode 100755 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeDetermineCompilerABI_C.bin create mode 100755 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeDetermineCompilerABI_CXX.bin create mode 100644 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeSystem.cmake create mode 100644 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/CMakeCCompilerId.c create mode 100755 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/a.out create mode 100644 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/CMakeCXXCompilerId.cpp create mode 100755 modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/a.out create mode 100644 modern-cpp-kafka/builddir/CMakeFiles/CMakeOutput.log create mode 100644 modern-cpp-kafka/builddir/CMakeFiles/cmake.check_cache create mode 100644 modern-cpp-kafka/customrules/BUILD.bazel create mode 100644 modern-cpp-kafka/customrules/rapidjson.BUILD create mode 100644 modern-cpp-kafka/demo_projects_for_build/conan_build/CMakeLists.txt create mode 100644 modern-cpp-kafka/demo_projects_for_build/conan_build/conanfile.txt create mode 100644 modern-cpp-kafka/doc/CMakeLists.txt create mode 100644 modern-cpp-kafka/doc/GoodPracticesToUseKafkaConsumer.md create mode 100644 modern-cpp-kafka/doc/GoodPracticesToUseKafkaProducer.md create mode 100644 modern-cpp-kafka/doc/HowToMakeKafkaProducerReliable.md create mode 100644 modern-cpp-kafka/doc/KafkaBrokerConfiguration.md create mode 100644 modern-cpp-kafka/doc/KafkaConsumerQuickStart.md create mode 100644 modern-cpp-kafka/doc/KafkaProducerQuickStart.md create mode 100644 modern-cpp-kafka/examples/BUILD.bazel create mode 100644 modern-cpp-kafka/examples/CMakeLists.txt create mode 100644 modern-cpp-kafka/examples/kafka_async_producer_copy_payload.cc create mode 100644 modern-cpp-kafka/examples/kafka_async_producer_not_copy_payload.cc create mode 100644 modern-cpp-kafka/examples/kafka_auto_commit_consumer.cc create mode 100644 modern-cpp-kafka/examples/kafka_manual_commit_consumer.cc create mode 100644 modern-cpp-kafka/examples/kafka_sync_producer.cc create mode 100644 modern-cpp-kafka/include/CMakeLists.txt create mode 100644 modern-cpp-kafka/include/kafka/AdminClient.h create mode 100644 modern-cpp-kafka/include/kafka/AdminClientConfig.h create mode 100644 modern-cpp-kafka/include/kafka/AdminCommon.h create mode 100644 modern-cpp-kafka/include/kafka/BrokerMetadata.h create mode 100644 modern-cpp-kafka/include/kafka/ConsumerCommon.h create mode 100644 modern-cpp-kafka/include/kafka/ConsumerConfig.h create mode 100644 modern-cpp-kafka/include/kafka/ConsumerRecord.h create mode 100644 modern-cpp-kafka/include/kafka/Error.h create mode 100644 modern-cpp-kafka/include/kafka/Header.h create mode 100644 modern-cpp-kafka/include/kafka/KafkaClient.h create mode 100644 modern-cpp-kafka/include/kafka/KafkaConsumer.h create mode 100644 modern-cpp-kafka/include/kafka/KafkaException.h create mode 100644 modern-cpp-kafka/include/kafka/KafkaProducer.h create mode 100644 modern-cpp-kafka/include/kafka/Log.h create mode 100644 modern-cpp-kafka/include/kafka/ProducerCommon.h create mode 100644 modern-cpp-kafka/include/kafka/ProducerConfig.h create mode 100644 modern-cpp-kafka/include/kafka/ProducerRecord.h create mode 100644 modern-cpp-kafka/include/kafka/Project.h create mode 100644 
modern-cpp-kafka/include/kafka/Properties.h create mode 100644 modern-cpp-kafka/include/kafka/RdKafkaHelper.h create mode 100644 modern-cpp-kafka/include/kafka/Timestamp.h create mode 100644 modern-cpp-kafka/include/kafka/Types.h create mode 100644 modern-cpp-kafka/include/kafka/Utility.h create mode 100644 modern-cpp-kafka/include/kafka/addons/KafkaMetrics.h create mode 100644 modern-cpp-kafka/include/kafka/addons/KafkaRecoverableProducer.h create mode 100644 modern-cpp-kafka/include/kafka/addons/UnorderedOffsetCommitQueue.h create mode 100644 modern-cpp-kafka/scripts/doxyfile.cfg create mode 100755 modern-cpp-kafka/scripts/markdown2html.py create mode 100755 modern-cpp-kafka/scripts/start-local-kafka-cluster.py create mode 100644 modern-cpp-kafka/test.cc create mode 100644 modern-cpp-kafka/tests/BUILD.bazel create mode 100644 modern-cpp-kafka/tests/CMakeLists.txt create mode 100644 modern-cpp-kafka/tests/integration/CMakeLists.txt create mode 100644 modern-cpp-kafka/tests/integration/TestAdminClient.cc create mode 100644 modern-cpp-kafka/tests/integration/TestKafkaConsumer.cc create mode 100644 modern-cpp-kafka/tests/integration/TestKafkaEnv.cc create mode 100644 modern-cpp-kafka/tests/integration/TestKafkaProducer.cc create mode 100644 modern-cpp-kafka/tests/integration/TestKafkaRecoverableProducer.cc create mode 100644 modern-cpp-kafka/tests/integration/TestTransaction.cc create mode 100755 modern-cpp-kafka/tests/robustness/CMakeLists.txt create mode 100755 modern-cpp-kafka/tests/robustness/TestAdminClient.cc create mode 100755 modern-cpp-kafka/tests/robustness/TestKafkaConsumer.cc create mode 100755 modern-cpp-kafka/tests/robustness/TestKafkaProducer.cc create mode 100644 modern-cpp-kafka/tests/robustness/TestTransaction.cc create mode 100644 modern-cpp-kafka/tests/unit/CMakeLists.txt create mode 100644 modern-cpp-kafka/tests/unit/TestBrokerMetadata.cc create mode 100644 modern-cpp-kafka/tests/unit/TestConsumerRecord.cc create mode 100644 modern-cpp-kafka/tests/unit/TestError.cc create mode 100644 modern-cpp-kafka/tests/unit/TestHeader.cc create mode 100644 modern-cpp-kafka/tests/unit/TestKafkaClientDefaultProperties.cc create mode 100644 modern-cpp-kafka/tests/unit/TestKafkaException.cc create mode 100644 modern-cpp-kafka/tests/unit/TestKafkaMetrics.cc create mode 100644 modern-cpp-kafka/tests/unit/TestProducerRecord.cc create mode 100644 modern-cpp-kafka/tests/unit/TestProperties.cc create mode 100644 modern-cpp-kafka/tests/unit/TestTimestamp.cc create mode 100644 modern-cpp-kafka/tests/unit/TestTypes.cc create mode 100644 modern-cpp-kafka/tests/unit/TestUnorderedOffsetCommitQueue.cc create mode 100644 modern-cpp-kafka/tests/unit/TestUtility.cc create mode 100644 modern-cpp-kafka/tests/utils/TestUtility.h create mode 100644 modern-cpp-kafka/tools/BUILD.bazel create mode 100644 modern-cpp-kafka/tools/CMakeLists.txt create mode 100644 modern-cpp-kafka/tools/KafkaConsoleConsumer.cc create mode 100644 modern-cpp-kafka/tools/KafkaConsoleProducer.cc create mode 100644 modern-cpp-kafka/tools/KafkaTopics.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index 7291fb53..2d0960c1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -488,4 +488,4 @@ ExternalProject_Add(modern-cpp-kafka -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON -DCMAKE_INSTALL_PREFIX:PATH=${TRITON_THIRD_PARTY_INSTALL_PREFIX}/modern-cpp-kafka PATCH_COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/tools/install_src.py --src ${INSTALL_SRC_DEST_ARG} -) \ No newline at end of file +) diff --git a/modern-cpp-kafka/.bazelrc 
b/modern-cpp-kafka/.bazelrc new file mode 100644 index 00000000..e1ea66ff --- /dev/null +++ b/modern-cpp-kafka/.bazelrc @@ -0,0 +1,2 @@ +build --copt='-std=c++17' --linkopt='-L/usr/local/lib' + diff --git a/modern-cpp-kafka/.clang-tidy b/modern-cpp-kafka/.clang-tidy new file mode 100644 index 00000000..eda43cde --- /dev/null +++ b/modern-cpp-kafka/.clang-tidy @@ -0,0 +1,46 @@ +Checks: "*,\ + -llvm-header-guard,\ + -llvm-namespace-comment,\ + -llvmlibc-restrict-system-libc-headers,\ + -llvmlibc-callee-namespace,\ + -llvmlibc-implementation-in-namespace,\ + -altera-*,\ + -fuchsia-*,\ + -google-readability-namespace-comments,\ + -google-build-using-namespace,\ + -google-runtime-references,\ + -google-readability-avoid-underscore-in-googletest-name,\ + -modernize-use-nodiscard,\ + -modernize-deprecated-headers,\ + -modernize-use-trailing-return-type,\ + -modernize-concat-nested-namespaces,\ + -hicpp-special-member-functions,\ + -hicpp-vararg,\ + -hicpp-no-malloc,\ + -hicpp-no-array-decay,\ + -hicpp-deprecated-headers,\ + -hicpp-braces-around-statements,\ + -cppcoreguidelines-special-member-function,\ + -cppcoreguidelines-macro-usage,\ + -cppcoreguidelines-avoid-magic-numbers,\ + -cppcoreguidelines-avoid-non-const-global-variables,\ + -cppcoreguidelines-pro-type-vararg,\ + -cppcoreguidelines-pro-bounds-array-to-pointer-decay,\ + -cppcoreguidelines-pro-bounds-pointer-arithmetic,\ + -cppcoreguidelines-special-member-functions,\ + -cppcoreguidelines-owning-memory,\ + -cppcoreguidelines-non-private-member-variables-in-classes,\ + -cppcoreguidelines-pro-type-union-access,\ + -misc-non-private-member-variables-in-classes,\ + -misc-no-recursion,\ + -readability-magic-numbers,\ + -readability-implicit-bool-conversion,\ + -readability-braces-around-statements,\ + -readability-isolate-declaration,\ + -readability-identifier-length,\ + -readability-function-cognitive-complexity,\ + -bugprone-unused-return-value,\ + -bugprone-easily-swappable-parameters,\ + -cert-err58-cpp,\ + -cert-err60-cpp" + diff --git a/modern-cpp-kafka/.gitignore b/modern-cpp-kafka/.gitignore new file mode 100755 index 00000000..0727dffc --- /dev/null +++ b/modern-cpp-kafka/.gitignore @@ -0,0 +1,6 @@ +*.swp +*.pyc +*.*~ + +build/ +install/ \ No newline at end of file diff --git a/modern-cpp-kafka/BUILD.bazel b/modern-cpp-kafka/BUILD.bazel new file mode 100644 index 00000000..5e4db202 --- /dev/null +++ b/modern-cpp-kafka/BUILD.bazel @@ -0,0 +1,12 @@ +cc_library( + name = "modern-cpp-kafka-api", + + hdrs = glob(["include/kafka/*.h", "include/kafka/addons/*.h"]), + + includes = ["include"], + + linkopts = ["-lpthread"], + + visibility = ["//visibility:public"], +) + diff --git a/modern-cpp-kafka/CMakeLists.txt b/modern-cpp-kafka/CMakeLists.txt new file mode 100644 index 00000000..13303adb --- /dev/null +++ b/modern-cpp-kafka/CMakeLists.txt @@ -0,0 +1,278 @@ +cmake_minimum_required(VERSION "3.8") + +project("Modern C++ Kafka API" VERSION 1.0.0) + +get_property(parent_directory DIRECTORY PROPERTY PARENT_DIRECTORY) +if (NOT parent_directory) + set(cppkafka_master_project ON) + # Use Strict Options + if ((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")) + add_compile_options("-Wall" "-Werror" "-Wextra" "-Wshadow" "-Wno-unused-result") + elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + add_definitions(-D_CRT_SECURE_NO_WARNINGS) + endif () + if (CMAKE_CXX_STANDARD EQUAL 14) + add_compile_options("-Wno-maybe-uninitialized") + endif () +endif () + +option(CPPKAFKA_ENABLE_TESTS "Generate the test targets" 
${cppkafka_master_project}) + +include(CheckCXXCompilerFlag) +include(CMakePushCheckState) + + +#--------------------------- +# C++17 (by default) +#--------------------------- +if (NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif () +set(CMAKE_CXX_STANDARD_REQUIRED False) + + +#--------------------------- +# Check Dependencies +#--------------------------- +if (NOT BUILD_OPTION_DOC_ONLY) + + #--------------------------- + # librdkafka library + #--------------------------- + if (DEFINED ENV{LIBRDKAFKA_ROOT}) + set(LIBRDKAFKA_INCLUDE_DIR $ENV{LIBRDKAFKA_ROOT}/include) + set(LIBRDKAFKA_LIBRARY_DIR $ENV{LIBRDKAFKA_ROOT}/lib) + else () + set(LIBRDKAFKA_INCLUDE_DIR /usr/local/include) + set(LIBRDKAFKA_LIBRARY_DIR /usr/local/lib) + endif () + + if (EXISTS "${LIBRDKAFKA_INCLUDE_DIR}/librdkafka/rdkafka.h") + message(STATUS "librdkafka include directory: ${LIBRDKAFKA_INCLUDE_DIR}") + else () + message(FATAL_ERROR "Could not find headers: librdkafka!") + endif () + + + #--------------------------- + # pthread library (for linux only) + #--------------------------- + if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + find_library(PTHREAD_LIB pthread) + if (PTHREAD_LIB) + message(STATUS "pthread library: ${PTHREAD_LIB}") + else () + message(FATAL_ERROR "Could not find library: pthread!") + endif () + endif () + + + #--------------------------- + # sasl library + #--------------------------- + if (DEFINED ENV{SASL_LIBRARYDIR}) + set(SASL_LIBRARYDIR $ENV{SASL_LIBRARYDIR}) + link_directories(${SASL_LIBRARYDIR}) + message(STATUS "sasl2 library directory: ${SASL_LIBRARYDIR}") + endif () + if (DEFINED ENV{SASL_LIBRARY}) + set(SASL_LIBRARY $ENV{SASL_LIBRARY}) + message(STATUS "sasl2 library: ${SASL_LIBRARY}") + endif () + + + #--------------------------- + # boost headers (for C++14 support) + #--------------------------- + if (CMAKE_CXX_STANDARD EQUAL 14) + if (DEFINED ENV{BOOST_ROOT}) + set(Boost_INCLUDE_DIRS $ENV{BOOST_ROOT}/include) + else () + find_package(Boost) + if (NOT Boost_FOUND) + message(FATAL_ERROR "Cound not find library: boost!") + endif () + endif () + include_directories(SYSTEM ${Boost_INCLUDE_DIRS}) + endif () + +endif () + + +#--------------------------- +# Build Option: UT stubs +#--------------------------- +if (BUILD_OPTION_ENABLE_UT_STUBS) + add_definitions(-DKAFKA_API_ENABLE_UNIT_TEST_STUBS) +endif () + + +#--------------------------- +# Build Option: clang-tidy +#--------------------------- +option(BUILD_OPTION_CLANG_TIDY "Build with clang-tidy enabled" OFF) +if (BUILD_OPTION_CLANG_TIDY) + find_program(CLANG_TIDY_EXE NAMES "clang-tidy") + + if (CLANG_TIDY_EXE) + message(STATUS "Use clang-tidy: ${CLANG_TIDY_EXE}") + set(CMAKE_CXX_CLANG_TIDY clang-tidy -warnings-as-errors=* -header-filter=.*) + else () + message(FATAL_ERROR "The clang-tidy executable not found!") + endif () + +else () + message(STATUS "With NO clang-tidy build option") +endif () + + +#--------------------------- +# Build Option: ASAN +#--------------------------- +option(BUILD_OPTION_USE_ASAN "Build with Address Sanitizer (ASAN) enabled" OFF) + +if (BUILD_OPTION_USE_ASAN) + check_cxx_compiler_flag("-fsanitize=address" HAS_ASAN) + + CMAKE_PUSH_CHECK_STATE(RESET) + # Make check_cxx_compiler_flag pass required flags to linker as well: + set(CMAKE_REQUIRED_FLAGS "-fsanitize=address -static-libasan") + check_cxx_compiler_flag("-fsanitize=address -static-libasan" HAS_ASAN_NEEDS_LIB) + CMAKE_POP_CHECK_STATE() + + if (HAS_ASAN) + add_compile_options("-fsanitize=address") + add_link_options("-fsanitize=address") + elseif 
(HAS_ASAN_NEEDS_LIB) + add_compile_options("-fsanitize=address" "-static-libasan") + add_link_options("-fsanitize=address" "-static-libasan") + else () + message(FATAL_ERROR "Address Sanitizer requested by BUILD_OPTION_USE_ASAN, but appears to be not supported on this platform") + endif () + + set(MEMORYCHECK_TYPE AddressSanitizer) + + message(STATUS "Use Address Sanitizer") +endif () + + +#--------------------------- +# Build Option: TSAN +#--------------------------- +option(BUILD_OPTION_USE_TSAN "Build with Thread Sanitizer (TSAN) enabled" OFF) + +if (BUILD_OPTION_USE_TSAN) + check_cxx_compiler_flag("-fsanitize=thread" HAS_TSAN) + + CMAKE_PUSH_CHECK_STATE(RESET) + # Make check_cxx_compiler_flag pass required flags to linker as well: + set(CMAKE_REQUIRED_FLAGS "-fsanitize=thread -static-libtsan") + check_cxx_compiler_flag("-fsanitize=thread -static-libtsan" HAS_TSAN_NEEDS_LIB) + CMAKE_POP_CHECK_STATE() + + if (HAS_TSAN) + add_compile_options("-fsanitize=thread") + add_link_options("-fsanitize=thread") + elseif (HAS_TSAN_NEEDS_LIB) + add_compile_options("-fsanitize=thread" "-static-libtsan") + add_link_options("-fsanitize=thread" "-static-libtsan") + else () + message(FATAL_ERROR "Thread Sanitizer requested by BUILD_OPTION_USE_TSAN, but appears to be not supported on this platform") + endif () + + set(MEMORYCHECK_TYPE ThreadSanitizer) + + message(STATUS "Use Thread Sanitizer") +endif () + + +#--------------------------- +# Build Option: UBSAN +#--------------------------- +option(BUILD_OPTION_USE_UBSAN "Build with Undefined Behavior Sanitizer (UBSAN) enabled" OFF) + +if (BUILD_OPTION_USE_UBSAN) + check_cxx_compiler_flag("-fsanitize=undefined" HAS_UBSAN) + + CMAKE_PUSH_CHECK_STATE(RESET) + # Make check_cxx_compiler_flag pass required flags to linker as well: + set(CMAKE_REQUIRED_FLAGS "-fsanitize=undefined -static-libubsan") + check_cxx_compiler_flag("-fsanitize=undefined -static-libubsan" HAS_UBSAN_NEEDS_LIB) + CMAKE_POP_CHECK_STATE() + + if (HAS_UBSAN_NEEDS_LIB) + add_compile_options("-fsanitize=undefined" "-static-libubsan") + add_link_options("-fsanitize=undefined" "-static-libubsan") + elseif (HAS_UBSAN) + add_compile_options("-fsanitize=undefined") + add_link_options("-fsanitize=undefined") + else () + message(FATAL_ERROR "Undefined Behavior Sanitizer requested by BUILD_OPTION_USE_UBSAN, but appears to be not supported on this platform") + endif () + + message(STATUS "Use Undefined Behavior Sanitizer") +endif () + + +#--------------------------- +# Build Option: generate doc +#--------------------------- +option(BUILD_OPTION_GEN_DOC "Generate html files for doxygen/markdown doc" OFF) + + +#--------------------------- +# Build Option: generate coverage report +#--------------------------- +option(BUILD_OPTION_GEN_COVERAGE "Generate code coverage report" OFF) + +if (BUILD_OPTION_GEN_COVERAGE) + check_cxx_compiler_flag("-fprofile-instr-generate -fcoverage-mapping" HAS_CLANG_COV) + + if (HAS_CLANG_COV) + add_compile_options("-fprofile-instr-generate" "-fcoverage-mapping") + add_link_options("-fprofile-instr-generate" "-fcoverage-mapping") + + add_custom_target(coverage_init + COMMENT "Initialize coverage counters" + COMMAND "rm" "-f" "tests/unit/default.profraw" "tests/default.profdata" + ) + + add_custom_target(coverage + COMMENT "Generate coverage report" + COMMAND "llvm-profdata" "merge" "-sparse" "tests/unit/default.profraw" + "-o" "tests/default.profdata" + COMMAND "llvm-cov" "show" "-format" "html" "-instr-profile" "tests/default.profdata" "tests/unit/kafka-unit-test" + ">" 
"coverage_report.html" + COMMAND "echo" "Coverage report generated: coverage_report.html" + ) + + else () + message(FATAL_ERROR "Coverage report requrested by BUILD_OPTION_GEN_COVERAGE, but only supported with Clang build") + endif () + + message(STATUS "Enable code coverage data generation") +endif () + + +#--------------------------- +# Build Sub-directories +#--------------------------- +if (BUILD_OPTION_DOC_ONLY) + add_subdirectory("doc") +else () + if (BUILD_OPTION_GEN_DOC) + add_subdirectory("doc") + endif () + + add_subdirectory("include") + + if (CPPKAFKA_ENABLE_TESTS) + include(CTest) + add_subdirectory("tests") + endif() + + if (cppkafka_master_project) + add_subdirectory("tools") + add_subdirectory("examples") + endif() +endif () diff --git a/modern-cpp-kafka/LICENSE b/modern-cpp-kafka/LICENSE new file mode 100644 index 00000000..fbf43513 --- /dev/null +++ b/modern-cpp-kafka/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2019] [Morgan Stanley] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/modern-cpp-kafka/NOTICE b/modern-cpp-kafka/NOTICE new file mode 100644 index 00000000..b80d1bb5 --- /dev/null +++ b/modern-cpp-kafka/NOTICE @@ -0,0 +1,3 @@ +Modern C++ Kafka API +Copyright 2020 Morgan Stanley +This project includes software developed at Morgan Stanley. diff --git a/modern-cpp-kafka/README.md b/modern-cpp-kafka/README.md new file mode 100644 index 00000000..51d59177 --- /dev/null +++ b/modern-cpp-kafka/README.md @@ -0,0 +1,228 @@ +# About the *Modern C++ Kafka API* + +![Lifecycle Active](https://badgen.net/badge/Lifecycle/Active/green) + +## Introduction + +The [Modern C++ Kafka API](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/annotated.html) is a layer of C++ wrapper based on [librdkafka](https://github.com/edenhill/librdkafka) (the C part), with high quality, but more friendly to users. + +- By now, [modern-cpp-kafka](https://github.com/morganstanley/modern-cpp-kafka) is compatible with [librdkafka v1.9.0](https://github.com/edenhill/librdkafka/releases/tag/v1.9.0). + +``` +KAFKA is a registered trademark of The Apache Software Foundation and +has been licensed for use by modern-cpp-kafka. modern-cpp-kafka has no +affiliation with and is not endorsed by The Apache Software Foundation. +``` + +## Why it's here + +The ***librdkafka*** is a robust high performance C/C++ library, widely used and well maintained. + +Unfortunately, to maintain C++98 compatibility, the C++ interface of ***librdkafka*** is not quite object-oriented or user-friendly. + +Since C++ is evolving quickly, we want to take advantage of new C++ features, thus make the life easier for developers. And this led us to create a new C++ API for Kafka clients. + +Eventually, we worked out the ***modern-cpp-kafka***, -- a header-only library that uses idiomatic C++ features to provide a safe, efficient and easy to use way of producing and consuming Kafka messages. 
+ +## Features + +* Header-only + + * Easy to deploy, and no extra library required to link + +* Ease of Use + + * Interface/Naming matches the Java API + + * Object-oriented + + * RAII is used for lifetime management + + * ***librdkafka***'s polling and queue management is now hidden + +* Robust + + * Verified with kinds of test cases, which cover many abnormal scenarios (edge cases) + + * Stability test with unstable brokers + + * Memory leak check for failed client with in-flight messages + + * Client failure and taking over, etc. + +* Efficient + + * No extra performance cost (No deep copy introduced internally) + + * Much better (2~4 times throughput) performance result than those native language (Java/Scala) implementation, in most commonly used cases (message size: 256 B ~ 2 KB) + + +## Build + +* No need to build for installation + +* To build its `tools`/`tests`/`examples`, you should + + * Specify library locations with environment variables + + * `LIBRDKAFKA_ROOT` -- ***librdkafka*** headers and libraries + + * `GTEST_ROOT` -- ***googletest*** headers and libraries + + * `BOOST_ROOT` -- ***boost*** headers and libraries + + * `SASL_LIBRARYDIR`/`SASL_LIBRARY` -- if SASL connection support is wanted + + * `RAPIDJSON_INCLUDE_DIRS` -- `addons/KafkaMetrics` requires **rapidjson** headers + + * Create an empty directory for the build, and `cd` to it + + * Build commands + + * Type `cmake path-to-project-root` + + * Type `make` (could follow build options with `-D`) + + * `BUILD_OPTION_USE_ASAN=ON` -- Use Address Sanitizer + + * `BUILD_OPTION_USE_TSAN=ON` -- Use Thread Sanitizer + + * `BUILD_OPTION_USE_UBSAN=ON` -- Use Undefined Behavior Sanitizer + + * `BUILD_OPTION_CLANG_TIDY=ON` -- Enable clang-tidy checking + + * `BUILD_OPTION_GEN_DOC=ON` -- Generate documentation as well + + * `BUILD_OPTION_DOC_ONLY=ON` -- Only generate documentation + + * `BUILD_OPTION_GEN_COVERAGE=ON` -- Generate test coverage, only support by clang currently + + * Type `make install` + +## Install + +* Include the `include/kafka` directory in your project + +* To work together with ***modern-cpp-kafka*** API, the compiler should support + + * Option 1: C++17 + + * Option 2: C++14 (with pre-requirements) + + * Need ***boost*** headers (for `boost::optional`) + + * GCC only (with optimization, e.g. -O2) + +## How to Run Tests + +* Unit test (`tests/unit`) + + * The test could be run with no Kafka cluster depolyed + +* Integration test (`tests/integration`) + + * The test should be run with Kafka cluster depolyed + + * The environment variable `KAFKA_BROKER_LIST` should be set + + * E.g. `export KAFKA_BROKER_LIST=127.0.0.1:29091,127.0.0.1:29092,127.0.0.1:29093` + +* Robustness test (`tests/robustness`) + + * The test should be run with Kafka cluster depolyed locally + + * The environment variable `KAFKA_BROKER_LIST` should be set + + * The environment variable `KAFKA_BROKER_PIDS` should be set + + * Make sure the test runner gets the privilege to stop/resume the pids + + * E.g. `export KAFKA_BROKER_PIDS=61567,61569,61571` + +* Additional settings for clients + + * The environment variable `KAFKA_CLIENT_ADDITIONAL_SETTINGS` could be used for customized test environment + + * Especially for Kafka cluster with SASL(or SSL) connections + + * E.g. 
`export KAFKA_CLIENT_ADDITIONAL_SETTINGS="security.protocol=SASL_PLAINTEXT;sasl.kerberos.service.name=...;sasl.kerberos.keytab=...;sasl.kerberos.principal=..."` + +## To Start + +* Tutorial + + * Confluent Blog [Debuting a Modern C++ API for Apache Kafka](https://www.confluent.io/blog/modern-cpp-kafka-api-for-safe-easy-messaging) + + * [KafkaProducer Quick Start](doc/KafkaProducerQuickStart.md) + + * [KafkaConsumer Quick Start](doc/KafkaConsumerQuickStart.md) + +* User's Manual + + * [Kafka Client API](http://opensource.morganstanley.com/modern-cpp-kafka/doxygen/annotated.html) + + + * Kafka Client Properties + + * In most cases, the `Properties` settings for ***modern-cpp-kafka*** are identical with [librdkafka configuration](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) + + * With following exceptions + + * KafkaConsumer + + * Properties with random string as default + + * `client.id` + + * `group.id` + + * More properties than ***librdkafka*** + + * `max.poll.records` (default: `500`): The maxmum number of records that a single call to `poll()` would return + + * Property which overrides the one from ***librdkafka*** + + * `enable.auto.commit` (default: `false`): To automatically commit the previously polled offsets on each `poll` operation + + * Properties not supposed to be used (internally shadowed by ***modern-cpp-kafka***) + + * `enable.auto.offset.store` + + * `auto.commit.interval.ms` + + * KafkaProducer + + * Properties with random string as default + + * `client.id` + + * Log level + + * The default `log_level` is `NOTICE` (`5`) for all these clients + +* Test Environment (ZooKeeper/Kafka cluster) Setup + + * [Start the servers](https://kafka.apache.org/documentation/#quickstart_startserver) + + +## How to Achieve High Availability & Performance + +* [Kafka Broker Configuration](doc/KafkaBrokerConfiguration.md) + +* [Good Practices to Use KafkaProducer](doc/GoodPracticesToUseKafkaProducer.md) + +* [Good Practices to Use KafkaConsumer](doc/GoodPracticesToUseKafkaConsumer.md) + +* [How to Make KafkaProducer Reliable](doc/HowToMakeKafkaProducerReliable.md) + + +## Other References + +* Java API for Kafka clients + + * [org.apache.kafka.clients.producer](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/producer/package-summary.html) + + * [org.apache.kafka.clients.consumer](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/consumer/package-summary.html) + + * [org.apache.kafka.clients.admin](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/admin/package-summary.html) + diff --git a/modern-cpp-kafka/WORKSPACE b/modern-cpp-kafka/WORKSPACE new file mode 100644 index 00000000..c71a3290 --- /dev/null +++ b/modern-cpp-kafka/WORKSPACE @@ -0,0 +1,15 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "gtest", + urls = ["https://github.com/google/googletest/archive/refs/tags/release-1.11.0.zip"], + strip_prefix = "googletest-release-1.11.0", +) + +http_archive( + name = "rapidjson", + build_file = "//customrules:rapidjson.BUILD", + urls = ["https://github.com/Tencent/rapidjson/archive/refs/heads/master.zip"], + strip_prefix="rapidjson-master", +) + diff --git a/modern-cpp-kafka/_config.yml b/modern-cpp-kafka/_config.yml new file mode 100644 index 00000000..c7418817 --- /dev/null +++ b/modern-cpp-kafka/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-slate \ No newline at end of file diff --git a/modern-cpp-kafka/builddir/CMakeCache.txt 
b/modern-cpp-kafka/builddir/CMakeCache.txt new file mode 100644 index 00000000..b75fab46 --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeCache.txt @@ -0,0 +1,394 @@ +# This is the CMakeCache file. +# For build in directory: /home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir +# It was generated by CMake: /usr/local/bin/cmake +# You can edit this file to change values found and used by cmake. +# If you do not want to change any of the values, simply exit the editor. +# If you do want to change a value, simply edit, save, and exit the editor. +# The syntax for the file is as follows: +# KEY:TYPE=VALUE +# KEY is the name of a variable in the cache. +# TYPE is a hint to GUIs for the type of VALUE, DO NOT EDIT TYPE!. +# VALUE is the current value for the KEY. + +######################## +# EXTERNAL cache entries +######################## + +//Path to a program. +CMAKE_ADDR2LINE:FILEPATH=/usr/bin/addr2line + +//Path to a program. +CMAKE_AR:FILEPATH=/usr/bin/ar + +//Choose the type of build, options are: None Debug Release RelWithDebInfo +// MinSizeRel ... +CMAKE_BUILD_TYPE:STRING= + +//Enable/Disable color output during build. +CMAKE_COLOR_MAKEFILE:BOOL=ON + +//CXX compiler +CMAKE_CXX_COMPILER:FILEPATH=/usr/bin/c++ + +//A wrapper around 'ar' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_CXX_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-9 + +//A wrapper around 'ranlib' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_CXX_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-9 + +//Flags used by the CXX compiler during all build types. +CMAKE_CXX_FLAGS:STRING= + +//Flags used by the CXX compiler during DEBUG builds. +CMAKE_CXX_FLAGS_DEBUG:STRING=-g + +//Flags used by the CXX compiler during MINSIZEREL builds. +CMAKE_CXX_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the CXX compiler during RELEASE builds. +CMAKE_CXX_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the CXX compiler during RELWITHDEBINFO builds. +CMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG + +//C compiler +CMAKE_C_COMPILER:FILEPATH=/usr/bin/cc + +//A wrapper around 'ar' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_C_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-9 + +//A wrapper around 'ranlib' adding the appropriate '--plugin' option +// for the GCC compiler +CMAKE_C_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-9 + +//Flags used by the C compiler during all build types. +CMAKE_C_FLAGS:STRING= + +//Flags used by the C compiler during DEBUG builds. +CMAKE_C_FLAGS_DEBUG:STRING=-g + +//Flags used by the C compiler during MINSIZEREL builds. +CMAKE_C_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the C compiler during RELEASE builds. +CMAKE_C_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the C compiler during RELWITHDEBINFO builds. +CMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG + +//Path to a program. +CMAKE_DLLTOOL:FILEPATH=CMAKE_DLLTOOL-NOTFOUND + +//Flags used by the linker during all build types. +CMAKE_EXE_LINKER_FLAGS:STRING= + +//Flags used by the linker during DEBUG builds. +CMAKE_EXE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during MINSIZEREL builds. +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during RELEASE builds. +CMAKE_EXE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during RELWITHDEBINFO builds. +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Enable/Disable output of compile commands during generation. 
+CMAKE_EXPORT_COMPILE_COMMANDS:BOOL= + +//Value Computed by CMake. +CMAKE_FIND_PACKAGE_REDIRECTS_DIR:STATIC=/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/pkgRedirects + +//Install path prefix, prepended onto install directories. +CMAKE_INSTALL_PREFIX:PATH=/usr/local + +//Path to a program. +CMAKE_LINKER:FILEPATH=/usr/bin/ld + +//Path to a program. +CMAKE_MAKE_PROGRAM:FILEPATH=/usr/bin/make + +//Flags used by the linker during the creation of modules during +// all build types. +CMAKE_MODULE_LINKER_FLAGS:STRING= + +//Flags used by the linker during the creation of modules during +// DEBUG builds. +CMAKE_MODULE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during the creation of modules during +// MINSIZEREL builds. +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during the creation of modules during +// RELEASE builds. +CMAKE_MODULE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during the creation of modules during +// RELWITHDEBINFO builds. +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Path to a program. +CMAKE_NM:FILEPATH=/usr/bin/nm + +//Path to a program. +CMAKE_OBJCOPY:FILEPATH=/usr/bin/objcopy + +//Path to a program. +CMAKE_OBJDUMP:FILEPATH=/usr/bin/objdump + +//Value Computed by CMake +CMAKE_PROJECT_DESCRIPTION:STATIC= + +//Value Computed by CMake +CMAKE_PROJECT_HOMEPAGE_URL:STATIC= + +//Value Computed by CMake +CMAKE_PROJECT_NAME:STATIC=Modern C++ Kafka API + +//Value Computed by CMake +CMAKE_PROJECT_VERSION:STATIC=1.0.0 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_MAJOR:STATIC=1 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_MINOR:STATIC=0 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_PATCH:STATIC=0 + +//Value Computed by CMake +CMAKE_PROJECT_VERSION_TWEAK:STATIC= + +//Path to a program. +CMAKE_RANLIB:FILEPATH=/usr/bin/ranlib + +//Path to a program. +CMAKE_READELF:FILEPATH=/usr/bin/readelf + +//Flags used by the linker during the creation of shared libraries +// during all build types. +CMAKE_SHARED_LINKER_FLAGS:STRING= + +//Flags used by the linker during the creation of shared libraries +// during DEBUG builds. +CMAKE_SHARED_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during the creation of shared libraries +// during MINSIZEREL builds. +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during the creation of shared libraries +// during RELEASE builds. +CMAKE_SHARED_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during the creation of shared libraries +// during RELWITHDEBINFO builds. +CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//If set, runtime paths are not added when installing shared libraries, +// but are added when building. +CMAKE_SKIP_INSTALL_RPATH:BOOL=NO + +//If set, runtime paths are not added when using shared libraries. +CMAKE_SKIP_RPATH:BOOL=NO + +//Flags used by the linker during the creation of static libraries +// during all build types. +CMAKE_STATIC_LINKER_FLAGS:STRING= + +//Flags used by the linker during the creation of static libraries +// during DEBUG builds. +CMAKE_STATIC_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during the creation of static libraries +// during MINSIZEREL builds. +CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during the creation of static libraries +// during RELEASE builds. +CMAKE_STATIC_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during the creation of static libraries +// during RELWITHDEBINFO builds. 
+CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Path to a program. +CMAKE_STRIP:FILEPATH=/usr/bin/strip + +//If this value is on, makefiles will be generated without the +// .SILENT directive, and all commands will be echoed to the console +// during the make. This is useful for debugging only. With Visual +// Studio IDE projects all commands are done without /nologo. +CMAKE_VERBOSE_MAKEFILE:BOOL=FALSE + +//Generate the test targets +CPPKAFKA_ENABLE_TESTS:BOOL=ON + +//Value Computed by CMake +Modern C++ Kafka API_BINARY_DIR:STATIC=/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir + +//Value Computed by CMake +Modern C++ Kafka API_IS_TOP_LEVEL:STATIC=ON + +//Value Computed by CMake +Modern C++ Kafka API_SOURCE_DIR:STATIC=/home/fpetrini/Desktop/Triton/modern-cpp-kafka + + +######################## +# INTERNAL cache entries +######################## + +//ADVANCED property for variable: CMAKE_ADDR2LINE +CMAKE_ADDR2LINE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_AR +CMAKE_AR-ADVANCED:INTERNAL=1 +//This is the directory where this CMakeCache.txt was created +CMAKE_CACHEFILE_DIR:INTERNAL=/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir +//Major version of cmake used to create the current loaded cache +CMAKE_CACHE_MAJOR_VERSION:INTERNAL=3 +//Minor version of cmake used to create the current loaded cache +CMAKE_CACHE_MINOR_VERSION:INTERNAL=24 +//Patch version of cmake used to create the current loaded cache +CMAKE_CACHE_PATCH_VERSION:INTERNAL=0 +//ADVANCED property for variable: CMAKE_COLOR_MAKEFILE +CMAKE_COLOR_MAKEFILE-ADVANCED:INTERNAL=1 +//Path to CMake executable. +CMAKE_COMMAND:INTERNAL=/usr/local/bin/cmake +//Path to cpack program executable. +CMAKE_CPACK_COMMAND:INTERNAL=/usr/local/bin/cpack +//Path to ctest program executable. 
+CMAKE_CTEST_COMMAND:INTERNAL=/usr/local/bin/ctest +//ADVANCED property for variable: CMAKE_CXX_COMPILER +CMAKE_CXX_COMPILER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_COMPILER_AR +CMAKE_CXX_COMPILER_AR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_COMPILER_RANLIB +CMAKE_CXX_COMPILER_RANLIB-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS +CMAKE_CXX_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_DEBUG +CMAKE_CXX_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_MINSIZEREL +CMAKE_CXX_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELEASE +CMAKE_CXX_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELWITHDEBINFO +CMAKE_CXX_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER +CMAKE_C_COMPILER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER_AR +CMAKE_C_COMPILER_AR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER_RANLIB +CMAKE_C_COMPILER_RANLIB-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS +CMAKE_C_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_DEBUG +CMAKE_C_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_MINSIZEREL +CMAKE_C_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELEASE +CMAKE_C_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELWITHDEBINFO +CMAKE_C_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_DLLTOOL +CMAKE_DLLTOOL-ADVANCED:INTERNAL=1 +//Path to cache edit program executable. +CMAKE_EDIT_COMMAND:INTERNAL=/usr/local/bin/ccmake +//Executable file format +CMAKE_EXECUTABLE_FORMAT:INTERNAL=ELF +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS +CMAKE_EXE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_DEBUG +CMAKE_EXE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_MINSIZEREL +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELEASE +CMAKE_EXE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXPORT_COMPILE_COMMANDS +CMAKE_EXPORT_COMPILE_COMMANDS-ADVANCED:INTERNAL=1 +//Name of external makefile project generator. +CMAKE_EXTRA_GENERATOR:INTERNAL= +//Name of generator. +CMAKE_GENERATOR:INTERNAL=Unix Makefiles +//Generator instance identifier. +CMAKE_GENERATOR_INSTANCE:INTERNAL= +//Name of generator platform. +CMAKE_GENERATOR_PLATFORM:INTERNAL= +//Name of generator toolset. +CMAKE_GENERATOR_TOOLSET:INTERNAL= +//Source directory with the top level CMakeLists.txt file for this +// project +CMAKE_HOME_DIRECTORY:INTERNAL=/home/fpetrini/Desktop/Triton/modern-cpp-kafka +//Install .so files without execute permission. 
+CMAKE_INSTALL_SO_NO_EXE:INTERNAL=1 +//ADVANCED property for variable: CMAKE_LINKER +CMAKE_LINKER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MAKE_PROGRAM +CMAKE_MAKE_PROGRAM-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS +CMAKE_MODULE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_DEBUG +CMAKE_MODULE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELEASE +CMAKE_MODULE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_NM +CMAKE_NM-ADVANCED:INTERNAL=1 +//number of local generators +CMAKE_NUMBER_OF_MAKEFILES:INTERNAL=1 +//ADVANCED property for variable: CMAKE_OBJCOPY +CMAKE_OBJCOPY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_OBJDUMP +CMAKE_OBJDUMP-ADVANCED:INTERNAL=1 +//Platform information initialized +CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_RANLIB +CMAKE_RANLIB-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_READELF +CMAKE_READELF-ADVANCED:INTERNAL=1 +//Path to CMake installation. +CMAKE_ROOT:INTERNAL=/usr/local/share/cmake-3.24 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS +CMAKE_SHARED_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_DEBUG +CMAKE_SHARED_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELEASE +CMAKE_SHARED_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_INSTALL_RPATH +CMAKE_SKIP_INSTALL_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_RPATH +CMAKE_SKIP_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS +CMAKE_STATIC_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_DEBUG +CMAKE_STATIC_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL +CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELEASE +CMAKE_STATIC_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STRIP +CMAKE_STRIP-ADVANCED:INTERNAL=1 +//uname command +CMAKE_UNAME:INTERNAL=/usr/bin/uname +//ADVANCED property for variable: CMAKE_VERBOSE_MAKEFILE +CMAKE_VERBOSE_MAKEFILE-ADVANCED:INTERNAL=1 +//linker supports push/pop state +_CMAKE_LINKER_PUSHPOP_STATE_SUPPORTED:INTERNAL=TRUE + diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCCompiler.cmake b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCCompiler.cmake new file mode 100644 index 00000000..2fbe287d --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCCompiler.cmake @@ -0,0 +1,72 @@ 
+set(CMAKE_C_COMPILER "/usr/bin/cc") +set(CMAKE_C_COMPILER_ARG1 "") +set(CMAKE_C_COMPILER_ID "GNU") +set(CMAKE_C_COMPILER_VERSION "9.4.0") +set(CMAKE_C_COMPILER_VERSION_INTERNAL "") +set(CMAKE_C_COMPILER_WRAPPER "") +set(CMAKE_C_STANDARD_COMPUTED_DEFAULT "17") +set(CMAKE_C_EXTENSIONS_COMPUTED_DEFAULT "ON") +set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert;c_std_17;c_std_23") +set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") +set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") +set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") +set(CMAKE_C17_COMPILE_FEATURES "c_std_17") +set(CMAKE_C23_COMPILE_FEATURES "c_std_23") + +set(CMAKE_C_PLATFORM_ID "Linux") +set(CMAKE_C_SIMULATE_ID "") +set(CMAKE_C_COMPILER_FRONTEND_VARIANT "") +set(CMAKE_C_SIMULATE_VERSION "") + + + + +set(CMAKE_AR "/usr/bin/ar") +set(CMAKE_C_COMPILER_AR "/usr/bin/gcc-ar-9") +set(CMAKE_RANLIB "/usr/bin/ranlib") +set(CMAKE_C_COMPILER_RANLIB "/usr/bin/gcc-ranlib-9") +set(CMAKE_LINKER "/usr/bin/ld") +set(CMAKE_MT "") +set(CMAKE_COMPILER_IS_GNUCC 1) +set(CMAKE_C_COMPILER_LOADED 1) +set(CMAKE_C_COMPILER_WORKS TRUE) +set(CMAKE_C_ABI_COMPILED TRUE) + +set(CMAKE_C_COMPILER_ENV_VAR "CC") + +set(CMAKE_C_COMPILER_ID_RUN 1) +set(CMAKE_C_SOURCE_FILE_EXTENSIONS c;m) +set(CMAKE_C_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC) +set(CMAKE_C_LINKER_PREFERENCE 10) + +# Save compiler ABI information. +set(CMAKE_C_SIZEOF_DATA_PTR "8") +set(CMAKE_C_COMPILER_ABI "ELF") +set(CMAKE_C_BYTE_ORDER "LITTLE_ENDIAN") +set(CMAKE_C_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") + +if(CMAKE_C_SIZEOF_DATA_PTR) + set(CMAKE_SIZEOF_VOID_P "${CMAKE_C_SIZEOF_DATA_PTR}") +endif() + +if(CMAKE_C_COMPILER_ABI) + set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_C_COMPILER_ABI}") +endif() + +if(CMAKE_C_LIBRARY_ARCHITECTURE) + set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") +endif() + +set(CMAKE_C_CL_SHOWINCLUDES_PREFIX "") +if(CMAKE_C_CL_SHOWINCLUDES_PREFIX) + set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_C_CL_SHOWINCLUDES_PREFIX}") +endif() + + + + + +set(CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/9/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include") +set(CMAKE_C_IMPLICIT_LINK_LIBRARIES "gcc;gcc_s;c;gcc;gcc_s") +set(CMAKE_C_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/9;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib") +set(CMAKE_C_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCXXCompiler.cmake b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCXXCompiler.cmake new file mode 100644 index 00000000..425a6f69 --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeCXXCompiler.cmake @@ -0,0 +1,83 @@ +set(CMAKE_CXX_COMPILER "/usr/bin/c++") +set(CMAKE_CXX_COMPILER_ARG1 "") +set(CMAKE_CXX_COMPILER_ID "GNU") +set(CMAKE_CXX_COMPILER_VERSION "9.4.0") +set(CMAKE_CXX_COMPILER_VERSION_INTERNAL "") +set(CMAKE_CXX_COMPILER_WRAPPER "") +set(CMAKE_CXX_STANDARD_COMPUTED_DEFAULT "14") +set(CMAKE_CXX_EXTENSIONS_COMPUTED_DEFAULT "ON") +set(CMAKE_CXX_COMPILE_FEATURES 
"cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17;cxx_std_20") +set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters") +set(CMAKE_CXX11_COMPILE_FEATURES "cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") +set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") +set(CMAKE_CXX17_COMPILE_FEATURES "cxx_std_17") +set(CMAKE_CXX20_COMPILE_FEATURES "cxx_std_20") +set(CMAKE_CXX23_COMPILE_FEATURES "") + +set(CMAKE_CXX_PLATFORM_ID "Linux") +set(CMAKE_CXX_SIMULATE_ID "") +set(CMAKE_CXX_COMPILER_FRONTEND_VARIANT "") +set(CMAKE_CXX_SIMULATE_VERSION "") + + + + +set(CMAKE_AR "/usr/bin/ar") +set(CMAKE_CXX_COMPILER_AR "/usr/bin/gcc-ar-9") +set(CMAKE_RANLIB "/usr/bin/ranlib") +set(CMAKE_CXX_COMPILER_RANLIB "/usr/bin/gcc-ranlib-9") +set(CMAKE_LINKER "/usr/bin/ld") +set(CMAKE_MT "") +set(CMAKE_COMPILER_IS_GNUCXX 1) +set(CMAKE_CXX_COMPILER_LOADED 1) +set(CMAKE_CXX_COMPILER_WORKS TRUE) +set(CMAKE_CXX_ABI_COMPILED TRUE) + +set(CMAKE_CXX_COMPILER_ENV_VAR "CXX") + +set(CMAKE_CXX_COMPILER_ID_RUN 1) +set(CMAKE_CXX_SOURCE_FILE_EXTENSIONS C;M;c++;cc;cpp;cxx;m;mm;mpp;CPP;ixx;cppm) 
+set(CMAKE_CXX_IGNORE_EXTENSIONS inl;h;hpp;HPP;H;o;O;obj;OBJ;def;DEF;rc;RC) + +foreach (lang C OBJC OBJCXX) + if (CMAKE_${lang}_COMPILER_ID_RUN) + foreach(extension IN LISTS CMAKE_${lang}_SOURCE_FILE_EXTENSIONS) + list(REMOVE_ITEM CMAKE_CXX_SOURCE_FILE_EXTENSIONS ${extension}) + endforeach() + endif() +endforeach() + +set(CMAKE_CXX_LINKER_PREFERENCE 30) +set(CMAKE_CXX_LINKER_PREFERENCE_PROPAGATES 1) + +# Save compiler ABI information. +set(CMAKE_CXX_SIZEOF_DATA_PTR "8") +set(CMAKE_CXX_COMPILER_ABI "ELF") +set(CMAKE_CXX_BYTE_ORDER "LITTLE_ENDIAN") +set(CMAKE_CXX_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") + +if(CMAKE_CXX_SIZEOF_DATA_PTR) + set(CMAKE_SIZEOF_VOID_P "${CMAKE_CXX_SIZEOF_DATA_PTR}") +endif() + +if(CMAKE_CXX_COMPILER_ABI) + set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_CXX_COMPILER_ABI}") +endif() + +if(CMAKE_CXX_LIBRARY_ARCHITECTURE) + set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") +endif() + +set(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX "") +if(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX) + set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_CXX_CL_SHOWINCLUDES_PREFIX}") +endif() + + + + + +set(CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES "/usr/include/c++/9;/usr/include/x86_64-linux-gnu/c++/9;/usr/include/c++/9/backward;/usr/lib/gcc/x86_64-linux-gnu/9/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include") +set(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "stdc++;m;gcc_s;gcc;c;gcc_s;gcc") +set(CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/9;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib") +set(CMAKE_CXX_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeDetermineCompilerABI_C.bin b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeDetermineCompilerABI_C.bin new file mode 100755 index 0000000000000000000000000000000000000000..299997b13accc73d792048f4c85b8271440e99fc GIT binary patch literal 16656 zcmeHOU2GKB6~4P`gGpe$5C|?1JV}X!n|QFnxQ61gYkTe4%Gea_h6>1Z*E_aX-5+;% zwy~2!sN6#RiN~FBB5A>nsp?{`SBrmDpp)^(cL)xO!mVlEst=j_IbMBn8 zo*6HtYWh-Xt~7h@{m$Q=bMKwKcjn%wqy4>ppHFZKh>r^_g+mq+CdP$blmQYJ9by&y zJ|H%UWl*k>n8goU1h`gu)9<8pga?44UIS*T&`lN$9<0PQM2dQMO0U;i9HxTq1N9_} z%8WDJN%dsPc*UPW9$j>fD#p~Vhq0M&w)EyZom3%zTZytB7Ew>AmOM0vm58z*rW{X< z30@r(Pd8O8D}t$8?-1#^X%Fc!MIV?BUNMvZjlcb*H>*f9!cF&BuwcsmJ_&j9Z!n~9=-Hxc=J=u+itX9$)sO-gKeNsl);4h+$FG#dGvuY*8P7G za@3cBUNnA^k-!~(IsjLy@HPOprV2g;m{J8FxdVPL;15-i7ZAS@3|G6*^^wWAKAOtL z)2Sy7C?ZQQnDM-+XX2@>=E(A?zhD0$v2X2zU|jBH%^9i+~pa zF9QEj1pZvV`5)T!wL0x&?a#Lfp?!JI^p`Ja(?6~|C-uuaUk3QS^24tHsc#f052f$R zQjd`3)^}p1i;rui-)Pgno8LdwKT~@iroP(DszFIQ&w{1$x;&`-WmcQ%d{>9_f$IUxgXtlUSbRt_69~suBJN=l|N>|L4+R4skP`KJuE|;$+;pcqqEm(E?j)Ja3 z{|`?96AcZ6Ug_91tyH}HTJ$DX^JMgz_M$ek?x#VB&Ub1;^*@;NMQ;+i7H{({IQ zrg;m%2m70DVVhoUyj?D9XKw|}#@4qep8ciw`b*b)O23bk*Zx|YKIhYR{j&H+Z12NI zBS#`fBacV)bEAtM(I55%q_Uq>dWNi!_v1yti+~paF9Kc!ya;#^@FL(vz>9zv0WSjo zClT;DzBVqcbZnq^aCZT|+vP^}E;SGv>`m-8r*tEqGm?k%u`a!DAR6l# zhzKbs^%GO3q381OS?_RvY-p%Isz(QUVvzxmko;A8mh7r&p)-YAzR zfSv;SBGA)74*`8?zFa;HRC%{tz5sL2?RsE!G=%N-}^+}39)y>?vL!)B0q1U{t>tY%V8c8+BD&M z8}O$Phz3K?_`6pv^XI`cxB=*EaA{Cy1Nptd&~yG+u;JN2G^otfM1xzuQri=3d1jdw z?3iBO7Yt{E9g$#5B)GLJsB{Gzx`LsuV4eJK`3q2=06X4yKMbUoya;#^@FL(vz>9zv z0WShx1iT1%5x83fcwZy$W29wMWv}8&HXs*60`_@ZX)A3O{ScLTzvMxeQTH8Dr~B$pGT zjvR44<_L#Ppfa(&%~WQ&f0nKKLYyQ7UsPrNuTp*C%djkS9ASAT%a@4%cPewe$o&6~ z@c!@*Xp#4?_H}pfR<;f&idnO$>{8p*mgd%?EVUlr-lDd&sjW>Ges_lf=v56>Zae_* z9C&}a-AVvb5!R_V5R|sxrCw`CM{^Z1= zchb0V;wyzx6<;MTEFDJ&0AGV~rvD-S-$($i$htaz&PaS| ze>)ENYKWUcO8o!v9N;0bzS8b&?Fis&{7c8(^N6#MjeeW#xcm8!04J9=X*j2eM8{iMw0nU%)&4O8k9Y!N!@tB8hX7mv}=uv<}9?6_O 
zmd+*OX+3G?@&!F!oD?Iu%tYESjilPq(%xEK2#=Vg^mslWhquvLGe0Fp^YM(KCySZP z6sXurI!Kxh(e8cm&l=s`;3kze@{z6>)Gw7C&FPEpuJuG}3=Ahz@$3>-I%S$^!@-4@ z+TgVJKxChM-L1oc6{p*z(0e{T5ZM>&c5>v&7=YkNkRH{@g4T0D=zaZzU6Fo$u($VM zbVwhHbitc+5R~U>MhZn4fPZ^}M&6O_ZNAWrWZaCijm0xL;jrUSj*e2+!5ffup2E?S zg`7Sf&nEGNPHYgQlBuj-EEsTqi&evbLpf0>kQsUMN7vzi4}J{5=-|nps>gYp2Z)?i z6KY{9W5yFe&Adg&IhV@938D$1W^<;Yj%ADLL_RlR22Is+eJd+xM>R`L65Kp1P;K)FS=MnaSD9-2M(E!h2EAjk| zDbMGLEoC{6^uuuBoSXG|p2t)Ho;~qAo3VDt*!2(5ypQP?79`5gMTTQ8eV#WmRalUy zTmLb@{|a+IsQT+NutkEkqNJtj}}-n09>;Cg~Q7vL8Y# z{<(Y`GH8qa^L#Bt`s|10>2|m%0stUJ;uW0rAE_W#hV|`~`;J{SOa@zMVB#!tNx)fYR@h>2pcSisK literal 0 HcmV?d00001 diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeDetermineCompilerABI_CXX.bin b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeDetermineCompilerABI_CXX.bin new file mode 100755 index 0000000000000000000000000000000000000000..ddff7d7eedce64bc9747dded2b49d943564148f6 GIT binary patch literal 16680 zcmeHOZ)_Y#6`#Aa6Q@nim(*$8B-GoIND0LoJ57ue>h{j|*?VxY)5NAOX}a9`*3QBC z?s~UJ?LbSAOQj>YSP0ZsqEb|(egGkbFXRh<90i3UL6soUKNLg?1Pp}MEd&`za=bVD z-g$R@A&Bw?X-7K$W`6Vj%)Xsj&+N{8CK~Va`+R~^K-?>^6b@NTm@4M?PzE3@I>j3J z`yR1btblTzPUP3>f4zUogQk1m{}hB4apFg6RVmfk{_6BY8ejga-Qh2#L2u8Qw2=ycbPW#`+&=w_W=Ab2F(2pVr$uKmOIPe7)n5y=()0q6{X~=PrR|%%cyKv9ABK zkfXi~c;0wvLK4^c(+Rv%qi!Q`>uT^rz$rEO(d+Ox1Ak`?c>(bo#c*{9T_2rH=ws<@ zB9lI9KoP6-qM0a|`a~j~75(v8caOeP-Kp*tdTeN)o-zu?c)DmBg`s^tnOxQwN+dI= zIX;oglBRBn))bWOv7*Sa^g#qMhTJbo8`G;Wy6>m9^Z4X(p{%tk7HQlve}N1%gspn! zx%}#V7LWG|{c*Zr(H*e#)?;yWm<9Q4qzi8v>QHQjbya;#^@FL(vz>9zv0WShx z1iT3RM-li-@rY|*U$Ln9)A%yn1In!S`r%nH$;f&O;?0z2TcPd-I2GH0dP##L( zm8D)G%k6)TmCrw{m4B^G|8`;jP<*ETEKGg1nKgqFx=w?o%K8GR{B>5F>3SA2+D!eI z5cVvZ>%sK^=E3!<%7w<3W9aw{A;_nt{O%;e_G?;sK|B5CecI{8faW`^oxf%_fq|`L zprLYMtg!{!#roLZV_jG+N_P!w(_MbdYUPXOYVCN}3MkBPuT(1YDfn}?{wl1xeTPBU zq5p?Rp%M)ZgI@WuOIo>f;fK*H*v#Y6OWHHq%=(`NAv)i#3pIXk&KJExY!hDQ>sWKD z&fj>-UGqk*n)QEjS-Nug%$VBP@&tMlW3$+d$OG}2uDfC768T81{BmSCR=yk=ij=Pm zYcs7|fIb-CeiiTOe9N^;MLT^pV79csPVtPF--(ys>@EL3Qfc~?HhspY?fGTtk9fZi z9gaK{IUIR7qMsRC@`(PhCm_mxQtcVCLEaxP0$v2X2zU|jBH%^9i+~paF9Kc!ya;#^ z_&?8@ZByqrl;~|;ZR%WVY}2oY_Ko6*PPOgLe5AXD#W_={()$$ zcOW99oYYTFnTDP#z-PTf@z~H%JgP?rdSj6RkdXC>Wcm>7LqG-JroadB+b@1Un|-TN z$pfAMdZ2$XK(}5m*J;Dn+>e*3x>Yvj|H2b3Pgj-OkFg%?F;q2!L}z? zXu;0umHokRHrN>nwnc*5x`Rr0u(>-J>JB!@@0Py+^*q?|zWZSyz2rr}i+~paF9Kc! zya;#^@FL(vz>C0*BEb6^c^@M!o2q*iSF-`R7!t70+e+JLv*>M9=KYeJsLcByabGDW z-ZzQsBTQU>`C281f9~Y##HVZ8sjv1Ur~Ha)|8epw1@|*2-yryYIr&wB@3WJ?NpOF1 z^3XeJ+&KBwLaE8G5p6a3VDg1h+=1~d_b(N?ytAk24Ls+xtj8~2p-o? 
z`KIc4cJeU#YV^N=xV1X|obnrl2-_JeAAr#us>$=yW=RLfS)F*hvcy<9d~W$gP~=_l zgxwU9KjGrUo0PAM?roa#-IK_@&A+0 z1FzVwg|jb7etAE90r-&EP`&Rg{{-;s{L9DPH<4!{8~&K=xcm8Qz$-R6&dvkB*1tjU z^OyO{$U}=*iT<64I*0>a?@2nJV30x^$cLccHiy}Qg+B!z<-^qfcTjZ`*g<*sxKqgf zcCs(uX@bsH;IW-<`F7wHvT2781AnKT;QVO8ESe?QVKge1j(OSVp{OTHlVUVCkWky1O`I@)Us;SrOxo+uO&@HRSY7N*2lAu(a-snW#6 z6sXurI!Kxh(Vl&YPZ&KTBR$|KoiPfL?wC57&kOyd@%D~%b}XmcUVcf&BKzcPaUBk}IGdLedhde+k$tfqXN^2T z0~8z`(xVz#(0UIDy+1zK9f|9MeSHU`L;6sp8{W8spgeaoS}e&x{ri(PMk-+@SY_#` zPB`p1tD~cobsh&~oriPuR57O?Nn}%aa3?khQmJ%SFBJ{AoTZlFeSmVZSR^y@K#;D( zxgY#SfWg26K{ZeLIL{S1nI6~d^NDB11Wa(S|*3#fS7*9;3N1-`*-&BaFP+`nW zz&PN6_wEqq`S2)#XRwucUdPDue5#hRoJV{w{BSPL`aJ(*RG`kDc>c{?2W0H}BQzgm z+`@u{{Jdm3=F;c+B%{KDgl_!@fd3oJ<+=U5eqm()_}vPVyZs5^aP7kSyuM+aMI}gh zjTmRv=XFUMIJ{)m=Xoq6ua7`PCbVHsk3a#}Dy+}*T}EE_ar;@0@o~_{wGWqhUd*VF zGk5!m7Bi5+n6f_4pBd*!ko~)_|7oblYvOwR*C-PquS1EIzBrHg9Q^G13h6PjIo9Da z#34RhQ3mP?=40gSv(XaDa0dy4cq{z4&1!l$UnKr|P{ z_aTQ(Wc^tZV7#X)X=U90e*&~!{~YNvF1Ymh{xkgwWNhma|GrW#-?43haCAzq;;GEsZ(4f7t&qu^!*Y7l1<%*5~&Vb2K3fL!CXb zKI0-(+Vw@4q}wcHKg3r3bNL!%&=&jWd0dF}*$>Op<#161fIyDKt32!9Rb{LU>)Vn0 zj$JfN2HR+0<2Xi&+r!U4d{4t|x9jKe9|NRumvQ!M)#6;bw*D)fmc(k8f~zS01?<;+ A@c;k- literal 0 HcmV?d00001 diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeSystem.cmake b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeSystem.cmake new file mode 100644 index 00000000..8b7ded2f --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CMakeSystem.cmake @@ -0,0 +1,15 @@ +set(CMAKE_HOST_SYSTEM "Linux-5.13.0-51-generic") +set(CMAKE_HOST_SYSTEM_NAME "Linux") +set(CMAKE_HOST_SYSTEM_VERSION "5.13.0-51-generic") +set(CMAKE_HOST_SYSTEM_PROCESSOR "x86_64") + + + +set(CMAKE_SYSTEM "Linux-5.13.0-51-generic") +set(CMAKE_SYSTEM_NAME "Linux") +set(CMAKE_SYSTEM_VERSION "5.13.0-51-generic") +set(CMAKE_SYSTEM_PROCESSOR "x86_64") + +set(CMAKE_CROSSCOMPILING "FALSE") + +set(CMAKE_SYSTEM_LOADED 1) diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/CMakeCCompilerId.c b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/CMakeCCompilerId.c new file mode 100644 index 00000000..2b43aa69 --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/CMakeCCompilerId.c @@ -0,0 +1,838 @@ +#ifdef __cplusplus +# error "A C++ compiler has been selected for C." +#endif + +#if defined(__18CXX) +# define ID_VOID_MAIN +#endif +#if defined(__CLASSIC_C__) +/* cv-qualifiers did not exist in K&R C */ +# define const +# define volatile +#endif + +#if !defined(__has_include) +/* If the compiler does not have __has_include, pretend the answer is + always no. */ +# define __has_include(x) 0 +#endif + + +/* Version number components: V=Version, R=Revision, P=Patch + Version date components: YYYY=Year, MM=Month, DD=Day */ + +#if defined(__INTEL_COMPILER) || defined(__ICC) +# define COMPILER_ID "Intel" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# if defined(__GNUC__) +# define SIMULATE_ID "GNU" +# endif + /* __INTEL_COMPILER = VRP prior to 2021, and then VVVV for 2021 and later, + except that a few beta releases use the old format with V=2021. 
*/ +# if __INTEL_COMPILER < 2021 || __INTEL_COMPILER == 202110 || __INTEL_COMPILER == 202111 +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10) +# if defined(__INTEL_COMPILER_UPDATE) +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE) +# else +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10) +# endif +# else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER_UPDATE) + /* The third version component from --version is an update index, + but no macro is provided for it. */ +# define COMPILER_VERSION_PATCH DEC(0) +# endif +# if defined(__INTEL_COMPILER_BUILD_DATE) + /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */ +# define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE) +# endif +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +# endif +# if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif (defined(__clang__) && defined(__INTEL_CLANG_COMPILER)) || defined(__INTEL_LLVM_COMPILER) +# define COMPILER_ID "IntelLLVM" +#if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +#endif +#if defined(__GNUC__) +# define SIMULATE_ID "GNU" +#endif +/* __INTEL_LLVM_COMPILER = VVVVRP prior to 2021.2.0, VVVVRRPP for 2021.2.0 and + * later. Look for 6 digit vs. 8 digit version number to decide encoding. + * VVVV is no smaller than the current year when a version is released. 
+ */ +#if __INTEL_LLVM_COMPILER < 1000000L +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 10) +#else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/10000) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 100) +#endif +#if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +#endif +#if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +#elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +#endif +#if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +#endif +#if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +#endif + +#elif defined(__PATHCC__) +# define COMPILER_ID "PathScale" +# define COMPILER_VERSION_MAJOR DEC(__PATHCC__) +# define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__) +# if defined(__PATHCC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__) +# endif + +#elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__) +# define COMPILER_ID "Embarcadero" +# define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF) +# define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF) +# define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF) + +#elif defined(__BORLANDC__) +# define COMPILER_ID "Borland" + /* __BORLANDC__ = 0xVRR */ +# define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8) +# define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF) + +#elif defined(__WATCOMC__) && __WATCOMC__ < 1200 +# define COMPILER_ID "Watcom" + /* __WATCOMC__ = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__WATCOMC__) +# define COMPILER_ID "OpenWatcom" + /* __WATCOMC__ = VVRP + 1100 */ +# define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__SUNPRO_C) +# define COMPILER_ID "SunPro" +# if __SUNPRO_C >= 0x5100 + /* __SUNPRO_C = 0xVRRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>12) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF) +# else + /* __SUNPRO_CC = 0xVRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>8) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF) +# endif + +#elif defined(__HP_cc) +# define COMPILER_ID "HP" + /* __HP_cc = VVRRPP */ +# define COMPILER_VERSION_MAJOR DEC(__HP_cc/10000) +# define COMPILER_VERSION_MINOR DEC(__HP_cc/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__HP_cc % 100) + +#elif defined(__DECC) +# define COMPILER_ID "Compaq" + /* __DECC_VER = VVRRTPPPP */ +# define COMPILER_VERSION_MAJOR DEC(__DECC_VER/10000000) +# define COMPILER_VERSION_MINOR DEC(__DECC_VER/100000 % 100) +# define COMPILER_VERSION_PATCH DEC(__DECC_VER % 10000) + +#elif defined(__IBMC__) && defined(__COMPILER_VER__) +# define COMPILER_ID "zOS" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define 
COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__open_xl__) && defined(__clang__) +# define COMPILER_ID "IBMClang" +# define COMPILER_VERSION_MAJOR DEC(__open_xl_version__) +# define COMPILER_VERSION_MINOR DEC(__open_xl_release__) +# define COMPILER_VERSION_PATCH DEC(__open_xl_modification__) +# define COMPILER_VERSION_TWEAK DEC(__open_xl_ptf_fix_level__) + + +#elif defined(__ibmxl__) && defined(__clang__) +# define COMPILER_ID "XLClang" +# define COMPILER_VERSION_MAJOR DEC(__ibmxl_version__) +# define COMPILER_VERSION_MINOR DEC(__ibmxl_release__) +# define COMPILER_VERSION_PATCH DEC(__ibmxl_modification__) +# define COMPILER_VERSION_TWEAK DEC(__ibmxl_ptf_fix_level__) + + +#elif defined(__IBMC__) && !defined(__COMPILER_VER__) && __IBMC__ >= 800 +# define COMPILER_ID "XL" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__IBMC__) && !defined(__COMPILER_VER__) && __IBMC__ < 800 +# define COMPILER_ID "VisualAge" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__NVCOMPILER) +# define COMPILER_ID "NVHPC" +# define COMPILER_VERSION_MAJOR DEC(__NVCOMPILER_MAJOR__) +# define COMPILER_VERSION_MINOR DEC(__NVCOMPILER_MINOR__) +# if defined(__NVCOMPILER_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__NVCOMPILER_PATCHLEVEL__) +# endif + +#elif defined(__PGI) +# define COMPILER_ID "PGI" +# define COMPILER_VERSION_MAJOR DEC(__PGIC__) +# define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__) +# if defined(__PGIC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__) +# endif + +#elif defined(_CRAYC) +# define COMPILER_ID "Cray" +# define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR) +# define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR) + +#elif defined(__TI_COMPILER_VERSION__) +# define COMPILER_ID "TI" + /* __TI_COMPILER_VERSION__ = VVVRRRPPP */ +# define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000) +# define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 1000) +# define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000) + +#elif defined(__CLANG_FUJITSU) +# define COMPILER_ID "FujitsuClang" +# define COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# define COMPILER_VERSION_INTERNAL_STR __clang_version__ + + +#elif defined(__FUJITSU) +# define COMPILER_ID "Fujitsu" +# if defined(__FCC_version__) +# define COMPILER_VERSION __FCC_version__ +# elif defined(__FCC_major__) +# define COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# endif +# if defined(__fcc_version) +# define COMPILER_VERSION_INTERNAL DEC(__fcc_version) +# elif defined(__FCC_VERSION) +# define COMPILER_VERSION_INTERNAL DEC(__FCC_VERSION) +# endif + + +#elif defined(__ghs__) +# define COMPILER_ID "GHS" +/* __GHS_VERSION_NUMBER = VVVVRP */ +# ifdef __GHS_VERSION_NUMBER +# define COMPILER_VERSION_MAJOR DEC(__GHS_VERSION_NUMBER / 100) +# define COMPILER_VERSION_MINOR DEC(__GHS_VERSION_NUMBER / 10 % 10) +# define COMPILER_VERSION_PATCH DEC(__GHS_VERSION_NUMBER % 10) +# endif + +#elif defined(__TINYC__) +# define COMPILER_ID "TinyCC" + 
+#elif defined(__BCC__) +# define COMPILER_ID "Bruce" + +#elif defined(__SCO_VERSION__) +# define COMPILER_ID "SCO" + +#elif defined(__ARMCC_VERSION) && !defined(__clang__) +# define COMPILER_ID "ARMCC" +#if __ARMCC_VERSION >= 1000000 + /* __ARMCC_VERSION = VRRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#else + /* __ARMCC_VERSION = VRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#endif + + +#elif defined(__clang__) && defined(__apple_build_version__) +# define COMPILER_ID "AppleClang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# define COMPILER_VERSION_TWEAK DEC(__apple_build_version__) + +#elif defined(__clang__) && defined(__ARMCOMPILER_VERSION) +# define COMPILER_ID "ARMClang" + # define COMPILER_VERSION_MAJOR DEC(__ARMCOMPILER_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCOMPILER_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCOMPILER_VERSION % 10000) +# define COMPILER_VERSION_INTERNAL DEC(__ARMCOMPILER_VERSION) + +#elif defined(__clang__) +# define COMPILER_ID "Clang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__LCC__) && (defined(__GNUC__) || defined(__GNUG__) || defined(__MCST__)) +# define COMPILER_ID "LCC" +# define COMPILER_VERSION_MAJOR DEC(1) +# if defined(__LCC__) +# define COMPILER_VERSION_MINOR DEC(__LCC__- 100) +# endif +# if defined(__LCC_MINOR__) +# define COMPILER_VERSION_PATCH DEC(__LCC_MINOR__) +# endif +# if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define SIMULATE_ID "GNU" +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif +# endif + +#elif defined(__GNUC__) +# define COMPILER_ID "GNU" +# define COMPILER_VERSION_MAJOR DEC(__GNUC__) +# if defined(__GNUC_MINOR__) +# define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif defined(_MSC_VER) +# define COMPILER_ID "MSVC" + /* _MSC_VER = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100) +# define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100) +# if defined(_MSC_FULL_VER) +# if _MSC_VER >= 1400 + /* _MSC_FULL_VER = VVRRPPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000) +# else + /* _MSC_FULL_VER = VVRRPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000) +# endif +# endif +# if defined(_MSC_BUILD) +# define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD) +# endif + +#elif 
defined(_ADI_COMPILER) +# define COMPILER_ID "ADSP" +#if defined(__VERSIONNUM__) + /* __VERSIONNUM__ = 0xVVRRPPTT */ +# define COMPILER_VERSION_MAJOR DEC(__VERSIONNUM__ >> 24 & 0xFF) +# define COMPILER_VERSION_MINOR DEC(__VERSIONNUM__ >> 16 & 0xFF) +# define COMPILER_VERSION_PATCH DEC(__VERSIONNUM__ >> 8 & 0xFF) +# define COMPILER_VERSION_TWEAK DEC(__VERSIONNUM__ & 0xFF) +#endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# define COMPILER_ID "IAR" +# if defined(__VER__) && defined(__ICCARM__) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 1000000) +# define COMPILER_VERSION_MINOR DEC(((__VER__) / 1000) % 1000) +# define COMPILER_VERSION_PATCH DEC((__VER__) % 1000) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# elif defined(__VER__) && (defined(__ICCAVR__) || defined(__ICCRX__) || defined(__ICCRH850__) || defined(__ICCRL78__) || defined(__ICC430__) || defined(__ICCRISCV__) || defined(__ICCV850__) || defined(__ICC8051__) || defined(__ICCSTM8__)) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 100) +# define COMPILER_VERSION_MINOR DEC((__VER__) - (((__VER__) / 100)*100)) +# define COMPILER_VERSION_PATCH DEC(__SUBVERSION__) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# endif + +#elif defined(__SDCC_VERSION_MAJOR) || defined(SDCC) +# define COMPILER_ID "SDCC" +# if defined(__SDCC_VERSION_MAJOR) +# define COMPILER_VERSION_MAJOR DEC(__SDCC_VERSION_MAJOR) +# define COMPILER_VERSION_MINOR DEC(__SDCC_VERSION_MINOR) +# define COMPILER_VERSION_PATCH DEC(__SDCC_VERSION_PATCH) +# else + /* SDCC = VRP */ +# define COMPILER_VERSION_MAJOR DEC(SDCC/100) +# define COMPILER_VERSION_MINOR DEC(SDCC/10 % 10) +# define COMPILER_VERSION_PATCH DEC(SDCC % 10) +# endif + + +/* These compilers are either not known or too old to define an + identification macro. Try to identify the platform and guess that + it is the native compiler. */ +#elif defined(__hpux) || defined(__hpua) +# define COMPILER_ID "HP" + +#else /* unknown compiler */ +# define COMPILER_ID "" +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. */ +char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; +#ifdef SIMULATE_ID +char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; +#endif + +#ifdef __QNXNTO__ +char const* qnxnto = "INFO" ":" "qnxnto[]"; +#endif + +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) +char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]"; +#endif + +#define STRINGIFY_HELPER(X) #X +#define STRINGIFY(X) STRINGIFY_HELPER(X) + +/* Identify known platforms by name. 
*/ +#if defined(__linux) || defined(__linux__) || defined(linux) +# define PLATFORM_ID "Linux" + +#elif defined(__MSYS__) +# define PLATFORM_ID "MSYS" + +#elif defined(__CYGWIN__) +# define PLATFORM_ID "Cygwin" + +#elif defined(__MINGW32__) +# define PLATFORM_ID "MinGW" + +#elif defined(__APPLE__) +# define PLATFORM_ID "Darwin" + +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) +# define PLATFORM_ID "Windows" + +#elif defined(__FreeBSD__) || defined(__FreeBSD) +# define PLATFORM_ID "FreeBSD" + +#elif defined(__NetBSD__) || defined(__NetBSD) +# define PLATFORM_ID "NetBSD" + +#elif defined(__OpenBSD__) || defined(__OPENBSD) +# define PLATFORM_ID "OpenBSD" + +#elif defined(__sun) || defined(sun) +# define PLATFORM_ID "SunOS" + +#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) +# define PLATFORM_ID "AIX" + +#elif defined(__hpux) || defined(__hpux__) +# define PLATFORM_ID "HP-UX" + +#elif defined(__HAIKU__) +# define PLATFORM_ID "Haiku" + +#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) +# define PLATFORM_ID "BeOS" + +#elif defined(__QNX__) || defined(__QNXNTO__) +# define PLATFORM_ID "QNX" + +#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) +# define PLATFORM_ID "Tru64" + +#elif defined(__riscos) || defined(__riscos__) +# define PLATFORM_ID "RISCos" + +#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) +# define PLATFORM_ID "SINIX" + +#elif defined(__UNIX_SV__) +# define PLATFORM_ID "UNIX_SV" + +#elif defined(__bsdos__) +# define PLATFORM_ID "BSDOS" + +#elif defined(_MPRAS) || defined(MPRAS) +# define PLATFORM_ID "MP-RAS" + +#elif defined(__osf) || defined(__osf__) +# define PLATFORM_ID "OSF1" + +#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) +# define PLATFORM_ID "SCO_SV" + +#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) +# define PLATFORM_ID "ULTRIX" + +#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) +# define PLATFORM_ID "Xenix" + +#elif defined(__WATCOMC__) +# if defined(__LINUX__) +# define PLATFORM_ID "Linux" + +# elif defined(__DOS__) +# define PLATFORM_ID "DOS" + +# elif defined(__OS2__) +# define PLATFORM_ID "OS2" + +# elif defined(__WINDOWS__) +# define PLATFORM_ID "Windows3x" + +# elif defined(__VXWORKS__) +# define PLATFORM_ID "VxWorks" + +# else /* unknown platform */ +# define PLATFORM_ID +# endif + +#elif defined(__INTEGRITY) +# if defined(INT_178B) +# define PLATFORM_ID "Integrity178" + +# else /* regular Integrity */ +# define PLATFORM_ID "Integrity" +# endif + +# elif defined(_ADI_COMPILER) +# define PLATFORM_ID "ADSP" + +#else /* unknown platform */ +# define PLATFORM_ID + +#endif + +/* For windows compilers MSVC and Intel we can determine + the architecture of the compiler being used. 
This is because + the compilers do not have flags that can change the architecture, + but rather depend on which compiler is being used +*/ +#if defined(_WIN32) && defined(_MSC_VER) +# if defined(_M_IA64) +# define ARCHITECTURE_ID "IA64" + +# elif defined(_M_ARM64EC) +# define ARCHITECTURE_ID "ARM64EC" + +# elif defined(_M_X64) || defined(_M_AMD64) +# define ARCHITECTURE_ID "x64" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# elif defined(_M_ARM64) +# define ARCHITECTURE_ID "ARM64" + +# elif defined(_M_ARM) +# if _M_ARM == 4 +# define ARCHITECTURE_ID "ARMV4I" +# elif _M_ARM == 5 +# define ARCHITECTURE_ID "ARMV5I" +# else +# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) +# endif + +# elif defined(_M_MIPS) +# define ARCHITECTURE_ID "MIPS" + +# elif defined(_M_SH) +# define ARCHITECTURE_ID "SHx" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__WATCOMC__) +# if defined(_M_I86) +# define ARCHITECTURE_ID "I86" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# if defined(__ICCARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__ICCRX__) +# define ARCHITECTURE_ID "RX" + +# elif defined(__ICCRH850__) +# define ARCHITECTURE_ID "RH850" + +# elif defined(__ICCRL78__) +# define ARCHITECTURE_ID "RL78" + +# elif defined(__ICCRISCV__) +# define ARCHITECTURE_ID "RISCV" + +# elif defined(__ICCAVR__) +# define ARCHITECTURE_ID "AVR" + +# elif defined(__ICC430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__ICCV850__) +# define ARCHITECTURE_ID "V850" + +# elif defined(__ICC8051__) +# define ARCHITECTURE_ID "8051" + +# elif defined(__ICCSTM8__) +# define ARCHITECTURE_ID "STM8" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__ghs__) +# if defined(__PPC64__) +# define ARCHITECTURE_ID "PPC64" + +# elif defined(__ppc__) +# define ARCHITECTURE_ID "PPC" + +# elif defined(__ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__x86_64__) +# define ARCHITECTURE_ID "x64" + +# elif defined(__i386__) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__TI_COMPILER_VERSION__) +# if defined(__TI_ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__MSP430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__TMS320C28XX__) +# define ARCHITECTURE_ID "TMS320C28x" + +# elif defined(__TMS320C6X__) || defined(_TMS320C6X) +# define ARCHITECTURE_ID "TMS320C6x" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +# elif defined(__ADSPSHARC__) +# define ARCHITECTURE_ID "SHARC" + +# elif defined(__ADSPBLACKFIN__) +# define ARCHITECTURE_ID "Blackfin" + +#else +# define ARCHITECTURE_ID +#endif + +/* Convert integer to decimal digit literals. */ +#define DEC(n) \ + ('0' + (((n) / 10000000)%10)), \ + ('0' + (((n) / 1000000)%10)), \ + ('0' + (((n) / 100000)%10)), \ + ('0' + (((n) / 10000)%10)), \ + ('0' + (((n) / 1000)%10)), \ + ('0' + (((n) / 100)%10)), \ + ('0' + (((n) / 10)%10)), \ + ('0' + ((n) % 10)) + +/* Convert integer to hex digit literals. */ +#define HEX(n) \ + ('0' + ((n)>>28 & 0xF)), \ + ('0' + ((n)>>24 & 0xF)), \ + ('0' + ((n)>>20 & 0xF)), \ + ('0' + ((n)>>16 & 0xF)), \ + ('0' + ((n)>>12 & 0xF)), \ + ('0' + ((n)>>8 & 0xF)), \ + ('0' + ((n)>>4 & 0xF)), \ + ('0' + ((n) & 0xF)) + +/* Construct a string literal encoding the version number. 
*/ +#ifdef COMPILER_VERSION +char const* info_version = "INFO" ":" "compiler_version[" COMPILER_VERSION "]"; + +/* Construct a string literal encoding the version number components. */ +#elif defined(COMPILER_VERSION_MAJOR) +char const info_version[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', + COMPILER_VERSION_MAJOR, +# ifdef COMPILER_VERSION_MINOR + '.', COMPILER_VERSION_MINOR, +# ifdef COMPILER_VERSION_PATCH + '.', COMPILER_VERSION_PATCH, +# ifdef COMPILER_VERSION_TWEAK + '.', COMPILER_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct a string literal encoding the internal version number. */ +#ifdef COMPILER_VERSION_INTERNAL +char const info_version_internal[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_', + 'i','n','t','e','r','n','a','l','[', + COMPILER_VERSION_INTERNAL,']','\0'}; +#elif defined(COMPILER_VERSION_INTERNAL_STR) +char const* info_version_internal = "INFO" ":" "compiler_version_internal[" COMPILER_VERSION_INTERNAL_STR "]"; +#endif + +/* Construct a string literal encoding the version number components. */ +#ifdef SIMULATE_VERSION_MAJOR +char const info_simulate_version[] = { + 'I', 'N', 'F', 'O', ':', + 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', + SIMULATE_VERSION_MAJOR, +# ifdef SIMULATE_VERSION_MINOR + '.', SIMULATE_VERSION_MINOR, +# ifdef SIMULATE_VERSION_PATCH + '.', SIMULATE_VERSION_PATCH, +# ifdef SIMULATE_VERSION_TWEAK + '.', SIMULATE_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; +char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; + + + +#if !defined(__STDC__) && !defined(__clang__) +# if defined(_MSC_VER) || defined(__ibmxl__) || defined(__IBMC__) +# define C_VERSION "90" +# else +# define C_VERSION +# endif +#elif __STDC_VERSION__ > 201710L +# define C_VERSION "23" +#elif __STDC_VERSION__ >= 201710L +# define C_VERSION "17" +#elif __STDC_VERSION__ >= 201000L +# define C_VERSION "11" +#elif __STDC_VERSION__ >= 199901L +# define C_VERSION "99" +#else +# define C_VERSION "90" +#endif +const char* info_language_standard_default = + "INFO" ":" "standard_default[" C_VERSION "]"; + +const char* info_language_extensions_default = "INFO" ":" "extensions_default[" +#if (defined(__clang__) || defined(__GNUC__) || defined(__xlC__) || \ + defined(__TI_COMPILER_VERSION__)) && \ + !defined(__STRICT_ANSI__) + "ON" +#else + "OFF" +#endif +"]"; + +/*--------------------------------------------------------------------------*/ + +#ifdef ID_VOID_MAIN +void main() {} +#else +# if defined(__CLASSIC_C__) +int main(argc, argv) int argc; char *argv[]; +# else +int main(int argc, char* argv[]) +# endif +{ + int require = 0; + require += info_compiler[argc]; + require += info_platform[argc]; + require += info_arch[argc]; +#ifdef COMPILER_VERSION_MAJOR + require += info_version[argc]; +#endif +#ifdef COMPILER_VERSION_INTERNAL + require += info_version_internal[argc]; +#endif +#ifdef SIMULATE_ID + require += info_simulate[argc]; +#endif +#ifdef SIMULATE_VERSION_MAJOR + require += info_simulate_version[argc]; +#endif +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) + require += info_cray[argc]; +#endif + require += info_language_standard_default[argc]; + require += info_language_extensions_default[argc]; + (void)argv; + return require; +} +#endif diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/a.out b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/a.out new file mode 100755 index 0000000000000000000000000000000000000000..601952a59681fed2c8a9e1390095ec484b87b929 GIT binary patch literal 16776 zcmeHOZ)_Y#6`%9jiIXPhOPUZjDa{g6l#u#jhy1bAn%vnwXAe0}NF5TCy35+Pwolw2 zbGJw1inKvVi>|>Xh)OLVDn&(r3gkoi(27ccqd+NDK#h<}5Fjj|B|u2rkQNt`9PiD% zcivr}gG7-KVn^Eh&HU!QH*ep}?(OW(JRI-ot_cJLr=a+-z*01_C5-F z)U_mWbKq{?OYP0o4`GTwBZXINOs596Zr+$qHlevTbuI^`k$iKO@hjyyY37uaxin3P8hZf$~uLt}Jy4S#J4DqIB+lz4TlC=ntzO+L>Jss2oFX`uQ*vr7con)ZCJUP^`8vJ5?^!G^nE~pm8 z4g2(??KPOyORw9D^yBUGpm1S*xm>=GgrAqBnLt12y7d3}092x(eV|u5d{HkICw~;b zhHmP|m%jot_2co2`crz~)rRLq!}lfv@oT`pI0yfYYP>QqzWh`OqWpYqxMJm4HEFPJ zSL2Vzul1i9iZrY~hQ2{}MP>iQ_V#yS&xN!M;xvZbQ9<*1t{F36|Q@YYqy3|$rL#({)H~P^t0ln?l#XsTrIM^S%FV-Ks zKW3a6nrRcmgd+!0j;qQ@k}ml_egymo_!00U;77oZfFA)r0)7Pi2>223Bk*5}K*062 zakdkQ-tOJo2XmRxRNBfPgs+tSZW&j-@o6hxNaeBzn^kIyP-Zh{n)`*+7)_h@P%fW2 z*n?kJi8S+rBL^uDp9ix^GoOTSfkS37Z69peMm5$U+sa}qg-YG-Ug=d}Rq$^7-ihD9 z`YxBtX}~1l6M!}#e%tJyDwn?nxa`ex`7Gcuz-9270>9N_3Ou-11P+A*s}{|#KS4aM zL1+Yhd~+bW=F2aDYazQCj>${qvLr&`?oi`L8g6-@{)pJIa{C8Av~~@Oquu-9c$wvp zgoHM2IGzQ&&ms{Ig&(cyTs*JlB(x080QCeMQSjHo`tDHp3pI&Qht_?* zt}E31=sZ2tdUXEIP&6BAjfI+Hp>-W0ts~Uf5ej#N>gD&&UxE4v*zv#HVIckFN5GGO z9|1oCegymo_!00U;78#9Cjz{#k=HT86h|h;MH~uR=eU^42WYYAZB*vmYGm zDJEVwiSryxT>sXMat_NUY0)9iNe-G&4vvyHyi@*fGsii1OEe~yKjx=wM zw5(VBzjiDLD^?r1;~e5acsIf8*c~*ikq!!8r|#x!#b>J8sjG}PxBR@yICJy!1&>EJ zUoW^{-TVT<aA;cW+B_ii4p(kkOLDBe@KPTle=;KhfNQTZTT^{rKT zzAMe>;PqH5-Ym~BDu?&C8W9qso_NB#3(24K@Ob(t&-Y7ZMTl*Xfl*uW2iwEc4M(6# z9B`37+Tr^z%3>OWBOqqS^C{q~`9IfreN)<5Dz2i#o>a}wccjd%v-;~5-~)J9_4L=z 
zNS^Qe+{7C)Gkg5JBkj!Y-{;^2YQ*gE-vInlh|eDSpW*+7)xc}qm;(nTKYN_q4SZOv zsPqe~+y?y8n%UPy2l6cBK$Glv$6*$D%^}Cx!@w`8St0m7%KT%Y2N@f}Dk3$-wnM7v{SJoJYVd1^ydU;^#p|o<~r%l;u3)`{BT6eb(paMn(4K;Dnp*{<4h+!`urTqsIefSSARe7Z^K-H{pa}+Be##=wlI19 zH-W>s5$p5(iSYy~LBeyyII}*_ds4vRDYHI5|1$FY3q)i>8_x78DBxU(_4zrOk>{1{ zKg%&b1o}9);xa!^GiubB*MFkLIAk!Utk2Kgj6C1t_Pyu-Bb&==1YFBg@UDub%&A{Ew0X<8lrR8SvVB&eJ~69~k*P0JrZQ zzb}w}r5TMR34csQ2BNtrz7IK!AM2kb0mgeOk}BhE|0h5@^?BaHIOWmj{%888{W#%tK1&%##zw-PI>%1{d;K@P4@sEl1 zxF5d;9Ez|$zyFw^2frxPITPzMPD7%8s0I70gQbp-2 WWhLO_aW(yIZAxkpJ4#BPqWCB4CAaPX literal 0 HcmV?d00001 diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/CMakeCXXCompilerId.cpp b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/CMakeCXXCompilerId.cpp new file mode 100644 index 00000000..486becdc --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/CMakeCXXCompilerId.cpp @@ -0,0 +1,826 @@ +/* This source file must have a .cpp extension so that all C++ compilers + recognize the extension without flags. Borland does not know .cxx for + example. */ +#ifndef __cplusplus +# error "A C compiler has been selected for C++." +#endif + +#if !defined(__has_include) +/* If the compiler does not have __has_include, pretend the answer is + always no. */ +# define __has_include(x) 0 +#endif + + +/* Version number components: V=Version, R=Revision, P=Patch + Version date components: YYYY=Year, MM=Month, DD=Day */ + +#if defined(__COMO__) +# define COMPILER_ID "Comeau" + /* __COMO_VERSION__ = VRR */ +# define COMPILER_VERSION_MAJOR DEC(__COMO_VERSION__ / 100) +# define COMPILER_VERSION_MINOR DEC(__COMO_VERSION__ % 100) + +#elif defined(__INTEL_COMPILER) || defined(__ICC) +# define COMPILER_ID "Intel" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# if defined(__GNUC__) +# define SIMULATE_ID "GNU" +# endif + /* __INTEL_COMPILER = VRP prior to 2021, and then VVVV for 2021 and later, + except that a few beta releases use the old format with V=2021. */ +# if __INTEL_COMPILER < 2021 || __INTEL_COMPILER == 202110 || __INTEL_COMPILER == 202111 +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10) +# if defined(__INTEL_COMPILER_UPDATE) +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE) +# else +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10) +# endif +# else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER_UPDATE) + /* The third version component from --version is an update index, + but no macro is provided for it. 
*/ +# define COMPILER_VERSION_PATCH DEC(0) +# endif +# if defined(__INTEL_COMPILER_BUILD_DATE) + /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */ +# define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE) +# endif +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +# endif +# if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif (defined(__clang__) && defined(__INTEL_CLANG_COMPILER)) || defined(__INTEL_LLVM_COMPILER) +# define COMPILER_ID "IntelLLVM" +#if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +#endif +#if defined(__GNUC__) +# define SIMULATE_ID "GNU" +#endif +/* __INTEL_LLVM_COMPILER = VVVVRP prior to 2021.2.0, VVVVRRPP for 2021.2.0 and + * later. Look for 6 digit vs. 8 digit version number to decide encoding. + * VVVV is no smaller than the current year when a version is released. + */ +#if __INTEL_LLVM_COMPILER < 1000000L +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 10) +#else +# define COMPILER_VERSION_MAJOR DEC(__INTEL_LLVM_COMPILER/10000) +# define COMPILER_VERSION_MINOR DEC(__INTEL_LLVM_COMPILER/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__INTEL_LLVM_COMPILER % 100) +#endif +#if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +#endif +#if defined(__GNUC__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +#elif defined(__GNUG__) +# define SIMULATE_VERSION_MAJOR DEC(__GNUG__) +#endif +#if defined(__GNUC_MINOR__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +#endif +#if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +#endif + +#elif defined(__PATHCC__) +# define COMPILER_ID "PathScale" +# define COMPILER_VERSION_MAJOR DEC(__PATHCC__) +# define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__) +# if defined(__PATHCC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__) +# endif + +#elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__) +# define COMPILER_ID "Embarcadero" +# define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF) +# define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF) +# define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF) + +#elif defined(__BORLANDC__) +# define COMPILER_ID "Borland" + /* __BORLANDC__ = 0xVRR */ +# define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8) +# define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF) + +#elif defined(__WATCOMC__) && __WATCOMC__ < 1200 +# define COMPILER_ID "Watcom" + /* __WATCOMC__ = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__WATCOMC__) +# define COMPILER_ID "OpenWatcom" + /* __WATCOMC__ = VVRP + 1100 */ +# define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define 
COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__SUNPRO_CC) +# define COMPILER_ID "SunPro" +# if __SUNPRO_CC >= 0x5100 + /* __SUNPRO_CC = 0xVRRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>12) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF) +# else + /* __SUNPRO_CC = 0xVRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>8) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF) +# endif + +#elif defined(__HP_aCC) +# define COMPILER_ID "HP" + /* __HP_aCC = VVRRPP */ +# define COMPILER_VERSION_MAJOR DEC(__HP_aCC/10000) +# define COMPILER_VERSION_MINOR DEC(__HP_aCC/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__HP_aCC % 100) + +#elif defined(__DECCXX) +# define COMPILER_ID "Compaq" + /* __DECCXX_VER = VVRRTPPPP */ +# define COMPILER_VERSION_MAJOR DEC(__DECCXX_VER/10000000) +# define COMPILER_VERSION_MINOR DEC(__DECCXX_VER/100000 % 100) +# define COMPILER_VERSION_PATCH DEC(__DECCXX_VER % 10000) + +#elif defined(__IBMCPP__) && defined(__COMPILER_VER__) +# define COMPILER_ID "zOS" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__open_xl__) && defined(__clang__) +# define COMPILER_ID "IBMClang" +# define COMPILER_VERSION_MAJOR DEC(__open_xl_version__) +# define COMPILER_VERSION_MINOR DEC(__open_xl_release__) +# define COMPILER_VERSION_PATCH DEC(__open_xl_modification__) +# define COMPILER_VERSION_TWEAK DEC(__open_xl_ptf_fix_level__) + + +#elif defined(__ibmxl__) && defined(__clang__) +# define COMPILER_ID "XLClang" +# define COMPILER_VERSION_MAJOR DEC(__ibmxl_version__) +# define COMPILER_VERSION_MINOR DEC(__ibmxl_release__) +# define COMPILER_VERSION_PATCH DEC(__ibmxl_modification__) +# define COMPILER_VERSION_TWEAK DEC(__ibmxl_ptf_fix_level__) + + +#elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ >= 800 +# define COMPILER_ID "XL" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ < 800 +# define COMPILER_ID "VisualAge" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__NVCOMPILER) +# define COMPILER_ID "NVHPC" +# define COMPILER_VERSION_MAJOR DEC(__NVCOMPILER_MAJOR__) +# define COMPILER_VERSION_MINOR DEC(__NVCOMPILER_MINOR__) +# if defined(__NVCOMPILER_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__NVCOMPILER_PATCHLEVEL__) +# endif + +#elif defined(__PGI) +# define COMPILER_ID "PGI" +# define COMPILER_VERSION_MAJOR DEC(__PGIC__) +# define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__) +# if defined(__PGIC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__) +# endif + +#elif defined(_CRAYC) +# define COMPILER_ID "Cray" +# define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR) +# define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR) + +#elif defined(__TI_COMPILER_VERSION__) +# define COMPILER_ID "TI" + /* __TI_COMPILER_VERSION__ = VVVRRRPPP */ +# define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000) +# define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 
1000) +# define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000) + +#elif defined(__CLANG_FUJITSU) +# define COMPILER_ID "FujitsuClang" +# define COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# define COMPILER_VERSION_INTERNAL_STR __clang_version__ + + +#elif defined(__FUJITSU) +# define COMPILER_ID "Fujitsu" +# if defined(__FCC_version__) +# define COMPILER_VERSION __FCC_version__ +# elif defined(__FCC_major__) +# define COMPILER_VERSION_MAJOR DEC(__FCC_major__) +# define COMPILER_VERSION_MINOR DEC(__FCC_minor__) +# define COMPILER_VERSION_PATCH DEC(__FCC_patchlevel__) +# endif +# if defined(__fcc_version) +# define COMPILER_VERSION_INTERNAL DEC(__fcc_version) +# elif defined(__FCC_VERSION) +# define COMPILER_VERSION_INTERNAL DEC(__FCC_VERSION) +# endif + + +#elif defined(__ghs__) +# define COMPILER_ID "GHS" +/* __GHS_VERSION_NUMBER = VVVVRP */ +# ifdef __GHS_VERSION_NUMBER +# define COMPILER_VERSION_MAJOR DEC(__GHS_VERSION_NUMBER / 100) +# define COMPILER_VERSION_MINOR DEC(__GHS_VERSION_NUMBER / 10 % 10) +# define COMPILER_VERSION_PATCH DEC(__GHS_VERSION_NUMBER % 10) +# endif + +#elif defined(__SCO_VERSION__) +# define COMPILER_ID "SCO" + +#elif defined(__ARMCC_VERSION) && !defined(__clang__) +# define COMPILER_ID "ARMCC" +#if __ARMCC_VERSION >= 1000000 + /* __ARMCC_VERSION = VRRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#else + /* __ARMCC_VERSION = VRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#endif + + +#elif defined(__clang__) && defined(__apple_build_version__) +# define COMPILER_ID "AppleClang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# define COMPILER_VERSION_TWEAK DEC(__apple_build_version__) + +#elif defined(__clang__) && defined(__ARMCOMPILER_VERSION) +# define COMPILER_ID "ARMClang" + # define COMPILER_VERSION_MAJOR DEC(__ARMCOMPILER_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCOMPILER_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCOMPILER_VERSION % 10000) +# define COMPILER_VERSION_INTERNAL DEC(__ARMCOMPILER_VERSION) + +#elif defined(__clang__) +# define COMPILER_ID "Clang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__LCC__) && (defined(__GNUC__) || defined(__GNUG__) || defined(__MCST__)) +# define COMPILER_ID "LCC" +# define COMPILER_VERSION_MAJOR DEC(1) +# if defined(__LCC__) +# define COMPILER_VERSION_MINOR DEC(__LCC__- 100) +# endif +# if defined(__LCC_MINOR__) +# define COMPILER_VERSION_PATCH DEC(__LCC_MINOR__) 
+# endif +# if defined(__GNUC__) && defined(__GNUC_MINOR__) +# define SIMULATE_ID "GNU" +# define SIMULATE_VERSION_MAJOR DEC(__GNUC__) +# define SIMULATE_VERSION_MINOR DEC(__GNUC_MINOR__) +# if defined(__GNUC_PATCHLEVEL__) +# define SIMULATE_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif +# endif + +#elif defined(__GNUC__) || defined(__GNUG__) +# define COMPILER_ID "GNU" +# if defined(__GNUC__) +# define COMPILER_VERSION_MAJOR DEC(__GNUC__) +# else +# define COMPILER_VERSION_MAJOR DEC(__GNUG__) +# endif +# if defined(__GNUC_MINOR__) +# define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif defined(_MSC_VER) +# define COMPILER_ID "MSVC" + /* _MSC_VER = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100) +# define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100) +# if defined(_MSC_FULL_VER) +# if _MSC_VER >= 1400 + /* _MSC_FULL_VER = VVRRPPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000) +# else + /* _MSC_FULL_VER = VVRRPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000) +# endif +# endif +# if defined(_MSC_BUILD) +# define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD) +# endif + +#elif defined(_ADI_COMPILER) +# define COMPILER_ID "ADSP" +#if defined(__VERSIONNUM__) + /* __VERSIONNUM__ = 0xVVRRPPTT */ +# define COMPILER_VERSION_MAJOR DEC(__VERSIONNUM__ >> 24 & 0xFF) +# define COMPILER_VERSION_MINOR DEC(__VERSIONNUM__ >> 16 & 0xFF) +# define COMPILER_VERSION_PATCH DEC(__VERSIONNUM__ >> 8 & 0xFF) +# define COMPILER_VERSION_TWEAK DEC(__VERSIONNUM__ & 0xFF) +#endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# define COMPILER_ID "IAR" +# if defined(__VER__) && defined(__ICCARM__) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 1000000) +# define COMPILER_VERSION_MINOR DEC(((__VER__) / 1000) % 1000) +# define COMPILER_VERSION_PATCH DEC((__VER__) % 1000) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# elif defined(__VER__) && (defined(__ICCAVR__) || defined(__ICCRX__) || defined(__ICCRH850__) || defined(__ICCRL78__) || defined(__ICC430__) || defined(__ICCRISCV__) || defined(__ICCV850__) || defined(__ICC8051__) || defined(__ICCSTM8__)) +# define COMPILER_VERSION_MAJOR DEC((__VER__) / 100) +# define COMPILER_VERSION_MINOR DEC((__VER__) - (((__VER__) / 100)*100)) +# define COMPILER_VERSION_PATCH DEC(__SUBVERSION__) +# define COMPILER_VERSION_INTERNAL DEC(__IAR_SYSTEMS_ICC__) +# endif + + +/* These compilers are either not known or too old to define an + identification macro. Try to identify the platform and guess that + it is the native compiler. */ +#elif defined(__hpux) || defined(__hpua) +# define COMPILER_ID "HP" + +#else /* unknown compiler */ +# define COMPILER_ID "" +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. */ +char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; +#ifdef SIMULATE_ID +char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; +#endif + +#ifdef __QNXNTO__ +char const* qnxnto = "INFO" ":" "qnxnto[]"; +#endif + +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) +char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]"; +#endif + +#define STRINGIFY_HELPER(X) #X +#define STRINGIFY(X) STRINGIFY_HELPER(X) + +/* Identify known platforms by name. 
*/ +#if defined(__linux) || defined(__linux__) || defined(linux) +# define PLATFORM_ID "Linux" + +#elif defined(__MSYS__) +# define PLATFORM_ID "MSYS" + +#elif defined(__CYGWIN__) +# define PLATFORM_ID "Cygwin" + +#elif defined(__MINGW32__) +# define PLATFORM_ID "MinGW" + +#elif defined(__APPLE__) +# define PLATFORM_ID "Darwin" + +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) +# define PLATFORM_ID "Windows" + +#elif defined(__FreeBSD__) || defined(__FreeBSD) +# define PLATFORM_ID "FreeBSD" + +#elif defined(__NetBSD__) || defined(__NetBSD) +# define PLATFORM_ID "NetBSD" + +#elif defined(__OpenBSD__) || defined(__OPENBSD) +# define PLATFORM_ID "OpenBSD" + +#elif defined(__sun) || defined(sun) +# define PLATFORM_ID "SunOS" + +#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) +# define PLATFORM_ID "AIX" + +#elif defined(__hpux) || defined(__hpux__) +# define PLATFORM_ID "HP-UX" + +#elif defined(__HAIKU__) +# define PLATFORM_ID "Haiku" + +#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) +# define PLATFORM_ID "BeOS" + +#elif defined(__QNX__) || defined(__QNXNTO__) +# define PLATFORM_ID "QNX" + +#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) +# define PLATFORM_ID "Tru64" + +#elif defined(__riscos) || defined(__riscos__) +# define PLATFORM_ID "RISCos" + +#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) +# define PLATFORM_ID "SINIX" + +#elif defined(__UNIX_SV__) +# define PLATFORM_ID "UNIX_SV" + +#elif defined(__bsdos__) +# define PLATFORM_ID "BSDOS" + +#elif defined(_MPRAS) || defined(MPRAS) +# define PLATFORM_ID "MP-RAS" + +#elif defined(__osf) || defined(__osf__) +# define PLATFORM_ID "OSF1" + +#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) +# define PLATFORM_ID "SCO_SV" + +#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) +# define PLATFORM_ID "ULTRIX" + +#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) +# define PLATFORM_ID "Xenix" + +#elif defined(__WATCOMC__) +# if defined(__LINUX__) +# define PLATFORM_ID "Linux" + +# elif defined(__DOS__) +# define PLATFORM_ID "DOS" + +# elif defined(__OS2__) +# define PLATFORM_ID "OS2" + +# elif defined(__WINDOWS__) +# define PLATFORM_ID "Windows3x" + +# elif defined(__VXWORKS__) +# define PLATFORM_ID "VxWorks" + +# else /* unknown platform */ +# define PLATFORM_ID +# endif + +#elif defined(__INTEGRITY) +# if defined(INT_178B) +# define PLATFORM_ID "Integrity178" + +# else /* regular Integrity */ +# define PLATFORM_ID "Integrity" +# endif + +# elif defined(_ADI_COMPILER) +# define PLATFORM_ID "ADSP" + +#else /* unknown platform */ +# define PLATFORM_ID + +#endif + +/* For windows compilers MSVC and Intel we can determine + the architecture of the compiler being used. 
This is because + the compilers do not have flags that can change the architecture, + but rather depend on which compiler is being used +*/ +#if defined(_WIN32) && defined(_MSC_VER) +# if defined(_M_IA64) +# define ARCHITECTURE_ID "IA64" + +# elif defined(_M_ARM64EC) +# define ARCHITECTURE_ID "ARM64EC" + +# elif defined(_M_X64) || defined(_M_AMD64) +# define ARCHITECTURE_ID "x64" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# elif defined(_M_ARM64) +# define ARCHITECTURE_ID "ARM64" + +# elif defined(_M_ARM) +# if _M_ARM == 4 +# define ARCHITECTURE_ID "ARMV4I" +# elif _M_ARM == 5 +# define ARCHITECTURE_ID "ARMV5I" +# else +# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) +# endif + +# elif defined(_M_MIPS) +# define ARCHITECTURE_ID "MIPS" + +# elif defined(_M_SH) +# define ARCHITECTURE_ID "SHx" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__WATCOMC__) +# if defined(_M_I86) +# define ARCHITECTURE_ID "I86" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__IAR_SYSTEMS_ICC__) || defined(__IAR_SYSTEMS_ICC) +# if defined(__ICCARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__ICCRX__) +# define ARCHITECTURE_ID "RX" + +# elif defined(__ICCRH850__) +# define ARCHITECTURE_ID "RH850" + +# elif defined(__ICCRL78__) +# define ARCHITECTURE_ID "RL78" + +# elif defined(__ICCRISCV__) +# define ARCHITECTURE_ID "RISCV" + +# elif defined(__ICCAVR__) +# define ARCHITECTURE_ID "AVR" + +# elif defined(__ICC430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__ICCV850__) +# define ARCHITECTURE_ID "V850" + +# elif defined(__ICC8051__) +# define ARCHITECTURE_ID "8051" + +# elif defined(__ICCSTM8__) +# define ARCHITECTURE_ID "STM8" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__ghs__) +# if defined(__PPC64__) +# define ARCHITECTURE_ID "PPC64" + +# elif defined(__ppc__) +# define ARCHITECTURE_ID "PPC" + +# elif defined(__ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__x86_64__) +# define ARCHITECTURE_ID "x64" + +# elif defined(__i386__) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__TI_COMPILER_VERSION__) +# if defined(__TI_ARM__) +# define ARCHITECTURE_ID "ARM" + +# elif defined(__MSP430__) +# define ARCHITECTURE_ID "MSP430" + +# elif defined(__TMS320C28XX__) +# define ARCHITECTURE_ID "TMS320C28x" + +# elif defined(__TMS320C6X__) || defined(_TMS320C6X) +# define ARCHITECTURE_ID "TMS320C6x" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +# elif defined(__ADSPSHARC__) +# define ARCHITECTURE_ID "SHARC" + +# elif defined(__ADSPBLACKFIN__) +# define ARCHITECTURE_ID "Blackfin" + +#else +# define ARCHITECTURE_ID +#endif + +/* Convert integer to decimal digit literals. */ +#define DEC(n) \ + ('0' + (((n) / 10000000)%10)), \ + ('0' + (((n) / 1000000)%10)), \ + ('0' + (((n) / 100000)%10)), \ + ('0' + (((n) / 10000)%10)), \ + ('0' + (((n) / 1000)%10)), \ + ('0' + (((n) / 100)%10)), \ + ('0' + (((n) / 10)%10)), \ + ('0' + ((n) % 10)) + +/* Convert integer to hex digit literals. */ +#define HEX(n) \ + ('0' + ((n)>>28 & 0xF)), \ + ('0' + ((n)>>24 & 0xF)), \ + ('0' + ((n)>>20 & 0xF)), \ + ('0' + ((n)>>16 & 0xF)), \ + ('0' + ((n)>>12 & 0xF)), \ + ('0' + ((n)>>8 & 0xF)), \ + ('0' + ((n)>>4 & 0xF)), \ + ('0' + ((n) & 0xF)) + +/* Construct a string literal encoding the version number. 
*/ +#ifdef COMPILER_VERSION +char const* info_version = "INFO" ":" "compiler_version[" COMPILER_VERSION "]"; + +/* Construct a string literal encoding the version number components. */ +#elif defined(COMPILER_VERSION_MAJOR) +char const info_version[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', + COMPILER_VERSION_MAJOR, +# ifdef COMPILER_VERSION_MINOR + '.', COMPILER_VERSION_MINOR, +# ifdef COMPILER_VERSION_PATCH + '.', COMPILER_VERSION_PATCH, +# ifdef COMPILER_VERSION_TWEAK + '.', COMPILER_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct a string literal encoding the internal version number. */ +#ifdef COMPILER_VERSION_INTERNAL +char const info_version_internal[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','_', + 'i','n','t','e','r','n','a','l','[', + COMPILER_VERSION_INTERNAL,']','\0'}; +#elif defined(COMPILER_VERSION_INTERNAL_STR) +char const* info_version_internal = "INFO" ":" "compiler_version_internal[" COMPILER_VERSION_INTERNAL_STR "]"; +#endif + +/* Construct a string literal encoding the version number components. */ +#ifdef SIMULATE_VERSION_MAJOR +char const info_simulate_version[] = { + 'I', 'N', 'F', 'O', ':', + 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', + SIMULATE_VERSION_MAJOR, +# ifdef SIMULATE_VERSION_MINOR + '.', SIMULATE_VERSION_MINOR, +# ifdef SIMULATE_VERSION_PATCH + '.', SIMULATE_VERSION_PATCH, +# ifdef SIMULATE_VERSION_TWEAK + '.', SIMULATE_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; +char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; + + + +#if defined(__INTEL_COMPILER) && defined(_MSVC_LANG) && _MSVC_LANG < 201403L +# if defined(__INTEL_CXX11_MODE__) +# if defined(__cpp_aggregate_nsdmi) +# define CXX_STD 201402L +# else +# define CXX_STD 201103L +# endif +# else +# define CXX_STD 199711L +# endif +#elif defined(_MSC_VER) && defined(_MSVC_LANG) +# define CXX_STD _MSVC_LANG +#else +# define CXX_STD __cplusplus +#endif + +const char* info_language_standard_default = "INFO" ":" "standard_default[" +#if CXX_STD > 202002L + "23" +#elif CXX_STD > 201703L + "20" +#elif CXX_STD >= 201703L + "17" +#elif CXX_STD >= 201402L + "14" +#elif CXX_STD >= 201103L + "11" +#else + "98" +#endif +"]"; + +const char* info_language_extensions_default = "INFO" ":" "extensions_default[" +#if (defined(__clang__) || defined(__GNUC__) || defined(__xlC__) || \ + defined(__TI_COMPILER_VERSION__)) && \ + !defined(__STRICT_ANSI__) + "ON" +#else + "OFF" +#endif +"]"; + +/*--------------------------------------------------------------------------*/ + +int main(int argc, char* argv[]) +{ + int require = 0; + require += info_compiler[argc]; + require += info_platform[argc]; +#ifdef COMPILER_VERSION_MAJOR + require += info_version[argc]; +#endif +#ifdef COMPILER_VERSION_INTERNAL + require += info_version_internal[argc]; +#endif +#ifdef SIMULATE_ID + require += info_simulate[argc]; +#endif +#ifdef SIMULATE_VERSION_MAJOR + require += info_simulate_version[argc]; +#endif +#if defined(__CRAYXT_COMPUTE_LINUX_TARGET) + require += info_cray[argc]; +#endif + require += info_language_standard_default[argc]; + require += info_language_extensions_default[argc]; + (void)argv; + return require; +} diff --git a/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/a.out b/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/a.out new file mode 100755 index 0000000000000000000000000000000000000000..ab5c4c316cd6c23c21706711a7eeef6d10f7847f GIT binary patch literal 16784 zcmeHOZ)_Y#6`%9PiPPG5moy=6Qkrd=ND0LoJ2>GIYI0}$oIPYaY2r{r>Mm>F**-Ym znY%q=N2CO~6ijMJ)k^dONKuiH5Unb{6sc0dQ6McSrOlU6R1g-WB1Iu}OM(ofIo_Lj z@3OnT1o7tsh#l$fH}iXcX7 z$H?)-nBXT$@${mmyi}U+wd*H4FLsh0Bi4cGz?*jJU#V{&*`3v-8{x%!6)hOKzWczA zx|TF<4&2TAslFTaLxkebNZ}P*vzfsity{CHrfeoxnrNDc?r7T4su%P6Hd!&Y3+uo! zwWs%>05`M9Q5a=ktfDf?BOS{>eI)qwnRXUZ`6{TUmwg15T^L58Z&j75EKR^aaFk2*b5QnC8%g zWe#U@RyK1a4Mh~0McXRa=9rbqi9OxP_6~EKzD?gQ%w%7WnMxPZBblO|F7)+uWb?Un zpEa08%aO5sjx0^3T2)cDM@5li8Gs0440&9X)@GJtbU#RK=lhfI3vGoGEYN++{2V!` zizzwtT#k5nO;dV}9=<@l=Hb6g{ALgTIPuLM{u$z5+bv|mF@o30Y0klOEKqjA!Sk3! 
ze%`_Jy@&jQgO|UfNczc-fFA)r0)7Pi2>223Bj88ie=Y)l3g7uxW9nj^alH1ITZJ&b zG;7zCUp1zFTz6VtOL_ZKK)+kw^lgCfdV%^-dO?;tg)F!HF*$wiL1X&2#?{G=Mi=;*sG!HUd%()%gb}&^~bQ{(}bY^ znAG3?RfH|qjOlsf)TQ0VsfC~sIAffSbKxm>=Gg0C~uO<(|QUG{Gs1|d2+2zJv)FB;ROxfc^xv6;s4 z)jxxo`tig?<4Gg%YWO)3{^4vOaTWOItMN*4X7x`(5Y6Y7L@L_ftfmcK9<9b7Ph1^1 zt*#TB6~DiGrtNN+$;3aNoIW2vn4G>6?~6}gJ!s4{tpfT$_m=mtk1vF;mCMGd_k#BN zme(o1-P7-OPhaYs{(Zc>>Nm#J>435G*QGyT{~sENKM)^?KNvSp4-f0%^$+9q%KpZY zf%VF9QyD2z?0@+Y@FUk0*0Y3tM1pEm25%446N5GH3e`y2)uGPlHjZF4-?cFt$ z9~;kP(}hE@N;%-xammfkr3=MOK6j{DrJXuuS~=4^Af&~3*0P85g|S23xVlQDRTvsQ zM0r>q%%!YC3f2OLty0!L)Y3|l^n{(xVJ*dqY;Uh@RbXB4Be>p)YhZnEm&;ke6yOtp zHXyET4$PO!PXk^CJd5&o%H>t?ngZ8qF$Iq77lDaLU|s#Px)a3X9)w1)$2SL}bD3NL z+z8oL_{?1@mn9L3bcGr}7QXdx-7&Fy?XC}eXyXP{N52oi=Oxxd5)%5f;qzVa`xp|5 zP~?%Cj)tW*UxAvT8lawlj|S~bqr58=`Fc$<)cCbvBBaeMNrW~(TH6_Feq^Z;icT%t z6N=?R(Ripi9@^X<(%M6f?V(6}s7|hT{tC0g2@?{S6s63K5-v^YwL`%N)5DiEs)8OTh`(tgEdB&ef`xE)ctVAupI z6Z^Z9^$Gu0R_z6KX$mW-vi%pRy|6MY%N#$LX36q-lK+LuTrau(&jIfb|2IAI{#8v= zj(C46??>hRr_YdlPe;ctZS%pwQqC@EJ9U`gH?@>xspX+<&3bdI-m*pU|F~g6*s3@3XK+e`@V0~Rdp8evX_avr6z{9tr*8cPJLOgAAl?3cyEo{ZqgPpdT;aMT!on=KmEbzim-J&H*35Ut3Q< zzD4@{e;|wSbya2-kE<{=G*G4AUj)8JEWR%`0lyOB)=d9D{C}|>c#SJ_;H2aikDvR1 zkBBvu{$i7@z^|-Xe82P~&q@wFOn$uM^I_mMhaP7i2YyA(8o|G>%%4OaTEs^5cOjmY z{2JjsFYp5J7>8Lpmyjk$FOzN`BoBKB0mw*XFpeg zM?c>CuC7M4XA8AQzQY7x4Z!2|di8Hd9^&sk*AN9>qc%8UKkyrz1m$z&M~f+f+bM<$ zcF``uPNgAn{rHG!kC{VwfTReAM^brnB%2?!vS!N87mB7;nh^MAJDaxCDLvY}qoukM z9$m?pR-s@`n(3Tfm=wbWYbMB@Lt|b805j|+%L>M z-Fw^P-R9n|t^oj|Yh=*>{EB(Le_hi%d+%eL6$`e~h5%ynGHL@jkB2Xx)% zbIepRZ;rxyd_238+zVQ%OwKG7(ZGG20phv>&G=NOy2fe zz~LN;?Rg%>cmjoGnA_BhAlGCx-{YE+rG{X~ly$Y4y_o}a%NdEUqMd$0c+V2jtpa{P;yiIC^J#4hUp zZ-KyRuaO-iyJLGUGd|_9=jVP#*1M7Y_dND7GGM&o=`gRq?|bZd9>K`(1Gs+g`27Ld zbNq!ynuI^4A_LJ}6hDF-Hj(Ymk^s3;G#;tB2 N+o?3_JqDhl_&b*2t_uJF literal 0 HcmV?d00001 diff --git a/modern-cpp-kafka/builddir/CMakeFiles/CMakeOutput.log b/modern-cpp-kafka/builddir/CMakeFiles/CMakeOutput.log new file mode 100644 index 00000000..710bbfba --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeFiles/CMakeOutput.log @@ -0,0 +1,441 @@ +The system is: Linux - 5.13.0-51-generic - x86_64 +Compiling the C compiler identification source file "CMakeCCompilerId.c" succeeded. +Compiler: /usr/bin/cc +Build flags: +Id flags: + +The output was: +0 + + +Compilation of the C compiler identification source "CMakeCCompilerId.c" produced "a.out" + +The C compiler identification is GNU, found in "/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdC/a.out" + +Compiling the CXX compiler identification source file "CMakeCXXCompilerId.cpp" succeeded. 
+Compiler: /usr/bin/c++ +Build flags: +Id flags: + +The output was: +0 + + +Compilation of the CXX compiler identification source "CMakeCXXCompilerId.cpp" produced "a.out" + +The CXX compiler identification is GNU, found in "/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/3.24.0-rc2/CompilerIdCXX/a.out" + +Detecting C compiler ABI info compiled with the following output: +Change Dir: /home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp + +Run Build Command(s):/usr/bin/make -f Makefile cmTC_fdc06/fast && /usr/bin/make -f CMakeFiles/cmTC_fdc06.dir/build.make CMakeFiles/cmTC_fdc06.dir/build +make[1]: Entering directory '/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o +/usr/bin/cc -v -o CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -c /usr/local/share/cmake-3.24/Modules/CMakeCCompilerABI.c +Using built-in specs. +COLLECT_GCC=/usr/bin/cc +OFFLOAD_TARGET_NAMES=nvptx-none:hsa +OFFLOAD_TARGET_DEFAULT=1 +Target: x86_64-linux-gnu +Configured with: ../src/configure -v --with-pkgversion='Ubuntu 9.4.0-1ubuntu1~20.04.1' --with-bugurl=file:///usr/share/doc/gcc-9/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,gm2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-9 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-9-Av3uEd/gcc-9-9.4.0/debian/tmp-nvptx/usr,hsa --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu +Thread model: posix +gcc version 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.1) +COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' + /usr/lib/gcc/x86_64-linux-gnu/9/cc1 -quiet -v -imultiarch x86_64-linux-gnu /usr/local/share/cmake-3.24/Modules/CMakeCCompilerABI.c -quiet -dumpbase CMakeCCompilerABI.c -mtune=generic -march=x86-64 -auxbase-strip CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -version -fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection -o /tmp/ccL4XwRx.s +GNU C17 (Ubuntu 9.4.0-1ubuntu1~20.04.1) version 9.4.0 (x86_64-linux-gnu) + compiled by GNU C version 9.4.0, GMP version 6.2.0, MPFR version 4.0.2, MPC version 1.1.0, isl version isl-0.22.1-GMP + +GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 +ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu" +ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/9/include-fixed" +ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/9/../../../../x86_64-linux-gnu/include" +#include "..." search starts here: +#include <...> search starts here: + /usr/lib/gcc/x86_64-linux-gnu/9/include + /usr/local/include + /usr/include/x86_64-linux-gnu + /usr/include +End of search list. 
+GNU C17 (Ubuntu 9.4.0-1ubuntu1~20.04.1) version 9.4.0 (x86_64-linux-gnu) + compiled by GNU C version 9.4.0, GMP version 6.2.0, MPFR version 4.0.2, MPC version 1.1.0, isl version isl-0.22.1-GMP + +GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 +Compiler executable checksum: c0c95c0b4209efec1c1892d5ff24030b +COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' + as -v --64 -o CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o /tmp/ccL4XwRx.s +GNU assembler version 2.34 (x86_64-linux-gnu) using BFD version (GNU Binutils for Ubuntu) 2.34 +COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/ +LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../:/lib/:/usr/lib/ +COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64' +Linking C executable cmTC_fdc06 +/usr/local/bin/cmake -E cmake_link_script CMakeFiles/cmTC_fdc06.dir/link.txt --verbose=1 +/usr/bin/cc -v CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -o cmTC_fdc06 +Using built-in specs. +COLLECT_GCC=/usr/bin/cc +COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/9/lto-wrapper +OFFLOAD_TARGET_NAMES=nvptx-none:hsa +OFFLOAD_TARGET_DEFAULT=1 +Target: x86_64-linux-gnu +Configured with: ../src/configure -v --with-pkgversion='Ubuntu 9.4.0-1ubuntu1~20.04.1' --with-bugurl=file:///usr/share/doc/gcc-9/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,gm2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-9 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-9-Av3uEd/gcc-9-9.4.0/debian/tmp-nvptx/usr,hsa --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu +Thread model: posix +gcc version 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.1) +COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/ +LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../:/lib/:/usr/lib/ +COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_fdc06' '-mtune=generic' '-march=x86-64' + /usr/lib/gcc/x86_64-linux-gnu/9/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/9/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/9/lto-wrapper -plugin-opt=-fresolution=/tmp/cc4IEwTY.res -plugin-opt=-pass-through=-lgcc 
-plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -dynamic-linker /lib64/ld-linux-x86-64.so.2 -pie -z now -z relro -o cmTC_fdc06 /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/Scrt1.o /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/9/crtbeginS.o -L/usr/lib/gcc/x86_64-linux-gnu/9 -L/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/lib/gcc/x86_64-linux-gnu/9/../../.. CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -lgcc --push-state --as-needed -lgcc_s --pop-state -lc -lgcc --push-state --as-needed -lgcc_s --pop-state /usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o +COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_fdc06' '-mtune=generic' '-march=x86-64' +make[1]: Leaving directory '/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp' + + + +Parsed C implicit include dir info from above output: rv=done + found start of include info + found start of implicit include info + add: [/usr/lib/gcc/x86_64-linux-gnu/9/include] + add: [/usr/local/include] + add: [/usr/include/x86_64-linux-gnu] + add: [/usr/include] + end of search list found + collapse include dir [/usr/lib/gcc/x86_64-linux-gnu/9/include] ==> [/usr/lib/gcc/x86_64-linux-gnu/9/include] + collapse include dir [/usr/local/include] ==> [/usr/local/include] + collapse include dir [/usr/include/x86_64-linux-gnu] ==> [/usr/include/x86_64-linux-gnu] + collapse include dir [/usr/include] ==> [/usr/include] + implicit include dirs: [/usr/lib/gcc/x86_64-linux-gnu/9/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include] + + +Parsed C implicit link information from above output: + link line regex: [^( *|.*[/\])(ld|CMAKE_LINK_STARTFILE-NOTFOUND|([^/\]+-)?ld|collect2)[^/\]*( |$)] + ignore line: [Change Dir: /home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp] + ignore line: [] + ignore line: [Run Build Command(s):/usr/bin/make -f Makefile cmTC_fdc06/fast && /usr/bin/make -f CMakeFiles/cmTC_fdc06.dir/build.make CMakeFiles/cmTC_fdc06.dir/build] + ignore line: [make[1]: Entering directory '/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp'] + ignore line: [Building C object CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o] + ignore line: [/usr/bin/cc -v -o CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -c /usr/local/share/cmake-3.24/Modules/CMakeCCompilerABI.c] + ignore line: [Using built-in specs.] 
+ ignore line: [COLLECT_GCC=/usr/bin/cc] + ignore line: [OFFLOAD_TARGET_NAMES=nvptx-none:hsa] + ignore line: [OFFLOAD_TARGET_DEFAULT=1] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 9.4.0-1ubuntu1~20.04.1' --with-bugurl=file:///usr/share/doc/gcc-9/README.Bugs --enable-languages=c ada c++ go brig d fortran objc obj-c++ gm2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-9 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32 m64 mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-9-Av3uEd/gcc-9-9.4.0/debian/tmp-nvptx/usr hsa --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu] + ignore line: [Thread model: posix] + ignore line: [gcc version 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.1) ] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64'] + ignore line: [ /usr/lib/gcc/x86_64-linux-gnu/9/cc1 -quiet -v -imultiarch x86_64-linux-gnu /usr/local/share/cmake-3.24/Modules/CMakeCCompilerABI.c -quiet -dumpbase CMakeCCompilerABI.c -mtune=generic -march=x86-64 -auxbase-strip CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -version -fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection -o /tmp/ccL4XwRx.s] + ignore line: [GNU C17 (Ubuntu 9.4.0-1ubuntu1~20.04.1) version 9.4.0 (x86_64-linux-gnu)] + ignore line: [ compiled by GNU C version 9.4.0 GMP version 6.2.0 MPFR version 4.0.2 MPC version 1.1.0 isl version isl-0.22.1-GMP] + ignore line: [] + ignore line: [GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072] + ignore line: [ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu"] + ignore line: [ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/9/include-fixed"] + ignore line: [ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/9/../../../../x86_64-linux-gnu/include"] + ignore line: [#include "..." search starts here:] + ignore line: [#include <...> search starts here:] + ignore line: [ /usr/lib/gcc/x86_64-linux-gnu/9/include] + ignore line: [ /usr/local/include] + ignore line: [ /usr/include/x86_64-linux-gnu] + ignore line: [ /usr/include] + ignore line: [End of search list.] 
+ ignore line: [GNU C17 (Ubuntu 9.4.0-1ubuntu1~20.04.1) version 9.4.0 (x86_64-linux-gnu)] + ignore line: [ compiled by GNU C version 9.4.0 GMP version 6.2.0 MPFR version 4.0.2 MPC version 1.1.0 isl version isl-0.22.1-GMP] + ignore line: [] + ignore line: [GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072] + ignore line: [Compiler executable checksum: c0c95c0b4209efec1c1892d5ff24030b] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64'] + ignore line: [ as -v --64 -o CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o /tmp/ccL4XwRx.s] + ignore line: [GNU assembler version 2.34 (x86_64-linux-gnu) using BFD version (GNU Binutils for Ubuntu) 2.34] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: [LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o' '-c' '-mtune=generic' '-march=x86-64'] + ignore line: [Linking C executable cmTC_fdc06] + ignore line: [/usr/local/bin/cmake -E cmake_link_script CMakeFiles/cmTC_fdc06.dir/link.txt --verbose=1] + ignore line: [/usr/bin/cc -v CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -o cmTC_fdc06 ] + ignore line: [Using built-in specs.] + ignore line: [COLLECT_GCC=/usr/bin/cc] + ignore line: [COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/9/lto-wrapper] + ignore line: [OFFLOAD_TARGET_NAMES=nvptx-none:hsa] + ignore line: [OFFLOAD_TARGET_DEFAULT=1] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 9.4.0-1ubuntu1~20.04.1' --with-bugurl=file:///usr/share/doc/gcc-9/README.Bugs --enable-languages=c ada c++ go brig d fortran objc obj-c++ gm2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-9 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32 m64 mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-9-Av3uEd/gcc-9-9.4.0/debian/tmp-nvptx/usr hsa --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu] + ignore line: [Thread model: posix] + ignore line: [gcc version 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.1) ] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: 
[LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_fdc06' '-mtune=generic' '-march=x86-64'] + link line: [ /usr/lib/gcc/x86_64-linux-gnu/9/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/9/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/9/lto-wrapper -plugin-opt=-fresolution=/tmp/cc4IEwTY.res -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -dynamic-linker /lib64/ld-linux-x86-64.so.2 -pie -z now -z relro -o cmTC_fdc06 /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/Scrt1.o /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/9/crtbeginS.o -L/usr/lib/gcc/x86_64-linux-gnu/9 -L/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/lib/gcc/x86_64-linux-gnu/9/../../.. CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o -lgcc --push-state --as-needed -lgcc_s --pop-state -lc -lgcc --push-state --as-needed -lgcc_s --pop-state /usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/9/collect2] ==> ignore + arg [-plugin] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/9/liblto_plugin.so] ==> ignore + arg [-plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/9/lto-wrapper] ==> ignore + arg [-plugin-opt=-fresolution=/tmp/cc4IEwTY.res] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [-plugin-opt=-pass-through=-lc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [--build-id] ==> ignore + arg [--eh-frame-hdr] ==> ignore + arg [-m] ==> ignore + arg [elf_x86_64] ==> ignore + arg [--hash-style=gnu] ==> ignore + arg [--as-needed] ==> ignore + arg [-dynamic-linker] ==> ignore + arg [/lib64/ld-linux-x86-64.so.2] ==> ignore + arg [-pie] ==> ignore + arg [-znow] ==> ignore + arg [-zrelro] ==> ignore + arg [-o] ==> ignore + arg [cmTC_fdc06] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/Scrt1.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/Scrt1.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crti.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crti.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/9/crtbeginS.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/9/crtbeginS.o] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/9] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/9] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib] + arg [-L/lib/x86_64-linux-gnu] ==> dir [/lib/x86_64-linux-gnu] + arg [-L/lib/../lib] ==> dir [/lib/../lib] + arg [-L/usr/lib/x86_64-linux-gnu] ==> dir [/usr/lib/x86_64-linux-gnu] + arg [-L/usr/lib/../lib] ==> dir [/usr/lib/../lib] + arg 
[-L/usr/lib/gcc/x86_64-linux-gnu/9/../../..] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../..] + arg [CMakeFiles/cmTC_fdc06.dir/CMakeCCompilerABI.c.o] ==> ignore + arg [-lgcc] ==> lib [gcc] + arg [--push-state] ==> ignore + arg [--as-needed] ==> ignore + arg [-lgcc_s] ==> lib [gcc_s] + arg [--pop-state] ==> ignore + arg [-lc] ==> lib [c] + arg [-lgcc] ==> lib [gcc] + arg [--push-state] ==> ignore + arg [--as-needed] ==> ignore + arg [-lgcc_s] ==> lib [gcc_s] + arg [--pop-state] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/Scrt1.o] ==> [/usr/lib/x86_64-linux-gnu/Scrt1.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crti.o] ==> [/usr/lib/x86_64-linux-gnu/crti.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o] ==> [/usr/lib/x86_64-linux-gnu/crtn.o] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9] ==> [/usr/lib/gcc/x86_64-linux-gnu/9] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib] ==> [/usr/lib] + collapse library dir [/lib/x86_64-linux-gnu] ==> [/lib/x86_64-linux-gnu] + collapse library dir [/lib/../lib] ==> [/lib] + collapse library dir [/usr/lib/x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/../lib] ==> [/usr/lib] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../..] ==> [/usr/lib] + implicit libs: [gcc;gcc_s;c;gcc;gcc_s] + implicit objs: [/usr/lib/x86_64-linux-gnu/Scrt1.o;/usr/lib/x86_64-linux-gnu/crti.o;/usr/lib/gcc/x86_64-linux-gnu/9/crtbeginS.o;/usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o;/usr/lib/x86_64-linux-gnu/crtn.o] + implicit dirs: [/usr/lib/gcc/x86_64-linux-gnu/9;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib] + implicit fwks: [] + + +Detecting CXX compiler ABI info compiled with the following output: +Change Dir: /home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp + +Run Build Command(s):/usr/bin/make -f Makefile cmTC_5b6c0/fast && /usr/bin/make -f CMakeFiles/cmTC_5b6c0.dir/build.make CMakeFiles/cmTC_5b6c0.dir/build +make[1]: Entering directory '/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp' +Building CXX object CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o +/usr/bin/c++ -v -o CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o -c /usr/local/share/cmake-3.24/Modules/CMakeCXXCompilerABI.cpp +Using built-in specs. 
+COLLECT_GCC=/usr/bin/c++ +OFFLOAD_TARGET_NAMES=nvptx-none:hsa +OFFLOAD_TARGET_DEFAULT=1 +Target: x86_64-linux-gnu +Configured with: ../src/configure -v --with-pkgversion='Ubuntu 9.4.0-1ubuntu1~20.04.1' --with-bugurl=file:///usr/share/doc/gcc-9/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,gm2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-9 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-9-Av3uEd/gcc-9-9.4.0/debian/tmp-nvptx/usr,hsa --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu +Thread model: posix +gcc version 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.1) +COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' + /usr/lib/gcc/x86_64-linux-gnu/9/cc1plus -quiet -v -imultiarch x86_64-linux-gnu -D_GNU_SOURCE /usr/local/share/cmake-3.24/Modules/CMakeCXXCompilerABI.cpp -quiet -dumpbase CMakeCXXCompilerABI.cpp -mtune=generic -march=x86-64 -auxbase-strip CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o -version -fasynchronous-unwind-tables -fstack-protector-strong -Wformat -Wformat-security -fstack-clash-protection -fcf-protection -o /tmp/ccA78Ifx.s +GNU C++14 (Ubuntu 9.4.0-1ubuntu1~20.04.1) version 9.4.0 (x86_64-linux-gnu) + compiled by GNU C version 9.4.0, GMP version 6.2.0, MPFR version 4.0.2, MPC version 1.1.0, isl version isl-0.22.1-GMP + +GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 +ignoring duplicate directory "/usr/include/x86_64-linux-gnu/c++/9" +ignoring nonexistent directory "/usr/local/include/x86_64-linux-gnu" +ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/9/include-fixed" +ignoring nonexistent directory "/usr/lib/gcc/x86_64-linux-gnu/9/../../../../x86_64-linux-gnu/include" +#include "..." search starts here: +#include <...> search starts here: + /usr/include/c++/9 + /usr/include/x86_64-linux-gnu/c++/9 + /usr/include/c++/9/backward + /usr/lib/gcc/x86_64-linux-gnu/9/include + /usr/local/include + /usr/include/x86_64-linux-gnu + /usr/include +End of search list. 
+GNU C++14 (Ubuntu 9.4.0-1ubuntu1~20.04.1) version 9.4.0 (x86_64-linux-gnu) + compiled by GNU C version 9.4.0, GMP version 6.2.0, MPFR version 4.0.2, MPC version 1.1.0, isl version isl-0.22.1-GMP + +GGC heuristics: --param ggc-min-expand=100 --param ggc-min-heapsize=131072 +Compiler executable checksum: 65fe925b83d3956b533de4aaba7dace0 +COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' + as -v --64 -o CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o /tmp/ccA78Ifx.s +GNU assembler version 2.34 (x86_64-linux-gnu) using BFD version (GNU Binutils for Ubuntu) 2.34 +COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/ +LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../:/lib/:/usr/lib/ +COLLECT_GCC_OPTIONS='-v' '-o' 'CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o' '-c' '-shared-libgcc' '-mtune=generic' '-march=x86-64' +Linking CXX executable cmTC_5b6c0 +/usr/local/bin/cmake -E cmake_link_script CMakeFiles/cmTC_5b6c0.dir/link.txt --verbose=1 +/usr/bin/c++ -v CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o -o cmTC_5b6c0 +Using built-in specs. +COLLECT_GCC=/usr/bin/c++ +COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/9/lto-wrapper +OFFLOAD_TARGET_NAMES=nvptx-none:hsa +OFFLOAD_TARGET_DEFAULT=1 +Target: x86_64-linux-gnu +Configured with: ../src/configure -v --with-pkgversion='Ubuntu 9.4.0-1ubuntu1~20.04.1' --with-bugurl=file:///usr/share/doc/gcc-9/README.Bugs --enable-languages=c,ada,c++,go,brig,d,fortran,objc,obj-c++,gm2 --prefix=/usr --with-gcc-major-version-only --program-suffix=-9 --program-prefix=x86_64-linux-gnu- --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-plugin --enable-default-pie --with-system-zlib --with-target-system-zlib=auto --enable-objc-gc=auto --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-offload-targets=nvptx-none=/build/gcc-9-Av3uEd/gcc-9-9.4.0/debian/tmp-nvptx/usr,hsa --without-cuda-driver --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu +Thread model: posix +gcc version 9.4.0 (Ubuntu 9.4.0-1ubuntu1~20.04.1) +COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/ +LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/9/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/9/../../../:/lib/:/usr/lib/ +COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_5b6c0' '-shared-libgcc' '-mtune=generic' '-march=x86-64' + /usr/lib/gcc/x86_64-linux-gnu/9/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/9/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/9/lto-wrapper 
-plugin-opt=-fresolution=/tmp/ccrE6O7Z.res -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -dynamic-linker /lib64/ld-linux-x86-64.so.2 -pie -z now -z relro -o cmTC_5b6c0 /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/Scrt1.o /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/9/crtbeginS.o -L/usr/lib/gcc/x86_64-linux-gnu/9 -L/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/lib/gcc/x86_64-linux-gnu/9/../../.. CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o -lstdc++ -lm -lgcc_s -lgcc -lc -lgcc_s -lgcc /usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o /usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o +COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_5b6c0' '-shared-libgcc' '-mtune=generic' '-march=x86-64' +make[1]: Leaving directory '/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp' + + + +Parsed CXX implicit include dir info from above output: rv=done + found start of include info + found start of implicit include info + add: [/usr/include/c++/9] + add: [/usr/include/x86_64-linux-gnu/c++/9] + add: [/usr/include/c++/9/backward] + add: [/usr/lib/gcc/x86_64-linux-gnu/9/include] + add: [/usr/local/include] + add: [/usr/include/x86_64-linux-gnu] + add: [/usr/include] + end of search list found + collapse include dir [/usr/include/c++/9] ==> [/usr/include/c++/9] + collapse include dir [/usr/include/x86_64-linux-gnu/c++/9] ==> [/usr/include/x86_64-linux-gnu/c++/9] + collapse include dir [/usr/include/c++/9/backward] ==> [/usr/include/c++/9/backward] + collapse include dir [/usr/lib/gcc/x86_64-linux-gnu/9/include] ==> [/usr/lib/gcc/x86_64-linux-gnu/9/include] + collapse include dir [/usr/local/include] ==> [/usr/local/include] + collapse include dir [/usr/include/x86_64-linux-gnu] ==> [/usr/include/x86_64-linux-gnu] + collapse include dir [/usr/include] ==> [/usr/include] + implicit include dirs: [/usr/include/c++/9;/usr/include/x86_64-linux-gnu/c++/9;/usr/include/c++/9/backward;/usr/lib/gcc/x86_64-linux-gnu/9/include;/usr/local/include;/usr/include/x86_64-linux-gnu;/usr/include] + + +Parsed CXX implicit link information from above output: + link line regex: [^( *|.*[/\])(ld|CMAKE_LINK_STARTFILE-NOTFOUND|([^/\]+-)?ld|collect2)[^/\]*( |$)] + ignore line: [Change Dir: /home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp] + ignore line: [] + ignore line: [Run Build Command(s):/usr/bin/make -f Makefile cmTC_5b6c0/fast && /usr/bin/make -f CMakeFiles/cmTC_5b6c0.dir/build.make CMakeFiles/cmTC_5b6c0.dir/build] + ignore line: [make[1]: Entering directory '/home/fpetrini/Desktop/Triton/modern-cpp-kafka/builddir/CMakeFiles/CMakeTmp'] + ignore line: [Building CXX object CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o] + ignore line: [/usr/bin/c++ -v -o CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o -c /usr/local/share/cmake-3.24/Modules/CMakeCXXCompilerABI.cpp] + ignore line: [Using built-in specs.] 
==> dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../..] + arg [CMakeFiles/cmTC_5b6c0.dir/CMakeCXXCompilerABI.cpp.o] ==> ignore + arg [-lstdc++] ==> lib [stdc++] + arg [-lm] ==> lib [m] + arg [-lgcc_s] ==> lib [gcc_s] + arg [-lgcc] ==> lib [gcc] + arg [-lc] ==> lib [c] + arg [-lgcc_s] ==> lib [gcc_s] + arg [-lgcc] ==> lib [gcc] + arg [/usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o] ==> obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/Scrt1.o] ==> [/usr/lib/x86_64-linux-gnu/Scrt1.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crti.o] ==> [/usr/lib/x86_64-linux-gnu/crti.o] + collapse obj [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu/crtn.o] ==> [/usr/lib/x86_64-linux-gnu/crtn.o] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9] ==> [/usr/lib/gcc/x86_64-linux-gnu/9] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../../x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib] ==> [/usr/lib] + collapse library dir [/lib/x86_64-linux-gnu] ==> [/lib/x86_64-linux-gnu] + collapse library dir [/lib/../lib] ==> [/lib] + collapse library dir [/usr/lib/x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/../lib] ==> [/usr/lib] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/9/../../..] ==> [/usr/lib] + implicit libs: [stdc++;m;gcc_s;gcc;c;gcc_s;gcc] + implicit objs: [/usr/lib/x86_64-linux-gnu/Scrt1.o;/usr/lib/x86_64-linux-gnu/crti.o;/usr/lib/gcc/x86_64-linux-gnu/9/crtbeginS.o;/usr/lib/gcc/x86_64-linux-gnu/9/crtendS.o;/usr/lib/x86_64-linux-gnu/crtn.o] + implicit dirs: [/usr/lib/gcc/x86_64-linux-gnu/9;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib] + implicit fwks: [] + + diff --git a/modern-cpp-kafka/builddir/CMakeFiles/cmake.check_cache b/modern-cpp-kafka/builddir/CMakeFiles/cmake.check_cache new file mode 100644 index 00000000..3dccd731 --- /dev/null +++ b/modern-cpp-kafka/builddir/CMakeFiles/cmake.check_cache @@ -0,0 +1 @@ +# This file is generated by cmake for dependency checking of the CMakeCache.txt file diff --git a/modern-cpp-kafka/customrules/BUILD.bazel b/modern-cpp-kafka/customrules/BUILD.bazel new file mode 100644 index 00000000..e69de29b diff --git a/modern-cpp-kafka/customrules/rapidjson.BUILD b/modern-cpp-kafka/customrules/rapidjson.BUILD new file mode 100644 index 00000000..01028dab --- /dev/null +++ b/modern-cpp-kafka/customrules/rapidjson.BUILD @@ -0,0 +1,10 @@ +cc_library( + name = "rapidjson", + + hdrs = glob(["include/rapidjson/**"]), + + includes = ["include"], + + visibility = ["//visibility:public"], +) + diff --git a/modern-cpp-kafka/demo_projects_for_build/conan_build/CMakeLists.txt b/modern-cpp-kafka/demo_projects_for_build/conan_build/CMakeLists.txt new file mode 100644 index 00000000..3ad67345 --- /dev/null +++ b/modern-cpp-kafka/demo_projects_for_build/conan_build/CMakeLists.txt @@ -0,0 +1,28 @@ +cmake_minimum_required(VERSION "3.8") +project("kafka-examples") + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED True) + +include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake) +conan_basic_setup() + +# Target: kafka_sync_producer +add_executable("kafka_sync_producer" "../../examples/kafka_sync_producer.cc") +target_link_libraries("kafka_sync_producer" ${CONAN_LIBS}) + +# Target: 
kafka_async_producer_copy_payload +add_executable("kafka_async_producer_copy_payload" "../../examples/kafka_async_producer_copy_payload.cc") +target_link_libraries("kafka_async_producer_copy_payload" ${CONAN_LIBS}) + +# Target: kafka_async_producer_not_copy_payload +add_executable("kafka_async_producer_not_copy_payload" "../../examples/kafka_async_producer_not_copy_payload.cc") +target_link_libraries("kafka_async_producer_not_copy_payload" ${CONAN_LIBS}) + +# Target: kafka_auto_commit_consumer +add_executable("kafka_auto_commit_consumer" "../../examples/kafka_auto_commit_consumer.cc") +target_link_libraries("kafka_auto_commit_consumer" ${CONAN_LIBS}) + +# Target: kafka_manual_commit_consumer +add_executable("kafka_manual_commit_consumer" "../../examples/kafka_manual_commit_consumer.cc") +target_link_libraries("kafka_manual_commit_consumer" ${CONAN_LIBS}) diff --git a/modern-cpp-kafka/demo_projects_for_build/conan_build/conanfile.txt b/modern-cpp-kafka/demo_projects_for_build/conan_build/conanfile.txt new file mode 100644 index 00000000..857cf1ea --- /dev/null +++ b/modern-cpp-kafka/demo_projects_for_build/conan_build/conanfile.txt @@ -0,0 +1,5 @@ +[requires] +modern-cpp-kafka/2022.06.15 + +[generators] +cmake diff --git a/modern-cpp-kafka/doc/CMakeLists.txt b/modern-cpp-kafka/doc/CMakeLists.txt new file mode 100644 index 00000000..ec073a73 --- /dev/null +++ b/modern-cpp-kafka/doc/CMakeLists.txt @@ -0,0 +1,50 @@ +find_program (DOXYGEN_EXECUTABLE NAMES doxygen) +if (NOT DOXYGEN_EXECUTABLE) + message(FATAL_ERROR "Could not find doxygen!") +else() + message("Doxygen build started... [${DOXYGEN_EXECUTABLE}]") +endif() + +set(DOXYGEN_INPUT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../include/kafka) + +set(doxyfile_in ${CMAKE_CURRENT_SOURCE_DIR}/../scripts/doxyfile.cfg) +set(doxyfile_out ${CMAKE_CURRENT_BINARY_DIR}/doxyfile.cfg) +configure_file(${doxyfile_in} ${doxyfile_out} @ONLY) + +add_custom_target( + doxygen-doc ALL + COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile_out} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/../include + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating doxygen documents" + VERBATIM +) + +install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen DESTINATION doc) + +message("Markdown build started...") +set (DOC_ENTRY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../README.md) +set (MARKDOWN_EXECUTABLE "${CMAKE_CURRENT_SOURCE_DIR}/../scripts/markdown2html.py") +set (MARKDOWN_OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/markdown") +file(GLOB MARKDOWN_INPUT_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.md) + +add_custom_target( + doc-entry ALL + COMMAND ${MARKDOWN_EXECUTABLE} -i ${DOC_ENTRY_SRC} -o ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${DOC_ENTRY_SRC} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating documents entry" + VERBATIM +) + +add_custom_target( + markdown-doc ALL + COMMAND ${MARKDOWN_EXECUTABLE} -i ${MARKDOWN_INPUT_FILES} -o ${MARKDOWN_OUTPUT_DIR} + DEPENDS ${MARKDOWN_INPUT_FILES} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + COMMENT "Generating markdown documents" + VERBATIM +) + +install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/markdown DESTINATION doc) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/index.html DESTINATION doc) diff --git a/modern-cpp-kafka/doc/GoodPracticesToUseKafkaConsumer.md b/modern-cpp-kafka/doc/GoodPracticesToUseKafkaConsumer.md new file mode 100644 index 00000000..7575f077 --- /dev/null +++ b/modern-cpp-kafka/doc/GoodPracticesToUseKafkaConsumer.md @@ -0,0 +1,22 @@ +# Good Practices to Use a KafkaConsumer + +If we want to achieve high performance/availability, 
here're some rules of thumb. + +## How to distribute the messages (for the same topics) to different KafkaConsumers + +* Use a consumer group for these KafkaConsumers, thus they will work together -- each one deals with different partitions. + +* Besides `subscribe` (topics), users could also choose to explicitly `assign` certain partitions to a `KafkaConsumer`. + +## How to enhance the throughput + +* Try with a larger `QUEUED_MIN_MESSAGES`, especially for small messages. + +* Use multiple KafkaConsumers to distribute the payload. + +## How to avoid polling duplicated messages + +* To commit the offsets more frequently (e.g, always do commit after finishing processing a message). + +* Don't use quite a large `MAX_POLL_RECORDS` for a `KafkaConsumer` (with `enable.auto.commit=true`) -- you might fail to commit all these messages before crash, thus more duplications with the next `poll`. + diff --git a/modern-cpp-kafka/doc/GoodPracticesToUseKafkaProducer.md b/modern-cpp-kafka/doc/GoodPracticesToUseKafkaProducer.md new file mode 100644 index 00000000..b6af5c74 --- /dev/null +++ b/modern-cpp-kafka/doc/GoodPracticesToUseKafkaProducer.md @@ -0,0 +1,48 @@ +# Good Practices to Use a KafkaProducer + +If we want to achieve high performance/availability, here're some rules of thumb. + +## Avoid using `syncSend` for better throughput + +You should never call `syncSend` if you want to get a high throughput. The `syncSend` is a synchronous operation, and would not go on until the `acks` are received. + +## The `message.max.bytes` must be consistent with Kafka servers' setting + +* Default value: 1000,000 + +* The default setting for brokers is `message.max.bytes = 1000012`, and do MAKE SURE the client side setting no larger than it. Otherwise, it might construct a MessageSet which would be rejected (error: INVALID_MESSAGE_SIZE) by brokers. + +## Calculate `batch.num.messages` with the average message size + +* Default value: 10,000 + +* It defines the maximum number of messages batched in one MessageSet. + + Normally, larger value, better performance. However, since the size of MessageSet is limited by `message.max.bytes`, a too large value would not help any more. + + E.g, with the default `message.max.bytes=1000000` and `batch.num.messages=10000` settings, you could get the best performance while the average message size is larger than 100 bytes. + + However, if the average message size is small, you have to enlarge it (to `message.max.bytes/average_message_size` at least). + +## Choose `acks` wisely + +* The acks parameter controls how many partition replicas must receive the record before the producer can consider the write successful. + + * `acks=0`, the producer will not wait for a reply from the broker before assuming the message was sent successfully. + + * `acks=1`, the producer will receive a success response from the broker the moment the leader replica received the message. + + * `acks=all`, the producer will receive a success response from the broker once all in-sync replicas received the message. + + * Note: if "ack=all", please make sure the topic's replication factor is larger than 1. + +* The `acks=all` setting will highly impact the throughput & latency, and it would be obvious if the traffic latency between kafka brokers is high. But it's mandatory if we want to achieve high availability. + +## How could a message miss after send? + +* The message might even not have been received by the partition leader! 
(with `acks=0`) + +* Once the message received by the partition leader, the leader crashed just after responding to the producer, but has no chance to synchronize the message to other replicas. (with `acks=1`) + +* Once the message received by the partition leader, the leader crashed just after responding to the producer, but with no in-sync replica to synchronize for the message. (with `acks=all`, while brokers are with `min.insync.replicas=1`) + diff --git a/modern-cpp-kafka/doc/HowToMakeKafkaProducerReliable.md b/modern-cpp-kafka/doc/HowToMakeKafkaProducerReliable.md new file mode 100644 index 00000000..970fb3db --- /dev/null +++ b/modern-cpp-kafka/doc/HowToMakeKafkaProducerReliable.md @@ -0,0 +1,185 @@ +# How to Make KafkaProducer Reliable + +While using message dispatching systems, we always suffer from message lost, duplication and disordering. + +Since the application (using the `KafkaProducer`) might crash/restart, we might consider using certain mechanism to achieve `At most once`/`At least once`, and `Ordering`, -- such as locally persisting the messages until successful delivery, using embedded sequence number to de-duplicate, or responding data-source to acknowledgement the delivery result, etc. These are common topics, which are not quite specific to Kafka. + +Here we'd focus on `KafkaProducer`, together with the `idempotence` feature. Let's see, in which cases problems might happen, how to avoid them, and what's the best practise,-- to achieve `No Message Lost`, `Exactly Once` and `Ordering`. + + +## About `No Message Lost` + +### When might a message actually be lost + +* The producer gets a successful delivery response after sending the message, but the `partition leader` failed to sync it to other `replicas`. + +### How could a message be lost even with successful delivery + +* First, the `partition leader` doesn't sync-up the latest message to enough `in-sync replicas` before responding with the `ack` + + * The `partition leader` just don't need to wait for other `replica`s response + + - E.g, the producer is configured with `acks=1` + + * No available `in-sync replica` to wait for the response + + - E.g, all other replicas are not in-sync + +* Then, the `partition leader` crashes, and one `in-sync replica` becomes new `partition leader` + + * The new `partition leader` has no acknowledgement with the latest messages. Later, while new messages arrive, it would use conflicting record offsets (same with those records which the `partition leader` knows only). Then, even if the previous `partition leader` comes up again, these records have no chance to be recovered (just internally overwritten to be consistent with other replicas). + +### How to make sure `No Message Lost` + +* Make sure the leader would wait for responses from all in-sync replicas before the response + + * Configuration `acks=all` is a MUST for producer + +* Ensure enough `In-Sync partition replicas` + + * Configuration `min.insync.replicas >= 2` is a MUST for brokers + + - Take `min.insync.replicas = 2` for example, it means, + + 1. At most `replication.factor - min.insync.replicas` replicas are out-of-sync, -- the producer would still be able to send messages, otherwise, it could fail with 'no enough replica' error, and keeps retrying. + + 2. Occasionally no more than `min.insync.replicas` in-sync-replica failures. -- otherwise, messages might be missed. In this case, if just one in-sync replica crashes after sending back the ack to the producer, the message would not be lost; if two failed, it would! 
Since the new leader might be a replica which was not in-sync previously, and has no acknowledgement with these latest messages. + + * Please refer to [Kafka Broker Configuration](KafkaBrokerConfiguration.md) for more details. + + * Then, what would happen if replicas fail + + 1. Fails to send (`not enough in-sync replica failure`), -- while number of `in-sync replicas` could not meet `min.insync.replication` + + 2. Lost messages (after sending messages), -- with no `in-sync replica` survived from multi-failures + + 3. No message lost (while with all `in-sync replicas` acknowledged, and at least one `in-sync replica` available) + + +## About `Exactly Once` + +### How duplications happen + +* After brokers successfully persisted a message, it sent the `ack` to the producer. But for some abnormal reasons (such as network failure, etc), the producer might fail to receive the `ack`. The `librdkafka`'s internal queue would retry, thus another (duplicated) message would be persisted by brokers. + +### How to guarantee `Exactly Once` + +* The `enable.idempotence` configuration is RECOMMENDED. + + +## About `Ordering` + +### No ordering between partitions + +* Make sure these `ProducerRecord`s be with the same partition + + - Explicitly assigned with the same `topic-partition` + + - Use the same `key` for these records + +### How disordering happens within one partition + +* The `librdkafka` uses internal partition queues, and once a message fails to be sent successfully(e.g, brokers are down), it would be put back on the queue and retries again while `retry.backoff.ms` expires. However, before that (retry with the failed message), the brokers might recover and the messages behind (if with configuration `max.in.flight > 1`) happened to be sent successfully. In this case (with configuration `max.in.flight > 1` and `retries > 0`), disordering could happen, and the user would not even be aware of it. + +* Furthermore, while the last retry still failed, delivery callback would eventually be triggered. The user has to determine what to do for that (might want to re-send the message, etc). But there might be a case, -- some later messages had already been saved successfully by the server, thus no way to revert the disordering. + + +## More About `Idempotent producer` + +Please refer to the document from librdkafka, [Idempotent Producer](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md#idempotent-producer) for more details. + +### Extra fields to maintain the message sequence + +The `librdkafka` maintains the original produce() ordering per-partition for all messages produced, using an internal per-partition 64-bit counter called the `msgid` which starts at 1. This `msgid` allows messages to be re-inserted in the partition message queue in the original order in the case of retries. + +The Idempotent Producer functionality in the Kafka protocol also has a per-message `sequence number`, which is a signed 32-bit wrapping counter that is reset each time the `Producer's ID (PID)` or `Epoch` changes. + +The `msgid` is used, (along with a base `msgid` value stored at the time the `PID/Epoch` was bumped), to calculate the Kafka protocol's message `sequence number`. + +### Configuration conflicts + +* Since the following configuration properties are adjusted automatically (if not modified by the user). Producer instantiation will fail if user-supplied configuration is incompatible. 
+
+    - `acks = all`
+
+    - `max.in.flight` (i.e. `max.in.flight.requests.per.connection`) `= 5`
+
+    - `retries = INT32_MAX`
+
+### Error handling
+
+* Exception thrown during `send`
+
+    * For errors which could be detected locally (and could not be recovered by retrying), an exception would be thrown. E.g, an invalid message, as RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE (conflicting with the local configuration `message.max.bytes`).
+
+* Permanent errors (responses from brokers)
+
+    * Typical errors are:
+
+        * Invalid message: RD_KAFKA_RESP_ERR_CORRUPT_MESSAGE, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE, RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS, RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT, RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE.
+
+        * Topic/Partition does not exist: ERR_UNKNOWN_TOPIC_OR_PART, -- automatic topic creation is disabled on the broker, or the application is specifying a partition that does not exist.
+
+        * Authorization failure: ERR_TOPIC_AUTHORIZATION_FAILED, ERR_CLUSTER_AUTHORIZATION_FAILED
+
+    * Normally, a `Permanent error` means careless design or wrong configuration, which should be avoided from the very beginning.
+
+    * Unless `enable.gapless.guarantee` (EXPERIMENTAL) is configured, the producer would keep going with the following messages; otherwise, it would purge all messages in-flight/in-queue (with RD_KAFKA_RESP_ERR__PURGE_INFLIGHT/RD_KAFKA_RESP_ERR__PURGE_QUEUE).
+
+* Temporary errors
+
+    * Apart from those `permanent errors`, most of the rest are temporary errors, which will be retried (if the retry count permits); once `message.timeout.ms` expires, the message delivery callback would be triggered with `RD_KAFKA_RESP_ERR__MSG_TIMED_OUT`.
+
+* Be careful with the `RD_KAFKA_RESP_ERR__MSG_TIMED_OUT` failure
+
+    * There are some corner cases, such as a message that has been persisted by brokers but the `KafkaProducer` failed to get the response. If `message.timeout.ms` has not expired, the producer could retry and eventually get the response. Otherwise (i.e, `message.timeout.ms` expired before the producer received the successful `ack`), it would be considered a delivery failure by the producer (while the brokers wouldn't consider it one). Users might re-transmit the message, thus causing duplications.
+
+    * To avoid this tricky situation, a longer `message.timeout.ms` is RECOMMENDED, to make sure there's enough time for transmission retries / in-flight responses.
+
+### Performance impact
+
+* The main impact comes from the `max.in.flight=5` limitation. Currently, `max.in.flight` means `max.in.flight.per.connection`, -- that is, at most 5 message batches (each up to ~1MB) in flight (without the `ack` response yet) towards each broker. Within low-latency networks it would not be a problem; in other cases it might be! The good news is, there might be a plan (in `librdkafka`) to improve that `per.connection` limit to `per.partition`, thus boosting the performance a lot.
+
+
+## The best practice for `KafkaProducer`
+
+* Enable the `enable.idempotence` configuration
+
+* Use a long `message.timeout.ms`, which would let `librdkafka` keep retrying, before triggering the delivery failure callback.
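+To make the `RD_KAFKA_RESP_ERR__MSG_TIMED_OUT` caveat above concrete, here is a minimal sketch of a delivery callback that separates the ambiguous timed-out case from other failures. It assumes the callback signature used in the examples in this document, and that `kafka::Error::value()` exposes the underlying librdkafka error code.
+
+```cpp
+    // Sketch: classify delivery results inside the delivery callback.
+    auto deliveryCallback = [](const kafka::clients::producer::RecordMetadata& metadata,
+                               const kafka::Error& error) {
+        if (!error) {
+            // Acknowledged by the brokers (with acks=all, by all in-sync replicas).
+            return;
+        }
+        if (error.value() == RD_KAFKA_RESP_ERR__MSG_TIMED_OUT) {
+            // Delivery result unknown: the broker may have persisted the message,
+            // so re-sending it could introduce a duplicate.
+            std::cerr << "% Delivery timed out (result unknown): " << metadata.toString() << std::endl;
+        } else {
+            // Other (often permanent) failures usually indicate a configuration or design problem.
+            std::cerr << "% Delivery failed: " << error.message() << std::endl;
+        }
+    };
+```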
+ + +## Some examples + +### `KafkaProducer` demo + +```cpp + std::atomic running = true; + + KafkaProducer producer( + Properties({ + { ProducerConfig::BOOTSTRAP_SERVERS, "192.168.0.1:9092,192.168.0.2:9092,192.168.0.3:9092" }, + { ProducerConfig::ENABLE_IDEMPOTENCE, "true" }, + { ProducerConfig::MESSAGE_TIMEOUT_MS, "86400000"} // as long as 1 day + }) + ); + + while (running) { + auto msg = fetchMsgFromUpstream(); + auto record = ProducerRecord(topic, msg.key, msg.value, msg.id); + producer.send(record, + // Ack callback + [&msg](const Producer::RecordMetadata& metadata, std::error_code ec) { + // the message could be identified by `metadata.recordId()` + auto recordId = metadata.recordId(); + if (ec) { + std::cerr << "Cannot send out message with recordId: " << recordId << ", error:" << ec.message() << std::endl; + } else { + commitMsgToUpstream(recordId); + } + }); + } + + producer.close(); +``` + +* With a long `message.timeout.ms`, we're not likely to catch an error with delivery callback, --it would retry for temporary errors anyway. But be aware with permanent errors, it might be caused by careless design. diff --git a/modern-cpp-kafka/doc/KafkaBrokerConfiguration.md b/modern-cpp-kafka/doc/KafkaBrokerConfiguration.md new file mode 100644 index 00000000..914d90c7 --- /dev/null +++ b/modern-cpp-kafka/doc/KafkaBrokerConfiguration.md @@ -0,0 +1,155 @@ +# Kafka Broker Configuration + +## Java Version + +* Recommend the latest released version of JDK 1.8, -- LinkedIn is currently running JDK 1.8 u5. + +## JVM Configuration + +* Here is a sample for `KAFKA_JVM_PERFORMANCE_OPTS` + + -Xmx8g -Xms8g -XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80 + +## Deployment + +* In at least three data centers with high bandwidth and low latency between them. (Commonly, using three availability zones inside one region of a cloud provider) + + * IMPORTANT: the `latency/bandwidth` between brokers could highly impact the `throughput/latency` of a producer client. + +* `rack.id` could be used to identify brokers from different data centers. + + +## Functionality + +* Controller + + * Maintains leaders/replicas info for partitions. + +* Partition Replicas + + * Leader replica + + * All produce/consume requests go through the leader. + + * In-Sync Replica + + * Replicas that are continuously asking for the latest messages; has caught up to the most recent message in 10 seconds (`replica.lag.time.max.ms`). + + * The preferred in-sync replica would be promoted to new leader while the previous one fails. + +## OS tuning + +(Use `sysctl`, or edit `/etc/sysctl.conf` for permanent change) + +* File descriptor limits + + * `fs.file-max` + + * Recommended 100000 or higher. + +* Maximum number of memory map areas for a process + + * `vm.max_map_count` + + * Each log segment uses 2 map areas. + +* Virtual memory + + * It's best to avoid swapping at all costs. + + * Set `vm.swappiness` to a very low value (e.g, 1). + +* Dirty page + + * `vm.dirty_background_ratio=5`, is appropriate in many situations. + + * `vm.dirty_ratio=60(~80)`, is a reasonable number. + +* Networking + + * `net.core.wmem_default` and `net.core.rmem_default`, reasonable setting: 131072 (128KB). + + * `net.core.wmem_max` and `net.core.rmem_max`, reasonable setting: 2097152 (2MB). + + * `net.ipv4.tcp_wmem` and `net.ipv4.tcp_rmem`, an example setting: 4096 (4KB minimum), 65536 (64KB default), 2048000 (2MB maximum). 
+ + * `net.ipv4.tcp_window_scaling=1`. + + * `net.ipv4.tcp_max_syn_backlog`, should be above 1024 (default) to allow more simultaneous connections. + + * `net.core.netdev_max_backlog`, should be above 1000 (default) to allow more packets to be queued to process. + +## Disks and File-system + +* Throughput of the broker disks directly influence the performance of producer clients. + +* EXT4 and XFS are the most popular choices (XFS with better performance). Some companies are even trying with ZFS. + +* Do not use mounted shared drives and any network file systems. + +* Do not share the same drives used for Kafka data with other applications to ensure good latency. + +## Broker Settings + +* Auto-created Topics + + * With `auto.create.topics.enable=true`, a topic could be created while, + + 1. A producer starts writing messages to the topic. + + 2. A consumer starts reading messages from the topic. + + 3. Any client requests metadata for the topic. + + * The auto-created topics might not be what you want + + You might want to override some default configurations + + 1. `default.replication.factor=3` + + * We recommend a replication factor of 3 (at least) for any topic where availability is an issue. + + * The replication factor should be no more than the number of brokers. + + 2. `offsets.topic.replication.factor=3` + + * It's for the internal topic `__consumer_offsets`, -- auto-topic-creation will fail with a GROUP_COORDINATOR_NOT_AVAILABLE error if the cluster can't meet this replication factor requirement. + + 3. `num.partitions=5` (or whatever you want) + + * Unclean leader election + + * Set `unclean.leader.election.enable=false` to avoid out-of-sync replicas. + + * Minimal in-sync replicas + + * Set `min.insync.replicas=2` (at least) for fault-tolerant. + + * Log + + * `log.retention.bytes` and `log.retention.ms/hours`, -- the log segment will be cleared if it exceeds the limits. + + * `log.segment.bytes` and `log.segment.ms`, -- a new log segment will be created if any of the limits is reached. + + * Threads for recovery + + * `num.recovery.threads.per.data.dir` (default 1), could be a larger number to speed up opening/closing log segments, recovering from failure, etc. + + * Maximum message size supported + + * `message.max.bytes` (default 1000012). + + * `replica.fetch.max.bytes` MUST be larger than `message.max.bytes`. + + * MUST be coordinated with (lower than) the `fetch.message.max.bytes` configuration of consumer clients. + + * MUST be coordinated (same) with the `message.max.bytes` configuration of producer clients. + +# Performance tips + +* Factors: Memory, Disk, Partitions, and Ethernet bandwidth. + +* Partitions could be used to improve throughput, by using multiple Producers/Consumers. + +* Suggests that limiting the size of the partition on the disk to less than 6 GB per day of retention often gives satisfactory results. + diff --git a/modern-cpp-kafka/doc/KafkaConsumerQuickStart.md b/modern-cpp-kafka/doc/KafkaConsumerQuickStart.md new file mode 100644 index 00000000..85cbd05a --- /dev/null +++ b/modern-cpp-kafka/doc/KafkaConsumerQuickStart.md @@ -0,0 +1,210 @@ +# KafkaConsumer Quick Start + +Generally speaking, The `Modern C++ Kafka API` is quite similar with [Kafka Java's API](https://kafka.apache.org/22/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html) + +We'd recommend users to cross-reference them, --especially the examples. 
+ +Unlike Java's KafkaConsumer, here we introduced two derived classes, --KafkaAutoCommitConsumer and KafkaManualCommitConsumer, --depending on whether users should call `commit` manually. + +## KafkaConsumer (`enable.auto.commit=true`) + +* Automatically commits previously polled offsets on each `poll` (and the final `close`) operations. + + * Note, the internal `offset commit` is asynchronous, and is not guaranteed to succeed. It's supposed to be triggered (within each `poll` operation) periodically, thus the occasional failure doesn't quite matter. + +### Example +```cpp + // Create configuration object + kafka::Properties props ({ + {"bootstrap.servers", brokers}, + {"enable.auto.commit", "true"} + }); + + // Create a consumer instance + kafka::clients::KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + // Read messages from the topic + std::cout << "% Reading messages from topic: " << topic << std::endl; + while (true) { + auto records = consumer.poll(std::chrono::milliseconds(100)); + for (const auto& record: records) { + // In this example, quit on empty message + if (record.value().size() == 0) return 0; + + if (!record.error()) { + std::cout << "% Got a new message..." << std::endl; + std::cout << " Topic : " << record.topic() << std::endl; + std::cout << " Partition: " << record.partition() << std::endl; + std::cout << " Offset : " << record.offset() << std::endl; + std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; + std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; + std::cout << " Key [" << record.key().toString() << "]" << std::endl; + std::cout << " Value [" << record.value().toString() << "]" << std::endl; + } else { + std::cerr << record.toString() << std::endl; + } + } + } + + // consumer.close(); // No explicit close is needed, RAII will take care of it +``` + +* `bootstrap.servers` property is mandatory for a Kafka client. + +* `subscribe` could take a topic list. It's a block operation, would wait the consumer to get partitions assigned. + +* `poll` must be called periodically, thus to trigger kinds of callback handling internally. In practice, it could be put in a "while loop". + +* At the end, we could `close` the consumer explicitly, or just leave it to the destructor. + +## KafkaConsumer (`enable.auto.commit=false`) + +* Users must commit the offsets for received records manually. + +### Example +```cpp + // Create configuration object + kafka::Properties props ({ + {"bootstrap.servers", brokers}, + }); + + // Create a consumer instance + kafka::clients::KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + auto lastTimeCommitted = std::chrono::steady_clock::now(); + + // Read messages from the topic + std::cout << "% Reading messages from topic: " << topic << std::endl; + bool allCommitted = true; + bool running = true; + while (running) { + auto records = consumer.poll(std::chrono::milliseconds(100)); + for (const auto& record: records) { + // In this example, quit on empty message + if (record.value().size() == 0) { + running = false; + break; + } + + if (!record.error()) { + std::cout << "% Got a new message..." 
<< std::endl; + std::cout << " Topic : " << record.topic() << std::endl; + std::cout << " Partition: " << record.partition() << std::endl; + std::cout << " Offset : " << record.offset() << std::endl; + std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; + std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; + std::cout << " Key [" << record.key().toString() << "]" << std::endl; + std::cout << " Value [" << record.value().toString() << "]" << std::endl; + + allCommitted = false; + } else { + std::cerr << record.toString() << std::endl; + } + } + + if (!allCommitted) { + auto now = std::chrono::steady_clock::now(); + if (now - lastTimeCommitted > std::chrono::seconds(1)) { + // Commit offsets for messages polled + std::cout << "% syncCommit offsets: " << kafka::utility::getCurrentTime() << std::endl; + consumer.commitSync(); // or commitAsync() + + lastTimeCommitted = now; + allCommitted = true; + } + } + } + + // consumer.close(); // No explicit close is needed, RAII will take care of it +``` + +* The example is quite similar with the KafkaAutoCommitConsumer, with only 1 more line added for manual-commit. + +* `commitSync` and `commitAsync` are both available for a KafkaManualConsumer. Normally, use `commitSync` to guarantee the commitment, or use `commitAsync`(with `OffsetCommitCallback`) to get a better performance. + +## `KafkaConsumer` with `kafka::clients::KafkaClient::EventsPollingOption` + +While we construct a `KafkaConsumer` with `kafka::clients::KafkaClient::EventsPollingOption::Auto` (i.e. the default option), an internal thread would be created for `OffsetCommit` callbacks handling. + +This might not be what you want, since then you have to use 2 different threads to process the messages and handle the `OffsetCommit` responses. + +Here we have another choice, -- using `kafka::clients::KafkaClient::EventsPollingOption::Manual`, thus the `OffsetCommit` callbacks would be called within member function `pollEvents()`. + +### Example +```cpp + KafkaConsumer consumer(props, kafka::clients::KafkaClient::EventsPollingOption::Manual); + + consumer.subscribe({"topic1", "topic2"}); + + while (true) { + auto records = consumer.poll(std::chrono::milliseconds(100)); + for (auto& record: records) { + // Process the message... + process(record); + + // Here we commit the offset manually + consumer.commitSync(*record); + } + + // Here we call the `OffsetCommit` callbacks + // Note, we can only do this while the consumer was constructed with `EventsPollingOption::Manual`. + consumer.pollEvents(); + } +``` + +## Error handling + +No exception would be thrown from a consumer's `poll` operation. + +Instead, once an error occurs, the `Error` would be embedded in the `Consumer::ConsumerRecord`. + +About `Error`'s `value()`s, there are 2 cases + +1. Success + + - `RD_KAFKA_RESP_ERR__NO_ERROR` (`0`), -- got a message successfully + + - `RD_KAFKA_RESP_ERR__PARTITION_EOF`, -- reached the end of a partition (no message got) + +2. Failure + + - [Error Codes](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + +## Frequently Asked Questions + +* What're the available configurations? + + - [KafkaProducerConfiguration](KafkaClientConfiguration.md#kafkaconsumer-configuration) + + - [Inline doxygen page](../doxygen/classKAFKA__CPP__APIS__NAMESPACE_1_1ConsumerConfig.html) + +* How to enhance the polling performance? 
+ + `ConsumerConfig::QUEUED_MIN_MESSAGES` determines how frequently the consumer would send the FetchRequest towards brokers. + The default configuration (i.e, 100000) might not be good enough for small (less than 1KB) messages, and suggest using a larger value (e.g, 1000000) for it. + +* How many threads would be created by a KafkaConsumer? + + Excluding the user's main thread, if `enable.auto.commit` is `false`, the `KafkaConsumer` would start another (N + 2) threads in the background; otherwise, the `KafkaConsumer` would start (N + 3) background threads. (N means the number of BOOTSTRAP_SERVERS) + + 1. Each broker (in the list of BOOTSTRAP_SERVERS) would take a seperate thread to transmit messages towards a kafka cluster server. + + 2. Another 3 threads will handle internal operations, consumer group operations, and kinds of timers, etc. + + 3. To enable the auto commit, one more thread would be create, which keeps polling/processing the offset-commit callback event. + + E.g, if a KafkaConsumer was created with property of `BOOTSTRAP_SERVERS=127.0.0.1:8888,127.0.0.1:8889,127.0.0.1:8890`, it would take 6 threads in total (including the main thread). + +* Which one of these threads will handle the callbacks? + + There are 2 kinds of callbacks for a KafkaConsumer, + + 1. `RebalanceCallback` will be triggered internally by the user's thread, -- within the `poll` function. + + 2. If `enable.auto.commit=true`, the `OffsetCommitCallback` will be triggered by the user's `poll` thread; otherwise, it would be triggered by a background thread. diff --git a/modern-cpp-kafka/doc/KafkaProducerQuickStart.md b/modern-cpp-kafka/doc/KafkaProducerQuickStart.md new file mode 100644 index 00000000..b702a952 --- /dev/null +++ b/modern-cpp-kafka/doc/KafkaProducerQuickStart.md @@ -0,0 +1,221 @@ +# KafkaProducer Quick Start + +Generally speaking, The `Modern C++ Kafka API` is quite similar to the [Kafka Java's API](https://kafka.apache.org/10/javadoc/org/apache/kafka/clients/producer/KafkaProducer.html). + +We'd recommend users to cross-reference them, --especially the examples. + +## KafkaProducer + +* The `send` is an unblock operation, and the result (including errors) could only be got from the delivery callback. + +### Example +```cpp + using namespace kafka::clients; + + // Create configuration object + kafka::Properties props ({ + {"bootstrap.servers", brokers}, + {"enable.idempotence", "true"}, + }); + + // Create a producer instance + KafkaProducer producer(props); + + // Read messages from stdin and produce to the broker + std::cout << "% Type message value and hit enter to produce message. (empty line to quit)" << std::endl; + + for (auto line = std::make_shared(); + std::getline(std::cin, *line); + line = std::make_shared()) { + // The ProducerRecord doesn't own `line`, it is just a thin wrapper + auto record = producer::ProducerRecord(topic, + kafka::NullKey, + kafka::Value(line->c_str(), line->size())); + + // Send the message + producer.send(record, + // The delivery report handler + // Note: Here we capture the shared_pointer of `line`, + // which holds the content for `record.value()`. + // It makes sure the memory block is valid until the lambda finishes. 
+                      [line](const producer::RecordMetadata& metadata, const kafka::Error& error) {
+                          if (!error) {
+                              std::cout << "% Message delivered: " << metadata.toString() << std::endl;
+                          } else {
+                              std::cerr << "% Message delivery failed: " << error.message() << std::endl;
+                          }
+                      });
+
+        if (line->empty()) break;
+    }
+
+    // producer.close(); // No explicit close is needed, RAII will take care of it
+```
+
+* The user must guarantee the memory block for a `ProducerRecord`'s `key` is valid until `send` is called.
+
+* By default, the memory block for a `ProducerRecord`'s `value` must be valid until the delivery callback is called; otherwise, the `send` should be called with the option `KafkaProducer::SendOption::ToCopyRecordValue`.
+
+* It is guaranteed that the delivery callback would be triggered anyway after `send`, -- a producer would even be waiting for it before `close`. So, the `Producer::Callback` function is a good place to release these memory resources.
+
+## `KafkaProducer` with `kafka::clients::KafkaClient::EventsPollingOption`
+
+While we construct a `KafkaProducer` with `kafka::clients::KafkaClient::EventsPollingOption::Auto` (the default option), an internal thread would be created for `MessageDelivery` callback handling.
+
+This might not be what you want, since then you have to use 2 different threads to send the messages and handle the `MessageDelivery` responses.
+
+Here we have another choice, -- using `kafka::clients::KafkaClient::EventsPollingOption::Manual`; the `MessageDelivery` callbacks would then be called within the member function `pollEvents()`.
+
+* Note, if you constructed the `KafkaProducer` with `EventsPollingOption::Manual`, the `send()` would be a non-blocking operation.
+I.e, once the `message buffering queue` becomes full, the `send()` operation would throw an exception (or return an `error code` via the input reference parameter), -- instead of blocking there.
+This makes sense, since you might want to call `pollEvents()` later, so that delivery callbacks could be called for some messages (which could then be removed from the `message buffering queue`).
+
+### Example
+```cpp
+    using namespace kafka::clients;
+
+    KafkaProducer producer(props, KafkaClient::EventsPollingOption::Manual);
+
+    // Prepare "msgsToBeSent" (pseudo-code: a map from record id to key/value pair)
+    std::map<std::uint64_t, std::pair<kafka::Key, kafka::Value>> msgsToBeSent = ...;
+
+    for (const auto& msg : msgsToBeSent) {
+        auto record = producer::ProducerRecord(topic, partition, msg.second.first, msg.second.second, msg.first);
+        kafka::Error sendError;
+        producer.send(sendError,
+                      record,
+                      // Ack callback
+                      [&msgsToBeSent](const producer::RecordMetadata& metadata, const kafka::Error& deliveryError) {
+                          // The message could be identified by `metadata.recordId()`
+                          if (deliveryError) {
+                              std::cerr << "% Message delivery failed: " << deliveryError.message() << std::endl;
+                          } else {
+                              msgsToBeSent.erase(metadata.recordId()); // Quite safe here
+                          }
+                      });
+        if (sendError) break;
+    }
+
+    // Here we call the `MessageDelivery` callbacks
+    // Note, we can only do this while the producer was constructed with `EventsPollingOption::Manual`.
+    producer.pollEvents();
+```
+
+## Headers in ProducerRecord
+
+* A `ProducerRecord` could take extra information with `headers`.
+
+    * Note, the `header` within `headers` contains the pointer of the memory block for its `value`. The memory block MUST be valid until the `ProducerRecord` is read by `producer.send()`.
+
+### Example
+```cpp
+    using namespace kafka::clients;
+
+    KafkaProducer producer(props);
+
+    auto record = producer::ProducerRecord(topic, partition, kafka::Key(), kafka::Value());
+
+    for (const auto& msg : msgsToBeSent) {
+        // Prepare record headers
+        std::string   session = msg.session;
+        std::uint32_t seqno   = msg.seqno;
+        record.headers() = {
+            { "session", { session.c_str(), session.size()} },
+            { "seqno",   { &seqno, sizeof(seqno)} }
+        };
+
+        record.setKey(msg.key);
+        record.setValue(msg.value);
+
+        producer.send(record,
+                      // Ack callback
+                      [&msg](const producer::RecordMetadata& metadata, const kafka::Error& error) {
+                          if (error) {
+                              std::cerr << "% Message delivery failed: " << error.message() << std::endl;
+                          }
+                      });
+    }
+```
+
+## Error handling
+
+An `Error` might occur at different places while sending a message,
+
+1. A `KafkaException` would be triggered if the `KafkaProducer` failed to trigger the send operation.
+
+2. A delivery `Error` would be passed through the delivery callback.
+
+About the `Error`'s `value()`, there are 2 cases
+
+1. Local errors,
+
+    - `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC`     -- The topic doesn't exist
+
+    - `RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION` -- The partition doesn't exist
+
+    - `RD_KAFKA_RESP_ERR__INVALID_ARG`       -- Invalid topic (topic is null, or the length is too long (> 512))
+
+    - `RD_KAFKA_RESP_ERR__MSG_TIMED_OUT`     -- No ack received within the time limit
+
+2. Broker errors,
+
+    - [Error Codes](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes)
+
+## Frequently Asked Questions
+
+### What are the available configurations?
+
+- [KafkaProducerConfiguration](KafkaClientConfiguration.md#kafkaproducer-configuration)
+
+- [Inline doxygen page](../doxygen/classKAFKA__CPP__APIS__NAMESPACE_1_1ProducerConfig.html)
+
+### About the automatic `topic creation`
+
+If the cluster is configured with `auto.create.topics.enable=true`, the producer/consumer could trigger the brokers to create a new topic (with `send`, `subscribe`, etc.)
+
+Note, the auto-created topic may not be what you want (e.g, with the `default.replication.factor=1` configuration as default, etc), thus causing other unexpected problems.
+
+### How to enhance the sending performance?
+
+Enlarging the default `BATCH_NUM_MESSAGES` and `LINGER_MS` might improve message batching, thus enhancing the throughput.
+
+On the other hand, `LINGER_MS` would highly impact the latency.
+
+The `QUEUE_BUFFERING_MAX_MESSAGES` and `QUEUE_BUFFERING_MAX_KBYTES` would determine the `max in flight requests` (as some Kafka materials call it). If the queue buffer is full, the `send` operation would be blocked.
+
+A larger `QUEUE_BUFFERING_MAX_MESSAGES`/`QUEUE_BUFFERING_MAX_KBYTES` might help to improve throughput as well, while it also means more messages buffered locally.
+
+### How to achieve reliable delivery
+
+* Quick Answer,
+
+    1. The Kafka cluster should be configured with `min.insync.replicas = 2` at least
+
+    2. Configure the `KafkaProducer` with the property `{ProducerConfig::ENABLE_IDEMPOTENCE, "true"}`, together with proper error handling (within the delivery callback).
+
+* Complete Answer,
+
+    * [How to Make KafkaProducer Reliable](HowToMakeKafkaProducerReliable.md)
+
+### How many threads would be created by a KafkaProducer?
+
+Excluding the user's main thread, a `KafkaProducer` would start (N + 3) background threads. (N means the number of BOOTSTRAP_SERVERS)
+
+Most of these background threads are started internally by librdkafka.
+ +Here is a brief introduction what they're used for, + +1. Each broker (in the list of BOOTSTRAP_SERVERS) would take a separate thread to transmit messages towards a kafka cluster server. + +2. Another 2 background threads would handle internal operations and kinds of timers, etc. + +3. One more background thread to keep polling the delivery callback event. + +E.g, if a `KafkaProducer` was created with property of `BOOTSTRAP_SERVERS=127.0.0.1:8888,127.0.0.1:8889,127.0.0.1:8890`, it would take 7 threads in total (including the main thread). + +### Which one of these threads will handle the callbacks + +It will be handled by a background thread, not by the user's thread. + +Note, should be careful if both the `KafkaProducer::send()` and the `producer::Callback` might access the same container at the same time. + diff --git a/modern-cpp-kafka/examples/BUILD.bazel b/modern-cpp-kafka/examples/BUILD.bazel new file mode 100644 index 00000000..19e6dc41 --- /dev/null +++ b/modern-cpp-kafka/examples/BUILD.bazel @@ -0,0 +1,50 @@ +cc_binary( + name = "kafka_sync_producer", + + srcs = ["kafka_sync_producer.cc"], + + linkopts = ["-lrdkafka"], + + deps = ["//:modern-cpp-kafka-api"], +) + +cc_binary( + name = "kafka_async_producer_copy_payload", + + srcs = ["kafka_async_producer_copy_payload.cc"], + + linkopts = ["-lrdkafka"], + + deps = ["//:modern-cpp-kafka-api"], +) + +cc_binary( + name = "kafka_async_producer_not_copy_payload", + + srcs = ["kafka_async_producer_not_copy_payload.cc"], + + linkopts = ["-lrdkafka"], + + deps = ["//:modern-cpp-kafka-api"], +) + +cc_binary( + name = "kafka_auto_commit_consumer", + + srcs = ["kafka_auto_commit_consumer.cc"], + + linkopts = ["-lrdkafka"], + + deps = ["//:modern-cpp-kafka-api"], +) + +cc_binary( + name = "kafka_manual_commit_consumer", + + srcs = ["kafka_manual_commit_consumer.cc"], + + linkopts = ["-lrdkafka"], + + deps = ["//:modern-cpp-kafka-api"], +) + diff --git a/modern-cpp-kafka/examples/CMakeLists.txt b/modern-cpp-kafka/examples/CMakeLists.txt new file mode 100644 index 00000000..0b26d5ec --- /dev/null +++ b/modern-cpp-kafka/examples/CMakeLists.txt @@ -0,0 +1,26 @@ +project("kafka-examples") + + +# Target: kafka_sync_producer +add_executable("kafka_sync_producer" "kafka_sync_producer.cc") +target_link_libraries("kafka_sync_producer" modern-cpp-kafka-api) + + +# Target: kafka_async_producer_copy_payload +add_executable("kafka_async_producer_copy_payload" "kafka_async_producer_copy_payload.cc") +target_link_libraries("kafka_async_producer_copy_payload" modern-cpp-kafka-api) + + +# Target: kafka_async_producer_not_copy_payload +add_executable("kafka_async_producer_not_copy_payload" "kafka_async_producer_not_copy_payload.cc") +target_link_libraries("kafka_async_producer_not_copy_payload" modern-cpp-kafka-api) + + +# Target: kafka_auto_commit_consumer +add_executable("kafka_auto_commit_consumer" "kafka_auto_commit_consumer.cc") +target_link_libraries("kafka_auto_commit_consumer" modern-cpp-kafka-api) + + +# Target: kafka_manual_commit_consumer +add_executable("kafka_manual_commit_consumer" "kafka_manual_commit_consumer.cc") +target_link_libraries("kafka_manual_commit_consumer" modern-cpp-kafka-api) diff --git a/modern-cpp-kafka/examples/kafka_async_producer_copy_payload.cc b/modern-cpp-kafka/examples/kafka_async_producer_copy_payload.cc new file mode 100644 index 00000000..0e5b3573 --- /dev/null +++ b/modern-cpp-kafka/examples/kafka_async_producer_copy_payload.cc @@ -0,0 +1,60 @@ +#include "kafka/KafkaProducer.h" + +#include +#include +#include + +int 
main(int argc, char **argv) +{ + using namespace kafka::clients; + + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " \n"; + exit(argc == 1 ? 0 : 1); // NOLINT + } + + std::string brokers = argv[1]; + kafka::Topic topic = argv[2]; + + try { + + // Create configuration object + kafka::Properties props ({ + {"bootstrap.servers", brokers}, + {"enable.idempotence", "true"}, + }); + + // Create a producer instance + KafkaProducer producer(props); + + // Read messages from stdin and produce to the broker + std::cout << "% Type message value and hit enter to produce message. (empty line to quit)" << std::endl; + + for (std::string line; std::getline(std::cin, line);) { + // The ProducerRecord doesn't own `line`, it is just a thin wrapper + auto record = producer::ProducerRecord(topic, + kafka::NullKey, + kafka::Value(line.c_str(), line.size())); + // Send the message + producer.send(record, + // The delivery report handler + [](const producer::RecordMetadata& metadata, const kafka::Error& error) { + if (!error) { + std::cout << "% Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "% Message delivery failed: " << error.message() << std::endl; + } + }, + // The memory block given by record.value() would be copied + KafkaProducer::SendOption::ToCopyRecordValue); + + if (line.empty()) break; + } + + // producer.close(); // No explicit close is needed, RAII will take care of it + + } catch (const kafka::KafkaException& e) { + std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; + } +} + diff --git a/modern-cpp-kafka/examples/kafka_async_producer_not_copy_payload.cc b/modern-cpp-kafka/examples/kafka_async_producer_not_copy_payload.cc new file mode 100644 index 00000000..96abab16 --- /dev/null +++ b/modern-cpp-kafka/examples/kafka_async_producer_not_copy_payload.cc @@ -0,0 +1,64 @@ +#include "kafka/KafkaProducer.h" + +#include +#include +#include + +int main(int argc, char **argv) +{ + using namespace kafka::clients; + + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " \n"; + exit(argc == 1 ? 0 : 1); // NOLINT + } + + std::string brokers = argv[1]; + kafka::Topic topic = argv[2]; + + try { + + // Create configuration object + kafka::Properties props ({ + {"bootstrap.servers", brokers}, + {"enable.idempotence", "true"}, + }); + + // Create a producer instance + KafkaProducer producer(props); + + // Read messages from stdin and produce to the broker + std::cout << "% Type message value and hit enter to produce message. (empty line to quit)" << std::endl; + + for (auto line = std::make_shared(); + std::getline(std::cin, *line); + line = std::make_shared()) { + // The ProducerRecord doesn't own `line`, it is just a thin wrapper + auto record = producer::ProducerRecord(topic, + kafka::NullKey, + kafka::Value(line->c_str(), line->size())); + + // Send the message + producer.send(record, + // The delivery report handler + // Note: Here we capture the shared_pointer of `line`, + // which holds the content for `record.value()`. + // It makes sure the memory block is valid until the lambda finishes. 
+ [line](const producer::RecordMetadata& metadata, const kafka::Error& error) { + if (!error) { + std::cout << "% Message delivered: " << metadata.toString() << std::endl; + } else { + std::cerr << "% Message delivery failed: " << error.message() << std::endl; + } + }); + + if (line->empty()) break; + } + + // producer.close(); // No explicit close is needed, RAII will take care of it + + } catch (const kafka::KafkaException& e) { + std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; + } +} + diff --git a/modern-cpp-kafka/examples/kafka_auto_commit_consumer.cc b/modern-cpp-kafka/examples/kafka_auto_commit_consumer.cc new file mode 100644 index 00000000..5c2ec144 --- /dev/null +++ b/modern-cpp-kafka/examples/kafka_auto_commit_consumer.cc @@ -0,0 +1,59 @@ +#include "kafka/KafkaConsumer.h" + +#include +#include + +int main(int argc, char **argv) +{ + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " \n"; + exit(argc == 1 ? 0 : 1); // NOLINT + } + + std::string brokers = argv[1]; + kafka::Topic topic = argv[2]; + + try { + + // Create configuration object + kafka::Properties props ({ + {"bootstrap.servers", brokers}, + {"enable.auto.commit", "true"} + }); + + // Create a consumer instance + kafka::clients::KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + // Read messages from the topic + std::cout << "% Reading messages from topic: " << topic << std::endl; + while (true) { + auto records = consumer.poll(std::chrono::milliseconds(100)); + for (const auto& record: records) { + // In this example, quit on empty message + if (record.value().size() == 0) return 0; + + if (!record.error()) { + std::cout << "% Got a new message..." << std::endl; + std::cout << " Topic : " << record.topic() << std::endl; + std::cout << " Partition: " << record.partition() << std::endl; + std::cout << " Offset : " << record.offset() << std::endl; + std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; + std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; + std::cout << " Key [" << record.key().toString() << "]" << std::endl; + std::cout << " Value [" << record.value().toString() << "]" << std::endl; + } else { + std::cerr << record.toString() << std::endl; + } + } + } + + // consumer.close(); // No explicit close is needed, RAII will take care of it + + } catch (const kafka::KafkaException& e) { + std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; + } +} + diff --git a/modern-cpp-kafka/examples/kafka_manual_commit_consumer.cc b/modern-cpp-kafka/examples/kafka_manual_commit_consumer.cc new file mode 100644 index 00000000..b3d8f946 --- /dev/null +++ b/modern-cpp-kafka/examples/kafka_manual_commit_consumer.cc @@ -0,0 +1,79 @@ +#include "kafka/KafkaConsumer.h" + +#include +#include + +int main(int argc, char **argv) +{ + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " \n"; + exit(argc == 1 ? 
0 : 1); // NOLINT + } + + std::string brokers = argv[1]; + kafka::Topic topic = argv[2]; + + try { + + // Create configuration object + kafka::Properties props ({ + {"bootstrap.servers", brokers}, + }); + + // Create a consumer instance + kafka::clients::KafkaConsumer consumer(props); + + // Subscribe to topics + consumer.subscribe({topic}); + + auto lastTimeCommitted = std::chrono::steady_clock::now(); + + // Read messages from the topic + std::cout << "% Reading messages from topic: " << topic << std::endl; + bool allCommitted = true; + bool running = true; + while (running) { + auto records = consumer.poll(std::chrono::milliseconds(100)); + for (const auto& record: records) { + // In this example, quit on empty message + if (record.value().size() == 0) { + running = false; + break; + } + + if (!record.error()) { + std::cout << "% Got a new message..." << std::endl; + std::cout << " Topic : " << record.topic() << std::endl; + std::cout << " Partition: " << record.partition() << std::endl; + std::cout << " Offset : " << record.offset() << std::endl; + std::cout << " Timestamp: " << record.timestamp().toString() << std::endl; + std::cout << " Headers : " << kafka::toString(record.headers()) << std::endl; + std::cout << " Key [" << record.key().toString() << "]" << std::endl; + std::cout << " Value [" << record.value().toString() << "]" << std::endl; + + allCommitted = false; + } else { + std::cerr << record.toString() << std::endl; + } + } + + if (!allCommitted) { + auto now = std::chrono::steady_clock::now(); + if (now - lastTimeCommitted > std::chrono::seconds(1)) { + // Commit offsets for messages polled + std::cout << "% syncCommit offsets: " << kafka::utility::getCurrentTime() << std::endl; + consumer.commitSync(); // or commitAsync() + + lastTimeCommitted = now; + allCommitted = true; + } + } + } + + // consumer.close(); // No explicit close is needed, RAII will take care of it + + } catch (const kafka::KafkaException& e) { + std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; + } +} + diff --git a/modern-cpp-kafka/examples/kafka_sync_producer.cc b/modern-cpp-kafka/examples/kafka_sync_producer.cc new file mode 100644 index 00000000..6c4ca6f5 --- /dev/null +++ b/modern-cpp-kafka/examples/kafka_sync_producer.cc @@ -0,0 +1,55 @@ +#include "kafka/KafkaProducer.h" + +#include +#include + +int main(int argc, char **argv) +{ + using namespace kafka::clients; + + if (argc != 3) { + std::cerr << "Usage: " << argv[0] << " \n"; + exit(argc == 1 ? 0 : 1); // NOLINT + } + + std::string brokers = argv[1]; + kafka::Topic topic = argv[2]; + + try { + + // Create configuration object + kafka::Properties props({ + {"bootstrap.servers", brokers}, + {"enable.idempotence", "true"}, + }); + + // Create a producer instance. + KafkaProducer producer(props); + + // Read messages from stdin and produce to the broker. + std::cout << "% Type message value and hit enter to produce message. (empty line to quit)" << std::endl; + + for (std::string line; std::getline(std::cin, line);) { + // The ProducerRecord doesn't own `line`, it is just a thin wrapper + auto record = producer::ProducerRecord(topic, + kafka::NullKey, + kafka::Value(line.c_str(), line.size())); + + // Send the message. 
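+            // Note: syncSend() blocks until the delivery result (metadata or an exception) is
+            // available, so throughput is lower than the asynchronous send() used in the other examples.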
+ try { + producer::RecordMetadata metadata = producer.syncSend(record); + std::cout << "% Message delivered: " << metadata.toString() << std::endl; + } catch (const kafka::KafkaException& e) { + std::cerr << "% Message delivery failed: " << e.error().message() << std::endl; + } + + if (line.empty()) break; + }; + + // producer.close(); // No explicit close is needed, RAII will take care of it + + } catch (const kafka::KafkaException& e) { + std::cerr << "% Unexpected exception caught: " << e.what() << std::endl; + } +} + diff --git a/modern-cpp-kafka/include/CMakeLists.txt b/modern-cpp-kafka/include/CMakeLists.txt new file mode 100644 index 00000000..0ab2783e --- /dev/null +++ b/modern-cpp-kafka/include/CMakeLists.txt @@ -0,0 +1,30 @@ +project(modern-cpp-kafka-api) + +add_library(${PROJECT_NAME} INTERFACE) + +target_include_directories(${PROJECT_NAME} INTERFACE ${CMAKE_CURRENT_LIST_DIR}) + +#--------------------------- +# librdkafka +#--------------------------- +target_include_directories(${PROJECT_NAME} SYSTEM INTERFACE ${LIBRDKAFKA_INCLUDE_DIR}) +target_link_directories(${PROJECT_NAME} INTERFACE ${LIBRDKAFKA_LIBRARY_DIR}) +target_link_libraries(${PROJECT_NAME} INTERFACE rdkafka) + +#--------------------------- +# pthread +#--------------------------- +if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + target_link_libraries(${PROJECT_NAME} INTERFACE pthread) +endif () + +#--------------------------- +# sasl (if required) +#--------------------------- +if (SASL_LIBRARY) + target_link_directories(${PROJECT_NAME} INTERFACE ${SASL_LIBRARYDIR}) + target_link_libraries(${PROJECT_NAME} INTERFACE ${SASL_LIBRARY}) +endif () + +# Header-only +install(DIRECTORY kafka DESTINATION include) diff --git a/modern-cpp-kafka/include/kafka/AdminClient.h b/modern-cpp-kafka/include/kafka/AdminClient.h new file mode 100644 index 00000000..d3ef9f3c --- /dev/null +++ b/modern-cpp-kafka/include/kafka/AdminClient.h @@ -0,0 +1,345 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + + +namespace KAFKA_API { namespace clients { + +/** + * The administrative client for Kafka, which supports managing and inspecting topics, etc. + */ +class AdminClient: public KafkaClient +{ +public: + explicit AdminClient(const Properties& properties) + : KafkaClient(ClientType::AdminClient, + KafkaClient::validateAndReformProperties(properties)) + { + } + + /** + * Create a batch of new topics. + */ + admin::CreateTopicsResult createTopics(const Topics& topics, + int numPartitions, + int replicationFactor, + const Properties& topicConfig = Properties(), + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_COMMAND_TIMEOUT_MS)); + /** + * Delete a batch of topics. + */ + admin::DeleteTopicsResult deleteTopics(const Topics& topics, + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_COMMAND_TIMEOUT_MS)); + /** + * List the topics available in the cluster. + */ + admin::ListTopicsResult listTopics(std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_COMMAND_TIMEOUT_MS)); + + /** + * Delete records whose offset is smaller than the given offset of the corresponding partition. 
+ * @param topicPartitionOffsets a batch of offsets for partitions + * @param timeout + * @return + */ + admin::DeleteRecordsResult deleteRecords(const TopicPartitionOffsets& topicPartitionOffsets, + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_COMMAND_TIMEOUT_MS)); + +private: + static std::list getPerTopicResults(const rd_kafka_topic_result_t** topicResults, std::size_t topicCount); + static std::list getPerTopicPartitionResults(const rd_kafka_topic_partition_list_t* partitionResults); + static Error combineErrors(const std::list& errors); + +#if COMPILER_SUPPORTS_CPP_17 + static constexpr int DEFAULT_COMMAND_TIMEOUT_MS = 30000; +#else + enum { DEFAULT_COMMAND_TIMEOUT_MS = 30000 }; +#endif +}; + + +inline std::list +AdminClient::getPerTopicResults(const rd_kafka_topic_result_t** topicResults, std::size_t topicCount) +{ + std::list errors; + + for (std::size_t i = 0; i < topicCount; ++i) + { + const rd_kafka_topic_result_t* topicResult = topicResults[i]; + if (rd_kafka_resp_err_t topicError = rd_kafka_topic_result_error(topicResult)) + { + std::string detailedMsg = "topic[" + std::string(rd_kafka_topic_result_name(topicResult)) + "] with error[" + rd_kafka_topic_result_error_string(topicResult) + "]"; + errors.emplace_back(topicError, detailedMsg); + } + } + return errors; +} + +inline std::list +AdminClient::getPerTopicPartitionResults(const rd_kafka_topic_partition_list_t* partitionResults) +{ + std::list errors; + + for (int i = 0; i < (partitionResults ? partitionResults->cnt : 0); ++i) + { + if (rd_kafka_resp_err_t partitionError = partitionResults->elems[i].err) + { + std::string detailedMsg = "topic-partition[" + std::string(partitionResults->elems[i].topic) + "-" + std::to_string(partitionResults->elems[i].partition) + "] with error[" + rd_kafka_err2str(partitionError) + "]"; + errors.emplace_back(partitionError, detailedMsg); + } + } + return errors; +} + +inline Error +AdminClient::combineErrors(const std::list& errors) +{ + if (!errors.empty()) + { + std::string detailedMsg; + std::for_each(errors.cbegin(), errors.cend(), + [&detailedMsg](const auto& error) { + if (!detailedMsg.empty()) detailedMsg += "; "; + + detailedMsg += error.message(); + }); + + return Error{static_cast(errors.front().value()), detailedMsg}; + } + + return Error{RD_KAFKA_RESP_ERR_NO_ERROR, "Success"}; +} + +inline admin::CreateTopicsResult +AdminClient::createTopics(const Topics& topics, + int numPartitions, + int replicationFactor, + const Properties& topicConfig, + std::chrono::milliseconds timeout) +{ + LogBuffer<500> errInfo; + + std::vector rkNewTopics; + + for (const auto& topic: topics) + { + rkNewTopics.emplace_back(rd_kafka_NewTopic_new(topic.c_str(), numPartitions, replicationFactor, errInfo.str(), errInfo.capacity())); + if (!rkNewTopics.back()) + { + return admin::CreateTopicsResult(Error{RD_KAFKA_RESP_ERR__INVALID_ARG, rd_kafka_err2str(RD_KAFKA_RESP_ERR__INVALID_ARG)}); + } + + for (const auto& conf: topicConfig.map()) + { + rd_kafka_resp_err_t err = rd_kafka_NewTopic_set_config(rkNewTopics.back().get(), conf.first.c_str(), conf.second.c_str()); + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + { + std::string errMsg = "Invalid config[" + conf.first + "=" + conf.second + "]"; + KAFKA_API_DO_LOG(Log::Level::Err, errMsg.c_str()); + return admin::CreateTopicsResult(Error{RD_KAFKA_RESP_ERR__INVALID_ARG, errMsg}); + } + } + } + + std::vector rk_topics; + rk_topics.reserve(rkNewTopics.size()); + for (const auto& topic : rkNewTopics) { rk_topics.emplace_back(topic.get()); } + + 
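+    // Use a dedicated event queue for the CreateTopics request; it is polled below for the result event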
auto rk_queue = rd_kafka_queue_unique_ptr(rd_kafka_queue_new(getClientHandle())); + + rd_kafka_CreateTopics(getClientHandle(), rk_topics.data(), rk_topics.size(), nullptr, rk_queue.get()); + + auto rk_ev = rd_kafka_event_unique_ptr(); + + const auto end = std::chrono::steady_clock::now() + timeout; + do + { + rk_ev.reset(rd_kafka_queue_poll(rk_queue.get(), EVENT_POLLING_INTERVAL_MS)); + + if (rd_kafka_event_type(rk_ev.get()) == RD_KAFKA_EVENT_CREATETOPICS_RESULT) break; + + if (rk_ev) + { + KAFKA_API_DO_LOG(Log::Level::Err, "rd_kafka_queue_poll got event[%s], with error[%s]", rd_kafka_event_name(rk_ev.get()), rd_kafka_event_error_string(rk_ev.get())); + rk_ev.reset(); + } + } while (std::chrono::steady_clock::now() < end); + + if (!rk_ev) + { + return admin::CreateTopicsResult(Error{RD_KAFKA_RESP_ERR__TIMED_OUT, "No response within the time limit"}); + } + + std::list errors; + + if (rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get())) + { + errors.emplace_back(respErr, rd_kafka_event_error_string(rk_ev.get())); + } + + // Fetch per-topic results + const rd_kafka_CreateTopics_result_t* res = rd_kafka_event_CreateTopics_result(rk_ev.get()); + std::size_t res_topic_cnt{}; + const rd_kafka_topic_result_t** res_topics = rd_kafka_CreateTopics_result_topics(res, &res_topic_cnt); + + errors.splice(errors.end(), getPerTopicResults(res_topics, res_topic_cnt)); + + // Return the error if any + if (!errors.empty()) + { + return admin::CreateTopicsResult{combineErrors(errors)}; + } + + // Update metedata + do + { + auto listResult = listTopics(); + if (!listResult.error) + { + return admin::CreateTopicsResult(Error{RD_KAFKA_RESP_ERR_NO_ERROR, "Success"}); + } + } while (std::chrono::steady_clock::now() < end); + + return admin::CreateTopicsResult(Error{RD_KAFKA_RESP_ERR__TIMED_OUT, "Updating metadata timed out"}); +} + +inline admin::DeleteTopicsResult +AdminClient::deleteTopics(const Topics& topics, std::chrono::milliseconds timeout) +{ + std::vector rkDeleteTopics; + + for (const auto& topic: topics) + { + rkDeleteTopics.emplace_back(rd_kafka_DeleteTopic_new(topic.c_str())); + assert(rkDeleteTopics.back()); + } + + std::vector rk_topics; + rk_topics.reserve(rkDeleteTopics.size()); + for (const auto& topic : rkDeleteTopics) { rk_topics.emplace_back(topic.get()); } + + auto rk_queue = rd_kafka_queue_unique_ptr(rd_kafka_queue_new(getClientHandle())); + + rd_kafka_DeleteTopics(getClientHandle(), rk_topics.data(), rk_topics.size(), nullptr, rk_queue.get()); + + auto rk_ev = rd_kafka_event_unique_ptr(); + + const auto end = std::chrono::steady_clock::now() + timeout; + do + { + rk_ev.reset(rd_kafka_queue_poll(rk_queue.get(), EVENT_POLLING_INTERVAL_MS)); + + if (rd_kafka_event_type(rk_ev.get()) == RD_KAFKA_EVENT_DELETETOPICS_RESULT) break; + + if (rk_ev) + { + KAFKA_API_DO_LOG(Log::Level::Err, "rd_kafka_queue_poll got event[%s], with error[%s]", rd_kafka_event_name(rk_ev.get()), rd_kafka_event_error_string(rk_ev.get())); + rk_ev.reset(); + } + } while (std::chrono::steady_clock::now() < end); + + if (!rk_ev) + { + return admin::DeleteTopicsResult(Error{RD_KAFKA_RESP_ERR__TIMED_OUT, "No response within the time limit"}); + } + + std::list errors; + + if (rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get())) + { + errors.emplace_back(respErr, rd_kafka_event_error_string(rk_ev.get())); + } + + // Fetch per-topic results + const rd_kafka_DeleteTopics_result_t* res = rd_kafka_event_DeleteTopics_result(rk_ev.get()); + std::size_t res_topic_cnt{}; + const rd_kafka_topic_result_t** 
res_topics = rd_kafka_DeleteTopics_result_topics(res, &res_topic_cnt); + + errors.splice(errors.end(), getPerTopicResults(res_topics, res_topic_cnt)); + + return admin::DeleteTopicsResult(combineErrors(errors)); +} + +inline admin::ListTopicsResult +AdminClient::listTopics(std::chrono::milliseconds timeout) +{ + const rd_kafka_metadata_t* rk_metadata = nullptr; + rd_kafka_resp_err_t err = rd_kafka_metadata(getClientHandle(), true, nullptr, &rk_metadata, convertMsDurationToInt(timeout)); + auto guard = rd_kafka_metadata_unique_ptr(rk_metadata); + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + { + return admin::ListTopicsResult(Error{err, rd_kafka_err2str(err)}); + } + + Topics names; + for (int i = 0; i < rk_metadata->topic_cnt; ++i) + { + names.insert(rk_metadata->topics[i].topic); + } + return admin::ListTopicsResult(names); +} + +inline admin::DeleteRecordsResult +AdminClient::deleteRecords(const TopicPartitionOffsets& topicPartitionOffsets, + std::chrono::milliseconds timeout) +{ + auto rk_queue = rd_kafka_queue_unique_ptr(rd_kafka_queue_new(getClientHandle())); + + rd_kafka_DeleteRecords_unique_ptr rkDeleteRecords(rd_kafka_DeleteRecords_new(createRkTopicPartitionList(topicPartitionOffsets))); + std::array rk_del_records{rkDeleteRecords.get()}; + + rd_kafka_DeleteRecords(getClientHandle(), rk_del_records.data(), rk_del_records.size(), nullptr, rk_queue.get()); + + auto rk_ev = rd_kafka_event_unique_ptr(); + + const auto end = std::chrono::steady_clock::now() + timeout; + do + { + rk_ev.reset(rd_kafka_queue_poll(rk_queue.get(), EVENT_POLLING_INTERVAL_MS)); + + if (rd_kafka_event_type(rk_ev.get()) == RD_KAFKA_EVENT_DELETERECORDS_RESULT) break; + + if (rk_ev) + { + KAFKA_API_DO_LOG(Log::Level::Err, "rd_kafka_queue_poll got event[%s], with error[%s]", rd_kafka_event_name(rk_ev.get()), rd_kafka_event_error_string(rk_ev.get())); + rk_ev.reset(); + } + } while (std::chrono::steady_clock::now() < end); + + if (!rk_ev) + { + return admin::DeleteRecordsResult(Error{RD_KAFKA_RESP_ERR__TIMED_OUT, "No response within the time limit"}); + } + + std::list errors; + + if (rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get())) + { + errors.emplace_back(respErr, rd_kafka_event_error_string(rk_ev.get())); + } + + const rd_kafka_DeleteRecords_result_t* res = rd_kafka_event_DeleteRecords_result(rk_ev.get()); + const rd_kafka_topic_partition_list_t* res_offsets = rd_kafka_DeleteRecords_result_offsets(res); + + errors.splice(errors.end(), getPerTopicPartitionResults(res_offsets)); + + return admin::DeleteRecordsResult(combineErrors(errors)); +} + +} } // end of KAFKA_API::clients + diff --git a/modern-cpp-kafka/include/kafka/AdminClientConfig.h b/modern-cpp-kafka/include/kafka/AdminClientConfig.h new file mode 100644 index 00000000..2bd6977e --- /dev/null +++ b/modern-cpp-kafka/include/kafka/AdminClientConfig.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include + + +namespace KAFKA_API { namespace clients { namespace admin { + +/** + * Configuration for the Kafka Consumer. + */ +class Config: public Properties +{ +public: + Config() = default; + Config(const Config&) = default; + explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} + + /** + * The string contains host:port pairs of brokers (splitted by ",") that the administrative client will use to establish initial connection to the Kafka cluster. + * Note: It's mandatory. + */ + static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; + + /** + * Protocol used to communicate with brokers. 
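+     * Available options (per librdkafka): plaintext, ssl, sasl_plaintext, sasl_ssl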
+ * Default value: plaintext + */ + static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; + + /** + * Shell command to refresh or acquire the client's Kerberos ticket. + */ + static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; + + /** + * The client's Kerberos principal name. + */ + static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; +}; + +} } } // end of KAFKA_API::clients::admin + diff --git a/modern-cpp-kafka/include/kafka/AdminCommon.h b/modern-cpp-kafka/include/kafka/AdminCommon.h new file mode 100644 index 00000000..f821d904 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/AdminCommon.h @@ -0,0 +1,70 @@ +#pragma once + +#include + +#include +#include + + +namespace KAFKA_API { namespace clients { namespace admin { + +/** + * The result of AdminClient::createTopics(). + */ +struct CreateTopicsResult +{ + explicit CreateTopicsResult(const Error& err): error(err) {} + + /** + * The result error. + */ + Error error; +}; + +/** + * The result of AdminClient::deleteTopics(). + */ +struct DeleteTopicsResult +{ + explicit DeleteTopicsResult(const Error& err): error(err) {} + + /** + * The result error. + */ + Error error; +}; + +/** + * The result of AdminClient::deleteRecords(). + */ +struct DeleteRecordsResult +{ + explicit DeleteRecordsResult(const Error& err): error(err) {} + + /** + * The result error. + */ + Error error; +}; + +/** + * The result of AdminClient::listTopics(). + */ +struct ListTopicsResult +{ + explicit ListTopicsResult(const Error& err): error(err) {} + explicit ListTopicsResult(Topics names): topics(std::move(names)) {} + + /** + * The result error. + */ + Error error; + + /** + * The topics fetched. + */ + Topics topics; +}; + +} } } // end of KAFKA_API::clients::admin + diff --git a/modern-cpp-kafka/include/kafka/BrokerMetadata.h b/modern-cpp-kafka/include/kafka/BrokerMetadata.h new file mode 100644 index 00000000..e0f86bce --- /dev/null +++ b/modern-cpp-kafka/include/kafka/BrokerMetadata.h @@ -0,0 +1,187 @@ +#pragma once + +#include + +#include +#include + +#include + +#include +#include + + +namespace KAFKA_API { + +/** + * The metadata info for a topic. + */ +struct BrokerMetadata { + /** + * Information for a Kafka node. + */ + struct Node + { + public: + using Id = int; + using Host = std::string; + using Port = int; + + Node(Id i, Host h, Port p): id(i), host(std::move(h)), port(p) {} + + /** + * The node id. + */ + Node::Id id; + + /** + * The host name. + */ + Node::Host host; + + /** + * The port. + */ + Node::Port port; + + /** + * Obtains explanatory string. + */ + std::string toString() const { return host + ":" + std::to_string(port) + "/" + std::to_string(id); } + }; + + /** + * It is used to describe per-partition state in the MetadataResponse. + */ + struct PartitionInfo + { + explicit PartitionInfo(Node::Id leaderId): leader(leaderId) {} + + void addReplica(Node::Id id) { replicas.emplace_back(id); } + void addInSyncReplica(Node::Id id) { inSyncReplicas.emplace_back(id); } + + /** + * The node id currently acting as a leader for this partition or null if there is no leader. + */ + Node::Id leader; + + /** + * The complete set of replicas id for this partition regardless of whether they are alive or up-to-date. + */ + std::vector replicas; + + /** + * The subset of the replicas id that are in sync, that is caught-up to the leader and ready to take over as leader if the leader should fail. 
+ */ + std::vector inSyncReplicas; + + }; + + /** + * Obtains explanatory string from Node::Id. + */ + std::string getNodeDescription(Node::Id id) const; + + /** + * Obtains explanatory string for PartitionInfo. + */ + std::string toString(const PartitionInfo& partitionInfo) const; + + /** + * The BrokerMetadata is per-topic constructed. + */ + explicit BrokerMetadata(Topic topic): _topic(std::move(topic)) {} + + /** + * The topic name. + */ + const std::string& topic() const { return _topic; } + + /** + * The nodes info in the MetadataResponse. + */ + std::vector> nodes() const; + + /** + * The partitions' state in the MetadataResponse. + */ + const std::map& partitions() const { return _partitions; } + + /** + * Obtains explanatory string. + */ + std::string toString() const; + + void setOrigNodeName(const std::string& origNodeName) { _origNodeName = origNodeName; } + void addNode(Node::Id nodeId, const Node::Host& host, Node::Port port) { _nodes[nodeId] = std::make_shared(nodeId, host, port); } + void addPartitionInfo(Partition partition, const PartitionInfo& partitionInfo) { _partitions.emplace(partition, partitionInfo); } + +private: + Topic _topic; + std::string _origNodeName; + std::map> _nodes; + std::map _partitions; +}; + +inline std::vector> +BrokerMetadata::nodes() const +{ + std::vector> ret; + for (const auto& nodeInfo: _nodes) + { + ret.emplace_back(nodeInfo.second); + } + return ret; +} + +inline std::string +BrokerMetadata::getNodeDescription(Node::Id id) const +{ + const auto& found = _nodes.find(id); + if (found == _nodes.cend()) return "-:-/" + std::to_string(id); + + auto node = found->second; + return node->host + ":" + std::to_string(node->port) + "/" + std::to_string(id); +} + +inline std::string +BrokerMetadata::toString(const PartitionInfo& partitionInfo) const +{ + std::ostringstream oss; + + auto streamNodes = [this](std::ostringstream& ss, const std::vector& nodeIds) -> std::ostringstream& { + bool isTheFirst = true; + for (const auto id: nodeIds) + { + ss << (isTheFirst ? (isTheFirst = false, "") : ", ") << getNodeDescription(id); + } + return ss; + }; + + oss << "leader[" << getNodeDescription(partitionInfo.leader) << "], replicas["; + streamNodes(oss, partitionInfo.replicas) << "], inSyncReplicas["; + streamNodes(oss, partitionInfo.inSyncReplicas) << "]"; + + return oss.str(); +} + +inline std::string +BrokerMetadata::toString() const +{ + std::ostringstream oss; + + oss << "originatingNode[" << _origNodeName << "], topic[" << _topic << "], partitions{"; + bool isTheFirst = true; + for (const auto& partitionInfoPair: _partitions) + { + const Partition partition = partitionInfoPair.first; + const PartitionInfo& partitionInfo = partitionInfoPair.second; + oss << (isTheFirst ? (isTheFirst = false, "") : "; ") << partition << ": " << toString(partitionInfo); + } + oss << "}"; + + return oss.str(); +} + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/ConsumerCommon.h b/modern-cpp-kafka/include/kafka/ConsumerCommon.h new file mode 100644 index 00000000..a1150c3a --- /dev/null +++ b/modern-cpp-kafka/include/kafka/ConsumerCommon.h @@ -0,0 +1,65 @@ +#pragma once + +#include + +#include +#include +#include + +#include + +#include + + +namespace KAFKA_API { namespace clients { namespace consumer { + + /** + * To identify which kind of re-balance event is handling, when the set of partitions assigned to the consumer changes. 
+ * It's guaranteed that rebalance callback will be called twice (first with PartitionsRevoked, and then with PartitionsAssigned). + */ + enum class RebalanceEventType { PartitionsAssigned, PartitionsRevoked }; + + /** + * A callback interface that the user can implement to trigger custom actions when the set of partitions assigned to the consumer changes. + */ + using RebalanceCallback = std::function; + + /** + * Null RebalanceCallback + */ +#if COMPILER_SUPPORTS_CPP_17 + const inline RebalanceCallback NullRebalanceCallback = RebalanceCallback{}; +#else + const static RebalanceCallback NullRebalanceCallback = RebalanceCallback{}; +#endif + + /** + * A callback interface that the user can implement to trigger custom actions when a commit request completes. + */ + using OffsetCommitCallback = std::function; + + /** + * Null OffsetCommitCallback + */ +#if COMPILER_SUPPORTS_CPP_17 + const inline OffsetCommitCallback NullOffsetCommitCallback = OffsetCommitCallback{}; +#else + const static OffsetCommitCallback NullOffsetCommitCallback = OffsetCommitCallback{}; +#endif + + /** + * A metadata struct containing the consumer group information. + */ + class ConsumerGroupMetadata + { + public: + explicit ConsumerGroupMetadata(rd_kafka_consumer_group_metadata_t* p): _rkConsumerGroupMetadata(p) {} + + const rd_kafka_consumer_group_metadata_t* rawHandle() const { return _rkConsumerGroupMetadata.get(); } + + private: + rd_kafka_consumer_group_metadata_unique_ptr _rkConsumerGroupMetadata; + }; + +} } } // end of KAFKA_API::clients::consumer + diff --git a/modern-cpp-kafka/include/kafka/ConsumerConfig.h b/modern-cpp-kafka/include/kafka/ConsumerConfig.h new file mode 100644 index 00000000..95a7db1f --- /dev/null +++ b/modern-cpp-kafka/include/kafka/ConsumerConfig.h @@ -0,0 +1,115 @@ +#pragma once + +#include + +#include + + +namespace KAFKA_API { namespace clients { namespace consumer { + +/** + * Configuration for the Kafka Consumer. + */ +class Config: public Properties +{ +public: + Config() = default; + Config(const Config&) = default; + explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} + + /** + * The string contains host:port pairs of brokers (splitted by ",") that the consumer will use to establish initial connection to the Kafka cluster. + * Note: It's mandatory. + */ + static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; + + /** + * Group identifier. + * Note: It's better to configure it manually, otherwise a random one would be used for it. + * + */ + static const constexpr char* GROUP_ID = "group.id"; + + /** + * Client identifier. + */ + static const constexpr char* CLIENT_ID = "client.id"; + + /** + * Automatically commits previously polled offsets on each `poll` operation. + */ + static const constexpr char* ENABLE_AUTO_COMMIT = "enable.auto.commit"; + + /** + * This property controls the behavior of the consumer when it starts reading a partition for which it doesn't have a valid committed offset. + * The "latest" means the consumer will begin reading the newest records written after the consumer started. While "earliest" means that the consumer will read from the very beginning. + * Available options: latest, earliest + * Default value: latest + */ + static const constexpr char* AUTO_OFFSET_RESET = "auto.offset.reset"; + + /** + * Emit RD_KAFKA_RESP_ERR_PARTITION_EOF event whenever the consumer reaches the end of a partition. 
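+     * Note: The event is delivered as a polled record whose error() is RD_KAFKA_RESP_ERR__PARTITION_EOF.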
+ * Default value: false + */ + static const constexpr char* ENABLE_PARTITION_EOF = "enable.partition.eof"; + + /** + * This controls the maximum number of records that a single call to poll() will return. + * Default value: 500 + */ + static const constexpr char* MAX_POLL_RECORDS = "max.poll.records"; + + /** + * Minimum number of messages per topic/partition tries to maintain in the local consumer queue. + * Note: With a larger value configured, the consumer would send FetchRequest towards brokers more frequently. + * Defalut value: 100000 + */ + static const constexpr char* QUEUED_MIN_MESSAGES = "queued.min.messages"; + + /** + * Client group session and failure detection timeout. + * If no heartbeat received by the broker within this timeout, the broker will remove the consumer and trigger a rebalance. + * Default value: 10000 + */ + static const constexpr char* SESSION_TIMEOUT_MS = "session.timeout.ms"; + + /** + * Timeout for network requests. + * Default value: 60000 + */ + static const constexpr char* SOCKET_TIMEOUT_MS = "socket.timeout.ms"; + + /** + * Control how to read messages written transactionally. + * Available options: read_uncommitted, read_committed + * Default value: read_committed + */ + static const constexpr char* ISOLATION_LEVEL = "isolation.level"; + + /* + * The name of one or more partition assignment strategies. + * The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. + * Available options: range, roundrobin, cooperative-sticky + * Default value: range,roundrobin + */ + static const constexpr char* PARTITION_ASSIGNMENT_STRATEGY = "partition.assignment.strategy"; + /** + * Protocol used to communicate with brokers. + * Default value: plaintext + */ + static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; + + /** + * Shell command to refresh or acquire the client's Kerberos ticket. + */ + static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; + + /** + * The client's Kerberos principal name. + */ + static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; +}; + +} } } // end of KAFKA_API::clients::consumer + diff --git a/modern-cpp-kafka/include/kafka/ConsumerRecord.h b/modern-cpp-kafka/include/kafka/ConsumerRecord.h new file mode 100644 index 00000000..e78f6030 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/ConsumerRecord.h @@ -0,0 +1,154 @@ +#pragma once + +#include + +#include +#include +#include +#include + +#include + +#include + + +namespace KAFKA_API { namespace clients { namespace consumer { + +/** + * A key/value pair to be received from Kafka. + * This also consists of a topic name and a partition number from which the record is being received, an offset that points to the record in a Kafka partition + */ +class ConsumerRecord +{ +public: + // ConsumerRecord will take the ownership of msg (rd_kafka_message_t*) + explicit ConsumerRecord(rd_kafka_message_t* msg): _rk_msg(msg, rd_kafka_message_destroy) {} + + /** + * The topic this record is received from. + */ + Topic topic() const { return _rk_msg->rkt ? rd_kafka_topic_name(_rk_msg->rkt): ""; } + + /** + * The partition from which this record is received. + */ + Partition partition() const { return _rk_msg->partition; } + + /** + * The position of this record in the corresponding Kafka partition. + */ + Offset offset() const { return _rk_msg->offset; } + + /** + * The key (or null if no key is specified). 
+ */ + Key key() const { return Key(_rk_msg->key, _rk_msg->key_len); } + + /** + * The value. + */ + Value value() const { return Value(_rk_msg->payload, _rk_msg->len); } + + /** + * The timestamp of the record. + */ + Timestamp timestamp() const + { + rd_kafka_timestamp_type_t tstype{}; + Timestamp::Value tsValue = rd_kafka_message_timestamp(_rk_msg.get(), &tstype); + return {tsValue, tstype}; + } + + /** + * The headers of the record. + */ + Headers headers() const; + + /** + * Return just one (the very last) header's value for the given key. + */ + Header::Value lastHeaderValue(const Header::Key& key); + + /** + * The error. + * + * Possible cases: + * 1. Success + * - RD_KAFKA_RESP_ERR_NO_ERROR (0), -- got a message successfully + * - RD_KAFKA_RESP_ERR__PARTITION_EOF, -- reached the end of a partition (got no message) + * 2. Failure + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + Error error() const { return Error{_rk_msg->err}; } + + /** + * Obtains explanatory string. + */ + std::string toString() const; + +private: + using rd_kafka_message_shared_ptr = std::shared_ptr; + rd_kafka_message_shared_ptr _rk_msg; +}; + +inline Headers +ConsumerRecord::headers() const +{ + Headers headers; + + rd_kafka_headers_t* hdrs = nullptr; + if (rd_kafka_message_headers(_rk_msg.get(), &hdrs) != RD_KAFKA_RESP_ERR_NO_ERROR) + { + return headers; + } + + headers.reserve(rd_kafka_header_cnt(hdrs)); + + const char* name = nullptr; + const void* valuePtr = nullptr; + std::size_t valueSize = 0; + for (std::size_t i = 0; !rd_kafka_header_get_all(hdrs, i, &name, &valuePtr, &valueSize); i++) + { + headers.emplace_back(name, Header::Value(valuePtr, valueSize)); + } + + return headers; +} + +inline Header::Value +ConsumerRecord::lastHeaderValue(const Header::Key& key) +{ + rd_kafka_headers_t* hdrs = nullptr; + if (rd_kafka_message_headers(_rk_msg.get(), &hdrs) != RD_KAFKA_RESP_ERR_NO_ERROR) + { + return Header::Value(); + } + + const void* valuePtr = nullptr; + std::size_t valueSize = 0; + return (rd_kafka_header_get_last(hdrs, key.c_str(), &valuePtr, &valueSize) == RD_KAFKA_RESP_ERR_NO_ERROR) ? + Header::Value(valuePtr, valueSize) : Header::Value(); +} + +inline std::string +ConsumerRecord::toString() const +{ + std::ostringstream oss; + if (!error()) + { + oss << topic() << "-" << partition() << ":" << offset() << ", " << timestamp().toString() << ", " + << (key().size() ? 
(key().toString() + "/") : "") << value().toString(); + } + else if (error().value() == RD_KAFKA_RESP_ERR__PARTITION_EOF) + { + oss << "EOF[" << topic() << "-" << partition() << ":" << offset() << "]"; + } + else + { + oss << "ERROR[" << error().message() << ", " << topic() << "-" << partition() << ":" << offset() << "]"; + } + return oss.str(); +} + +} } } // end of KAFKA_API::clients::consumer + diff --git a/modern-cpp-kafka/include/kafka/Error.h b/modern-cpp-kafka/include/kafka/Error.h new file mode 100644 index 00000000..4212d1b1 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Error.h @@ -0,0 +1,148 @@ +#pragma once + +#include + +#include + +#include + +#include +#include + + +namespace KAFKA_API { + +struct ErrorCategory: public std::error_category +{ + const char* name() const noexcept override { return "KafkaError"; } + std::string message(int ev) const override { return rd_kafka_err2str(static_cast(ev)); } + + template + struct Global { static ErrorCategory category; }; +}; + +template +ErrorCategory ErrorCategory::Global::category; + + +/** + * Unified error type. + */ +class Error +{ +public: + // The error with rich info + explicit Error(rd_kafka_error_t* error = nullptr): _rkError(error, RkErrorDeleter) {} + // The error with brief info + explicit Error(rd_kafka_resp_err_t respErr): _respErr(respErr) {} + // The error with detailed message + Error(rd_kafka_resp_err_t respErr, std::string message, bool fatal = false) + : _respErr(respErr), _message(std::move(message)), _isFatal(fatal) {} + // Copy constructor + Error(const Error& error) { *this = error; } + + // Assignment operator + Error& operator=(const Error& error) + { + if (this == &error) return *this; + + _rkError.reset(); + + _respErr = static_cast(error.value()); + _message = error._message; + _isFatal = error.isFatal(); + _txnRequiresAbort = error.transactionRequiresAbort(); + _isRetriable = error.isRetriable(); + + return *this; + } + + /** + * Check if the error is valid. + */ + explicit operator bool() const { return static_cast(value()); } + + /** + * Conversion to `std::error_code` + */ + explicit operator std::error_code() const + { + return {value(), ErrorCategory::Global<>::category}; + } + + /** + * Obtains the underlying error code value. + * + * Actually, it's the same as 'rd_kafka_resp_err_t', which is defined by librdkafka. + * 1. The negative values are for internal errors. + * 2. Non-negative values are for external errors. See the defination at, + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + int value() const + { + return static_cast(_rkError ? rd_kafka_error_code(_rkError.get()) : _respErr); + } + + /** + * Readable error string. + */ + std::string message() const + { + return _message ? *_message : + (_rkError ? rd_kafka_error_string(_rkError.get()) : rd_kafka_err2str(_respErr)); + } + + /** + * Detailed error string. + */ + std::string toString() const + { + std::ostringstream oss; + + oss << rd_kafka_err2str(static_cast(value())) << " [" << value() << "]" << (isFatal() ? " fatal" : ""); + if (transactionRequiresAbort()) oss << " | transaction-requires-abort"; + if (auto retriable = isRetriable()) oss << " | " << (*retriable ? "retriable" : "non-retriable"); + if (_message) oss << " | " << *_message; + + return oss.str(); + } + + /** + * Fatal error indicates that the client instance is no longer usable. + */ + bool isFatal() const + { + return _rkError ? 
rd_kafka_error_is_fatal(_rkError.get()) : _isFatal; + } + + /** + * Show whether the operation may be retried. + */ + Optional isRetriable() const + { + return _rkError ? rd_kafka_error_is_retriable(_rkError.get()) : _isRetriable; + } + + /** + * Show whether the error is an abortable transaction error. + * + * Note: + * 1. Only valid for transactional API. + * 2. If `true`, the producer must call `abortTransaction` and start a new transaction with `beginTransaction` to proceed with transactions. + */ + bool transactionRequiresAbort() const + { + return _rkError ? rd_kafka_error_txn_requires_abort(_rkError.get()) : false; + } + +private: + rd_kafka_error_shared_ptr _rkError; // For error with rich info + rd_kafka_resp_err_t _respErr{}; // For error with a simple response code + Optional _message; // Additional detailed message (if any) + bool _isFatal = false; + bool _txnRequiresAbort = false; + Optional _isRetriable; // Retriable flag (if any) +}; + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/Header.h b/modern-cpp-kafka/include/kafka/Header.h new file mode 100644 index 00000000..58cc6767 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Header.h @@ -0,0 +1,65 @@ +#pragma once + +#include + +#include + +#include +#include +#include + + +namespace KAFKA_API { + +/** + * Message Header (with a key value pair) + */ +struct Header +{ + using Key = std::string; + using Value = ConstBuffer; + + Header() = default; + Header(Key k, Value v): key(std::move(k)), value(v) {} + + /** + * Obtains explanatory string. + */ + std::string toString() const + { + return key + ":" + value.toString(); + } + + Key key; + Value value; +}; + +/** + * Message Headers. + */ +using Headers = std::vector
; + +/** + * Null Headers. + */ +#if COMPILER_SUPPORTS_CPP_17 +const inline Headers NullHeaders = Headers{}; +#else +const static Headers NullHeaders = Headers{}; +#endif + +/** + * Obtains explanatory string for Headers. + */ +inline std::string toString(const Headers& headers) +{ + std::string ret; + std::for_each(headers.cbegin(), headers.cend(), + [&ret](const auto& header) { + ret.append(ret.empty() ? "" : ",").append(header.toString()); + }); + return ret; +} + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/KafkaClient.h b/modern-cpp-kafka/include/kafka/KafkaClient.h new file mode 100644 index 00000000..f6b07297 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/KafkaClient.h @@ -0,0 +1,626 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace KAFKA_API { namespace clients { + +/** + * The base class for Kafka clients. + */ +class KafkaClient +{ +public: + /** + * The option shows whether user wants to call `pollEvents()` manually to trigger internal callbacks. + */ + enum class EventsPollingOption { Manual, Auto }; + + virtual ~KafkaClient() = default; + + /** + * Get the client id. + */ + const std::string& clientId() const { return _clientId; } + + /** + * Get the client name (i.e. client type + id). + */ + const std::string& name() const { return _clientName; } + + /** + * Set a log callback for kafka clients, which do not have a client specific logging callback configured (see `setLogger`). + */ + static void setGlobalLogger(Logger logger = NullLogger) + { + std::call_once(Global<>::initOnce, [](){}); // Then no need to init within KafkaClient constructor + Global<>::logger = std::move(logger); + } + + /** + * Set the log callback for the kafka client (it's a per-client setting). + */ + void setLogger(Logger logger) { _logger = std::move(logger); } + + /** + * Set log level for the kafka client (the default value: 5). + */ + void setLogLevel(int level); + + /** + * Callback type for statistics info dumping. + */ + using StatsCallback = std::function; + + /** + * Set callback to receive the periodic statistics info. + * Note: 1) It only works while the "statistics.interval.ms" property is configured with a non-0 value. + * 2) The callback would be triggered periodically, receiving the internal statistics info (with JSON format) emited from librdkafka. + */ + void setStatsCallback(StatsCallback cb) { _statsCb = std::move(cb); } + + /** + * Callback type for error notification. + */ + using ErrorCallback = std::function; + + /** + * Set callback for error notification. + */ + void setErrorCallback(ErrorCallback cb) { _errorCb = std::move(cb); } + + /** + * Return the properties which took effect. + */ + const Properties& properties() const { return _properties; } + + /** + * Fetch the effected property (including the property internally set by librdkafka). + */ + Optional getProperty(const std::string& name) const; + + /** + * Call the OffsetCommit callbacks (if any) + * Note: The Kafka client should be constructed with option `EventsPollingOption::Manual`. + */ + void pollEvents(std::chrono::milliseconds timeout) + { + _pollable->poll(convertMsDurationToInt(timeout)); + } + + /** + * Fetch matadata from a available broker. + * Note: the Metadata response information may trigger a re-join if any subscribed topic has changed partition count or existence state. 
+ */ + Optional fetchBrokerMetadata(const std::string& topic, + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_METADATA_TIMEOUT_MS), + bool disableErrorLogging = false); + + template + void doLog(int level, const char* filename, int lineno, const char* format, Args... args) const + { + const auto& logger = _logger ? _logger : Global<>::logger; + if (level >= 0 && level <= _logLevel && logger) + { + LogBuffer logBuffer; + logBuffer.print("%s ", name().c_str()).print(format, args...); + logger(level, filename, lineno, logBuffer.c_str()); + } + } + + void doLog(int level, const char* filename, int lineno, const char* msg) const + { + doLog(level, filename, lineno, "%s", msg); + } + +#define KAFKA_API_DO_LOG(lvl, ...) doLog(lvl, __FILE__, __LINE__, ##__VA_ARGS__) + + template + static void doGlobalLog(int level, const char* filename, int lineno, const char* format, Args... args) + { + if (!Global<>::logger) return; + + LogBuffer logBuffer; + logBuffer.print(format, args...); + Global<>::logger(level, filename, lineno, logBuffer.c_str()); + } + static void doGlobalLog(int level, const char* filename, int lineno, const char* msg) + { + doGlobalLog(level, filename, lineno, "%s", msg); + } + +/** + * Log for kafka clients, with the callback which `setGlobalLogger` assigned. + * + * E.g, + * KAFKA_API_LOG(Log::Level::Err, "something wrong happened! %s", detailedInfo.c_str()); + */ +#define KAFKA_API_LOG(lvl, ...) KafkaClient::doGlobalLog(lvl, __FILE__, __LINE__, ##__VA_ARGS__) + +#if COMPILER_SUPPORTS_CPP_17 + static constexpr int DEFAULT_METADATA_TIMEOUT_MS = 10000; +#else + enum { DEFAULT_METADATA_TIMEOUT_MS = 10000 }; +#endif + +protected: + // There're 3 derived classes: KafkaConsumer, KafkaProducer, AdminClient + enum class ClientType { KafkaConsumer, KafkaProducer, AdminClient }; + + using ConfigCallbacksRegister = std::function; + + KafkaClient(ClientType clientType, + const Properties& properties, + const ConfigCallbacksRegister& extraConfigRegister = ConfigCallbacksRegister{}, + EventsPollingOption eventsPollingOption = EventsPollingOption::Auto); + + rd_kafka_t* getClientHandle() const { return _rk.get(); } + + static const KafkaClient& kafkaClient(const rd_kafka_t* rk) { return *static_cast(rd_kafka_opaque(rk)); } + static KafkaClient& kafkaClient(rd_kafka_t* rk) { return *static_cast(rd_kafka_opaque(rk)); } + + static constexpr int TIMEOUT_INFINITE = -1; + + static int convertMsDurationToInt(std::chrono::milliseconds ms) + { + return ms > std::chrono::milliseconds(INT_MAX) ? 
TIMEOUT_INFINITE : static_cast(ms.count()); + } + + // Show whether it's using automatical events polling + bool isWithAutoEventsPolling() const { return _eventsPollingOption == EventsPollingOption::Auto; } + + // Buffer size for single line logging + static const constexpr int LOG_BUFFER_SIZE = 1024; + + // Global logger + template + struct Global + { + static Logger logger; + static std::once_flag initOnce; + }; + + // Validate properties (and fix it if necesary) + static Properties validateAndReformProperties(const Properties& properties); + + // To avoid double-close + bool _opened = false; + + // Accepted properties + Properties _properties; + +#if COMPILER_SUPPORTS_CPP_17 + static constexpr int EVENT_POLLING_INTERVAL_MS = 100; +#else + enum { EVENT_POLLING_INTERVAL_MS = 100 }; +#endif + +private: + std::string _clientId; + std::string _clientName; + std::atomic _logLevel = {Log::Level::Notice}; + Logger _logger; + StatsCallback _statsCb; + ErrorCallback _errorCb; + rd_kafka_unique_ptr _rk; + EventsPollingOption _eventsPollingOption; + + static std::string getClientTypeString(ClientType type) + { + return (type == ClientType::KafkaConsumer ? "KafkaConsumer" + : (type == ClientType::KafkaProducer ? "KafkaProducer" : "AdminClient")); + } + + // Log callback (for librdkafka) + static void logCallback(const rd_kafka_t* rk, int level, const char* fac, const char* buf); + + // Statistics callback (for librdkafka) + static int statsCallback(rd_kafka_t* rk, char* jsonStrBuf, size_t jsonStrLen, void* opaque); + + // Error callback (for librdkafka) + static void errorCallback(rd_kafka_t* rk, int err, const char* reason, void* opaque); + + // Log callback (for class instance) + void onLog(int level, const char* fac, const char* buf) const; + + // Stats callback (for class instance) + void onStats(const std::string& jsonString); + + // Error callback (for class instance) + void onError(const Error& error); + + static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; + static const constexpr char* CLIENT_ID = "client.id"; + static const constexpr char* LOG_LEVEL = "log_level"; + +protected: + struct Pollable + { + virtual ~Pollable() = default; + virtual void poll(int timeoutMs) = 0; + }; + + class PollableCallback: public Pollable + { + public: + using Callback = std::function; + + explicit PollableCallback(Callback cb): _cb(std::move(cb)) {} + + void poll(int timeoutMs) override { _cb(timeoutMs); } + + private: + const Callback _cb; + }; + + class PollThread + { + public: + explicit PollThread(Pollable& pollable) + : _running(true), _thread(keepPolling, std::ref(_running), std::ref(pollable)) + { + } + + ~PollThread() + { + _running = false; + + if (_thread.joinable()) _thread.join(); + } + + private: + static void keepPolling(std::atomic_bool& running, Pollable& pollable) + { + while (running.load()) + { + pollable.poll(CALLBACK_POLLING_INTERVAL_MS); + } + } + + static constexpr int CALLBACK_POLLING_INTERVAL_MS = 10; + + std::atomic_bool _running; + std::thread _thread; + }; + + void startBackgroundPollingIfNecessary(const PollableCallback::Callback& pollableCallback) + { + _pollable = std::make_unique(pollableCallback); + + if (isWithAutoEventsPolling()) _pollThread = std::make_unique(*_pollable); + } + + void stopBackgroundPollingIfNecessary() + { + _pollThread.reset(); // Join the polling thread (in case it's running) + + _pollable.reset(); + } + +private: + std::unique_ptr _pollable; + std::unique_ptr _pollThread; +}; + +template +Logger KafkaClient::Global::logger; + +template 
+std::once_flag KafkaClient::Global::initOnce; + +inline +KafkaClient::KafkaClient(ClientType clientType, + const Properties& properties, + const ConfigCallbacksRegister& extraConfigRegister, + EventsPollingOption eventsPollingOption) + : _eventsPollingOption(eventsPollingOption) +{ + static const std::set PRIVATE_PROPERTY_KEYS = { "max.poll.records" }; + + // Save clientID + if (auto clientId = properties.getProperty(CLIENT_ID)) + { + _clientId = *clientId; + _clientName = getClientTypeString(clientType) + "[" + _clientId + "]"; + } + + // Init global logger + std::call_once(Global<>::initOnce, [](){ Global<>::logger = DefaultLogger; }); + + // Save LogLevel + if (auto logLevel = properties.getProperty(LOG_LEVEL)) + { + try + { + _logLevel = std::stoi(*logLevel); + } + catch (const std::exception& e) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG, std::string("Invalid log_level[").append(*logLevel).append("], which must be an number!").append(e.what()))); + } + + if (_logLevel < Log::Level::Emerg || _logLevel > Log::Level::Debug) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG, std::string("Invalid log_level[").append(*logLevel).append("], which must be a value between 0 and 7!"))); + } + } + + LogBuffer errInfo; + + auto rk_conf = rd_kafka_conf_unique_ptr(rd_kafka_conf_new()); + + for (const auto& prop: properties.map()) + { + // Those private properties are only available for `C++ wrapper`, not for librdkafka + if (PRIVATE_PROPERTY_KEYS.count(prop.first)) + { + _properties.put(prop.first, prop.second); + continue; + } + + rd_kafka_conf_res_t result = rd_kafka_conf_set(rk_conf.get(), prop.first.c_str(), prop.second.c_str(), errInfo.str(), errInfo.capacity()); + if (result == RD_KAFKA_CONF_OK) + { + _properties.put(prop.first, prop.second); + } + else + { + KAFKA_API_DO_LOG(Log::Level::Err, "failed to be initialized with property[%s:%s], result[%d]", prop.first.c_str(), prop.second.c_str(), result); + } + } + + // Save KafkaClient's raw pointer to the "opaque" field, thus we could fetch it later (for kinds of callbacks) + rd_kafka_conf_set_opaque(rk_conf.get(), this); + + // Log Callback + rd_kafka_conf_set_log_cb(rk_conf.get(), KafkaClient::logCallback); + + // Statistics Callback + rd_kafka_conf_set_stats_cb(rk_conf.get(), KafkaClient::statsCallback); + + // Error Callback + rd_kafka_conf_set_error_cb(rk_conf.get(), KafkaClient::errorCallback); + + // Other Callbacks + if (extraConfigRegister) extraConfigRegister(rk_conf.get()); + + // Set client handler + _rk.reset(rd_kafka_new((clientType == ClientType::KafkaConsumer ? RD_KAFKA_CONSUMER : RD_KAFKA_PRODUCER), + rk_conf.release(), // rk_conf's ownship would be transferred to rk, after the "rd_kafka_new()" call + errInfo.clear().str(), + errInfo.capacity())); + KAFKA_THROW_IF_WITH_ERROR(Error(rd_kafka_last_error())); + + // Add brokers + auto brokers = properties.getProperty(BOOTSTRAP_SERVERS); + if (rd_kafka_brokers_add(getClientHandle(), brokers->c_str()) == 0) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG,\ + "No broker could be added successfully, BOOTSTRAP_SERVERS=[" + *brokers + "]")); + } + + _opened = true; +} + +inline Properties +KafkaClient::validateAndReformProperties(const Properties& properties) +{ + auto newProperties = properties; + + // BOOTSTRAP_SERVERS property is mandatory + if (!newProperties.getProperty(BOOTSTRAP_SERVERS)) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG,\ + "Validation failed! 
With no property [" + std::string(BOOTSTRAP_SERVERS) + "]")); + } + + // If no "client.id" configured, generate a random one for user + if (!newProperties.getProperty(CLIENT_ID)) + { + newProperties.put(CLIENT_ID, utility::getRandomString()); + } + + // If no "log_level" configured, use Log::Level::Notice as default + if (!newProperties.getProperty(LOG_LEVEL)) + { + newProperties.put(LOG_LEVEL, std::to_string(static_cast(Log::Level::Notice))); + } + + return newProperties; +} + +inline Optional +KafkaClient::getProperty(const std::string& name) const +{ + // Find it in pre-saved properties + if (auto property = _properties.getProperty(name)) return *property; + + constexpr int DEFAULT_BUF_SIZE = 512; + + const rd_kafka_conf_t* conf = rd_kafka_conf(getClientHandle()); + + std::vector valueBuf(DEFAULT_BUF_SIZE); + std::size_t valueSize = valueBuf.size(); + + // Try with a default buf size. If could not find the property, return immediately. + if (rd_kafka_conf_get(conf, name.c_str(), valueBuf.data(), &valueSize) != RD_KAFKA_CONF_OK) return Optional{}; + + // If the default buf size is not big enough, retry with a larger one + if (valueSize > valueBuf.size()) + { + valueBuf.resize(valueSize); + [[maybe_unused]] rd_kafka_conf_res_t result = rd_kafka_conf_get(conf, name.c_str(), valueBuf.data(), &valueSize); + assert(result == RD_KAFKA_CONF_OK); + } + + return std::string(valueBuf.data()); +} + +inline void +KafkaClient::setLogLevel(int level) +{ + _logLevel = level < Log::Level::Emerg ? Log::Level::Emerg : (level > Log::Level::Debug ? Log::Level::Debug : level); + rd_kafka_set_log_level(getClientHandle(), _logLevel); +} + +inline void +KafkaClient::onLog(int level, const char* fac, const char* buf) const +{ + doLog(level, "LIBRDKAFKA", 0, "%s | %s", fac, buf); // The log is coming from librdkafka +} + +inline void +KafkaClient::logCallback(const rd_kafka_t* rk, int level, const char* fac, const char* buf) +{ + kafkaClient(rk).onLog(level, fac, buf); +} + +inline void +KafkaClient::onStats(const std::string& jsonString) +{ + if (_statsCb) _statsCb(jsonString); +} + +inline int +KafkaClient::statsCallback(rd_kafka_t* rk, char* jsonStrBuf, size_t jsonStrLen, void* /*opaque*/) +{ + std::string stats(jsonStrBuf, jsonStrBuf+jsonStrLen); + kafkaClient(rk).onStats(stats); + return 0; +} + +inline void +KafkaClient::onError(const Error& error) +{ + if (_errorCb) _errorCb(error); +} + +inline void +KafkaClient::errorCallback(rd_kafka_t* rk, int err, const char* reason, void* /*opaque*/) +{ + auto respErr = static_cast(err); + + Error error; + if (respErr != RD_KAFKA_RESP_ERR__FATAL) + { + error = Error{respErr, reason}; + } + else + { + LogBuffer errInfo; + respErr = rd_kafka_fatal_error(rk, errInfo.str(), errInfo.capacity()); + error = Error{respErr, errInfo.c_str(), true}; + } + + kafkaClient(rk).onError(error); +} + +inline Optional +KafkaClient::fetchBrokerMetadata(const std::string& topic, std::chrono::milliseconds timeout, bool disableErrorLogging) +{ + const rd_kafka_metadata_t* rk_metadata = nullptr; + // Here the input parameter for `all_topics` is `true`, since we want the `cgrp_update` + rd_kafka_resp_err_t err = rd_kafka_metadata(getClientHandle(), true, nullptr, &rk_metadata, convertMsDurationToInt(timeout)); + + auto guard = rd_kafka_metadata_unique_ptr(rk_metadata); + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + { + if (!disableErrorLogging) + { + KAFKA_API_DO_LOG(Log::Level::Err, "failed to get BrokerMetadata! 
error[%s]", rd_kafka_err2str(err)); + } + return Optional{}; + } + + const rd_kafka_metadata_topic* metadata_topic = nullptr; + for (int i = 0; i < rk_metadata->topic_cnt; ++i) + { + if (rk_metadata->topics[i].topic == topic) + { + metadata_topic = &rk_metadata->topics[i]; + break; + } + } + + if (!metadata_topic || metadata_topic->err) + { + if (!disableErrorLogging) + { + if (!metadata_topic) + { + KAFKA_API_DO_LOG(Log::Level::Err, "failed to find BrokerMetadata for topic[%s]", topic.c_str()); + } + else + { + KAFKA_API_DO_LOG(Log::Level::Err, "failed to get BrokerMetadata for topic[%s]! error[%s]", topic.c_str(), rd_kafka_err2str(metadata_topic->err)); + } + } + return Optional{}; + } + + // Construct the BrokerMetadata + BrokerMetadata metadata(metadata_topic->topic); + metadata.setOrigNodeName(rk_metadata->orig_broker_name ? std::string(rk_metadata->orig_broker_name) : ""); + + for (int i = 0; i < rk_metadata->broker_cnt; ++i) + { + metadata.addNode(rk_metadata->brokers[i].id, rk_metadata->brokers[i].host, rk_metadata->brokers[i].port); + } + + for (int i = 0; i < metadata_topic->partition_cnt; ++i) + { + const rd_kafka_metadata_partition& metadata_partition = metadata_topic->partitions[i]; + + Partition partition = metadata_partition.id; + + if (metadata_partition.err != 0) + { + if (!disableErrorLogging) + { + KAFKA_API_DO_LOG(Log::Level::Err, "got error[%s] while constructing BrokerMetadata for topic[%s]-partition[%d]", rd_kafka_err2str(metadata_partition.err), topic.c_str(), partition); + } + + continue; + } + + BrokerMetadata::PartitionInfo partitionInfo(metadata_partition.leader); + + for (int j = 0; j < metadata_partition.replica_cnt; ++j) + { + partitionInfo.addReplica(metadata_partition.replicas[j]); + } + + for (int j = 0; j < metadata_partition.isr_cnt; ++j) + { + partitionInfo.addInSyncReplica(metadata_partition.isrs[j]); + } + + metadata.addPartitionInfo(partition, partitionInfo); + } + + return metadata; +} + + +} } // end of KAFKA_API::clients + diff --git a/modern-cpp-kafka/include/kafka/KafkaConsumer.h b/modern-cpp-kafka/include/kafka/KafkaConsumer.h new file mode 100644 index 00000000..9f580d08 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/KafkaConsumer.h @@ -0,0 +1,1051 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + + +namespace KAFKA_API { namespace clients { + +/** + * KafkaConsumer class. + */ +class KafkaConsumer: public KafkaClient +{ +public: + // Default value for property "max.poll.records" (which is same with Java API) + static const constexpr char* DEFAULT_MAX_POLL_RECORDS_VALUE = "500"; + + /** + * The constructor for KafkaConsumer. + * + * Options: + * - EventsPollingOption::Auto (default) : An internal thread would be started for OffsetCommit callbacks handling. + * - EventsPollingOption::Maunal : User have to call the member function `pollEvents()` to trigger OffsetCommit callbacks. + * + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__INVALID_ARG : Invalid BOOTSTRAP_SERVERS property + * - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE: Fail to create internal threads + */ + explicit KafkaConsumer(const Properties& properties, + EventsPollingOption eventsPollingOption = EventsPollingOption::Auto); + + /** + * The destructor for KafkaConsumer. + */ + ~KafkaConsumer() override { if (_opened) close(); } + + /** + * Close the consumer, waiting for any needed cleanup. + */ + void close(); + + /** + * To get group ID. 
+ */ + std::string getGroupId() const { return _groupId; } + + /** + * To set group ID. The group ID is mandatory for a Consumer. + */ + void setGroupId(const std::string& id) { _groupId = id; } + + /** + * Subscribe to the given list of topics to get dynamically assigned partitions. + * An exception would be thrown if assign is called previously (without a subsequent call to unsubscribe()) + */ + void subscribe(const Topics& topics, + consumer::RebalanceCallback rebalanceCallback = consumer::NullRebalanceCallback, + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_SUBSCRIBE_TIMEOUT_MS)); + /** + * Get the current subscription. + */ + Topics subscription() const; + + /** + * Unsubscribe from topics currently subscribed. + */ + void unsubscribe(std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_UNSUBSCRIBE_TIMEOUT_MS)); + + /** + * Manually assign a list of partitions to this consumer. + * An exception would be thrown if subscribe is called previously (without a subsequent call to unsubscribe()) + */ + void assign(const TopicPartitions& topicPartitions); + + /** + * Get the set of partitions currently assigned to this consumer. + */ + TopicPartitions assignment() const; + + // Seek & Position + /** + * Overrides the fetch offsets that the consumer will use on the next poll(timeout). + * If this API is invoked for the same partition more than once, the latest offset will be used on the next poll(). + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__TIMED_OUT: Operation timed out + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: Invalid partition + * - RD_KAFKA_RESP_ERR__STATE: Invalid broker state + */ + void seek(const TopicPartition& topicPartition, Offset offset, std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_SEEK_TIMEOUT_MS)); + + /** + * Seek to the first offset for each of the given partitions. + * This function evaluates lazily, seeking to the first offset in all partitions only when poll(long) or position(TopicPartition) are called. + * If no partitions are provided, seek to the first offset for all of the currently assigned partitions. + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__TIMED_OUT: Operation timed out + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: Invalid partition + * - RD_KAFKA_RESP_ERR__STATE: Invalid broker state + */ + void seekToBeginning(const TopicPartitions& topicPartitions, + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_SEEK_TIMEOUT_MS)) { seekToBeginningOrEnd(topicPartitions, true, timeout); } + void seekToBeginning(std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_SEEK_TIMEOUT_MS)) { seekToBeginningOrEnd(_assignment, true, timeout); } + + /** + * Seek to the last offset for each of the given partitions. + * This function evaluates lazily, seeking to the final offset in all partitions only when poll(long) or position(TopicPartition) are called. + * If no partitions are provided, seek to the first offset for all of the currently assigned partitions. 
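+     * A usage sketch (illustrative only; `consumer` is an already-subscribed/assigned KafkaConsumer):
+     *
+     *   consumer.seekToEnd();                           // jump to the latest offsets of all currently assigned partitions
+     *   consumer.seekToBeginning({{"topic1", 0}});      // re-read partition 0 of "topic1" from the start
+     *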
+ * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__TIMED_OUT: Operation timed out + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: Invalid partition + * - RD_KAFKA_RESP_ERR__STATE: Invalid broker state + */ + void seekToEnd(const TopicPartitions& topicPartitions, + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_SEEK_TIMEOUT_MS)) { seekToBeginningOrEnd(topicPartitions, false, timeout); } + void seekToEnd(std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_SEEK_TIMEOUT_MS)) { seekToBeginningOrEnd(_assignment, false, timeout); } + + /** + * Get the offset of the next record that will be fetched (if a record with that offset exists). + */ + Offset position(const TopicPartition& topicPartition) const; + + /** + * Get the first offset for the given partitions. + * This method does not change the current consumer position of the partitions. + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__FAIL: Generic failure + */ + std::map beginningOffsets(const TopicPartitions& topicPartitions) const { return getOffsets(topicPartitions, true); } + + /** + * Get the last offset for the given partitions. The last offset of a partition is the offset of the upcoming message, i.e. the offset of the last available message + 1. + * This method does not change the current consumer position of the partitions. + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__FAIL: Generic failure + */ + std::map endOffsets(const TopicPartitions& topicPartitions) const { return getOffsets(topicPartitions, false); } + + /** + * Get the offsets for the given partitions by time-point. + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__TIMED_OUT: Not all offsets could be fetched in time. + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: All partitions are unknown. + * - RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE: Unable to query leaders from the given partitions. + */ + std::map offsetsForTime(const TopicPartitions& topicPartitions, + std::chrono::time_point timepoint, + std::chrono::milliseconds timeout = std::chrono::milliseconds(DEFAULT_QUERY_TIMEOUT_MS)) const; + + /** + * Commit offsets returned on the last poll() for all the subscribed list of topics and partitions. + */ + void commitSync(); + + /** + * Commit the specified offsets for the specified records + */ + void commitSync(const consumer::ConsumerRecord& record); + + /** + * Commit the specified offsets for the specified list of topics and partitions. + */ + void commitSync(const TopicPartitionOffsets& topicPartitionOffsets); + + /** + * Commit offsets returned on the last poll() for all the subscribed list of topics and partition. + * Note: If a callback is provided, it's guaranteed to be triggered (before closing the consumer). + */ + void commitAsync(const consumer::OffsetCommitCallback& offsetCommitCallback = consumer::NullOffsetCommitCallback); + + /** + * Commit the specified offsets for the specified records + * Note: If a callback is provided, it's guaranteed to be triggered (before closing the consumer). + */ + void commitAsync(const consumer::ConsumerRecord& record, const consumer::OffsetCommitCallback& offsetCommitCallback = consumer::NullOffsetCommitCallback); + + /** + * Commit the specified offsets for the specified list of topics and partitions to Kafka. + * Note: If a callback is provided, it's guaranteed to be triggered (before closing the consumer). 
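+     *
+     * A usage sketch (illustrative only; the topic name and offset are assumed):
+     *
+     *   consumer.commitAsync({{{"topic1", 0}, 100}},
+     *                        [](const kafka::TopicPartitionOffsets& tpos, const kafka::Error& err) {
+     *                            if (err) std::cerr << "commit failed: " << err.toString() << std::endl;
+     *                        });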
+ */ + void commitAsync(const TopicPartitionOffsets& topicPartitionOffsets, const consumer::OffsetCommitCallback& offsetCommitCallback = consumer::NullOffsetCommitCallback); + + /** + * Get the last committed offset for the given partition (whether the commit happened by this process or another).This offset will be used as the position for the consumer in the event of a failure. + * This call will block to do a remote call to get the latest committed offsets from the server. + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid partition + */ + Offset committed(const TopicPartition& topicPartition); + + /** + * Fetch data for the topics or partitions specified using one of the subscribe/assign APIs. + * Returns the polled records. + * Note: 1) The result could be fetched through ConsumerRecord (with member function `error`). + * 2) Make sure the `ConsumerRecord` be destructed before the `KafkaConsumer.close()`. + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: Unknow partition + */ + std::vector poll(std::chrono::milliseconds timeout); + + /** + * Fetch data for the topics or partitions specified using one of the subscribe/assign APIs. + * Returns the number of polled records (which have been saved into parameter `output`). + * Note: 1) The result could be fetched through ConsumerRecord (with member function `error`). + * 2) Make sure the `ConsumerRecord` be destructed before the `KafkaConsumer.close()`. + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: Unknow partition + */ + std::size_t poll(std::chrono::milliseconds timeout, std::vector& output); + + /** + * Suspend fetching from the requested partitions. Future calls to poll() will not return any records from these partitions until they have been resumed using resume(). + * Note: 1) After pausing, the application still need to call `poll()` at regular intervals. + * 2) This method does not affect partition subscription/assignment (i.e, pause fetching from partitions would not trigger a rebalance, since the consumer is still alive). + * 3) If none of the provided partitions is assigned to this consumer, an exception would be thrown. + * Throws KafkaException with error: + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid arguments + */ + void pause(const TopicPartitions& topicPartitions); + + /** + * Suspend fetching from all assigned partitions. Future calls to poll() will not return any records until they have been resumed using resume(). + * Note: This method does not affect partition subscription/assignment. + */ + void pause(); + + /** + * Resume specified partitions which have been paused with pause(). New calls to poll() will return records from these partitions if there are any to be fetched. + * Note: If the partitions were not previously paused, this method is a no-op. + */ + void resume(const TopicPartitions& topicPartitions); + + /** + * Resume all partitions which have been paused with pause(). New calls to poll() will return records from these partitions if there are any to be fetched. + */ + void resume(); + + /** + * Return the current group metadata associated with this consumer. 
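+     *
+     * A usage sketch (illustrative only) for the consume-transform-produce pattern, where a transactional
+     * KafkaProducer commits the consumed offsets as part of its own transaction (`producer` and
+     * `offsetsToCommit` are assumed to be defined elsewhere):
+     *
+     *   producer.sendOffsetsToTransaction(offsetsToCommit, consumer.groupMetadata(), std::chrono::milliseconds(10000));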
+ */ + consumer::ConsumerGroupMetadata groupMetadata(); + +private: + static const constexpr char* ENABLE_AUTO_OFFSET_STORE = "enable.auto.offset.store"; + static const constexpr char* AUTO_COMMIT_INTERVAL_MS = "auto.commit.interval.ms"; + +#if COMPILER_SUPPORTS_CPP_17 + static constexpr int DEFAULT_SUBSCRIBE_TIMEOUT_MS = 30000; + static constexpr int DEFAULT_UNSUBSCRIBE_TIMEOUT_MS = 10000; + static constexpr int DEFAULT_QUERY_TIMEOUT_MS = 10000; + static constexpr int DEFAULT_SEEK_TIMEOUT_MS = 10000; + static constexpr int SEEK_RETRY_INTERVAL_MS = 5000; +#else + enum { DEFAULT_SUBSCRIBE_TIMEOUT_MS = 30000 }; + enum { DEFAULT_UNSUBSCRIBE_TIMEOUT_MS = 10000 }; + enum { DEFAULT_QUERY_TIMEOUT_MS = 10000 }; + enum { DEFAULT_SEEK_TIMEOUT_MS = 10000 }; + enum { SEEK_RETRY_INTERVAL_MS = 5000 }; +#endif + + enum class CommitType { Sync, Async }; + void commit(const TopicPartitionOffsets& topicPartitionOffsets, CommitType type); + + // Offset Commit Callback (for librdkafka) + static void offsetCommitCallback(rd_kafka_t* rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t* rk_tpos, void* opaque); + + // Validate properties (and fix it if necesary) + static Properties validateAndReformProperties(Properties properties); + + void commitStoredOffsetsIfNecessary(CommitType type); + void storeOffsetsIfNecessary(const std::vector& records); + + void seekToBeginningOrEnd(const TopicPartitions& topicPartitions, bool toBeginning, std::chrono::milliseconds timeout); + std::map getOffsets(const TopicPartitions& topicPartitions, bool atBeginning) const; + + enum class PartitionsRebalanceEvent { Assign, Revoke, IncrementalAssign, IncrementalUnassign }; + void changeAssignment(PartitionsRebalanceEvent event, const TopicPartitions& tps); + + std::string _groupId; + + std::size_t _maxPollRecords = 500; // From "max.poll.records" property, and here is the default for batch-poll + bool _enableAutoCommit = false; // From "enable.auto.commit" property + + rd_kafka_queue_unique_ptr _rk_queue; + + // Save assignment info (from "assign()" call or rebalance callback) locally, to accelerate seeking procedure + TopicPartitions _assignment; + // Assignment from user's input, -- by calling "assign()" + TopicPartitions _userAssignment; + // Subscription from user's input, -- by calling "subscribe()" + Topics _userSubscription; + + enum class PendingEvent { PartitionsAssignment, PartitionsRevocation }; + Optional _pendingEvent; + + // Identify whether the "partition.assignment.strategy" is "cooperative-sticky" + Optional _cooperativeEnabled; + bool isCooperativeEnabled() const { return _cooperativeEnabled && *_cooperativeEnabled; } + + // The offsets to store (and commit later) + std::map _offsetsToStore; + + // Register Callbacks for rd_kafka_conf_t + static void registerConfigCallbacks(rd_kafka_conf_t* conf); + + void pollMessages(int timeoutMs, std::vector& output); + + enum class PauseOrResumeOperation { Pause, Resume }; + void pauseOrResumePartitions(const TopicPartitions& topicPartitions, PauseOrResumeOperation op); + + // Rebalance Callback (for librdkafka) + static void rebalanceCallback(rd_kafka_t* rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t* partitions, void* opaque); + // Rebalance Callback (for class instance) + void onRebalance(rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t* rk_partitions); + + consumer::RebalanceCallback _rebalanceCb; + + rd_kafka_queue_t* getCommitCbQueue() { return _rk_commit_cb_queue.get(); } + + rd_kafka_queue_unique_ptr _rk_commit_cb_queue; + + void 
pollCallbacks(int timeoutMs) + { + rd_kafka_queue_t* queue = getCommitCbQueue(); + rd_kafka_queue_poll_callback(queue, timeoutMs); + } +}; + + +// Validate properties (and fix it if necesary) +inline Properties +KafkaConsumer::validateAndReformProperties(Properties properties) +{ + // Don't pass the "max.poll.records" property to librdkafka + properties.remove(consumer::Config::MAX_POLL_RECORDS); + + // Let the base class validate first + auto newProperties = KafkaClient::validateAndReformProperties(properties); + + // If no "group.id" configured, generate a random one for user + if (!newProperties.getProperty(consumer::Config::GROUP_ID)) + { + newProperties.put(consumer::Config::GROUP_ID, utility::getRandomString()); + } + + // Disable the internal auto-commit from librdkafka, since we want to customize the behavior + newProperties.put(consumer::Config::ENABLE_AUTO_COMMIT, "false"); + newProperties.put(AUTO_COMMIT_INTERVAL_MS, "0"); + newProperties.put(ENABLE_AUTO_OFFSET_STORE, "true"); + + return newProperties; +} + +// Register Callbacks for rd_kafka_conf_t +inline void +KafkaConsumer::registerConfigCallbacks(rd_kafka_conf_t* conf) +{ + // Rebalance Callback + // would turn off librdkafka's automatic partition assignment/revocation + rd_kafka_conf_set_rebalance_cb(conf, KafkaConsumer::rebalanceCallback); +} + +inline +KafkaConsumer::KafkaConsumer(const Properties &properties, EventsPollingOption eventsPollingOption) + : KafkaClient(ClientType::KafkaConsumer, + validateAndReformProperties(properties), + registerConfigCallbacks, + eventsPollingOption) +{ + // Pick up the "max.poll.records" property + if (auto maxPollRecordsProperty = properties.getProperty(consumer::Config::MAX_POLL_RECORDS)) + { + const std::string maxPollRecords = *maxPollRecordsProperty; + _maxPollRecords = static_cast(std::stoi(maxPollRecords)); + } + _properties.put(consumer::Config::MAX_POLL_RECORDS, std::to_string(_maxPollRecords)); + + // Pick up the "enable.auto.commit" property + if (auto enableAutoCommitProperty = properties.getProperty(consumer::Config::ENABLE_AUTO_COMMIT)) + { + const std::string enableAutoCommit = *enableAutoCommitProperty; + + auto isTrue = [](const std::string& str) { return str == "1" || str == "true"; }; + auto isFalse = [](const std::string& str) { return str == "0" || str == "false"; }; + + if (!isTrue(enableAutoCommit) && !isFalse(enableAutoCommit)) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG, std::string("Invalid property[enable.auto.commit=").append(enableAutoCommit).append("], which MUST be true(1) or false(0)!"))); + } + + _enableAutoCommit = isTrue(enableAutoCommit); + } + _properties.put(consumer::Config::ENABLE_AUTO_COMMIT, (_enableAutoCommit ? 
"true" : "false")); + + // Fetch groupId from reformed configuration + auto groupId = _properties.getProperty(consumer::Config::GROUP_ID); + assert(groupId); + setGroupId(*groupId); + + // Redirect the reply queue (to the client group queue) + Error result{ rd_kafka_poll_set_consumer(getClientHandle()) }; + KAFKA_THROW_IF_WITH_ERROR(result); + + // Initialize message-fetching queue + _rk_queue.reset(rd_kafka_queue_get_consumer(getClientHandle())); + + // Initialize commit-callback queue + _rk_commit_cb_queue.reset(rd_kafka_queue_new(getClientHandle())); + + // Start background polling (if needed) + startBackgroundPollingIfNecessary([this](int timeoutMs){ pollCallbacks(timeoutMs); }); + + const auto propsStr = KafkaClient::properties().toString(); + KAFKA_API_DO_LOG(Log::Level::Notice, "initialized with properties[%s]", propsStr.c_str()); +} + +inline void +KafkaConsumer::close() +{ + _opened = false; + + stopBackgroundPollingIfNecessary(); + + try + { + // Commit the offsets for these messages which had been polled last time (for `enable.auto.commit=true` case.) + commitStoredOffsetsIfNecessary(CommitType::Sync); + } + catch (const KafkaException& e) + { + KAFKA_API_DO_LOG(Log::Level::Err, "met error[%s] while closing", e.what()); + } + + rd_kafka_consumer_close(getClientHandle()); + + while (rd_kafka_outq_len(getClientHandle())) + { + rd_kafka_poll(getClientHandle(), KafkaClient::TIMEOUT_INFINITE); + } + + rd_kafka_queue_t* queue = getCommitCbQueue(); + while (rd_kafka_queue_length(queue)) + { + rd_kafka_queue_poll_callback(queue, TIMEOUT_INFINITE); + } + + KAFKA_API_DO_LOG(Log::Level::Notice, "closed"); +} + + +// Subscription +inline void +KafkaConsumer::subscribe(const Topics& topics, consumer::RebalanceCallback rebalanceCallback, std::chrono::milliseconds timeout) +{ + if (!_userAssignment.empty()) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__FAIL, "Unexpected Operation! 
Once assign() was used, subscribe() should not be called any more!")); + } + + if (isCooperativeEnabled() && topics == _userSubscription) + { + KAFKA_API_DO_LOG(Log::Level::Info, "skip subscribe (no change since last time)"); + return; + } + + _userSubscription = topics; + + std::string topicsStr = toString(topics); + KAFKA_API_DO_LOG(Log::Level::Info, "will subscribe, topics[%s]", topicsStr.c_str()); + + _rebalanceCb = std::move(rebalanceCallback); + + auto rk_topics = rd_kafka_topic_partition_list_unique_ptr(createRkTopicPartitionList(topics)); + + Error result{ rd_kafka_subscribe(getClientHandle(), rk_topics.get()) }; + KAFKA_THROW_IF_WITH_ERROR(result); + + _pendingEvent = PendingEvent::PartitionsAssignment; + + // The rebalcance callback would be served during the time (within this thread) + for (const auto end = std::chrono::steady_clock::now() + timeout; std::chrono::steady_clock::now() < end; ) + { + rd_kafka_poll(getClientHandle(), EVENT_POLLING_INTERVAL_MS); + + if (!_pendingEvent) + { + KAFKA_API_DO_LOG(Log::Level::Notice, "subscribed, topics[%s]", topicsStr.c_str()); + return; + } + } + + _pendingEvent.reset(); + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__TIMED_OUT, "subscribe() timed out!")); +} + +inline void +KafkaConsumer::unsubscribe(std::chrono::milliseconds timeout) +{ + if (_userSubscription.empty() && _userAssignment.empty()) + { + KAFKA_API_DO_LOG(Log::Level::Info, "skip unsubscribe (no assignment/subscription yet)"); + return; + } + + KAFKA_API_DO_LOG(Log::Level::Info, "will unsubscribe"); + + // While it's for the previous `assign(...)` + if (!_userAssignment.empty()) + { + changeAssignment(isCooperativeEnabled() ? PartitionsRebalanceEvent::IncrementalUnassign : PartitionsRebalanceEvent::Revoke, + _userAssignment); + _userAssignment.clear(); + + KAFKA_API_DO_LOG(Log::Level::Notice, "unsubscribed (the previously assigned partitions)"); + return; + } + + _userSubscription.clear(); + + Error result{ rd_kafka_unsubscribe(getClientHandle()) }; + KAFKA_THROW_IF_WITH_ERROR(result); + + _pendingEvent = PendingEvent::PartitionsRevocation; + + // The rebalance callback would be served during the time (within this thread) + for (const auto end = std::chrono::steady_clock::now() + timeout; std::chrono::steady_clock::now() < end; ) + { + rd_kafka_poll(getClientHandle(), EVENT_POLLING_INTERVAL_MS); + + if (!_pendingEvent) + { + KAFKA_API_DO_LOG(Log::Level::Notice, "unsubscribed"); + return; + } + } + + _pendingEvent.reset(); + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__TIMED_OUT, "unsubscribe() timed out!")); +} + +inline Topics +KafkaConsumer::subscription() const +{ + rd_kafka_topic_partition_list_t* raw_topics = nullptr; + Error result{ rd_kafka_subscription(getClientHandle(), &raw_topics) }; + auto rk_topics = rd_kafka_topic_partition_list_unique_ptr(raw_topics); + + KAFKA_THROW_IF_WITH_ERROR(result); + + return getTopics(rk_topics.get()); +} + +inline void +KafkaConsumer::changeAssignment(PartitionsRebalanceEvent event, const TopicPartitions& tps) +{ + auto rk_tps = rd_kafka_topic_partition_list_unique_ptr(createRkTopicPartitionList(tps)); + + Error result; + switch (event) + { + case PartitionsRebalanceEvent::Assign: + result = Error{ rd_kafka_assign(getClientHandle(), rk_tps.get()) }; + // Update assignment + _assignment = tps; + break; + + case PartitionsRebalanceEvent::Revoke: + result = Error{ rd_kafka_assign(getClientHandle(), nullptr) }; + // Update assignment + _assignment.clear(); + break; + + case PartitionsRebalanceEvent::IncrementalAssign: + result = Error{ 
rd_kafka_incremental_assign(getClientHandle(), rk_tps.get()) }; + // Update assignment + for (const auto& tp: tps) + { + auto found = _assignment.find(tp); + if (found != _assignment.end()) + { + std::string tpStr = toString(tp); + KAFKA_API_DO_LOG(Log::Level::Err, "incremental assign partition[%s] has already been assigned", tpStr.c_str()); + continue; + } + _assignment.emplace(tp); + } + break; + + case PartitionsRebalanceEvent::IncrementalUnassign: + result = Error{ rd_kafka_incremental_unassign(getClientHandle(), rk_tps.get()) }; + // Update assignment + for (const auto& tp: tps) + { + auto found = _assignment.find(tp); + if (found == _assignment.end()) + { + std::string tpStr = toString(tp); + KAFKA_API_DO_LOG(Log::Level::Err, "incremental unassign partition[%s] could not be found", tpStr.c_str()); + continue; + } + _assignment.erase(found); + } + break; + } + + KAFKA_THROW_IF_WITH_ERROR(result); +} + +// Assign Topic-Partitions +inline void +KafkaConsumer::assign(const TopicPartitions& topicPartitions) +{ + if (!_userSubscription.empty()) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__FAIL, "Unexpected Operation! Once subscribe() was used, assign() should not be called any more!")); + } + + _userAssignment = topicPartitions; + + changeAssignment(isCooperativeEnabled() ? PartitionsRebalanceEvent::IncrementalAssign : PartitionsRebalanceEvent::Assign, + topicPartitions); +} + +// Assignment +inline TopicPartitions +KafkaConsumer::assignment() const +{ + rd_kafka_topic_partition_list_t* raw_tps = nullptr; + Error result{ rd_kafka_assignment(getClientHandle(), &raw_tps) }; + + auto rk_tps = rd_kafka_topic_partition_list_unique_ptr(raw_tps); + + KAFKA_THROW_IF_WITH_ERROR(result); + + return getTopicPartitions(rk_tps.get()); +} + + +// Seek & Position +inline void +KafkaConsumer::seek(const TopicPartition& topicPartition, Offset offset, std::chrono::milliseconds timeout) +{ + std::string topicPartitionStr = toString(topicPartition); + KAFKA_API_DO_LOG(Log::Level::Info, "will seek with topic-partition[%s], offset[%d]", topicPartitionStr.c_str(), offset); + + auto rkt = rd_kafka_topic_unique_ptr(rd_kafka_topic_new(getClientHandle(), topicPartition.first.c_str(), nullptr)); + if (!rkt) + { + KAFKA_THROW_ERROR(Error(rd_kafka_last_error())); + } + + const auto end = std::chrono::steady_clock::now() + timeout; + + rd_kafka_resp_err_t respErr = RD_KAFKA_RESP_ERR_NO_ERROR; + do + { + respErr = rd_kafka_seek(rkt.get(), topicPartition.second, offset, SEEK_RETRY_INTERVAL_MS); + if (respErr != RD_KAFKA_RESP_ERR__STATE && respErr != RD_KAFKA_RESP_ERR__TIMED_OUT && respErr != RD_KAFKA_RESP_ERR__OUTDATED) + { + break; + } + + // If the "seek" was called just after "assign", there's a chance that the toppar's "fetch_state" (async setted) was not ready yes. + // If that's the case, we would retry again (normally, just after a very short while, the "seek" would succeed) + std::this_thread::yield(); + } while (std::chrono::steady_clock::now() < end); + + KAFKA_THROW_IF_WITH_ERROR(Error(respErr)); + + KAFKA_API_DO_LOG(Log::Level::Info, "seeked with topic-partition[%s], offset[%d]", topicPartitionStr.c_str(), offset); +} + +inline void +KafkaConsumer::seekToBeginningOrEnd(const TopicPartitions& topicPartitions, bool toBeginning, std::chrono::milliseconds timeout) +{ + for (const auto& topicPartition: topicPartitions) + { + seek(topicPartition, (toBeginning ? 
RD_KAFKA_OFFSET_BEGINNING : RD_KAFKA_OFFSET_END), timeout); + } +} + +inline Offset +KafkaConsumer::position(const TopicPartition& topicPartition) const +{ + auto rk_tp = rd_kafka_topic_partition_list_unique_ptr(createRkTopicPartitionList({topicPartition})); + + Error error{ rd_kafka_position(getClientHandle(), rk_tp.get()) }; + KAFKA_THROW_IF_WITH_ERROR(error); + + return rk_tp->elems[0].offset; +} + +inline std::map +KafkaConsumer::offsetsForTime(const TopicPartitions& topicPartitions, + std::chrono::time_point timepoint, + std::chrono::milliseconds timeout) const +{ + if (topicPartitions.empty()) return {}; + + auto msSinceEpoch = std::chrono::duration_cast(timepoint.time_since_epoch()).count(); + + auto rk_tpos = rd_kafka_topic_partition_list_unique_ptr(createRkTopicPartitionList(topicPartitions)); + + for (int i = 0; i < rk_tpos->cnt; ++i) + { + rd_kafka_topic_partition_t& rk_tp = rk_tpos->elems[i]; + // Here the `msSinceEpoch` would be overridden by the offset result (after called by `rd_kafka_offsets_for_times`) + rk_tp.offset = msSinceEpoch; + } + + Error error{ rd_kafka_offsets_for_times(getClientHandle(), rk_tpos.get(), static_cast(timeout.count())) }; // NOLINT + KAFKA_THROW_IF_WITH_ERROR(error); + + auto results = getTopicPartitionOffsets(rk_tpos.get()); + + // Remove invalid results (which are not updated with an valid offset) + for (auto it = results.begin(); it != results.end(); ) + { + it = ((it->second == msSinceEpoch) ? results.erase(it) : std::next(it)); + } + + return results; +} + +inline std::map +KafkaConsumer::getOffsets(const TopicPartitions& topicPartitions, bool atBeginning) const +{ + std::map result; + + for (const auto& topicPartition: topicPartitions) + { + Offset beginning{}, end{}; + Error error{ rd_kafka_query_watermark_offsets(getClientHandle(), topicPartition.first.c_str(), topicPartition.second, &beginning, &end, 0) }; + KAFKA_THROW_IF_WITH_ERROR(error); + + result[topicPartition] = (atBeginning ? beginning : end); + } + + return result; +} + +// Commit +inline void +KafkaConsumer::commit(const TopicPartitionOffsets& topicPartitionOffsets, CommitType type) +{ + auto rk_tpos = rd_kafka_topic_partition_list_unique_ptr(topicPartitionOffsets.empty() ? nullptr : createRkTopicPartitionList(topicPartitionOffsets)); + + Error error{ rd_kafka_commit(getClientHandle(), rk_tpos.get(), type == CommitType::Async ? 
1 : 0) }; + // No stored offset to commit (it might happen and should not be treated as a mistake) + if (topicPartitionOffsets.empty() && error.value() == RD_KAFKA_RESP_ERR__NO_OFFSET) + { + error = Error{}; + } + + KAFKA_THROW_IF_WITH_ERROR(error); +} + +// Fetch committed offset +inline Offset +KafkaConsumer::committed(const TopicPartition& topicPartition) +{ + auto rk_tps = rd_kafka_topic_partition_list_unique_ptr(createRkTopicPartitionList({topicPartition})); + + Error error {rd_kafka_committed(getClientHandle(), rk_tps.get(), TIMEOUT_INFINITE) }; + KAFKA_THROW_IF_WITH_ERROR(error); + + return rk_tps->elems[0].offset; +} + +// Commit stored offsets +inline void +KafkaConsumer::commitStoredOffsetsIfNecessary(CommitType type) +{ + if (_enableAutoCommit && !_offsetsToStore.empty()) + { + for (auto& o: _offsetsToStore) + { + ++o.second; + } + commit(_offsetsToStore, type); + _offsetsToStore.clear(); + } +} + +// Store offsets +inline void +KafkaConsumer::storeOffsetsIfNecessary(const std::vector& records) +{ + if (_enableAutoCommit) + { + for (const auto& record: records) + { + _offsetsToStore[TopicPartition(record.topic(), record.partition())] = record.offset(); + } + } +} + +// Fetch messages (internally used) +inline void +KafkaConsumer::pollMessages(int timeoutMs, std::vector& output) +{ + // Commit the offsets for these messages which had been polled last time (for "enable.auto.commit=true" case) + commitStoredOffsetsIfNecessary(CommitType::Async); + + // Poll messages with librdkafka's API + std::vector msgPtrArray(_maxPollRecords); + auto msgReceived = rd_kafka_consume_batch_queue(_rk_queue.get(), timeoutMs, msgPtrArray.data(), _maxPollRecords); + if (msgReceived < 0) + { + KAFKA_THROW_ERROR(Error(rd_kafka_last_error())); + } + + // Wrap messages with ConsumerRecord + output.clear(); + output.reserve(static_cast(msgReceived)); + std::for_each(msgPtrArray.begin(), msgPtrArray.begin() + msgReceived, [&output](rd_kafka_message_t* rkMsg) { output.emplace_back(rkMsg); }); + + // Store the offsets for all these polled messages (for "enable.auto.commit=true" case) + storeOffsetsIfNecessary(output); +} + +// Fetch messages (return via return value) +inline std::vector +KafkaConsumer::poll(std::chrono::milliseconds timeout) +{ + std::vector result; + poll(timeout, result); + return result; +} + +// Fetch messages (return via input parameter) +inline std::size_t +KafkaConsumer::poll(std::chrono::milliseconds timeout, std::vector& output) +{ + pollMessages(convertMsDurationToInt(timeout), output); + return output.size(); +} + +inline void +KafkaConsumer::pauseOrResumePartitions(const TopicPartitions& topicPartitions, PauseOrResumeOperation op) +{ + auto rk_tpos = rd_kafka_topic_partition_list_unique_ptr(createRkTopicPartitionList(topicPartitions)); + + Error error{ (op == PauseOrResumeOperation::Pause) ? + rd_kafka_pause_partitions(getClientHandle(), rk_tpos.get()) : rd_kafka_resume_partitions(getClientHandle(), rk_tpos.get()) }; + KAFKA_THROW_IF_WITH_ERROR(error); + + const char* opString = (op == PauseOrResumeOperation::Pause) ? 
"pause" : "resume"; + int cnt = 0; + for (int i = 0; i < rk_tpos->cnt; ++i) + { + const rd_kafka_topic_partition_t& rk_tp = rk_tpos->elems[i]; + if (rk_tp.err != RD_KAFKA_RESP_ERR_NO_ERROR) + { + KAFKA_API_DO_LOG(Log::Level::Err, "%s topic-partition[%s-%d] error[%s]", opString, rk_tp.topic, rk_tp.partition, rd_kafka_err2str(rk_tp.err)); + } + else + { + KAFKA_API_DO_LOG(Log::Level::Notice, "%sd topic-partition[%s-%d]", opString, rk_tp.topic, rk_tp.partition, rd_kafka_err2str(rk_tp.err)); + ++cnt; + } + } + + if (cnt == 0 && op == PauseOrResumeOperation::Pause) + { + std::string errMsg = std::string("No partition could be ") + opString + std::string("d among TopicPartitions[") + toString(topicPartitions) + std::string("]"); + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG, errMsg)); + } +} + +inline void +KafkaConsumer::pause(const TopicPartitions& topicPartitions) +{ + pauseOrResumePartitions(topicPartitions, PauseOrResumeOperation::Pause); +} + +inline void +KafkaConsumer::pause() +{ + pause(_assignment); +} + +inline void +KafkaConsumer::resume(const TopicPartitions& topicPartitions) +{ + pauseOrResumePartitions(topicPartitions, PauseOrResumeOperation::Resume); +} + +inline void +KafkaConsumer::resume() +{ + resume(_assignment); +} + +// Rebalance Callback (for class instance) +inline void +KafkaConsumer::onRebalance(rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t* rk_partitions) +{ + TopicPartitions tps = getTopicPartitions(rk_partitions); + std::string tpsStr = toString(tps); + + if (err != RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS && err != RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) + { + KAFKA_API_DO_LOG(Log::Level::Err, "unknown re-balance event[%d], topic-partitions[%s]", err, tpsStr.c_str()); + return; + } + + // Initialize attribute for cooperative protocol + if (!_cooperativeEnabled) + { + if (const char* protocol = rd_kafka_rebalance_protocol(getClientHandle())) + { + _cooperativeEnabled = (std::string(protocol) == "COOPERATIVE"); + } + } + + KAFKA_API_DO_LOG(Log::Level::Notice, "re-balance event triggered[%s], cooperative[%s], topic-partitions[%s]", + err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ? "ASSIGN_PARTITIONS" : "REVOKE_PARTITIONS", + isCooperativeEnabled() ? "enabled" : "disabled", + tpsStr.c_str()); + + // Remove the mark for pending event + if (_pendingEvent + && ((err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS && *_pendingEvent == PendingEvent::PartitionsAssignment) + || (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS && *_pendingEvent == PendingEvent::PartitionsRevocation))) + { + _pendingEvent.reset(); + } + + PartitionsRebalanceEvent event = (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ? + (isCooperativeEnabled() ? PartitionsRebalanceEvent::IncrementalAssign : PartitionsRebalanceEvent::Assign) + : (isCooperativeEnabled() ? PartitionsRebalanceEvent::IncrementalUnassign : PartitionsRebalanceEvent::Revoke)); + + if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) + { + changeAssignment(event, tps); + } + + if (_rebalanceCb) + { + _rebalanceCb(err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ? consumer::RebalanceEventType::PartitionsAssigned : consumer::RebalanceEventType::PartitionsRevoked, + tps); + } + + if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) + { + changeAssignment(event, isCooperativeEnabled() ? 
tps : TopicPartitions{}); + } +} + +// Rebalance Callback (for librdkafka) +inline void +KafkaConsumer::rebalanceCallback(rd_kafka_t* rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t* partitions, void* /* opaque */) +{ + KafkaClient& client = kafkaClient(rk); + auto& consumer = dynamic_cast(client); + consumer.onRebalance(err, partitions); +} + +// Offset Commit Callback (for librdkafka) +inline void +KafkaConsumer::offsetCommitCallback(rd_kafka_t* rk, rd_kafka_resp_err_t err, rd_kafka_topic_partition_list_t* rk_tpos, void* opaque) +{ + TopicPartitionOffsets tpos = getTopicPartitionOffsets(rk_tpos); + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) + { + auto tposStr = toString(tpos); + kafkaClient(rk).KAFKA_API_DO_LOG(Log::Level::Err, "invoked offset-commit callback. offsets[%s], result[%s]", tposStr.c_str(), rd_kafka_err2str(err)); + } + + auto* cb = static_cast(opaque); + if (cb && *cb) + { + (*cb)(tpos, Error(err)); + } + delete cb; +} + +inline consumer::ConsumerGroupMetadata +KafkaConsumer::groupMetadata() +{ + return consumer::ConsumerGroupMetadata{rd_kafka_consumer_group_metadata(getClientHandle())}; +} + + + +inline void +KafkaConsumer::commitSync() +{ + commit(TopicPartitionOffsets(), CommitType::Sync); +} + +inline void +KafkaConsumer::commitSync(const consumer::ConsumerRecord& record) +{ + TopicPartitionOffsets tpos; + // committed offset should be "current-received-offset + 1" + tpos[TopicPartition(record.topic(), record.partition())] = record.offset() + 1; + + commit(tpos, CommitType::Sync); +} + +inline void +KafkaConsumer::commitSync(const TopicPartitionOffsets& topicPartitionOffsets) +{ + commit(topicPartitionOffsets, CommitType::Sync); +} + +inline void +KafkaConsumer::commitAsync(const TopicPartitionOffsets& topicPartitionOffsets, const consumer::OffsetCommitCallback& offsetCommitCallback) +{ + auto rk_tpos = rd_kafka_topic_partition_list_unique_ptr(topicPartitionOffsets.empty() ? nullptr : createRkTopicPartitionList(topicPartitionOffsets)); + + Error error{ rd_kafka_commit_queue(getClientHandle(), + rk_tpos.get(), + getCommitCbQueue(), + &KafkaConsumer::offsetCommitCallback, + new consumer::OffsetCommitCallback(offsetCommitCallback)) }; + KAFKA_THROW_IF_WITH_ERROR(error); +} + +inline void +KafkaConsumer::commitAsync(const consumer::ConsumerRecord& record, const consumer::OffsetCommitCallback& offsetCommitCallback) +{ + TopicPartitionOffsets tpos; + // committed offset should be "current received record's offset" + 1 + tpos[TopicPartition(record.topic(), record.partition())] = record.offset() + 1; + commitAsync(tpos, offsetCommitCallback); +} + +inline void +KafkaConsumer::commitAsync(const consumer::OffsetCommitCallback& offsetCommitCallback) +{ + commitAsync(TopicPartitionOffsets(), offsetCommitCallback); +} + +} } // end of KAFKA_API::clients + diff --git a/modern-cpp-kafka/include/kafka/KafkaException.h b/modern-cpp-kafka/include/kafka/KafkaException.h new file mode 100644 index 00000000..a4fd3d5d --- /dev/null +++ b/modern-cpp-kafka/include/kafka/KafkaException.h @@ -0,0 +1,60 @@ +#pragma once + +#include + +#include +#include +#include + +#include + +#include +#include +#include + + +namespace KAFKA_API { + +/** + * Specific exception for Kafka clients. + */ +class KafkaException: public std::exception +{ +public: + KafkaException(const char* filename, std::size_t lineno, const Error& error) + : _when(std::chrono::system_clock::now()), + _filename(filename), + _lineno(lineno), + _error(std::make_shared(error)) + {} + + /** + * Obtains the underlying error. 
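+     * A usage sketch (illustrative only; `consumer` and the topic name are assumed):
+     *
+     *   try { consumer.subscribe({"topic1"}); }
+     *   catch (const kafka::KafkaException& e) {
+     *       std::cerr << "error code: " << e.error().value() << ", detail: " << e.what() << std::endl;
+     *   }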
+ */ + const Error& error() const { return *_error; } + + /** + * Obtains explanatory string. + */ + const char* what() const noexcept override + { + _what = utility::getLocalTimeString(_when) + ": " + _error->toString() + " (" + std::string(_filename) + ":" + std::to_string(_lineno) + ")"; + return _what.c_str(); + } + +private: + using TimePoint = std::chrono::system_clock::time_point; + + const TimePoint _when; + const std::string _filename; + const std::size_t _lineno; + const std::shared_ptr _error; + mutable std::string _what; +}; + + +#define KAFKA_THROW_ERROR(error) throw KafkaException(__FILE__, __LINE__, error) +#define KAFKA_THROW_IF_WITH_ERROR(error) if (error) KAFKA_THROW_ERROR(error) + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/KafkaProducer.h b/modern-cpp-kafka/include/kafka/KafkaProducer.h new file mode 100644 index 00000000..5d6c59f0 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/KafkaProducer.h @@ -0,0 +1,516 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + + +namespace KAFKA_API { namespace clients { + +/** + * KafkaProducer class. + */ +class KafkaProducer: public KafkaClient +{ +public: + /** + * The constructor for KafkaProducer. + * + * Options: + * - EventsPollingOption::Auto (default) : An internal thread would be started for MessageDelivery callbacks handling. + * - EventsPollingOption::Manual : User have to call the member function `pollEvents()` to trigger MessageDelivery callbacks. + * + * Throws KafkaException with errors: + * - RD_KAFKA_RESP_ERR__INVALID_ARG : Invalid BOOTSTRAP_SERVERS property + * - RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE: Fail to create internal threads + */ + explicit KafkaProducer(const Properties& properties, + EventsPollingOption eventsPollingOption = EventsPollingOption::Auto); + + /** + * The destructor for KafkaProducer. + */ + ~KafkaProducer() override { if (_opened) close(); } + + /** + * Invoking this method makes all buffered records immediately available to send, and blocks on the completion of the requests associated with these records. + * + * Possible error values: + * - RD_KAFKA_RESP_ERR__TIMED_OUT: The `timeout` was reached before all outstanding requests were completed. + */ + Error flush(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()); + + /** + * Purge messages currently handled by the KafkaProducer. + */ + Error purge(); + + /** + * Close this producer. This method would wait up to timeout for the producer to complete the sending of all incomplete requests (before purging them). + */ + void close(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()); + + /** + * Options for sending messages. + */ + enum class SendOption { NoCopyRecordValue, ToCopyRecordValue }; + + /** + * Choose the action while the sending buffer is full. + */ + enum class ActionWhileQueueIsFull { Block, NoBlock }; + + /** + * Asynchronously send a record to a topic. + * + * Note: + * - If a callback is provided, it's guaranteed to be triggered (before closing the producer). + * - If any error occured, an exception would be thrown. + * - Make sure the memory block (for ProducerRecord's value) is valid until the delivery callback finishes; Otherwise, should be with option `KafkaProducer::SendOption::ToCopyRecordValue`. 
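+     *
+     * A usage sketch (illustrative only; `producer` and `record` are assumed to be defined elsewhere):
+     *
+     *   producer.send(record,
+     *                 [](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) {
+     *                     if (error) std::cerr << "delivery failed: " << error.toString() << std::endl;
+     *                     else       std::cout << "delivered: " << metadata.toString() << std::endl;
+     *                 });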
+ * + * Possible errors: + * Local errors, + * - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC: The topic doesn't exist + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: The partition doesn't exist + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid topic(topic is null, or the length is too long (> 512) + * - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: No ack received within the time limit + * - RD_KAFKA_RESP_ERR__QUEUE_FULL: The message buffing queue is full + * Broker errors, + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + void send(const producer::ProducerRecord& record, + const producer::Callback& deliveryCb, + SendOption option = SendOption::NoCopyRecordValue, + ActionWhileQueueIsFull action = ActionWhileQueueIsFull::Block); + + /** + * Asynchronously send a record to a topic. + * + * Note: + * - If a callback is provided, it's guaranteed to be triggered (before closing the producer). + * - The input reference parameter `error` will be set if an error occurred. + * - Make sure the memory block (for ProducerRecord's value) is valid until the delivery callback finishes; Otherwise, should be with option `KafkaProducer::SendOption::ToCopyRecordValue`. + * + * Possible errors: + * Local errors, + * - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC: The topic doesn't exist + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: The partition doesn't exist + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid topic(topic is null, or the length is too long (> 512) + * - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: No ack received within the time limit + * - RD_KAFKA_RESP_ERR__QUEUE_FULL: The message buffing queue is full + * Broker errors, + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + void send(const producer::ProducerRecord& record, + const producer::Callback& deliveryCb, + Error& error, + SendOption option = SendOption::NoCopyRecordValue, + ActionWhileQueueIsFull action = ActionWhileQueueIsFull::Block) + { + try { send(record, deliveryCb, option, action); } catch (const KafkaException& e) { error = e.error(); } + } + + /** + * Synchronously send a record to a topic. + * Throws KafkaException with errors: + * Local errors, + * - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC: The topic doesn't exist + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: The partition doesn't exist + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid topic(topic is null, or the length is too long (> 512) + * - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: No ack received within the time limit + * Broker errors, + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + producer::RecordMetadata syncSend(const producer::ProducerRecord& record); + + /** + * Needs to be called before any other methods when the transactional.id is set in the configuration. + */ + void initTransactions(std::chrono::milliseconds timeout = std::chrono::milliseconds(KafkaProducer::DEFAULT_INIT_TRANSACTIONS_TIMEOUT_MS)); + + /** + * Should be called before the start of each new transaction. + */ + void beginTransaction(); + + /** + * Commit the ongoing transaction. + */ + void commitTransaction(std::chrono::milliseconds timeout = std::chrono::milliseconds(KafkaProducer::DEFAULT_COMMIT_TRANSACTION_TIMEOUT_MS)); + + /** + * Abort the ongoing transaction. 
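+     * A transactional-send sketch (illustrative only; assumes "transactional.id" was set in the Properties,
+     * and that `record` and `deliveryCb` are defined elsewhere):
+     *
+     *   producer.initTransactions();
+     *   producer.beginTransaction();
+     *   try {
+     *       producer.send(record, deliveryCb);
+     *       producer.commitTransaction();
+     *   } catch (const kafka::KafkaException&) {
+     *       producer.abortTransaction();
+     *   }
+     *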
+ */ + void abortTransaction(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()); + + + /** + * Send a list of specified offsets to the consumer group coodinator, and also marks those offsets as part of the current transaction. + */ + void sendOffsetsToTransaction(const TopicPartitionOffsets& topicPartitionOffsets, + const consumer::ConsumerGroupMetadata& groupMetadata, + std::chrono::milliseconds timeout); + +#if COMPILER_SUPPORTS_CPP_17 + static constexpr int DEFAULT_INIT_TRANSACTIONS_TIMEOUT_MS = 10000; + static constexpr int DEFAULT_COMMIT_TRANSACTION_TIMEOUT_MS = 10000; +#else + enum { DEFAULT_INIT_TRANSACTIONS_TIMEOUT_MS = 10000 }; + enum { DEFAULT_COMMIT_TRANSACTION_TIMEOUT_MS = 10000 }; +#endif + +private: + void pollCallbacks(int timeoutMs) + { + rd_kafka_poll(getClientHandle(), timeoutMs); + } + + // Define datatypes for "opaque" (as an input for rd_kafka_produceva), in order to handle the delivery callback + class DeliveryCbOpaque + { + public: + DeliveryCbOpaque(Optional id, producer::Callback cb): _recordId(id), _deliveryCb(std::move(cb)) {} + + void operator()(rd_kafka_t* /*rk*/, const rd_kafka_message_t* rkmsg) + { + _deliveryCb(producer::RecordMetadata{rkmsg, _recordId}, Error{rkmsg->err}); + } + + private: + const Optional _recordId; + const producer::Callback _deliveryCb; + }; + + // Validate properties (and fix it if necesary) + static Properties validateAndReformProperties(const Properties& properties); + + // Delivery Callback (for librdkafka) + static void deliveryCallback(rd_kafka_t* rk, const rd_kafka_message_t* rkmsg, void* opaque); + + // Register Callbacks for rd_kafka_conf_t + static void registerConfigCallbacks(rd_kafka_conf_t* conf); + +#ifdef KAFKA_API_ENABLE_UNIT_TEST_STUBS +public: + using HandleProduceResponseCb = std::function; + + /** + * Stub for ProduceResponse handing. + * Note: Only for internal unit tests + */ + void stubHandleProduceResponse(HandleProduceResponseCb cb = HandleProduceResponseCb()) { _handleProduceRespCb = std::move(cb); } + +private: + static rd_kafka_resp_err_t handleProduceResponse(rd_kafka_t* rk, int32_t brokerId, uint64_t msgSeq, rd_kafka_resp_err_t err) + { + auto* client = static_cast(rd_kafka_opaque(rk)); + auto* producer = dynamic_cast(client); + auto respCb = producer->_handleProduceRespCb; + return respCb ? 
respCb(rk, brokerId, msgSeq, err) : err; + } + + HandleProduceResponseCb _handleProduceRespCb; +#endif +}; + +inline +KafkaProducer::KafkaProducer(const Properties& properties, EventsPollingOption eventsPollingOption) + : KafkaClient(ClientType::KafkaProducer, + validateAndReformProperties(properties), + registerConfigCallbacks, + eventsPollingOption) +{ + // Start background polling (if needed) + startBackgroundPollingIfNecessary([this](int timeoutMs){ pollCallbacks(timeoutMs); }); + + const auto propStr = KafkaClient::properties().toString(); + KAFKA_API_DO_LOG(Log::Level::Notice, "initializes with properties[%s]", propStr.c_str()); +} + +inline void +KafkaProducer::registerConfigCallbacks(rd_kafka_conf_t* conf) +{ + // Delivery Callback + rd_kafka_conf_set_dr_msg_cb(conf, deliveryCallback); + +#ifdef KAFKA_API_ENABLE_UNIT_TEST_STUBS + // UT stub for ProduceResponse + LogBuffer errInfo; + if (rd_kafka_conf_set(conf, "ut_handle_ProduceResponse", reinterpret_cast(&handleProduceResponse), errInfo.str(), errInfo.capacity())) // NOLINT + { + KafkaClient* client = nullptr; + size_t clientPtrSize = 0; + if (rd_kafka_conf_get(conf, "opaque", reinterpret_cast(&client), &clientPtrSize)) // NOLINT + { + KAFKA_API_LOG(Log::Level::Crit, "failed to stub ut_handle_ProduceResponse! error[%s]. Meanwhile, failed to get the Kafka client!", errInfo.c_str()); + } + else + { + assert(clientPtrSize == sizeof(client)); // NOLINT + client->KAFKA_API_DO_LOG(Log::Level::Err, "failed to stub ut_handle_ProduceResponse! error[%s]", errInfo.c_str()); + } + } +#endif +} + +inline Properties +KafkaProducer::validateAndReformProperties(const Properties& properties) +{ + // Let the base class validate first + auto newProperties = KafkaClient::validateAndReformProperties(properties); + + // Check whether it's an available partitioner + const std::set availPartitioners = {"murmur2_random", "murmur2", "random", "consistent", "consistent_random", "fnv1a", "fnv1a_random"}; + auto partitioner = newProperties.getProperty(producer::Config::PARTITIONER); + if (partitioner && !availPartitioners.count(*partitioner)) + { + std::string errMsg = "Invalid partitioner [" + *partitioner + "]! Valid options: "; + bool isTheFirst = true; + for (const auto& availPartitioner: availPartitioners) + { + errMsg += (std::string(isTheFirst ? 
(isTheFirst = false, "") : ", ") + availPartitioner); + } + errMsg += "."; + + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG, errMsg)); + } + + // For "idempotence" feature + constexpr int KAFKA_IDEMP_MAX_INFLIGHT = 5; + const auto enableIdempotence = newProperties.getProperty(producer::Config::ENABLE_IDEMPOTENCE); + if (enableIdempotence && *enableIdempotence == "true") + { + if (const auto maxInFlight = newProperties.getProperty(producer::Config::MAX_IN_FLIGHT)) + { + if (std::stoi(*maxInFlight) > KAFKA_IDEMP_MAX_INFLIGHT) + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG,\ + "`max.in.flight` must be set <= " + std::to_string(KAFKA_IDEMP_MAX_INFLIGHT) + " when `enable.idempotence` is `true`")); + } + } + + if (const auto acks = newProperties.getProperty(producer::Config::ACKS)) + { + if (*acks != "all" && *acks != "-1") + { + KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG,\ + "`acks` must be set to `all`/`-1` when `enable.idempotence` is `true`")); + } + } + } + + return newProperties; +} + +// Delivery Callback (for librdkafka) +inline void +KafkaProducer::deliveryCallback(rd_kafka_t* rk, const rd_kafka_message_t* rkmsg, void* /*opaque*/) +{ + if (auto* deliveryCbOpaque = static_cast(rkmsg->_private)) + { + (*deliveryCbOpaque)(rk, rkmsg); + delete deliveryCbOpaque; + } +} + +inline void +KafkaProducer::send(const producer::ProducerRecord& record, + const producer::Callback& deliveryCb, + SendOption option, + ActionWhileQueueIsFull action) +{ + auto deliveryCbOpaque = std::make_unique(record.id(), deliveryCb); + auto queueFullAction = (isWithAutoEventsPolling() ? action : ActionWhileQueueIsFull::NoBlock); + + const auto* topic = record.topic().c_str(); + const auto partition = record.partition(); + const auto msgFlags = (static_cast(option == SendOption::ToCopyRecordValue ? RD_KAFKA_MSG_F_COPY : 0) + | static_cast(queueFullAction == ActionWhileQueueIsFull::Block ? 
RD_KAFKA_MSG_F_BLOCK : 0)); + const auto* keyPtr = record.key().data(); + const auto keyLen = record.key().size(); + const auto* valuePtr = record.value().data(); + const auto valueLen = record.value().size(); + + auto* rk = getClientHandle(); + auto* opaquePtr = deliveryCbOpaque.get(); + + constexpr std::size_t VU_LIST_SIZE_WITH_NO_HEADERS = 6; + std::vector rkVUs(VU_LIST_SIZE_WITH_NO_HEADERS + record.headers().size()); + + std::size_t uvCount = 0; + + { // Topic + auto& vu = rkVUs[uvCount++]; + vu.vtype = RD_KAFKA_VTYPE_TOPIC; + vu.u.cstr = topic; + } + + { // Partition + auto& vu = rkVUs[uvCount++]; + vu.vtype = RD_KAFKA_VTYPE_PARTITION; + vu.u.i32 = partition; + } + + { // Message flags + auto& vu = rkVUs[uvCount++]; + vu.vtype = RD_KAFKA_VTYPE_MSGFLAGS; + vu.u.i = static_cast(msgFlags); + } + + { // Key + auto& vu = rkVUs[uvCount++]; + vu.vtype = RD_KAFKA_VTYPE_KEY; + vu.u.mem.ptr = const_cast(keyPtr); // NOLINT + vu.u.mem.size = keyLen; + } + + { // Value + auto& vu = rkVUs[uvCount++]; + vu.vtype = RD_KAFKA_VTYPE_VALUE; + vu.u.mem.ptr = const_cast(valuePtr); // NOLINT + vu.u.mem.size = valueLen; + } + + { // Opaque + auto& vu = rkVUs[uvCount++]; + vu.vtype = RD_KAFKA_VTYPE_OPAQUE; + vu.u.ptr = opaquePtr; + } + + // Headers + for (const auto& header: record.headers()) + { + auto& vu = rkVUs[uvCount++]; + vu.vtype = RD_KAFKA_VTYPE_HEADER; + vu.u.header.name = header.key.c_str(); + vu.u.header.val = header.value.data(); + vu.u.header.size = static_cast(header.value.size()); + } + + assert(uvCount == rkVUs.size()); + + Error sendResult{ rd_kafka_produceva(rk, rkVUs.data(), rkVUs.size()) }; + KAFKA_THROW_IF_WITH_ERROR(sendResult); + + // KafkaProducer::deliveryCallback would delete the "opaque" + deliveryCbOpaque.release(); +} + +inline producer::RecordMetadata +KafkaProducer::syncSend(const producer::ProducerRecord& record) +{ + Optional deliveryResult; + producer::RecordMetadata recordMetadata; + std::mutex mtx; + std::condition_variable delivered; + + auto deliveryCb = [&deliveryResult, &recordMetadata, &mtx, &delivered] (const producer::RecordMetadata& metadata, const Error& error) { + std::lock_guard guard(mtx); + + deliveryResult = error; + recordMetadata = metadata; + + delivered.notify_one(); + }; + + send(record, deliveryCb); + + std::unique_lock lock(mtx); + delivered.wait(lock, [&deliveryResult]{ return static_cast(deliveryResult); }); + + KAFKA_THROW_IF_WITH_ERROR(*deliveryResult); + + return recordMetadata; +} + +inline Error +KafkaProducer::flush(std::chrono::milliseconds timeout) +{ + return Error{rd_kafka_flush(getClientHandle(), convertMsDurationToInt(timeout))}; +} + +inline Error +KafkaProducer::purge() +{ + return Error{rd_kafka_purge(getClientHandle(), + (static_cast(RD_KAFKA_PURGE_F_QUEUE) | static_cast(RD_KAFKA_PURGE_F_INFLIGHT)))}; +} + +inline void +KafkaProducer::close(std::chrono::milliseconds timeout) +{ + _opened = false; + + stopBackgroundPollingIfNecessary(); + + Error result = flush(timeout); + if (result.value() == RD_KAFKA_RESP_ERR__TIMED_OUT) + { + KAFKA_API_DO_LOG(Log::Level::Notice, "purge messages before close, outQLen[%d]", rd_kafka_outq_len(getClientHandle())); + purge(); + } + + rd_kafka_poll(getClientHandle(), 0); + + KAFKA_API_DO_LOG(Log::Level::Notice, "closed"); + +} + +inline void +KafkaProducer::initTransactions(std::chrono::milliseconds timeout) +{ + Error result{ rd_kafka_init_transactions(getClientHandle(), static_cast(timeout.count())) }; // NOLINT + KAFKA_THROW_IF_WITH_ERROR(result); +} + +inline void +KafkaProducer::beginTransaction() 
+{ + Error result{ rd_kafka_begin_transaction(getClientHandle()) }; + KAFKA_THROW_IF_WITH_ERROR(result); +} + +inline void +KafkaProducer::commitTransaction(std::chrono::milliseconds timeout) +{ + Error result{ rd_kafka_commit_transaction(getClientHandle(), static_cast(timeout.count())) }; // NOLINT + KAFKA_THROW_IF_WITH_ERROR(result); +} + +inline void +KafkaProducer::abortTransaction(std::chrono::milliseconds timeout) +{ + Error result{ rd_kafka_abort_transaction(getClientHandle(), static_cast(timeout.count())) }; // NOLINT + KAFKA_THROW_IF_WITH_ERROR(result); +} + +inline void +KafkaProducer::sendOffsetsToTransaction(const TopicPartitionOffsets& topicPartitionOffsets, + const consumer::ConsumerGroupMetadata& groupMetadata, + std::chrono::milliseconds timeout) +{ + auto rk_tpos = rd_kafka_topic_partition_list_unique_ptr(createRkTopicPartitionList(topicPartitionOffsets)); + Error result{ rd_kafka_send_offsets_to_transaction(getClientHandle(), + rk_tpos.get(), + groupMetadata.rawHandle(), + static_cast(timeout.count())) }; // NOLINT + KAFKA_THROW_IF_WITH_ERROR(result); +} + +} } // end of KAFKA_API::clients + diff --git a/modern-cpp-kafka/include/kafka/Log.h b/modern-cpp-kafka/include/kafka/Log.h new file mode 100644 index 00000000..d8790568 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Log.h @@ -0,0 +1,88 @@ +#pragma once + +#include + +#include + +#include +#include +#include +#include +#include + + +namespace KAFKA_API { + +struct Log +{ + enum Level + { + Emerg = 0, + Alert = 1, + Crit = 2, + Err = 3, + Warning = 4, + Notice = 5, + Info = 6, + Debug = 7 + }; + + static const std::string& levelString(std::size_t level) + { + static const std::vector levelNames = {"EMERG", "ALERT", "CRIT", "ERR", "WARNING", "NOTICE", "INFO", "DEBUG", "INVALID"}; + static const std::size_t maxIndex = levelNames.size() - 1; + + return levelNames[std::min(level, maxIndex)]; + } +}; + +template +class LogBuffer +{ +public: + LogBuffer():_wptr(_buf.data()) { _buf[0] = 0; } // NOLINT + + LogBuffer& clear() + { + _wptr = _buf.data(); + _buf[0] = 0; + return *this; + } + + template + LogBuffer& print(const char* format, Args... 
args) + { + assert(!(_buf[0] != 0 && _wptr == _buf.data())); // means it has already been used as a plain buffer (with `str()`) + + auto cnt = std::snprintf(_wptr, capacity(), format, args...); // returns number of characters written if successful (not including '\0') + if (cnt > 0) + { + _wptr = std::min(_wptr + cnt, _buf.data() + MAX_CAPACITY - 1); + } + return *this; + } + LogBuffer& print(const char* format) { return print("%s", format); } + + std::size_t capacity() const { return static_cast(_buf.data() + MAX_CAPACITY - _wptr); } + char* str() { return _buf.data(); } + const char* c_str() const { return _buf.data(); } + +private: + std::array _buf; + char* _wptr; +}; + +using Logger = std::function; + +inline void DefaultLogger(int level, const char* /*filename*/, int /*lineno*/, const char* msg) +{ + std::cout << "[" << utility::getCurrentTime() << "]" << Log::levelString(static_cast(level)) << " " << msg; + std::cout << std::endl; +} + +inline void NullLogger(int /*level*/, const char* /*filename*/, int /*lineno*/, const char* /*msg*/) +{ +} + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/ProducerCommon.h b/modern-cpp-kafka/include/kafka/ProducerCommon.h new file mode 100644 index 00000000..ede7fd02 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/ProducerCommon.h @@ -0,0 +1,185 @@ +#pragma once + +#include + +#include +#include +#include +#include + +#include + +#include +#include + + +namespace KAFKA_API { namespace clients { namespace producer { + +/** + * The metadata for a record that has been acknowledged by the server. + */ +class RecordMetadata +{ +public: + enum class PersistedStatus { Not, Possibly, Done }; + + RecordMetadata() = default; + + RecordMetadata(const RecordMetadata& another) { *this = another; } + + // This is only called by the KafkaProducer::deliveryCallback (with a valid rkmsg pointer) + RecordMetadata(const rd_kafka_message_t* rkmsg, Optional recordId) + : _rkmsg(rkmsg), _recordId(recordId) {} + + RecordMetadata& operator=(const RecordMetadata& another) + { + if (this != &another) + { + _cachedInfo = std::make_unique(another.topic(), + another.partition(), + another.offset() ? *another.offset() : RD_KAFKA_OFFSET_INVALID, + another.keySize(), + another.valueSize(), + another.timestamp(), + another.persistedStatus()); + _recordId = another._recordId; + _rkmsg = nullptr; + } + + return *this; + } + + /** + * The topic the record was appended to. + */ + std::string topic() const + { + return _rkmsg ? (_rkmsg->rkt ? rd_kafka_topic_name(_rkmsg->rkt) : "") : _cachedInfo->topic; + } + + /** + * The partition the record was sent to. + */ + Partition partition() const + { + return _rkmsg ? _rkmsg->partition : _cachedInfo->partition; + } + + /** + * The offset of the record in the topic/partition. + */ + Optional offset() const + { + auto offset = _rkmsg ? _rkmsg->offset : _cachedInfo->offset; + return (offset != RD_KAFKA_OFFSET_INVALID) ? Optional(offset) : Optional(); + } + + /** + * The recordId could be used to identify the acknowledged message. + */ + Optional recordId() const + { + return _recordId; + } + + /** + * The size of the key in bytes. + */ + KeySize keySize() const + { + return _rkmsg ? _rkmsg->key_len : _cachedInfo->keySize; + } + + /** + * The size of the value in bytes. + */ + ValueSize valueSize() const + { + return _rkmsg ? _rkmsg->len : _cachedInfo->valueSize; + } + + /** + * The timestamp of the record in the topic/partition. + */ + Timestamp timestamp() const + { + return _rkmsg ? 
getMsgTimestamp(_rkmsg) : _cachedInfo->timestamp; + } + + /** + * The persisted status of the record. + */ + PersistedStatus persistedStatus() const + { + return _rkmsg ? getMsgPersistedStatus(_rkmsg) : _cachedInfo->persistedStatus; + } + + std::string persistedStatusString() const + { + return getPersistedStatusString(persistedStatus()); + } + + std::string toString() const + { + return topic() + "-" + std::to_string(partition()) + "@" + (offset() ? std::to_string(*offset()) : "NA") + + (recordId() ? (":id[" + std::to_string(*recordId()) + "],") : ",") + + timestamp().toString() + "," + persistedStatusString(); + } + +private: + static Timestamp getMsgTimestamp(const rd_kafka_message_t* rkmsg) + { + rd_kafka_timestamp_type_t tstype{}; + Timestamp::Value tsValue = rd_kafka_message_timestamp(rkmsg, &tstype); + return {tsValue, tstype}; + } + + static PersistedStatus getMsgPersistedStatus(const rd_kafka_message_t* rkmsg) + { + rd_kafka_msg_status_t status = rd_kafka_message_status(rkmsg); + return status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED ? PersistedStatus::Not : (status == RD_KAFKA_MSG_STATUS_PERSISTED ? PersistedStatus::Done : PersistedStatus::Possibly); + } + + static std::string getPersistedStatusString(PersistedStatus status) + { + return status == PersistedStatus::Not ? "NotPersisted" : + (status == PersistedStatus::Done ? "Persisted" : "PossiblyPersisted"); + } + + struct CachedInfo + { + CachedInfo(Topic t, Partition p, Offset o, KeySize ks, ValueSize vs, Timestamp ts, PersistedStatus pst) + : topic(std::move(t)), + partition(p), + offset(o), + keySize(ks), + valueSize(vs), + timestamp(ts), + persistedStatus(pst) + { + } + + CachedInfo(const CachedInfo&) = default; + + std::string topic; + Partition partition; + Offset offset; + KeySize keySize; + ValueSize valueSize; + Timestamp timestamp; + PersistedStatus persistedStatus; + }; + + std::unique_ptr _cachedInfo; + const rd_kafka_message_t* _rkmsg = nullptr; + Optional _recordId; +}; + +/** + * A callback method could be used to provide asynchronous handling of request completion. + * This method will be called when the record sent (by KafkaAsyncProducer) to the server has been acknowledged. + */ +using Callback = std::function; + +} } } // end of KAFKA_API::clients::producer + diff --git a/modern-cpp-kafka/include/kafka/ProducerConfig.h b/modern-cpp-kafka/include/kafka/ProducerConfig.h new file mode 100644 index 00000000..b3a8b44b --- /dev/null +++ b/modern-cpp-kafka/include/kafka/ProducerConfig.h @@ -0,0 +1,150 @@ +#pragma once + +#include + +#include + + +namespace KAFKA_API { namespace clients { namespace producer { + +/** + * Configuration for the Kafka Producer. + */ +class Config: public Properties +{ +public: + Config() = default; + Config(const Config&) = default; + explicit Config(const PropertiesMap& kvMap): Properties(kvMap) {} + + /** + * The string contains host:port pairs of brokers (splitted by ",") that the producer will use to establish initial connection to the Kafka cluster. + * Note: It's mandatory. + */ + static const constexpr char* BOOTSTRAP_SERVERS = "bootstrap.servers"; + + /** + * This can be any string, and will be used by the brokers to identify messages sent from the client. + */ + static const constexpr char* CLIENT_ID = "client.id"; + + /** + * The acks parameter controls how many partition replicas must receive the record before the producer can consider the write successful. + * 1) acks=0, the producer will not wait for a reply from the broker before assuming the message was sent successfully. 
+ * 2) acks=1, the producer will receive a success response from the broker the moment the leader replica received the message. + * 3) acks=all, the producer will receive a success response from the broker once all in-sync replicas received the message. + * Note: if "ack=all", please make sure the topic's replication factor be larger than 1. + * That means, if the topic is automaticly created by producer's `send`, the `default.replication.factor` property for the kafka server should be larger than 1. + * The "ack=all" property is mandatory for reliability requirements, but would increase the ack latency and impact the throughput. + * Default value: all + */ + static const constexpr char* ACKS = "acks"; + + /** + * Maximum number of messages allowed on the producer queue. + * Default value: 100000 + */ + static const constexpr char* QUEUE_BUFFERING_MAX_MESSAGES = "queue.buffering.max.messages"; + + /** + * Maximum total message size sum allowed on the producer queue. + * Default value: 0x100000 (1GB) + */ + static const constexpr char* QUEUE_BUFFERING_MAX_KBYTES = "queue.buffering.max.kbytes"; + + /** + * Delay in milliseconds to wait for messages in the producer queue, to accumulate before constructing messages batches to transmit to brokers. + * Default value: 0 (KafkaSyncProducer); 0.5 (KafkaAsyncProducer) + */ + static const constexpr char* LINGER_MS = "linger.ms"; + + /** + * Maximum number of messages batched in one messageSet. The total MessageSet size is also limited by MESSAGE_MAX_BYTES. + * Default value: 10000 + */ + static const constexpr char* BATCH_NUM_MESSAGES = "batch.num.messages"; + + /** + * Maximum size (in bytes) of all messages batched in one MessageSet (including protocol framing overhead). + * Default value: 1000000 + */ + static const constexpr char* BATCH_SIZE = "batch.size"; + + /** + * Maximum Kafka protocol request message size. + * Note: Should be coordinated with the brokers's configuration. Otherwise, any larger message would be rejected! + * Default value: 1000000 + */ + static const constexpr char* MESSAGE_MAX_BYTES = "message.max.bytes"; + + /** + * This value is enforced locally and limits the time a produced message waits for successful delivery. + * Note: If failed to get the ack within this limit, an exception would be thrown (in `SyncProducer.send()`), or an error code would be passed into the delivery callback (AsyncProducer). + * Default value: 300000 + */ + static const constexpr char* MESSAGE_TIMEOUT_MS = "message.timeout.ms"; + + /** + * This value is only enforced by the brokers and relies on `ACKS` being non-zero. + * Note: The leading broker waits for in-sync replicas to acknowledge the message, and will return an error if the time elapses without the necessary acks. + * Default value: 5000 + */ + static const constexpr char* REQUEST_TIMEOUT_MS = "request.timeout.ms"; + + /** + * The default partitioner for a ProducerRecord (with no partition assigned). 
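For illustration, a minimal sketch of how the producer::Config constants documented above might be wired together; the broker address and the tuning values are placeholders, not recommendations:

#include <kafka/KafkaProducer.h>
#include <kafka/ProducerConfig.h>

// Illustrative only: all values below are placeholders.
void configDemo()
{
    using Config = kafka::clients::producer::Config;

    Config cfg;
    cfg.put(Config::BOOTSTRAP_SERVERS,  "localhost:9092"); // mandatory broker list (placeholder)
    cfg.put(Config::ACKS,               "all");            // wait for all in-sync replicas
    cfg.put(Config::LINGER_MS,          "5");              // small batching delay
    cfg.put(Config::MESSAGE_TIMEOUT_MS, "30000");          // local delivery time limit

    // The config is then handed to a producer instance.
    kafka::clients::KafkaProducer producer(cfg);
}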
+ * Note: It's not the same as the Java version's "partitioner.class" property + * Available options: + * 1) random -- random distribution + * 2) consistent -- CRC32 hash of key (`ProducerRecord`s with empty/null key are mapped to single partition) + * 3) consistent_random -- CRC32 hash of key (`ProducerRecord`s with empty/null key are randomly partitioned) + * 4) murmur2 -- Java Producer compatible Murmur2 hash of key (`ProducerRecord`s with null key are mapped to single partition) + * 5) murmur2_random -- Java Producer compatible Murmur2 hash of key (`ProducerRecord`s with null key are randomly partitioned. It's equivalent to the Java Producer's default partitioner) + * 6) fnv1a -- FNV-1a hash of key (`ProducerRecord`s with null key are mapped to single partition) + * 7) fnv1a_random -- FNV-1a hash of key (`ProducerRecord`s with null key are randomly partitioned) + * Default value: murmur2_random + */ + static const constexpr char* PARTITIONER = "partitioner"; + + /** + * Maximum number of in-flight requests per broker connection. + * Default value: 1000000 (while `enable.idempotence`=false); 5 (while `enable.idempotence`=true) + */ + static const constexpr char* MAX_IN_FLIGHT = "max.in.flight"; + + /** + * When set to `true`, the producer will ensure that messages are successfully sent exactly once and in the original order. + * Default value: false + */ + static const constexpr char* ENABLE_IDEMPOTENCE = "enable.idempotence"; + + /** + * It's used to identify the same transactional producer instance across process restarts. + */ + static const constexpr char* TRANSACTIONAL_ID = "transactional.id"; + + /** + * The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. + * Default value: 60000 + */ + static const constexpr char* TRANSACTION_TIMEOUT_MS = "transaction.timeout.ms"; + + /** + * Protocol used to communicate with brokers. + * Default value: plaintext + */ + static const constexpr char* SECURITY_PROTOCOL = "security.protocol"; + + /** + * Shell command to refresh or acquire the client's Kerberos ticket. + */ + static const constexpr char* SASL_KERBEROS_KINIT_CMD = "sasl.kerberos.kinit.cmd"; + + /** + * The client's Kerberos principal name. + */ + static const constexpr char* SASL_KERBEROS_SERVICE_NAME = "sasl.kerberos.service.name"; +}; + +} } } // end of KAFKA_API::clients::producer + diff --git a/modern-cpp-kafka/include/kafka/ProducerRecord.h b/modern-cpp-kafka/include/kafka/ProducerRecord.h new file mode 100644 index 00000000..f466bfce --- /dev/null +++ b/modern-cpp-kafka/include/kafka/ProducerRecord.h @@ -0,0 +1,109 @@ +#pragma once + +#include + +#include +#include + +#include + + +namespace KAFKA_API { namespace clients { namespace producer { + +/** + * A key/value pair to be sent to Kafka. + * This consists of a topic name to which the record is being sent, an optional partition number, and an optional key and value. + * Note: `ProducerRecord` does not take ownership of the memory block of `Value`.
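To illustrate the non-owning semantics noted above, a small sketch of constructing a record; the topic name and the id are hypothetical:

#include <kafka/ProducerRecord.h>

#include <iostream>
#include <string>

// The record only references the key/value buffers, so `key` and `payload`
// must stay alive until the record has been sent (or delivered, for async sends).
void recordDemo(const std::string& key, const std::string& payload)
{
    kafka::clients::producer::ProducerRecord record("demo-topic",   // placeholder topic
                                                     kafka::Key(key.c_str(), key.size()),
                                                     kafka::Value(payload.c_str(), payload.size()));
    record.setId(42);   // optional id, echoed back through RecordMetadata::recordId()

    std::cout << record.toString() << std::endl;
}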
+ */ +class ProducerRecord +{ +public: + using Id = std::uint64_t; + + ProducerRecord(Topic topic, Partition partition, const Key& key, const Value& value) + : _topic(std::move(topic)), _partition(partition), _key(key), _value(value) {} + + ProducerRecord(const Topic& topic, Partition partition, const Key& key, const Value& value, Id id) + : ProducerRecord(topic, partition, key, value) { _id = id; } + + ProducerRecord(const Topic& topic, const Key& key, const Value& value) + : ProducerRecord(topic, RD_KAFKA_PARTITION_UA, key, value) {} + + ProducerRecord(const Topic& topic, const Key& key, const Value& value, Id id) + : ProducerRecord(topic, key, value) { _id = id; } + + /** + * The topic this record is being sent to. + */ + const Topic& topic() const { return _topic; } + + /** + * The partition to which the record will be sent (or UNKNOWN_PARTITION if no partition was specified). + */ + Partition partition() const { return _partition; } + + /** + * The key (or null if no key is specified). + */ + Key key() const { return _key; } + + /** + * The value. + */ + Value value() const { return _value; } + + /** + * The id to identify the message (consistent with `Producer::Metadata::recordId()`). + */ + Optional id() const { return _id; } + + /** + * The headers. + */ + const Headers& headers() const { return _headers; } + + /** + * The headers. + * Note: Users could set headers with the reference. + */ + Headers& headers() { return _headers; } + + /** + * Set the partition. + */ + void setPartition(Partition partition) { _partition = partition; } + + /** + * Set the key. + */ + void setKey(const Key& key) { _key = key; } + + /** + * Set the value. + */ + void setValue(const Value& value) { _value = value; } + + /** + * Set the record id. + */ + void setId(Id id) { _id = id; } + + std::string toString() const + { + return _topic + "-" + (_partition == RD_KAFKA_PARTITION_UA ? "NA" : std::to_string(_partition)) + std::string(":") + + (_id ? (std::to_string(*_id) + std::string(", ")) : " ") + + (_headers.empty() ? "" : ("headers[" + KAFKA_API::toString(_headers) + "], ")) + + _key.toString() + std::string("/") + _value.toString(); + } + +private: + Topic _topic; + Partition _partition; + Key _key; + Value _value; + Headers _headers; + Optional _id; +}; + +} } } // end of KAFKA_API::clients::producer + diff --git a/modern-cpp-kafka/include/kafka/Project.h b/modern-cpp-kafka/include/kafka/Project.h new file mode 100644 index 00000000..9ab1bf08 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Project.h @@ -0,0 +1,22 @@ +#pragma once + +// Customize the namespace (default is `kafka`) if necessary +#ifndef KAFKA_API +#define KAFKA_API kafka +#endif + +// Here is the MACRO to enable internal stubs for UT +// #ifndef KAFKA_API_ENABLE_UNIT_TEST_STUBS +// #define KAFKA_API_ENABLE_UNIT_TEST_STUBS +// #endif + +#if defined(WIN32) && !defined(NOMINMAX) +#define NOMINMAX +#endif + +#if ((__cplusplus >= 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)) +#define COMPILER_SUPPORTS_CPP_17 1 +#else +#define COMPILER_SUPPORTS_CPP_17 0 +#endif + diff --git a/modern-cpp-kafka/include/kafka/Properties.h b/modern-cpp-kafka/include/kafka/Properties.h new file mode 100644 index 00000000..f21b04ff --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Properties.h @@ -0,0 +1,100 @@ +#pragma once + +#include + +#include + +#include +#include +#include +#include + + +namespace KAFKA_API { + +/** + * The properties for Kafka clients. 
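A short sketch of the Properties container defined below; the property names and values are placeholders (note that keys matching *.password / *.username are masked by toString()):

#include <kafka/Properties.h>

#include <iostream>

void propertiesDemo()
{
    kafka::Properties props;
    props.put("bootstrap.servers", "localhost:9092")
         .put("sasl.password",     "secret");

    if (auto servers = props.getProperty("bootstrap.servers"))
    {
        std::cout << *servers << std::endl;            // "localhost:9092"
    }

    // Sensitive values are masked: "bootstrap.servers=localhost:9092|sasl.password=*"
    std::cout << props.toString() << std::endl;
}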
+ */ +class Properties +{ +public: + // Just make sure key will printed in order + using PropertiesMap = std::map; + + Properties() = default; + Properties(const Properties&) = default; + explicit Properties(PropertiesMap kvMap): _kvMap(std::move(kvMap)) {} + + virtual ~Properties() = default; + + bool operator==(const Properties& rhs) const { return map() == rhs.map(); } + + /** + * Set a property. + * If the map previously contained a mapping for the key, the old value is replaced by the specified value. + */ + Properties& put(const std::string& key, const std::string& value) + { + _kvMap[key] = value; + return *this; + } + + /** + * Remove the property (if one exists). + */ + void remove(const std::string& key) + { + _kvMap.erase(key); + } + + /** + * Get a property. + * If the map previously contained a mapping for the key, the old value is replaced by the specified value. + */ + Optional getProperty(const std::string& key) const + { + Optional ret; + auto search = _kvMap.find(key); + if (search != _kvMap.end()) + { + ret = search->second; + } + return ret; + } + + /** + * Remove a property. + */ + void eraseProperty(const std::string& key) + { + _kvMap.erase(key); + } + + std::string toString() const + { + + std::string ret; + std::for_each(_kvMap.cbegin(), _kvMap.cend(), + [&ret](const auto& kv) { + const std::string& key = kv.first; + const std::string& value = kv.second; + + static const std::regex reSensitiveKey(R"(.+\.password|.+\.username)"); + bool isSensitive = std::regex_match(key, reSensitiveKey); + + ret.append(ret.empty() ? "" : "|").append(key).append("=").append(isSensitive ? "*" : value); + }); + return ret; + } + + /** + * Get all properties with a map. + */ + const PropertiesMap& map() const { return _kvMap; } + +private: + PropertiesMap _kvMap; +}; + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/RdKafkaHelper.h b/modern-cpp-kafka/include/kafka/RdKafkaHelper.h new file mode 100644 index 00000000..a7217266 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/RdKafkaHelper.h @@ -0,0 +1,122 @@ +#pragma once + +#include + +#include + +#include + +#include + +namespace KAFKA_API { + +// define smart pointers for rk_kafka_xxx datatypes + +struct RkQueueDeleter { void operator()(rd_kafka_queue_t* p) { rd_kafka_queue_destroy(p); } }; +using rd_kafka_queue_unique_ptr = std::unique_ptr; + +struct RkEventDeleter { void operator()(rd_kafka_event_t* p) { rd_kafka_event_destroy(p); } }; +using rd_kafka_event_unique_ptr = std::unique_ptr; + +struct RkTopicDeleter { void operator()(rd_kafka_topic_t* p) { rd_kafka_topic_destroy(p); } }; +using rd_kafka_topic_unique_ptr = std::unique_ptr; + +struct RkTopicPartitionListDeleter { void operator()(rd_kafka_topic_partition_list_t* p) { rd_kafka_topic_partition_list_destroy(p); } }; +using rd_kafka_topic_partition_list_unique_ptr = std::unique_ptr; + +struct RkConfDeleter { void operator()(rd_kafka_conf_t* p) { rd_kafka_conf_destroy(p); } }; +using rd_kafka_conf_unique_ptr = std::unique_ptr; + +struct RkMetadataDeleter { void operator()(const rd_kafka_metadata_t* p) { rd_kafka_metadata_destroy(p); } }; +using rd_kafka_metadata_unique_ptr = std::unique_ptr; + +struct RkDeleter { void operator()(rd_kafka_t* p) { rd_kafka_destroy(p); } }; +using rd_kafka_unique_ptr = std::unique_ptr; + +struct RkNewTopicDeleter { void operator()(rd_kafka_NewTopic_t* p) { rd_kafka_NewTopic_destroy(p); } }; +using rd_kafka_NewTopic_unique_ptr = std::unique_ptr; + +struct RkDeleteTopicDeleter { void operator()(rd_kafka_DeleteTopic_t* p) { 
rd_kafka_DeleteTopic_destroy(p); } }; +using rd_kafka_DeleteTopic_unique_ptr = std::unique_ptr; + +struct RkDeleteRecordsDeleter { void operator()(rd_kafka_DeleteRecords_t* p) { rd_kafka_DeleteRecords_destroy(p); } }; +using rd_kafka_DeleteRecords_unique_ptr = std::unique_ptr; + +struct RkConsumerGroupMetadataDeleter { void operator()(rd_kafka_consumer_group_metadata_t* p) { rd_kafka_consumer_group_metadata_destroy(p) ; } }; +using rd_kafka_consumer_group_metadata_unique_ptr = std::unique_ptr; + +inline void RkErrorDeleter(rd_kafka_error_t* p) { rd_kafka_error_destroy(p); } +using rd_kafka_error_shared_ptr = std::shared_ptr; + +// Convert from rd_kafka_xxx datatypes +inline TopicPartitionOffsets getTopicPartitionOffsets(const rd_kafka_topic_partition_list_t* rk_tpos) +{ + TopicPartitionOffsets ret; + int count = rk_tpos ? rk_tpos->cnt : 0; + for (int i = 0; i < count; ++i) + { + const Topic t = rk_tpos->elems[i].topic; + const Partition p = rk_tpos->elems[i].partition; + const Offset o = rk_tpos->elems[i].offset; + + ret[TopicPartition(t, p)] = o; + } + return ret; +} + +inline Topics getTopics(const rd_kafka_topic_partition_list_t* rk_topics) +{ + Topics result; + for (int i = 0; i < (rk_topics ? rk_topics->cnt : 0); ++i) + { + result.insert(rk_topics->elems[i].topic); + } + return result; +} + +inline TopicPartitions getTopicPartitions(const rd_kafka_topic_partition_list_t* rk_tpos) +{ + TopicPartitions result; + for (int i = 0; i < (rk_tpos ? rk_tpos->cnt : 0); ++i) + { + result.insert(TopicPartition{rk_tpos->elems[i].topic, rk_tpos->elems[i].partition}); + } + return result; +} + +// Convert to rd_kafka_xxx datatypes +inline rd_kafka_topic_partition_list_t* createRkTopicPartitionList(const TopicPartitionOffsets& tpos) +{ + rd_kafka_topic_partition_list_t* rk_tpos = rd_kafka_topic_partition_list_new(static_cast(tpos.size())); + for (const auto& tp_o: tpos) + { + const auto& tp = tp_o.first; + const auto& o = tp_o.second; + rd_kafka_topic_partition_t* rk_tp = rd_kafka_topic_partition_list_add(rk_tpos, tp.first.c_str(), tp.second); + rk_tp->offset = o; + } + return rk_tpos; +} + +inline rd_kafka_topic_partition_list_t* createRkTopicPartitionList(const TopicPartitions& tps) +{ + TopicPartitionOffsets tpos; + for (const auto& tp: tps) + { + tpos[TopicPartition(tp.first, tp.second)] = RD_KAFKA_OFFSET_INVALID; + } + return createRkTopicPartitionList(tpos); +} + +inline rd_kafka_topic_partition_list_t* createRkTopicPartitionList(const Topics& topics) +{ + TopicPartitionOffsets tpos; + for (const auto& topic: topics) + { + tpos[TopicPartition(topic, RD_KAFKA_PARTITION_UA)] = RD_KAFKA_OFFSET_INVALID; + } + return createRkTopicPartitionList(tpos); +} + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/Timestamp.h b/modern-cpp-kafka/include/kafka/Timestamp.h new file mode 100644 index 00000000..4579eb52 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Timestamp.h @@ -0,0 +1,92 @@ +#pragma once + +#include + +#include + +#include +#include +#include +#include +#include +#include + + +namespace KAFKA_API { + +/** + * The time point together with the type. + */ +struct Timestamp +{ + using Value = std::int64_t; + + enum class Type { NotAvailable, CreateTime, LogAppendTime }; + + /** + * The milliseconds since epoch. + */ + Value msSinceEpoch; + + /** + * The type shows what the `msSinceEpoch` means (CreateTime or LogAppendTime). 
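A small sketch of how a Timestamp renders itself; the millisecond value below is arbitrary:

#include <kafka/Timestamp.h>

#include <chrono>
#include <iostream>

void timestampDemo()
{
    kafka::Timestamp ts(1660000000123, kafka::Timestamp::Type::CreateTime);

    // Prints something like "CreateTime[2022-08-08 ...]" (local time).
    std::cout << ts.toString() << std::endl;

    // Implicit conversion to a std::chrono time point is also available.
    std::chrono::system_clock::time_point tp = ts;
    (void)tp;
}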
+ */ + Type type; + + explicit Timestamp(Value v = 0, Type t = Type::NotAvailable): msSinceEpoch(v), type(t) {} + Timestamp(Value v, rd_kafka_timestamp_type_t t): Timestamp(v, convertType(t)) {} + + static Type convertType(rd_kafka_timestamp_type_t tstype) + { + return (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME) ? Type::CreateTime : + (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME ? Type::LogAppendTime : Type::NotAvailable); + } + + operator std::chrono::time_point() const // NOLINT + { + return std::chrono::time_point(std::chrono::milliseconds(msSinceEpoch)); + } + + static std::string toString(Type t) + { + switch (t) + { + case Type::CreateTime: + return "CreateTime"; + case Type::LogAppendTime: + return "LogAppendTime"; + default: + assert(t == Type::NotAvailable); + return ""; + } + } + + static std::string toString(Value v) + { + auto ms = std::chrono::milliseconds(v); + auto timepoint = std::chrono::time_point(ms); + std::time_t time = std::chrono::system_clock::to_time_t(timepoint); + std::ostringstream oss; + std::tm tmBuf = {}; +#if !defined(WIN32) + oss << std::put_time(localtime_r(&time, &tmBuf), "%F %T") << "." << std::setfill('0') << std::setw(3) << (v % 1000); +#else + localtime_s(&tmBuf, &time); + oss << std::put_time(&tmBuf, "%F %T") << "." << std::setfill('0') << std::setw(3) << (v % 1000); +#endif + return oss.str(); + } + + /** + * Obtains explanatory string. + */ + std::string toString() const + { + auto typeString = toString(type); + auto timeString = toString(msSinceEpoch); + return typeString.empty() ? timeString : (typeString + "[" + timeString + "]"); + } +}; + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/Types.h b/modern-cpp-kafka/include/kafka/Types.h new file mode 100644 index 00000000..e843f902 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Types.h @@ -0,0 +1,192 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// Use `boost::optional` for C++14, which doesn't support `std::optional` +#if COMPILER_SUPPORTS_CPP_17 +#include +template +using Optional = std::optional; +#else +#include +#include +template +using Optional = boost::optional; +#endif + + +namespace KAFKA_API { + +// Which is similar with `boost::const_buffer` (thus avoid the dependency towards `boost`) +class ConstBuffer +{ +public: + explicit ConstBuffer(const void* data = nullptr, std::size_t size = 0): _data(data), _size(size) {} + const void* data() const { return _data; } + std::size_t size() const { return _size; } + std::string toString() const + { + if (_size == 0) return _data ? "[empty]" : "[null]"; + + std::ostringstream oss; + + auto printChar = [&oss](const unsigned char c) { + if (std::isprint(c)) { + oss << c; + } else { + oss << "[0x" << std::hex << std::setfill('0') << std::setw(2) << static_cast(c) << "]"; + } + }; + const auto* beg = static_cast(_data); + std::for_each(beg, beg + _size, printChar); + + return oss.str(); + } +private: + const void* _data; + std::size_t _size; +}; + +/** + * Topic name. + */ +using Topic = std::string; + +/** + * Partition number. + */ +using Partition = std::int32_t; + +/** + * Record offset. + */ +using Offset = std::int64_t; + +/** + * Record key. + */ +using Key = ConstBuffer; +using KeySize = std::size_t; + +/** + * Null Key. + */ +#if COMPILER_SUPPORTS_CPP_17 +const inline Key NullKey = Key{}; +#else +const static Key NullKey = Key{}; +#endif + +/** + * Record value. 
+ */ +using Value = ConstBuffer; +using ValueSize = std::size_t; + +/** + * Null Value. + */ +#if COMPILER_SUPPORTS_CPP_17 +const inline Value NullValue = Value{}; +#else +const static Value NullValue = Value{}; +#endif + +/** + * Topic set. + */ +using Topics = std::set; + +/** + * Topic Partition pair. + */ +using TopicPartition = std::pair; + +/** + * TopicPartition set. + */ +using TopicPartitions = std::set; + +/** + * Topic/Partition/Offset tuple + */ +using TopicPartitionOffset = std::tuple; + +/** + * TopicPartition to Offset map. + */ +using TopicPartitionOffsets = std::map; + + +/** + * Obtains explanatory string for Topics. + */ +inline std::string toString(const Topics& topics) +{ + std::string ret; + std::for_each(topics.cbegin(), topics.cend(), + [&ret](const auto& topic) { + ret.append(ret.empty() ? "" : ",").append(topic); + }); + return ret; +} + +/** + * Obtains explanatory string for TopicPartition. + */ +inline std::string toString(const TopicPartition& tp) +{ + return tp.first + std::string("-") + std::to_string(tp.second); +} + +/** + * Obtains explanatory string for TopicPartitions. + */ +inline std::string toString(const TopicPartitions& tps) +{ + std::string ret; + std::for_each(tps.cbegin(), tps.cend(), + [&ret](const auto& tp) { + ret.append((ret.empty() ? "" : ",") + tp.first + "-" + std::to_string(tp.second)); + }); + return ret; +} + +/** + * Obtains explanatory string for TopicPartitionOffset. + */ +inline std::string toString(const TopicPartitionOffset& tpo) +{ + return std::get<0>(tpo) + "-" + std::to_string(std::get<1>(tpo)) + ":" + std::to_string(std::get<2>(tpo)); +} + +/** + * Obtains explanatory string for TopicPartitionOffsets. + */ +inline std::string toString(const TopicPartitionOffsets& tpos) +{ + std::string ret; + std::for_each(tpos.cbegin(), tpos.cend(), + [&ret](const auto& tp_o) { + const TopicPartition& tp = tp_o.first; + const Offset& o = tp_o.second; + ret.append((ret.empty() ? "" : ",") + tp.first + "-" + std::to_string(tp.second) + ":" + std::to_string(o)); + }); + return ret; +} + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/Utility.h b/modern-cpp-kafka/include/kafka/Utility.h new file mode 100644 index 00000000..88e62717 --- /dev/null +++ b/modern-cpp-kafka/include/kafka/Utility.h @@ -0,0 +1,81 @@ +#pragma once + +#include + +#include + +#include +#include +#include +#include +#include +#include + + +namespace KAFKA_API { namespace utility { + +/** + * Get local time as string. + */ +inline std::string getLocalTimeString(const std::chrono::system_clock::time_point& timePoint) +{ + auto time = std::chrono::system_clock::to_time_t(timePoint); + std::tm tmBuf = {}; + +#if !defined(WIN32) + localtime_r(&time, &tmBuf); +#else + localtime_s(&tmBuf, &time); +#endif + + std::ostringstream oss; + oss << std::put_time(&tmBuf, "%F %T") << "." << std::setfill('0') << std::setw(6) + << std::chrono::duration_cast(timePoint.time_since_epoch()).count() % 1000000; + + return oss.str(); +} + +/** + * Get current local time as string. + */ +inline std::string getCurrentTime() +{ + return getLocalTimeString(std::chrono::system_clock::now()); +} + +/** + * Get random string. 
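A quick sketch of the alias types above and their toString() helpers; the topic, partitions and offsets are made up:

#include <kafka/Types.h>

#include <iostream>

void typesDemo()
{
    kafka::TopicPartitionOffsets offsets;
    offsets[kafka::TopicPartition("demo-topic", 0)] = 100;
    offsets[kafka::TopicPartition("demo-topic", 1)] = 250;

    // Prints "demo-topic-0:100,demo-topic-1:250"
    std::cout << kafka::toString(offsets) << std::endl;
}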
+ */ +inline std::string getRandomString() +{ + using namespace std::chrono; + std::uint32_t timestamp = static_cast(duration_cast(system_clock::now().time_since_epoch()).count()); + + std::random_device r; + std::default_random_engine e(r()); + std::uniform_int_distribution uniform_dist(0, 0xFFFFFFFF); + std::uint64_t rand = uniform_dist(e); + + std::ostringstream oss; + oss << std::setfill('0') << std::setw(sizeof(std::uint32_t) * 2) << std::hex << timestamp << "-" << rand; + return oss.str(); +} + +/** + * Get librdkafka version string. + */ +inline std::string getLibRdKafkaVersion() +{ + return rd_kafka_version_str(); +} + +/** + * Current number of threads created by rdkafka. + */ +inline int getLibRdKafkaThreadCount() +{ + return rd_kafka_thread_cnt(); +} + +} } // end of KAFKA_API::utility + diff --git a/modern-cpp-kafka/include/kafka/addons/KafkaMetrics.h b/modern-cpp-kafka/include/kafka/addons/KafkaMetrics.h new file mode 100644 index 00000000..cd31538a --- /dev/null +++ b/modern-cpp-kafka/include/kafka/addons/KafkaMetrics.h @@ -0,0 +1,208 @@ +#pragma once + +#include + +// https://github.com/Tencent/rapidjson/releases/tag/v1.1.0 +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +namespace KAFKA_API { + +/** + * \brief Helps to parse the metrics string with JSON format. + */ +class KafkaMetrics +{ +public: + /** + * \brief Initilize with the metrics string. + */ + explicit KafkaMetrics(std::string jsonMetrics); + + static const constexpr char* WILDCARD = "*"; + + using KeysType = std::vector; + + /** + * \brief The matched keys (for wildcards) and the value. + */ + template + using ResultsType = std::vector>; + + /** + * \brief Get integer value(s) for the specified metrics. + * Note: the wildcard ("*") is supported. + */ + ResultsType getInt(const KeysType& keys) { return get(keys); } + + /** + * \brief Get string value(s) for the specified metrics. + * Note: the wildcard ("*") is supported. 
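A hedged sketch of querying the parser declared here; it assumes a librdkafka statistics JSON string (for example one received via a stats callback), and the key path "brokers" / "*" / "outbuf_cnt" is only an example of a metric such JSON may contain:

#include <kafka/addons/KafkaMetrics.h>

#include <iostream>
#include <string>

void metricsDemo(const std::string& statsJson)
{
    kafka::KafkaMetrics metrics(statsJson);

    // The wildcard expands over all brokers; each result carries the matched key names.
    auto results = metrics.getInt({"brokers", kafka::KafkaMetrics::WILDCARD, "outbuf_cnt"});
    for (const auto& result : results)
    {
        std::cout << kafka::KafkaMetrics::toString(result.first) << " -> " << result.second << std::endl;
    }
}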
+ */ + ResultsType getString(const KeysType& keys) { return get(keys); } + + static std::string toString(const KafkaMetrics::KeysType& keys); + + template + static std::string toString(const KafkaMetrics::ResultsType& results); + +private: + template + ResultsType get(const KeysType& keys); + + template + void getResults(ResultsType& results, + KeysType& keysForWildcards, + rapidjson::Value::ConstMemberIterator iter, + KeysType::const_iterator keysToParse, + KeysType::const_iterator keysEnd); + + template + static ValueType getValue(rapidjson::Value::ConstMemberIterator iter); + +#if COMPILER_SUPPORTS_CPP_17 + std::string _decodeBuf; +#else + std::vector _decodeBuf; +#endif + rapidjson::Document _jsonDoc; +}; + +inline +KafkaMetrics::KafkaMetrics(std::string jsonMetrics) +#if COMPILER_SUPPORTS_CPP_17 + : _decodeBuf(std::move(jsonMetrics)) +#else + : _decodeBuf(jsonMetrics.cbegin(), jsonMetrics.cend() + 1) +#endif +{ + if (_jsonDoc.ParseInsitu(_decodeBuf.data()).HasParseError()) + { + throw std::runtime_error("Failed to parse string with JSON format!"); + } +} + +template<> +inline std::int64_t +KafkaMetrics::getValue(rapidjson::Value::ConstMemberIterator iter) +{ + return iter->value.GetInt(); +} + +template<> +inline std::string +KafkaMetrics::getValue(rapidjson::Value::ConstMemberIterator iter) +{ + return iter->value.GetString(); +} + +template +inline KafkaMetrics::ResultsType +KafkaMetrics::get(const KeysType& keys) +{ + if (keys.empty()) throw std::invalid_argument("Input keys cannot be empty!"); + if (keys.front() == WILDCARD) throw std::invalid_argument("The first key cannot be wildcard!"); + if (keys.back() == WILDCARD) throw std::invalid_argument("The last key cannot be wildcard!"); + + ResultsType results; + + rapidjson::Value::ConstMemberIterator iter = _jsonDoc.FindMember(keys.front().c_str()); + if (iter == _jsonDoc.MemberEnd()) return results; + + if (keys.size() == 1) + { + if (std::is_same::value ? iter->value.IsString() : iter->value.IsInt()) + { + results.emplace_back(KeysType{}, getValue(iter)); + } + + return results; + } + + KeysType keysForWildcards; + + getResults(results, keysForWildcards, iter, keys.cbegin() + 1, keys.cend()); + return results; +} + +template +inline void +KafkaMetrics::getResults(KafkaMetrics::ResultsType& results, + KeysType& keysForWildcards, + rapidjson::Value::ConstMemberIterator iter, + KeysType::const_iterator keysToParse, + KeysType::const_iterator keysEnd) +{ + if (!iter->value.IsObject()) return; + + const auto& key = *(keysToParse++); + const bool isTheEnd = (keysToParse == keysEnd); + + if (key == WILDCARD) + { + for (rapidjson::Value::ConstMemberIterator subIter = iter->value.MemberBegin(); subIter != iter->value.MemberEnd(); ++subIter) + { + KeysType newKeysForWildcards = keysForWildcards; + newKeysForWildcards.emplace_back(subIter->name.GetString()); + + getResults(results, newKeysForWildcards, subIter, keysToParse, keysEnd); + } + } + else + { + rapidjson::Value::ConstMemberIterator subIter = iter->value.FindMember(key.c_str()); + if (subIter == iter->value.MemberEnd()) return; + + if (!isTheEnd) + { + getResults(results, keysForWildcards, subIter, keysToParse, keysEnd); + } + else if (std::is_same::value ? subIter->value.IsString() : subIter->value.IsInt()) + { + results.emplace_back(keysForWildcards, getValue(subIter)); + } + } +} + +inline std::string +KafkaMetrics::toString(const KafkaMetrics::KeysType& keys) +{ + std::string ret; + + std::for_each(keys.cbegin(), keys.cend(), + [&ret](const auto& key){ ret.append((ret.empty() ? 
std::string() : std::string(", ")) + "\"" + key + "\""); }); + + return ret; +} + +template +inline std::string +KafkaMetrics::toString(const KafkaMetrics::ResultsType& results) +{ + std::ostringstream oss; + bool isTheFirstOne = true; + + std::for_each(results.cbegin(), results.cend(), + [&oss, &isTheFirstOne](const auto& result) { + const auto keysString = toString(result.first); + + oss << (isTheFirstOne ? (isTheFirstOne = false, "") : ", ") + << (keysString.empty() ? "" : (std::string("[") + keysString + "]:")); + oss << (std::is_same::value ? "\"" : "") << result.second << (std::is_same::value ? "\"" : ""); + }); + + return oss.str(); +} + +} // end of KAFKA_API + diff --git a/modern-cpp-kafka/include/kafka/addons/KafkaRecoverableProducer.h b/modern-cpp-kafka/include/kafka/addons/KafkaRecoverableProducer.h new file mode 100644 index 00000000..911f832f --- /dev/null +++ b/modern-cpp-kafka/include/kafka/addons/KafkaRecoverableProducer.h @@ -0,0 +1,360 @@ +#pragma once + +#include + +#include +#include +#include + +#include +#include +#include + +namespace KAFKA_API { namespace clients { + +class KafkaRecoverableProducer +{ +public: + explicit KafkaRecoverableProducer(const Properties& properties) + : _properties(properties), _running(true) + { + _errorCb = [this](const Error& error) { + if (error.isFatal()) _fatalError = std::make_unique(error); + }; + + _producer = createProducer(); + + _pollThread = std::thread([this]() { keepPolling(); }); + } + + ~KafkaRecoverableProducer() + { + if (_running) close(); + } + + /** + * Get the client id. + */ + const std::string& clientId() const + { + std::lock_guard lock(_producerMutex); + + return _producer->clientId(); + } + + /** + * Get the client name (i.e. client type + id). + */ + const std::string& name() const + { + std::lock_guard lock(_producerMutex); + + return _producer->name(); + } + + /** + * Set the log callback for the kafka client (it's a per-client setting). + */ + void setLogger(const Logger& logger) + { + std::lock_guard lock(_producerMutex); + + _logger = logger; + _producer->setLogger(*_logger); + } + + /** + * Set log level for the kafka client (the default value: 5). + */ + void setLogLevel(int level) + { + std::lock_guard lock(_producerMutex); + + _logLevel = level; + _producer->setLogLevel(*_logLevel); + } + + /** + * Set callback to receive the periodic statistics info. + * Note: 1) It only works while the "statistics.interval.ms" property is configured with a non-0 value. + * 2) The callback would be triggered periodically, receiving the internal statistics info (with JSON format) emited from librdkafka. + */ + void setStatsCallback(const KafkaClient::StatsCallback& cb) + { + std::lock_guard lock(_producerMutex); + + _statsCb = cb; + _producer->setStatsCallback(*_statsCb); + } + + void setErrorCallback(const KafkaClient::ErrorCallback& cb) + { + std::lock_guard lock(_producerMutex); + + _errorCb = [cb, this](const Error& error) { + cb(error); + + if (error.isFatal()) _fatalError = std::make_unique(error); + }; + _producer->setErrorCallback(*_errorCb); + } + + /** + * Return the properties which took effect. + */ + const Properties& properties() const + { + std::lock_guard lock(_producerMutex); + + return _producer->properties(); + } + + /** + * Fetch the effected property (including the property internally set by librdkafka). 
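A minimal sketch of constructing the self-recovering wrapper and wiring an error callback; the broker address is a placeholder:

#include <kafka/addons/KafkaRecoverableProducer.h>

#include <iostream>

void recoverableProducerDemo()
{
    kafka::Properties props;
    props.put("bootstrap.servers", "localhost:9092");   // placeholder

    kafka::clients::KafkaRecoverableProducer producer(props);

    // Errors are reported here; on a fatal error the wrapper re-creates its internal producer.
    producer.setErrorCallback([](const kafka::Error& error) {
        std::cerr << "producer error: " << error.toString() << std::endl;
    });

    std::cout << "client id: " << producer.clientId() << std::endl;
}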
+ */ + Optional getProperty(const std::string& name) const + { + std::lock_guard lock(_producerMutex); + + return _producer->getProperty(name); + } + + /** + * Fetch matadata from a available broker. + * Note: the Metadata response information may trigger a re-join if any subscribed topic has changed partition count or existence state. + */ + Optional fetchBrokerMetadata(const std::string& topic, + std::chrono::milliseconds timeout = std::chrono::milliseconds(KafkaClient::DEFAULT_METADATA_TIMEOUT_MS), + bool disableErrorLogging = false) + { + std::lock_guard lock(_producerMutex); + + return _producer->fetchBrokerMetadata(topic, timeout, disableErrorLogging); + } + + /** + * Invoking this method makes all buffered records immediately available to send, and blocks on the completion of the requests associated with these records. + * + * Possible error values: + * - RD_KAFKA_RESP_ERR__TIMED_OUT: The `timeout` was reached before all outstanding requests were completed. + */ + Error flush(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()) + { + std::lock_guard lock(_producerMutex); + + return _producer->flush(timeout); + } + + /** + * Purge messages currently handled by the KafkaProducer. + */ + Error purge() + { + std::lock_guard lock(_producerMutex); + + return _producer->purge(); + } + + /** + * Close this producer. This method would wait up to timeout for the producer to complete the sending of all incomplete requests (before purging them). + */ + void close(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()) + { + std::lock_guard lock(_producerMutex); + + _running = false; + if (_pollThread.joinable()) _pollThread.join(); + + _producer->close(timeout); + } + + /** + * Synchronously send a record to a topic. + * Throws KafkaException with errors: + * Local errors, + * - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC: The topic doesn't exist + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: The partition doesn't exist + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid topic(topic is null, or the length is too long (> 512) + * - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: No ack received within the time limit + * Broker errors, + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + producer::RecordMetadata syncSend(const producer::ProducerRecord& record) + { + std::lock_guard lock(_producerMutex); + + return _producer->syncSend(record); + } + + /** + * Asynchronously send a record to a topic. + * + * Note: + * - If a callback is provided, it's guaranteed to be triggered (before closing the producer). + * - If any error occured, an exception would be thrown. + * - Make sure the memory block (for ProducerRecord's value) is valid until the delivery callback finishes; Otherwise, should be with option `KafkaProducer::SendOption::ToCopyRecordValue`. 
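A sketch of the asynchronous send described here, using ToCopyRecordValue so the local payload may go out of scope before delivery; the topic and payload are placeholders:

#include <kafka/addons/KafkaRecoverableProducer.h>

#include <iostream>
#include <string>

void asyncSendDemo(kafka::clients::KafkaRecoverableProducer& producer)
{
    const std::string payload = "hello";
    kafka::clients::producer::ProducerRecord record("demo-topic",
                                                     kafka::NullKey,
                                                     kafka::Value(payload.c_str(), payload.size()));

    producer.send(record,
                  [](const kafka::clients::producer::RecordMetadata& metadata, const kafka::Error& error) {
                      if (!error) std::cout << "delivered: " << metadata.toString() << std::endl;
                      else        std::cerr << "failed: "    << error.message()     << std::endl;
                  },
                  kafka::clients::KafkaProducer::SendOption::ToCopyRecordValue);   // value copied internally
}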
+ * + * Possible errors: + * Local errors, + * - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC: The topic doesn't exist + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: The partition doesn't exist + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid topic(topic is null, or the length is too long (> 512) + * - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: No ack received within the time limit + * - RD_KAFKA_RESP_ERR__QUEUE_FULL: The message buffing queue is full + * Broker errors, + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + void send(const producer::ProducerRecord& record, + const producer::Callback& deliveryCb, + KafkaProducer::SendOption option = KafkaProducer::SendOption::NoCopyRecordValue, + KafkaProducer::ActionWhileQueueIsFull action = KafkaProducer::ActionWhileQueueIsFull::Block) + { + std::lock_guard lock(_producerMutex); + + _producer->send(record, deliveryCb, option, action); + } + + /** + * Asynchronously send a record to a topic. + * + * Note: + * - If a callback is provided, it's guaranteed to be triggered (before closing the producer). + * - The input reference parameter `error` will be set if an error occurred. + * - Make sure the memory block (for ProducerRecord's value) is valid until the delivery callback finishes; Otherwise, should be with option `KafkaProducer::SendOption::ToCopyRecordValue`. + * + * Possible errors: + * Local errors, + * - RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC: The topic doesn't exist + * - RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION: The partition doesn't exist + * - RD_KAFKA_RESP_ERR__INVALID_ARG: Invalid topic(topic is null, or the length is too long (> 512) + * - RD_KAFKA_RESP_ERR__MSG_TIMED_OUT: No ack received within the time limit + * - RD_KAFKA_RESP_ERR__QUEUE_FULL: The message buffing queue is full + * Broker errors, + * - [Error Codes] (https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes) + */ + + void send(const producer::ProducerRecord& record, + const producer::Callback& deliveryCb, + Error& error, + KafkaProducer::SendOption option = KafkaProducer::SendOption::NoCopyRecordValue, + KafkaProducer::ActionWhileQueueIsFull action = KafkaProducer::ActionWhileQueueIsFull::Block) + { + std::lock_guard lock(_producerMutex); + + _producer->send(record, deliveryCb, error, option, action); + } + + /** + * Needs to be called before any other methods when the transactional.id is set in the configuration. + */ + void initTransactions(std::chrono::milliseconds timeout = std::chrono::milliseconds(KafkaProducer::DEFAULT_INIT_TRANSACTIONS_TIMEOUT_MS)) + { + std::lock_guard lock(_producerMutex); + + _producer->initTransactions(timeout); + } + + /** + * Should be called before the start of each new transaction. + */ + void beginTransaction() + { + std::lock_guard lock(_producerMutex); + + _producer->beginTransaction(); + } + + /** + * Commit the ongoing transaction. + */ + void commitTransaction(std::chrono::milliseconds timeout = std::chrono::milliseconds(KafkaProducer::DEFAULT_COMMIT_TRANSACTION_TIMEOUT_MS)) + { + std::lock_guard lock(_producerMutex); + + _producer->commitTransaction(timeout); + } + + /** + * Abort the ongoing transaction. 
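A sketch of the transactional flow these methods describe; it assumes the producer was configured with a transactional.id:

#include <kafka/KafkaException.h>
#include <kafka/addons/KafkaRecoverableProducer.h>

void transactionDemo(kafka::clients::KafkaRecoverableProducer& producer,
                     const kafka::clients::producer::ProducerRecord& record)
{
    producer.initTransactions();     // once, before any other transactional call
    producer.beginTransaction();
    try
    {
        producer.syncSend(record);
        producer.commitTransaction();
    }
    catch (const kafka::KafkaException&)
    {
        producer.abortTransaction();
        throw;
    }
}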
+ */ + void abortTransaction(std::chrono::milliseconds timeout = std::chrono::milliseconds::max()) + { + std::lock_guard lock(_producerMutex); + + _producer->abortTransaction(timeout); + } + + /** + * Send a list of specified offsets to the consumer group coodinator, and also marks those offsets as part of the current transaction. + */ + void sendOffsetsToTransaction(const TopicPartitionOffsets& topicPartitionOffsets, + const consumer::ConsumerGroupMetadata& groupMetadata, + std::chrono::milliseconds timeout) + { + std::lock_guard lock(_producerMutex); + + _producer->sendOffsetsToTransaction(topicPartitionOffsets, groupMetadata, timeout); + } + +#ifdef KAFKA_API_ENABLE_UNIT_TEST_STUBS + void mockFatalError() + { + _fatalError = std::make_unique(RD_KAFKA_RESP_ERR__FATAL, "fake fatal error", true); + } +#endif + +private: + void keepPolling() + { + while (_running) + { + _producer->pollEvents(std::chrono::milliseconds(1)); + if (_fatalError) + { + const std::string errStr = _fatalError->toString(); + KAFKA_API_LOG(Log::Level::Notice, "met fatal error[%s], will re-initialize the internal producer", errStr.c_str()); + + std::lock_guard lock(_producerMutex); + + if (!_running) return; + + _producer->purge(); + _producer->close(); + + _fatalError.reset(); + + _producer = createProducer(); + } + } + } + + std::unique_ptr createProducer() + { + auto producer = std::make_unique(_properties, KafkaClient::EventsPollingOption::Manual); + + if (_logger) producer->setLogger(*_logger); + if (_logLevel) producer->setLogLevel(*_logLevel); + if (_statsCb) producer->setStatsCallback(*_statsCb); + if (_errorCb) producer->setErrorCallback(*_errorCb); + + return producer; + } + + // Configurations for producer + Properties _properties; + Optional _logger; + Optional _logLevel; + Optional _statsCb; + Optional _errorCb; + + std::unique_ptr _fatalError; + + std::atomic _running; + std::thread _pollThread; + + mutable std::mutex _producerMutex; + std::unique_ptr _producer; +}; + +} } // end of KAFKA_API::clients + diff --git a/modern-cpp-kafka/include/kafka/addons/UnorderedOffsetCommitQueue.h b/modern-cpp-kafka/include/kafka/addons/UnorderedOffsetCommitQueue.h new file mode 100644 index 00000000..756b00fb --- /dev/null +++ b/modern-cpp-kafka/include/kafka/addons/UnorderedOffsetCommitQueue.h @@ -0,0 +1,178 @@ +#pragma once + +#include + +#include +#include + +#include +#include + +namespace KAFKA_API { namespace clients { namespace consumer { + +template +class Heap +{ +public: + bool empty() const { return data.empty(); } + std::size_t size() const { return data.size(); } + + const T& front() const { return data[0]; } + + void push(const T& t) + { + data.emplace_back(t); + + for (std::size_t indexCurrent = data.size() - 1; indexCurrent > 0;) + { + std::size_t indexParent = (indexCurrent + 1) / 2 - 1; + + if (!(data[indexCurrent] < data[indexParent])) return; + + std::swap(data[indexCurrent], data[indexParent]); + indexCurrent = indexParent; + } + } + + void pop_front() + { + data[0] = data.back(); + data.pop_back(); + + if (data.empty()) return; + + for (std::size_t indexCurrent = 0;;) + { + std::size_t indexRightChild = (indexCurrent + 1) * 2; + std::size_t indexLeftChild = indexRightChild - 1; + + if (indexLeftChild >= data.size()) return; + + std::size_t indexMinChild = (indexRightChild >= data.size() || data[indexLeftChild] < data[indexRightChild]) ? 
indexLeftChild : indexRightChild; + + if (!(data[indexMinChild] < data[indexCurrent])) return; + + std::swap(data[indexCurrent], data[indexMinChild]); + indexCurrent = indexMinChild; + } + } + +private: + std::vector data; +}; + + +/** + * \brief The queue can be used to determine the right offset to commit. + * A `KafkaManuallyCommitConsumer` might forward the received records to different handlers, while these handlers could not ack the records in order. + * Then, the `UnorderedOffsetCommitQueue` would help, + * 1. Prepare an `UnorderedOffsetCommitQueue` for each topic-partition. + * 2. Make sure call `waitOffset()` for each record received. + * 3. Make sure call `ackOffset()` while a handler acks for an record. + * 4. Figure out whether there's offset to commit with `popOffsetToCommit()` and commit the offset then. + */ +class UnorderedOffsetCommitQueue +{ +public: + UnorderedOffsetCommitQueue(const Topic& topic, Partition partition) + : _partitionInfo(std::string("topic[").append(topic).append("], paritition[").append(std::to_string(partition)).append("]")) + { + } + UnorderedOffsetCommitQueue() = default; + + /** + * \brief Return how many received offsets have not been popped to commit (with `popOffsetToCommit()`). + */ + std::size_t size() const { return _offsetsReceived.size(); } + + /** + * \brief Add an offset (for a ConsumerRecord) to the waiting list, until it being acked (with `ackOffset`). + * Note: Make sure the offset would be `ack` later with `ackOffset()`. + */ + void waitOffset(Offset offset) + { + if (offset < 0 || (!_offsetsReceived.empty() && offset <= _offsetsReceived.back())) + { + // Invalid offset (might be fetched from the record which had no valid offset) + KAFKA_API_LOG(Log::Level::Err, "Got invalid offset to wait[%lld]! %s", offset, (_partitionInfo.empty() ? "" : _partitionInfo.c_str())); + return; + } + + _offsetsReceived.emplace_back(offset); + } + + /** + * \brief Ack the record has been handled and ready to be committed. + * Note: If all offsets ahead has been acked, then with `popOffsetToCommit()`, we'd get `offset + 1`, which is ready to be committed for the consumer. + */ + void ackOffset(Offset offset) + { + Offset maxOffsetReceived = _offsetsReceived.back(); + if (offset > maxOffsetReceived) + { + // Runtime error + KAFKA_API_LOG(Log::Level::Err, "Got invalid ack offset[%lld]! Even larger than all offsets received[%lld]! %s", offset, maxOffsetReceived, (_partitionInfo.empty() ? "" : _partitionInfo.c_str())); + } + + _offsetsToCommit.push(offset); + do + { + Offset minOffsetToCommit = _offsetsToCommit.front(); + Offset expectedOffset = _offsetsReceived.front(); + if (minOffsetToCommit == expectedOffset) + { + _toCommit = expectedOffset + 1; + _offsetsToCommit.pop_front(); + _offsetsReceived.pop_front(); + } + else if (minOffsetToCommit < expectedOffset) + { + // Inconsist error (might be caused by duplicated ack) + KAFKA_API_LOG(Log::Level::Err, "Got invalid ack offset[%lld]! Even smaller than expected[%lld]! %s", minOffsetToCommit, expectedOffset, (_partitionInfo.empty() ? "" : _partitionInfo.c_str())); + _offsetsToCommit.pop_front(); + } + else + { + break; + } + } while (!_offsetsToCommit.empty()); + } + + /** + * \brief Pop the offset which is ready for the consumer (if any). + */ + Optional popOffsetToCommit() + { + Optional ret; + if (_committed != _toCommit) + { + ret = _committed = _toCommit; + } + return ret; + } + + /** + * \brief Return the offset last popped. 
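A sketch of the four-step workflow described above, with made-up offsets, showing that out-of-order acks still yield the right offset to commit:

#include <kafka/addons/UnorderedOffsetCommitQueue.h>

void offsetQueueDemo()
{
    kafka::clients::consumer::UnorderedOffsetCommitQueue queue("demo-topic", 0);

    // Offsets 5, 6, 7 were received and handed to different handlers.
    queue.waitOffset(5);
    queue.waitOffset(6);
    queue.waitOffset(7);

    // Handlers ack out of order.
    queue.ackOffset(6);
    queue.ackOffset(5);

    // 5 and 6 are fully handled, so the offset ready to commit is 7 (the next one to consume).
    if (auto offsetToCommit = queue.popOffsetToCommit())
    {
        // commit *offsetToCommit via the consumer here
        (void)*offsetToCommit;
    }
}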
+ */ + Optional lastPoppedOffset() + { + Optional ret; + if (_committed != INVALID_OFFSET) + { + ret = _committed; + } + return ret; + } + +private: + std::deque _offsetsReceived; + Heap _offsetsToCommit; + Offset _toCommit = {INVALID_OFFSET}; + Offset _committed = {INVALID_OFFSET}; + std::string _partitionInfo; + + static constexpr Offset INVALID_OFFSET = -1; +}; + +} } } // end of KAFKA_API::clients::consumer + diff --git a/modern-cpp-kafka/scripts/doxyfile.cfg b/modern-cpp-kafka/scripts/doxyfile.cfg new file mode 100644 index 00000000..b8befd97 --- /dev/null +++ b/modern-cpp-kafka/scripts/doxyfile.cfg @@ -0,0 +1,2511 @@ +# Doxyfile 1.8.15 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "Modern C++ Kafka API" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. 
Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. +# Possible values are: None, LTR, RTL and Context. +# The default value is: None. + +OUTPUT_TEXT_DIRECTION = None + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. 
+ +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. 
If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. +# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Fortran +# (fixed format Fortran: FortranFixed, free formatted Fortran: FortranFree, +# unknown formatted Fortran: Fortran. In the later case the parser tries to +# guess whether the code is fixed or free formatted code, this is the default +# for Fortran type files), VHDL, tcl. For instance to make doxygen treat .inc +# files as Fortran files (default is PHP), and .f files as C (default is +# Fortran), use: inc=Fortran f=C. 
+# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 0. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = YES + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. 
By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. 
+ +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = NO + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. 
+ +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. 
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+ +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = NO + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. 
Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = @DOXYGEN_INPUT_DIR@ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: https://www.gnu.org/software/libiconv/) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, +# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. + +FILE_PATTERNS = *.c \ + *.C \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.H \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.pyw \ + *.f90 \ + *.f95 \ + *.f03 \ + *.f08 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. 
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+ +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). 
You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = doxygen + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). 
It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via Javascript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have Javascript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: https://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. 
See https://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. 
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://doc.qt.io/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://doc.qt.io/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://doc.qt.io/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. 
To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# https://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from https://www.mathjax.org before deployment. +# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/ + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. 
The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/