From 19d4770da5470b01efabfe7d154126175fbcd85c Mon Sep 17 00:00:00 2001 From: Mauro Stettler Date: Wed, 7 Feb 2018 15:25:02 +0800 Subject: [PATCH 01/24] use confluent instead of sarama --- .circleci/config.yml | 31 +- Gopkg.lock | 33 +- cmd/mt-kafka-mdm-sniff-out-of-order/main.go | 2 - cmd/mt-kafka-mdm-sniff/main.go | 2 - docker/docker-chaos/docker-compose.yml | 6 - docker/docker-cluster/docker-compose.yml | 4 - docker/docker-cluster/metrictank.ini | 11 +- .../docker-compose.yml | 1 - docker/docker-dev/docker-compose.yml | 3 +- input/kafkamdm/kafkamdm.go | 343 +- kafka/consumer.go | 401 ++ {input/kafkamdm => kafka}/lag_monitor.go | 2 +- {input/kafkamdm => kafka}/lag_monitor_test.go | 2 +- kafka/offsetMgr.go | 131 - kafka/partitions.go | 56 +- kafka/utils.go | 25 + mdata/notifierKafka/cfg.go | 135 +- mdata/notifierKafka/notifierKafka.go | 275 +- metrictank.go | 4 +- scripts/Dockerfile | 34 +- scripts/build.sh | 25 +- scripts/build_deps.sh | 43 + scripts/build_docker.sh | 11 +- scripts/build_packages.sh | 148 +- .../build_packages/systemd-centos7/Dockerfile | 17 + .../systemd-centos7/build_package.sh | 33 + scripts/build_packages/systemd/Dockerfile | 16 + .../build_packages/systemd/build_package.sh | 33 + scripts/build_packages/sysvinit/Dockerfile | 16 + .../build_packages/sysvinit/build_package.sh | 31 + .../build_packages/upstart-0.6.5/Dockerfile | 18 + .../upstart-0.6.5/build_package.sh | 33 + .../build_packages/upstart-0.6.5/get_ruby.sh | 18 + scripts/build_packages/upstart/Dockerfile | 16 + .../build_packages/upstart/build_package.sh | 31 + scripts/build_tools.sh | 8 +- scripts/get_go.sh | 11 + .../confluent-kafka-go/.travis.yml | 34 + .../confluentinc/confluent-kafka-go/LICENSE | 202 + .../confluentinc/confluent-kafka-go/README.md | 278 ++ .../confluent-kafka-go/examples/README | 19 + .../confluent_cloud_example.go | 104 + .../consumer_channel_example.go | 90 + .../consumer_example/consumer_example.go | 93 + .../examples/go-kafkacat/go-kafkacat.go | 255 + 
.../producer_channel_example.go | 75 + .../producer_example/producer_example.go | 68 + .../confluent-kafka-go/kafka/00version.go | 60 + .../confluent-kafka-go/kafka/README.md | 69 + .../confluent-kafka-go/kafka/api.html | 1632 +++++++ .../confluent-kafka-go/kafka/build_dynamic.go | 7 + .../confluent-kafka-go/kafka/build_static.go | 7 + .../kafka/build_static_all.go | 8 + .../confluent-kafka-go/kafka/config.go | 225 + .../confluent-kafka-go/kafka/config_test.go | 126 + .../confluent-kafka-go/kafka/consumer.go | 576 +++ .../kafka/consumer_performance_test.go | 177 + .../confluent-kafka-go/kafka/consumer_test.go | 247 + .../confluent-kafka-go/kafka/error.go | 69 + .../confluent-kafka-go/kafka/event.go | 330 ++ .../confluent-kafka-go/kafka/event_test.go | 43 + .../kafka/generated_errors.go | 223 + .../confluent-kafka-go/kafka/glue_rdkafka.h | 46 + .../go_rdkafka_generr/go_rdkafka_generr.go | 115 + .../confluent-kafka-go/kafka/handle.go | 207 + .../confluent-kafka-go/kafka/header.go | 67 + .../confluent-kafka-go/kafka/header_test.go | 41 + .../kafka/integration_test.go | 1086 +++++ .../confluent-kafka-go/kafka/kafka.go | 242 + .../confluent-kafka-go/kafka/kafka_test.go | 138 + .../confluent-kafka-go/kafka/message.go | 207 + .../confluent-kafka-go/kafka/message_test.go | 33 + .../confluent-kafka-go/kafka/metadata.go | 157 + .../confluent-kafka-go/kafka/metadata_test.go | 64 + .../confluent-kafka-go/kafka/misc.go | 27 + .../confluent-kafka-go/kafka/offset.go | 144 + .../confluent-kafka-go/kafka/producer.go | 562 +++ .../kafka/producer_performance_test.go | 225 + .../confluent-kafka-go/kafka/producer_test.go | 216 + .../kafka/stats_event_test.go | 131 + .../kafka/testconf-example.json | 8 + .../confluent-kafka-go/kafka/testhelpers.go | 127 + .../kafka/testhelpers_test.go | 67 + .../confluent-kafka-go/kafkatest/README.md | 38 + .../confluent-kafka-go/kafkatest/deploy.sh | 7 + .../confluent-kafka-go/kafkatest/globals.json | 11 + .../go_verifiable_consumer.go | 443 ++ 
.../go_verifiable_producer.go | 230 + .../confluent-kafka-go/mk/Makefile | 16 + .../mk/bootstrap-librdkafka.sh | 36 + .../confluent-kafka-go/mk/doc-gen.py | 37 + .../edenhill/librdkafka/.appveyor.yml | 88 + .../edenhill/librdkafka/.dir-locals.el | 3 + .../edenhill/librdkafka/.doozer.json | 110 + .../edenhill/librdkafka/.travis.yml | 42 + .../edenhill/librdkafka/CMakeLists.txt | 182 + .../edenhill/librdkafka/CODE_OF_CONDUCT.md | 46 + .../edenhill/librdkafka/CONFIGURATION.md | 138 + .../edenhill/librdkafka/CONTRIBUTING.md | 271 ++ .../github.com/edenhill/librdkafka/Doxyfile | 2385 ++++++++++ .../edenhill/librdkafka/INTRODUCTION.md | 735 +++ vendor/github.com/edenhill/librdkafka/LICENSE | 25 + .../edenhill/librdkafka/LICENSE.crc32c | 28 + .../edenhill/librdkafka/LICENSE.lz4 | 26 + .../edenhill/librdkafka/LICENSE.murmur2 | 25 + .../edenhill/librdkafka/LICENSE.pycrc | 23 + .../edenhill/librdkafka/LICENSE.queue | 31 + .../edenhill/librdkafka/LICENSE.regexp | 5 + .../edenhill/librdkafka/LICENSE.snappy | 36 + .../edenhill/librdkafka/LICENSE.tinycthread | 26 + .../edenhill/librdkafka/LICENSE.wingetopt | 49 + .../edenhill/librdkafka/LICENSES.txt | 313 ++ .../github.com/edenhill/librdkafka/Makefile | 68 + .../github.com/edenhill/librdkafka/README.md | 168 + .../edenhill/librdkafka/README.win32 | 28 + .../github.com/edenhill/librdkafka/configure | 214 + .../edenhill/librdkafka/configure.librdkafka | 215 + .../edenhill/librdkafka/dev-conf.sh | 45 + .../librdkafka/examples/CMakeLists.txt | 30 + .../edenhill/librdkafka/examples/Makefile | 96 + .../edenhill/librdkafka/examples/globals.json | 11 + .../examples/kafkatest_verifiable_client.cpp | 960 ++++ .../examples/rdkafka_consume_batch.cpp | 260 + .../examples/rdkafka_consumer_example.c | 624 +++ .../examples/rdkafka_consumer_example.cpp | 485 ++ .../librdkafka/examples/rdkafka_example.c | 885 ++++ .../librdkafka/examples/rdkafka_example.cpp | 645 +++ 
.../librdkafka/examples/rdkafka_performance.c | 1651 +++++++ .../examples/rdkafka_simple_producer.c | 260 + .../examples/rdkafka_zookeeper_example.c | 728 +++ .../github.com/edenhill/librdkafka/lds-gen.py | 38 + .../edenhill/librdkafka/mainpage.doxy | 35 + .../edenhill/librdkafka/mklove/Makefile.base | 215 + .../mklove/modules/configure.atomics | 144 + .../librdkafka/mklove/modules/configure.base | 1771 +++++++ .../mklove/modules/configure.builtin | 62 + .../librdkafka/mklove/modules/configure.cc | 178 + .../librdkafka/mklove/modules/configure.cxx | 8 + .../mklove/modules/configure.fileversion | 65 + .../mklove/modules/configure.good_cflags | 18 + .../librdkafka/mklove/modules/configure.host | 110 + .../librdkafka/mklove/modules/configure.lib | 49 + .../mklove/modules/configure.parseversion | 95 + .../librdkafka/mklove/modules/configure.pic | 16 + .../mklove/modules/configure.socket | 20 + .../edenhill/librdkafka/packaging/RELEASE.md | 137 + .../librdkafka/packaging/archlinux/PKGBUILD | 5 + .../packaging/cmake/Config.cmake.in | 20 + .../librdkafka/packaging/cmake/README.md | 38 + .../librdkafka/packaging/cmake/config.h.in | 40 + .../cmake/try_compile/atomic_32_test.c | 8 + .../cmake/try_compile/atomic_64_test.c | 8 + .../packaging/cmake/try_compile/dlopen_test.c | 11 + .../cmake/try_compile/libsasl2_test.c | 7 + .../cmake/try_compile/rdkafka_setup.cmake | 76 + .../packaging/cmake/try_compile/regex_test.c | 10 + .../cmake/try_compile/strndup_test.c | 5 + .../cmake/try_compile/sync_32_test.c | 8 + .../cmake/try_compile/sync_64_test.c | 8 + .../librdkafka/packaging/debian/changelog | 66 + .../librdkafka/packaging/debian/compat | 1 + .../librdkafka/packaging/debian/control | 49 + .../librdkafka/packaging/debian/copyright | 84 + .../edenhill/librdkafka/packaging/debian/docs | 3 + .../librdkafka/packaging/debian/gbp.conf | 9 + .../packaging/debian/librdkafka-dev.dirs | 2 + .../packaging/debian/librdkafka-dev.examples | 2 + 
.../packaging/debian/librdkafka-dev.install | 6 + .../packaging/debian/librdkafka-dev.substvars | 1 + .../packaging/debian/librdkafka.dsc | 16 + .../debian/librdkafka1-dbg.substvars | 1 + .../packaging/debian/librdkafka1.dirs | 1 + .../packaging/debian/librdkafka1.install | 2 + .../debian/librdkafka1.postinst.debhelper | 5 + .../debian/librdkafka1.postrm.debhelper | 5 + .../packaging/debian/librdkafka1.symbols | 64 + .../librdkafka/packaging/debian/rules | 19 + .../librdkafka/packaging/debian/source/format | 1 + .../librdkafka/packaging/debian/watch | 2 + .../librdkafka/packaging/get_version.py | 21 + .../librdkafka/packaging/homebrew/README.md | 15 + .../packaging/homebrew/brew-update-pr.sh | 31 + .../librdkafka/packaging/nuget/README.md | 50 + .../librdkafka/packaging/nuget/artifact.py | 173 + .../msvcr120.zip | Bin 0 -> 520101 bytes .../msvcr120.zip | Bin 0 -> 461473 bytes .../librdkafka/packaging/nuget/nuget.sh | 21 + .../librdkafka/packaging/nuget/packaging.py | 421 ++ .../librdkafka/packaging/nuget/release.py | 83 + .../packaging/nuget/requirements.txt | 2 + .../nuget/templates/librdkafka.redist.nuspec | 21 + .../nuget/templates/librdkafka.redist.props | 18 + .../nuget/templates/librdkafka.redist.targets | 19 + .../packaging/nuget/zfile/__init__.py | 0 .../librdkafka/packaging/nuget/zfile/zfile.py | 100 + .../librdkafka/packaging/rpm/Makefile | 81 + .../librdkafka/packaging/rpm/el7-x86_64.cfg | 40 + .../librdkafka/packaging/rpm/librdkafka.spec | 104 + .../packaging/tools/build-debian.sh | 53 + .../librdkafka/src-cpp/CMakeLists.txt | 35 + .../edenhill/librdkafka/src-cpp/ConfImpl.cpp | 89 + .../librdkafka/src-cpp/ConsumerImpl.cpp | 233 + .../librdkafka/src-cpp/HandleImpl.cpp | 365 ++ .../librdkafka/src-cpp/KafkaConsumerImpl.cpp | 257 + .../edenhill/librdkafka/src-cpp/Makefile | 49 + .../librdkafka/src-cpp/MessageImpl.cpp | 38 + .../librdkafka/src-cpp/MetadataImpl.cpp | 151 + .../librdkafka/src-cpp/ProducerImpl.cpp | 167 + 
.../edenhill/librdkafka/src-cpp/QueueImpl.cpp | 71 + .../edenhill/librdkafka/src-cpp/README.md | 16 + .../edenhill/librdkafka/src-cpp/RdKafka.cpp | 52 + .../edenhill/librdkafka/src-cpp/TopicImpl.cpp | 128 + .../librdkafka/src-cpp/TopicPartitionImpl.cpp | 55 + .../edenhill/librdkafka/src-cpp/rdkafkacpp.h | 2284 +++++++++ .../librdkafka/src-cpp/rdkafkacpp_int.h | 910 ++++ .../edenhill/librdkafka/src/CMakeLists.txt | 184 + .../edenhill/librdkafka/src/Makefile | 82 + .../edenhill/librdkafka/src/crc32c.c | 438 ++ .../edenhill/librdkafka/src/crc32c.h | 38 + .../librdkafka/src/librdkafka_cgrp_synch.png | Bin 0 -> 93796 bytes .../github.com/edenhill/librdkafka/src/lz4.c | 1462 ++++++ .../github.com/edenhill/librdkafka/src/lz4.h | 463 ++ .../edenhill/librdkafka/src/lz4frame.c | 1440 ++++++ .../edenhill/librdkafka/src/lz4frame.h | 367 ++ .../edenhill/librdkafka/src/lz4frame_static.h | 98 + .../edenhill/librdkafka/src/lz4hc.c | 786 +++ .../edenhill/librdkafka/src/lz4hc.h | 269 ++ .../edenhill/librdkafka/src/lz4opt.h | 360 ++ .../edenhill/librdkafka/src/queue.h | 850 ++++ .../github.com/edenhill/librdkafka/src/rd.h | 457 ++ .../edenhill/librdkafka/src/rdaddr.c | 220 + .../edenhill/librdkafka/src/rdaddr.h | 187 + .../edenhill/librdkafka/src/rdatomic.h | 191 + .../edenhill/librdkafka/src/rdavg.h | 97 + .../edenhill/librdkafka/src/rdavl.c | 214 + .../edenhill/librdkafka/src/rdavl.h | 256 + .../edenhill/librdkafka/src/rdbuf.c | 1550 ++++++ .../edenhill/librdkafka/src/rdbuf.h | 325 ++ .../edenhill/librdkafka/src/rdcrc32.c | 113 + .../edenhill/librdkafka/src/rdcrc32.h | 146 + .../github.com/edenhill/librdkafka/src/rddl.c | 179 + .../github.com/edenhill/librdkafka/src/rddl.h | 41 + .../edenhill/librdkafka/src/rdendian.h | 169 + .../github.com/edenhill/librdkafka/src/rdgz.c | 124 + .../github.com/edenhill/librdkafka/src/rdgz.h | 45 + .../edenhill/librdkafka/src/rdinterval.h | 117 + .../edenhill/librdkafka/src/rdkafka.c | 3518 
++++++++++++++ .../edenhill/librdkafka/src/rdkafka.h | 4211 +++++++++++++++++ .../librdkafka/src/rdkafka_assignor.c | 551 +++ .../librdkafka/src/rdkafka_assignor.h | 159 + .../edenhill/librdkafka/src/rdkafka_broker.c | 4038 ++++++++++++++++ .../edenhill/librdkafka/src/rdkafka_broker.h | 361 ++ .../edenhill/librdkafka/src/rdkafka_buf.c | 451 ++ .../edenhill/librdkafka/src/rdkafka_buf.h | 946 ++++ .../edenhill/librdkafka/src/rdkafka_cgrp.c | 3262 +++++++++++++ .../edenhill/librdkafka/src/rdkafka_cgrp.h | 278 ++ .../edenhill/librdkafka/src/rdkafka_conf.c | 2248 +++++++++ .../edenhill/librdkafka/src/rdkafka_conf.h | 350 ++ .../edenhill/librdkafka/src/rdkafka_event.c | 232 + .../edenhill/librdkafka/src/rdkafka_event.h | 81 + .../edenhill/librdkafka/src/rdkafka_feature.c | 444 ++ .../edenhill/librdkafka/src/rdkafka_feature.h | 82 + .../edenhill/librdkafka/src/rdkafka_header.c | 222 + .../edenhill/librdkafka/src/rdkafka_header.h | 76 + .../edenhill/librdkafka/src/rdkafka_int.h | 446 ++ .../librdkafka/src/rdkafka_interceptor.c | 675 +++ .../librdkafka/src/rdkafka_interceptor.h | 80 + .../edenhill/librdkafka/src/rdkafka_lz4.c | 436 ++ .../edenhill/librdkafka/src/rdkafka_lz4.h | 43 + .../librdkafka/src/rdkafka_metadata.c | 1031 ++++ .../librdkafka/src/rdkafka_metadata.h | 160 + .../librdkafka/src/rdkafka_metadata_cache.c | 732 +++ .../edenhill/librdkafka/src/rdkafka_msg.c | 1277 +++++ .../edenhill/librdkafka/src/rdkafka_msg.h | 381 ++ .../edenhill/librdkafka/src/rdkafka_msgset.h | 50 + .../librdkafka/src/rdkafka_msgset_reader.c | 1137 +++++ .../librdkafka/src/rdkafka_msgset_writer.c | 1226 +++++ .../edenhill/librdkafka/src/rdkafka_offset.c | 1145 +++++ .../edenhill/librdkafka/src/rdkafka_offset.h | 74 + .../edenhill/librdkafka/src/rdkafka_op.c | 660 +++ .../edenhill/librdkafka/src/rdkafka_op.h | 403 ++ .../librdkafka/src/rdkafka_partition.c | 3363 +++++++++++++ .../librdkafka/src/rdkafka_partition.h | 641 +++ .../edenhill/librdkafka/src/rdkafka_pattern.c | 224 + 
.../edenhill/librdkafka/src/rdkafka_pattern.h | 68 + .../edenhill/librdkafka/src/rdkafka_plugin.c | 209 + .../edenhill/librdkafka/src/rdkafka_plugin.h | 37 + .../edenhill/librdkafka/src/rdkafka_proto.h | 502 ++ .../edenhill/librdkafka/src/rdkafka_queue.c | 866 ++++ .../edenhill/librdkafka/src/rdkafka_queue.h | 769 +++ .../librdkafka/src/rdkafka_range_assignor.c | 125 + .../edenhill/librdkafka/src/rdkafka_request.c | 1997 ++++++++ .../edenhill/librdkafka/src/rdkafka_request.h | 198 + .../src/rdkafka_roundrobin_assignor.c | 114 + .../edenhill/librdkafka/src/rdkafka_sasl.c | 343 ++ .../edenhill/librdkafka/src/rdkafka_sasl.h | 49 + .../librdkafka/src/rdkafka_sasl_cyrus.c | 623 +++ .../librdkafka/src/rdkafka_sasl_int.h | 72 + .../librdkafka/src/rdkafka_sasl_plain.c | 128 + .../librdkafka/src/rdkafka_sasl_scram.c | 901 ++++ .../librdkafka/src/rdkafka_sasl_win32.c | 526 ++ .../librdkafka/src/rdkafka_subscription.c | 186 + .../edenhill/librdkafka/src/rdkafka_timer.c | 292 ++ .../edenhill/librdkafka/src/rdkafka_timer.h | 80 + .../edenhill/librdkafka/src/rdkafka_topic.c | 1310 +++++ .../edenhill/librdkafka/src/rdkafka_topic.h | 188 + .../librdkafka/src/rdkafka_transport.c | 1607 +++++++ .../librdkafka/src/rdkafka_transport.h | 79 + .../librdkafka/src/rdkafka_transport_int.h | 87 + .../edenhill/librdkafka/src/rdlist.c | 333 ++ .../edenhill/librdkafka/src/rdlist.h | 269 ++ .../edenhill/librdkafka/src/rdlog.c | 89 + .../edenhill/librdkafka/src/rdlog.h | 40 + .../edenhill/librdkafka/src/rdmurmur2.c | 159 + .../edenhill/librdkafka/src/rdmurmur2.h | 7 + .../edenhill/librdkafka/src/rdports.c | 60 + .../edenhill/librdkafka/src/rdports.h | 36 + .../edenhill/librdkafka/src/rdposix.h | 184 + .../edenhill/librdkafka/src/rdrand.c | 50 + .../edenhill/librdkafka/src/rdrand.h | 48 + .../edenhill/librdkafka/src/rdregex.c | 157 + .../edenhill/librdkafka/src/rdregex.h | 40 + .../edenhill/librdkafka/src/rdsignal.h | 57 + .../edenhill/librdkafka/src/rdstring.c | 204 + 
.../edenhill/librdkafka/src/rdstring.h | 59 + .../edenhill/librdkafka/src/rdsysqueue.h | 348 ++ .../edenhill/librdkafka/src/rdtime.h | 184 + .../edenhill/librdkafka/src/rdtypes.h | 45 + .../edenhill/librdkafka/src/rdunittest.c | 63 + .../edenhill/librdkafka/src/rdunittest.h | 83 + .../edenhill/librdkafka/src/rdvarint.c | 126 + .../edenhill/librdkafka/src/rdvarint.h | 169 + .../edenhill/librdkafka/src/rdwin32.h | 265 ++ .../edenhill/librdkafka/src/regexp.c | 1156 +++++ .../edenhill/librdkafka/src/regexp.h | 31 + .../edenhill/librdkafka/src/snappy.c | 1838 +++++++ .../edenhill/librdkafka/src/snappy.h | 34 + .../edenhill/librdkafka/src/snappy_compat.h | 169 + .../edenhill/librdkafka/src/tinycthread.c | 1039 ++++ .../edenhill/librdkafka/src/tinycthread.h | 528 +++ .../edenhill/librdkafka/src/win32_config.h | 45 + .../edenhill/librdkafka/src/xxhash.c | 889 ++++ .../edenhill/librdkafka/src/xxhash.h | 293 ++ .../librdkafka/tests/0000-unittests.c | 43 + .../edenhill/librdkafka/tests/0001-multiobj.c | 96 + .../edenhill/librdkafka/tests/0002-unkpart.c | 155 + .../librdkafka/tests/0003-msgmaxsize.c | 150 + .../edenhill/librdkafka/tests/0004-conf.c | 474 ++ .../edenhill/librdkafka/tests/0005-order.c | 131 + .../edenhill/librdkafka/tests/0006-symbols.c | 161 + .../librdkafka/tests/0007-autotopic.c | 129 + .../edenhill/librdkafka/tests/0008-reqacks.c | 161 + .../librdkafka/tests/0011-produce_batch.c | 550 +++ .../librdkafka/tests/0012-produce_consume.c | 513 ++ .../librdkafka/tests/0013-null-msgs.c | 453 ++ .../librdkafka/tests/0014-reconsume-191.c | 494 ++ .../librdkafka/tests/0015-offset_seeks.c | 102 + .../librdkafka/tests/0017-compression.c | 138 + .../librdkafka/tests/0018-cgrp_term.c | 272 ++ .../librdkafka/tests/0019-list_groups.c | 244 + .../librdkafka/tests/0020-destroy_hang.c | 162 + .../librdkafka/tests/0021-rkt_destroy.c | 74 + .../librdkafka/tests/0022-consume_batch.c | 157 + .../edenhill/librdkafka/tests/0025-timers.c | 144 + 
.../librdkafka/tests/0026-consume_pause.c | 230 + .../librdkafka/tests/0028-long_topicnames.c | 78 + .../librdkafka/tests/0029-assign_offset.c | 196 + .../librdkafka/tests/0030-offset_commit.c | 546 +++ .../librdkafka/tests/0031-get_offsets.c | 116 + .../librdkafka/tests/0033-regex_subscribe.c | 480 ++ .../librdkafka/tests/0034-offset_reset.c | 133 + .../librdkafka/tests/0035-api_version.c | 74 + .../librdkafka/tests/0036-partial_fetch.c | 87 + .../tests/0037-destroy_hang_local.c | 88 + .../librdkafka/tests/0038-performance.c | 118 + .../edenhill/librdkafka/tests/0039-event.c | 162 + .../edenhill/librdkafka/tests/0040-io_event.c | 239 + .../librdkafka/tests/0041-fetch_max_bytes.c | 93 + .../librdkafka/tests/0042-many_topics.c | 252 + .../librdkafka/tests/0043-no_connection.c | 77 + .../librdkafka/tests/0044-partition_cnt.c | 97 + .../librdkafka/tests/0045-subscribe_update.c | 385 ++ .../librdkafka/tests/0046-rkt_cache.c | 65 + .../librdkafka/tests/0047-partial_buf_tmout.c | 97 + .../librdkafka/tests/0048-partitioner.c | 287 ++ .../tests/0049-consume_conn_close.c | 161 + .../librdkafka/tests/0050-subscribe_adds.c | 124 + .../librdkafka/tests/0051-assign_adds.c | 127 + .../librdkafka/tests/0052-msg_timestamps.c | 199 + .../librdkafka/tests/0053-stats_cb.cpp | 100 + .../librdkafka/tests/0054-offset_time.cpp | 177 + .../librdkafka/tests/0055-producer_latency.c | 217 + .../librdkafka/tests/0056-balanced_group_mt.c | 303 ++ .../librdkafka/tests/0057-invalid_topic.cpp | 114 + .../edenhill/librdkafka/tests/0058-log.cpp | 124 + .../librdkafka/tests/0059-bsearch.cpp | 238 + .../librdkafka/tests/0060-op_prio.cpp | 161 + .../librdkafka/tests/0061-consumer_lag.cpp | 199 + .../librdkafka/tests/0062-stats_event.c | 124 + .../librdkafka/tests/0063-clusterid.cpp | 106 + .../librdkafka/tests/0064-interceptors.c | 463 ++ .../edenhill/librdkafka/tests/0065-yield.cpp | 136 + .../librdkafka/tests/0066-plugins.cpp | 122 + .../librdkafka/tests/0067-empty_topic.cpp | 136 + 
.../librdkafka/tests/0068-produce_timeout.c | 131 + .../tests/0069-consumer_add_parts.c | 114 + .../librdkafka/tests/0070-null_empty.cpp | 187 + .../librdkafka/tests/0072-headers_ut.c | 468 ++ .../edenhill/librdkafka/tests/0073-headers.c | 398 ++ .../edenhill/librdkafka/tests/0074-producev.c | 67 + .../edenhill/librdkafka/tests/0075-retry.c | 246 + .../librdkafka/tests/0076-produce_retry.c | 369 ++ .../librdkafka/tests/0077-compaction.c | 334 ++ .../librdkafka/tests/0078-c_from_cpp.cpp | 94 + .../edenhill/librdkafka/tests/0079-fork.c | 96 + .../librdkafka/tests/0081-fetch_max_bytes.cpp | 124 + .../edenhill/librdkafka/tests/1000-unktopic.c | 153 + .../edenhill/librdkafka/tests/8000-idle.cpp | 61 + .../edenhill/librdkafka/tests/CMakeLists.txt | 100 + .../librdkafka/tests/LibrdkafkaTestApp.py | 143 + .../edenhill/librdkafka/tests/Makefile | 76 + .../edenhill/librdkafka/tests/README | 132 + .../librdkafka/tests/broker_version_tests.py | 229 + .../edenhill/librdkafka/tests/buildbox.sh | 17 + .../librdkafka/tests/cleanup-checker-tests.sh | 20 + .../librdkafka/tests/cluster_testing.py | 140 + .../librdkafka/tests/delete-test-topics.sh | 56 + .../librdkafka/tests/gen-ssl-certs.sh | 165 + .../tests/interactive_broker_version.py | 208 + .../tests/interceptor_test/CMakeLists.txt | 16 + .../tests/interceptor_test/Makefile | 22 + .../tests/interceptor_test/interceptor_test.c | 311 ++ .../tests/interceptor_test/interceptor_test.h | 47 + .../edenhill/librdkafka/tests/java/Makefile | 8 + .../librdkafka/tests/java/Murmur2Cli.java | 15 + .../edenhill/librdkafka/tests/java/README.md | 12 + .../librdkafka/tests/java/run-class.sh | 9 + .../librdkafka/tests/librdkafka.suppressions | 397 ++ .../librdkafka/tests/lz4_manual_test.sh | 59 + .../tests/multi-broker-version-test.sh | 50 + .../librdkafka/tests/performance_plot.py | 110 + .../librdkafka/tests/plugin_test/Makefile | 19 + .../tests/plugin_test/plugin_test.c | 58 + .../edenhill/librdkafka/tests/run-test.sh | 118 + 
.../edenhill/librdkafka/tests/sasl_test.py | 246 + .../edenhill/librdkafka/tests/sockem.c | 794 ++++ .../edenhill/librdkafka/tests/sockem.h | 85 + .../edenhill/librdkafka/tests/test.c | 3558 ++++++++++++++ .../librdkafka/tests/test.conf.example | 27 + .../edenhill/librdkafka/tests/test.h | 546 +++ .../edenhill/librdkafka/tests/testcpp.cpp | 133 + .../edenhill/librdkafka/tests/testcpp.h | 128 + .../edenhill/librdkafka/tests/testshared.h | 180 + .../edenhill/librdkafka/tests/until-fail.sh | 61 + .../librdkafka/tests/xxxx-assign_partition.c | 122 + .../librdkafka/tests/xxxx-metadata.cpp | 156 + .../edenhill/librdkafka/win32/README.md | 5 + .../librdkafka/win32/build-package.bat | 3 + .../edenhill/librdkafka/win32/build.bat | 19 + .../edenhill/librdkafka/win32/common.vcxproj | 76 + .../interceptor_test/interceptor_test.vcxproj | 87 + .../win32/librdkafka.autopkg.template | 55 + .../win32/librdkafka.master.testing.targets | 13 + .../edenhill/librdkafka/win32/librdkafka.sln | 176 + .../librdkafka/win32/librdkafka.vcxproj | 231 + .../win32/librdkafkacpp/librdkafkacpp.vcxproj | 103 + .../librdkafka/win32/package-nuget.ps1 | 21 + .../edenhill/librdkafka/win32/packages.config | 6 + .../win32/packages/repositories.config | 4 + .../librdkafka/win32/push-package.bat | 4 + .../rdkafka_consumer_example_cpp.vcxproj | 67 + .../rdkafka_example/rdkafka_example.vcxproj | 97 + .../rdkafka_performance.vcxproj | 97 + .../librdkafka/win32/tests/test.conf.example | 25 + .../librdkafka/win32/tests/tests.vcxproj | 180 + .../edenhill/librdkafka/win32/wingetopt.c | 564 +++ .../edenhill/librdkafka/win32/wingetopt.h | 95 + .../edenhill/librdkafka/win32/wintime.h | 29 + vendor/github.com/twinj/uuid/.travis.yml | 25 + vendor/github.com/twinj/uuid/LICENSE | 20 + vendor/github.com/twinj/uuid/README.md | 265 ++ vendor/github.com/twinj/uuid/appveyor.yml | 31 + .../github.com/twinj/uuid/benchmarks_test.go | 176 + 
vendor/github.com/twinj/uuid/examples_test.go | 111 + vendor/github.com/twinj/uuid/format.go | 164 + vendor/github.com/twinj/uuid/format_test.go | 148 + vendor/github.com/twinj/uuid/generator.go | 381 ++ .../github.com/twinj/uuid/generator_test.go | 352 ++ vendor/github.com/twinj/uuid/glide.lock | 12 + vendor/github.com/twinj/uuid/glide.yaml | 10 + .../twinj/uuid/integrations_test.go | 16 + .../github.com/twinj/uuid/resolution_test.go | 103 + vendor/github.com/twinj/uuid/saver.go | 55 + vendor/github.com/twinj/uuid/saver_test.go | 94 + .../github.com/twinj/uuid/savers/cover.html | 204 + vendor/github.com/twinj/uuid/savers/cover.out | 114 + .../twinj/uuid/savers/filesystem.go | 102 + .../twinj/uuid/savers/filesystem_test.go | 132 + vendor/github.com/twinj/uuid/savers/savers.go | 17 + vendor/github.com/twinj/uuid/timestamp.go | 117 + .../github.com/twinj/uuid/timestamp_test.go | 140 + vendor/github.com/twinj/uuid/types.go | 174 + vendor/github.com/twinj/uuid/types_test.go | 255 + vendor/github.com/twinj/uuid/uuid.go | 268 ++ vendor/github.com/twinj/uuid/uuid_test.go | 445 ++ vendor/github.com/twinj/uuid/version.go | 69 + vendor/github.com/twinj/uuid/version_test.go | 49 + 507 files changed, 129367 insertions(+), 914 deletions(-) create mode 100644 kafka/consumer.go rename {input/kafkamdm => kafka}/lag_monitor.go (99%) rename {input/kafkamdm => kafka}/lag_monitor_test.go (99%) delete mode 100644 kafka/offsetMgr.go create mode 100644 kafka/utils.go create mode 100755 scripts/build_deps.sh create mode 100644 scripts/build_packages/systemd-centos7/Dockerfile create mode 100755 scripts/build_packages/systemd-centos7/build_package.sh create mode 100644 scripts/build_packages/systemd/Dockerfile create mode 100755 scripts/build_packages/systemd/build_package.sh create mode 100644 
scripts/build_packages/sysvinit/Dockerfile create mode 100755 scripts/build_packages/sysvinit/build_package.sh create mode 100644 scripts/build_packages/upstart-0.6.5/Dockerfile create mode 100755 scripts/build_packages/upstart-0.6.5/build_package.sh create mode 100755 scripts/build_packages/upstart-0.6.5/get_ruby.sh create mode 100644 scripts/build_packages/upstart/Dockerfile create mode 100755 scripts/build_packages/upstart/build_package.sh create mode 100755 scripts/get_go.sh create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/.travis.yml create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/LICENSE create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/README.md create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/examples/README create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/examples/confluent_cloud_example/confluent_cloud_example.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_channel_example/consumer_channel_example.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_example/consumer_example.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/examples/go-kafkacat/go-kafkacat.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_channel_example/producer_channel_example.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_example/producer_example.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go create 
mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static_all.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/config_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_performance_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/event_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/go_rdkafka_generr/go_rdkafka_generr.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/header_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/integration_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go create mode 100644 
vendor/github.com/confluentinc/confluent-kafka-go/kafka/message_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_performance_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/stats_event_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers_test.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/README.md create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/deploy.sh create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/globals.json create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_producer/go_verifiable_producer.go create mode 100644 vendor/github.com/confluentinc/confluent-kafka-go/mk/Makefile create mode 100755 vendor/github.com/confluentinc/confluent-kafka-go/mk/bootstrap-librdkafka.sh create mode 100755 
vendor/github.com/confluentinc/confluent-kafka-go/mk/doc-gen.py create mode 100644 vendor/github.com/edenhill/librdkafka/.appveyor.yml create mode 100644 vendor/github.com/edenhill/librdkafka/.dir-locals.el create mode 100644 vendor/github.com/edenhill/librdkafka/.doozer.json create mode 100644 vendor/github.com/edenhill/librdkafka/.travis.yml create mode 100644 vendor/github.com/edenhill/librdkafka/CMakeLists.txt create mode 100644 vendor/github.com/edenhill/librdkafka/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/edenhill/librdkafka/CONFIGURATION.md create mode 100644 vendor/github.com/edenhill/librdkafka/CONTRIBUTING.md create mode 100644 vendor/github.com/edenhill/librdkafka/Doxyfile create mode 100644 vendor/github.com/edenhill/librdkafka/INTRODUCTION.md create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.crc32c create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.lz4 create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.murmur2 create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.pycrc create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.queue create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.regexp create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.snappy create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.tinycthread create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSE.wingetopt create mode 100644 vendor/github.com/edenhill/librdkafka/LICENSES.txt create mode 100755 vendor/github.com/edenhill/librdkafka/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/README.md create mode 100644 vendor/github.com/edenhill/librdkafka/README.win32 create mode 100755 
vendor/github.com/edenhill/librdkafka/configure create mode 100644 vendor/github.com/edenhill/librdkafka/configure.librdkafka create mode 100755 vendor/github.com/edenhill/librdkafka/dev-conf.sh create mode 100644 vendor/github.com/edenhill/librdkafka/examples/CMakeLists.txt create mode 100644 vendor/github.com/edenhill/librdkafka/examples/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/examples/globals.json create mode 100644 vendor/github.com/edenhill/librdkafka/examples/kafkatest_verifiable_client.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_consume_batch.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_consumer_example.c create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_consumer_example.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_example.c create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_example.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_performance.c create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_simple_producer.c create mode 100644 vendor/github.com/edenhill/librdkafka/examples/rdkafka_zookeeper_example.c create mode 100755 vendor/github.com/edenhill/librdkafka/lds-gen.py create mode 100644 vendor/github.com/edenhill/librdkafka/mainpage.doxy create mode 100755 vendor/github.com/edenhill/librdkafka/mklove/Makefile.base create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.atomics create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.base create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.builtin create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.cc 
create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.cxx create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.fileversion create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.good_cflags create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.host create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.lib create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.parseversion create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.pic create mode 100644 vendor/github.com/edenhill/librdkafka/mklove/modules/configure.socket create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/RELEASE.md create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/archlinux/PKGBUILD create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/Config.cmake.in create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/README.md create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/config.h.in create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/atomic_32_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/atomic_64_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/dlopen_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/libsasl2_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/rdkafka_setup.cmake create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/regex_test.c create mode 100644 
vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/strndup_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/sync_32_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/cmake/try_compile/sync_64_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/changelog create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/compat create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/control create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/copyright create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/docs create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/gbp.conf create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka-dev.dirs create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka-dev.examples create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka-dev.install create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka-dev.substvars create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka.dsc create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka1-dbg.substvars create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka1.dirs create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka1.install create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka1.postinst.debhelper create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka1.postrm.debhelper create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/librdkafka1.symbols 
create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/debian/rules create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/source/format create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/debian/watch create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/get_version.py create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/homebrew/README.md create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/homebrew/brew-update-pr.sh create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/README.md create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/nuget/artifact.py create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/nuget/nuget.sh create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/nuget/packaging.py create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/nuget/release.py create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/requirements.txt create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/templates/librdkafka.redist.nuspec create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/templates/librdkafka.redist.props create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/templates/librdkafka.redist.targets create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/zfile/__init__.py create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/nuget/zfile/zfile.py create mode 100644 
vendor/github.com/edenhill/librdkafka/packaging/rpm/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/rpm/el7-x86_64.cfg create mode 100644 vendor/github.com/edenhill/librdkafka/packaging/rpm/librdkafka.spec create mode 100755 vendor/github.com/edenhill/librdkafka/packaging/tools/build-debian.sh create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/CMakeLists.txt create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/ConfImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/ConsumerImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/HandleImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/KafkaConsumerImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/MessageImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/MetadataImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/ProducerImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/QueueImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/README.md create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/RdKafka.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/TopicImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/TopicPartitionImpl.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/rdkafkacpp.h create mode 100644 vendor/github.com/edenhill/librdkafka/src-cpp/rdkafkacpp_int.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/CMakeLists.txt create mode 100644 vendor/github.com/edenhill/librdkafka/src/Makefile create mode 100644 
vendor/github.com/edenhill/librdkafka/src/crc32c.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/crc32c.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/librdkafka_cgrp_synch.png create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4frame.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4frame.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4frame_static.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4hc.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4hc.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/lz4opt.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/queue.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rd.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdaddr.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdaddr.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdatomic.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdavg.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdavl.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdavl.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdbuf.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdbuf.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdcrc32.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdcrc32.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rddl.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rddl.h create mode 100644 
vendor/github.com/edenhill/librdkafka/src/rdendian.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdgz.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdgz.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdinterval.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_assignor.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_assignor.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_broker.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_broker.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_buf.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_buf.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_cgrp.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_cgrp.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_conf.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_conf.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_event.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_event.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_feature.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_feature.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_header.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_header.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_int.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_interceptor.c create 
mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_interceptor.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_lz4.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_lz4.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_metadata.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_metadata.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_metadata_cache.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_msg.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_msg.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_msgset.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_msgset_reader.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_msgset_writer.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_offset.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_offset.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_op.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_op.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_partition.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_partition.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_pattern.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_pattern.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_plugin.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_plugin.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_proto.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_queue.c create mode 
100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_queue.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_range_assignor.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_request.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_request.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_roundrobin_assignor.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_sasl.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_sasl.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_sasl_cyrus.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_sasl_int.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_sasl_plain.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_sasl_scram.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_sasl_win32.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_subscription.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_timer.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_timer.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_topic.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_topic.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_transport.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_transport.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdkafka_transport_int.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdlist.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdlist.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdlog.c create 
mode 100644 vendor/github.com/edenhill/librdkafka/src/rdlog.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdmurmur2.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdmurmur2.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdports.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdports.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdposix.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdrand.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdrand.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdregex.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdregex.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdsignal.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdstring.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdstring.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdsysqueue.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdtime.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdtypes.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdunittest.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdunittest.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdvarint.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdvarint.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/rdwin32.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/regexp.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/regexp.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/snappy.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/snappy.h create mode 100644 
vendor/github.com/edenhill/librdkafka/src/snappy_compat.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/tinycthread.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/tinycthread.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/win32_config.h create mode 100644 vendor/github.com/edenhill/librdkafka/src/xxhash.c create mode 100644 vendor/github.com/edenhill/librdkafka/src/xxhash.h create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0000-unittests.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0001-multiobj.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0002-unkpart.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0003-msgmaxsize.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0004-conf.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0005-order.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0006-symbols.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0007-autotopic.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0008-reqacks.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0011-produce_batch.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0012-produce_consume.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0013-null-msgs.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0014-reconsume-191.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0015-offset_seeks.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0017-compression.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0018-cgrp_term.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0019-list_groups.c create mode 100644 
vendor/github.com/edenhill/librdkafka/tests/0020-destroy_hang.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0021-rkt_destroy.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0022-consume_batch.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0025-timers.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0026-consume_pause.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0028-long_topicnames.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0029-assign_offset.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0030-offset_commit.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0031-get_offsets.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0033-regex_subscribe.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0034-offset_reset.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0035-api_version.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0036-partial_fetch.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0037-destroy_hang_local.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0038-performance.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0039-event.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0040-io_event.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0041-fetch_max_bytes.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0042-many_topics.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0043-no_connection.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0044-partition_cnt.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0045-subscribe_update.c create mode 
100644 vendor/github.com/edenhill/librdkafka/tests/0046-rkt_cache.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0047-partial_buf_tmout.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0048-partitioner.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0049-consume_conn_close.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0050-subscribe_adds.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0051-assign_adds.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0052-msg_timestamps.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0053-stats_cb.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0054-offset_time.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0055-producer_latency.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0056-balanced_group_mt.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0057-invalid_topic.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0058-log.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0059-bsearch.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0060-op_prio.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0061-consumer_lag.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0062-stats_event.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0063-clusterid.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0064-interceptors.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0065-yield.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0066-plugins.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0067-empty_topic.cpp create 
mode 100644 vendor/github.com/edenhill/librdkafka/tests/0068-produce_timeout.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0069-consumer_add_parts.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0070-null_empty.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0072-headers_ut.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0073-headers.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0074-producev.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0075-retry.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0076-produce_retry.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0077-compaction.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0078-c_from_cpp.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0079-fork.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/0081-fetch_max_bytes.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/1000-unktopic.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/8000-idle.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/CMakeLists.txt create mode 100644 vendor/github.com/edenhill/librdkafka/tests/LibrdkafkaTestApp.py create mode 100644 vendor/github.com/edenhill/librdkafka/tests/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/tests/README create mode 100755 vendor/github.com/edenhill/librdkafka/tests/broker_version_tests.py create mode 100755 vendor/github.com/edenhill/librdkafka/tests/buildbox.sh create mode 100755 vendor/github.com/edenhill/librdkafka/tests/cleanup-checker-tests.sh create mode 100755 vendor/github.com/edenhill/librdkafka/tests/cluster_testing.py create mode 100755 
vendor/github.com/edenhill/librdkafka/tests/delete-test-topics.sh create mode 100755 vendor/github.com/edenhill/librdkafka/tests/gen-ssl-certs.sh create mode 100755 vendor/github.com/edenhill/librdkafka/tests/interactive_broker_version.py create mode 100644 vendor/github.com/edenhill/librdkafka/tests/interceptor_test/CMakeLists.txt create mode 100644 vendor/github.com/edenhill/librdkafka/tests/interceptor_test/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/tests/interceptor_test/interceptor_test.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/interceptor_test/interceptor_test.h create mode 100644 vendor/github.com/edenhill/librdkafka/tests/java/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/tests/java/Murmur2Cli.java create mode 100644 vendor/github.com/edenhill/librdkafka/tests/java/README.md create mode 100755 vendor/github.com/edenhill/librdkafka/tests/java/run-class.sh create mode 100644 vendor/github.com/edenhill/librdkafka/tests/librdkafka.suppressions create mode 100755 vendor/github.com/edenhill/librdkafka/tests/lz4_manual_test.sh create mode 100755 vendor/github.com/edenhill/librdkafka/tests/multi-broker-version-test.sh create mode 100755 vendor/github.com/edenhill/librdkafka/tests/performance_plot.py create mode 100644 vendor/github.com/edenhill/librdkafka/tests/plugin_test/Makefile create mode 100644 vendor/github.com/edenhill/librdkafka/tests/plugin_test/plugin_test.c create mode 100755 vendor/github.com/edenhill/librdkafka/tests/run-test.sh create mode 100755 vendor/github.com/edenhill/librdkafka/tests/sasl_test.py create mode 100644 vendor/github.com/edenhill/librdkafka/tests/sockem.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/sockem.h create mode 100644 vendor/github.com/edenhill/librdkafka/tests/test.c 
create mode 100644 vendor/github.com/edenhill/librdkafka/tests/test.conf.example create mode 100644 vendor/github.com/edenhill/librdkafka/tests/test.h create mode 100644 vendor/github.com/edenhill/librdkafka/tests/testcpp.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/tests/testcpp.h create mode 100644 vendor/github.com/edenhill/librdkafka/tests/testshared.h create mode 100755 vendor/github.com/edenhill/librdkafka/tests/until-fail.sh create mode 100644 vendor/github.com/edenhill/librdkafka/tests/xxxx-assign_partition.c create mode 100644 vendor/github.com/edenhill/librdkafka/tests/xxxx-metadata.cpp create mode 100644 vendor/github.com/edenhill/librdkafka/win32/README.md create mode 100644 vendor/github.com/edenhill/librdkafka/win32/build-package.bat create mode 100644 vendor/github.com/edenhill/librdkafka/win32/build.bat create mode 100644 vendor/github.com/edenhill/librdkafka/win32/common.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/interceptor_test/interceptor_test.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/librdkafka.autopkg.template create mode 100644 vendor/github.com/edenhill/librdkafka/win32/librdkafka.master.testing.targets create mode 100644 vendor/github.com/edenhill/librdkafka/win32/librdkafka.sln create mode 100644 vendor/github.com/edenhill/librdkafka/win32/librdkafka.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/librdkafkacpp/librdkafkacpp.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/package-nuget.ps1 create mode 100644 vendor/github.com/edenhill/librdkafka/win32/packages.config create mode 100644 vendor/github.com/edenhill/librdkafka/win32/packages/repositories.config create mode 100644 vendor/github.com/edenhill/librdkafka/win32/push-package.bat create mode 
100644 vendor/github.com/edenhill/librdkafka/win32/rdkafka_consumer_example_cpp/rdkafka_consumer_example_cpp.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/rdkafka_example/rdkafka_example.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/rdkafka_performance/rdkafka_performance.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/tests/test.conf.example create mode 100644 vendor/github.com/edenhill/librdkafka/win32/tests/tests.vcxproj create mode 100644 vendor/github.com/edenhill/librdkafka/win32/wingetopt.c create mode 100644 vendor/github.com/edenhill/librdkafka/win32/wingetopt.h create mode 100644 vendor/github.com/edenhill/librdkafka/win32/wintime.h create mode 100644 vendor/github.com/twinj/uuid/.travis.yml create mode 100644 vendor/github.com/twinj/uuid/LICENSE create mode 100644 vendor/github.com/twinj/uuid/README.md create mode 100644 vendor/github.com/twinj/uuid/appveyor.yml create mode 100644 vendor/github.com/twinj/uuid/benchmarks_test.go create mode 100644 vendor/github.com/twinj/uuid/examples_test.go create mode 100644 vendor/github.com/twinj/uuid/format.go create mode 100644 vendor/github.com/twinj/uuid/format_test.go create mode 100644 vendor/github.com/twinj/uuid/generator.go create mode 100644 vendor/github.com/twinj/uuid/generator_test.go create mode 100644 vendor/github.com/twinj/uuid/glide.lock create mode 100644 vendor/github.com/twinj/uuid/glide.yaml create mode 100644 vendor/github.com/twinj/uuid/integrations_test.go create mode 100644 vendor/github.com/twinj/uuid/resolution_test.go create mode 100644 vendor/github.com/twinj/uuid/saver.go create mode 100644 vendor/github.com/twinj/uuid/saver_test.go create mode 100644 vendor/github.com/twinj/uuid/savers/cover.html create mode 100644 
vendor/github.com/twinj/uuid/savers/cover.out create mode 100644 vendor/github.com/twinj/uuid/savers/filesystem.go create mode 100644 vendor/github.com/twinj/uuid/savers/filesystem_test.go create mode 100644 vendor/github.com/twinj/uuid/savers/savers.go create mode 100644 vendor/github.com/twinj/uuid/timestamp.go create mode 100644 vendor/github.com/twinj/uuid/timestamp_test.go create mode 100644 vendor/github.com/twinj/uuid/types.go create mode 100644 vendor/github.com/twinj/uuid/types_test.go create mode 100644 vendor/github.com/twinj/uuid/uuid.go create mode 100644 vendor/github.com/twinj/uuid/uuid_test.go create mode 100644 vendor/github.com/twinj/uuid/version.go create mode 100644 vendor/github.com/twinj/uuid/version_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 241d634650..b9f94bfc68 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -5,6 +5,7 @@ jobs: docker: - image: circleci/golang:1.10 steps: + - run: sudo apt-get install libssl-dev openssl libsasl2-2 libsasl2-dev zlib1g-dev - checkout - run: scripts/build.sh - run: scripts/build_tools.sh @@ -16,23 +17,32 @@ jobs: - persist_to_workspace: root: . paths: - - build - build_docker test: working_directory: /go/src/github.com/grafana/metrictank docker: - image: circleci/golang:1.10 + environment: + - TMP_DIR: /tmp/tmpdir + - PKG_CONFIG_PATH: /tmp/tmpdir/lib/pkgconfig + - LD_LIBRARY_PATH: /tmp/tmpdir/lib steps: - checkout - - run: go test -v -race $(go list ./... | grep -v github.com/grafana/metrictank/stacktest) + - run: scripts/build_deps.sh + - run: go test -v -race -tags static $(go list ./... 
| grep -v github.com/grafana/metrictank/stacktest) qa: working_directory: /go/src/github.com/grafana/metrictank docker: - image: circleci/golang:1.10 + environment: + - TMP_DIR: /tmp/tmpdir + - PKG_CONFIG_PATH: /tmp/tmpdir/lib/pkgconfig + - LD_LIBRARY_PATH: /tmp/tmpdir/lib steps: - checkout + - run: scripts/build_deps.sh - run: scripts/qa/gofmt.sh - run: scripts/qa/go-generate.sh - run: scripts/qa/ineffassign.sh @@ -43,29 +53,36 @@ jobs: qa-post-build: working_directory: /home/circleci/.go_workspace/src/github.com/grafana/metrictank - machine: true + machine: + image: true + environment: + - TMP_DIR: /tmp/tmpdir + - PKG_CONFIG_PATH: /tmp/tmpdir/lib/pkgconfig + - LD_LIBRARY_PATH: /tmp/tmpdir/lib steps: - checkout + - run: cd /usr/local; curl https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz | sudo tar -xz + - run: scripts/build_deps.sh + - run: scripts/build_tools.sh + - run: scripts/qa/docs.sh - attach_workspace: at: . - - run: scripts/qa/docs.sh - run: docker load -i build_docker/metrictank.tar - run: go test -v ./stacktest/tests/end2end_carbon deploy: docker: - - image: circleci/ruby:2.3 + - image: circleci/golang:1.10 steps: - checkout - attach_workspace: at: . 
- - run: scripts/depends.sh + - setup_remote_docker - run: scripts/build_packages.sh - store_artifacts: path: build - store_artifacts: path: build_pkg - - setup_remote_docker - deploy: command: | if [ "${CIRCLE_BRANCH}" == "master" ]; then diff --git a/Gopkg.lock b/Gopkg.lock index 20d603c0fc..d487c33bc0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -85,6 +85,12 @@ packages = ["."] revision = "3a0bb77429bd3a61596f5e8a3172445844342120" +[[projects]] + name = "github.com/confluentinc/confluent-kafka-go" + packages = ["kafka"] + revision = "5e4d04e05fc319ce5996a867aafef29059f26862" + version = "v0.11.4" + [[projects]] name = "github.com/davecgh/go-spew" packages = ["spew"] @@ -561,31 +567,18 @@ ] revision = "995f5b2e021c69b8b028ba6d0b05c1dd500783db" -[[projects]] - branch = "master" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util" - ] - revision = "3d8f4155ffd9029d32e5cf03853b58759b6e3710" - [[projects]] name = "github.com/tinylib/msgp" packages = ["msgp"] revision = "0cea1fa86e8403be1284013014f87ab942056de8" version = "v1.0-beta" +[[projects]] + name = "github.com/twinj/uuid" + packages = ["."] + revision = "835a10bbd6bce40820349a68b1368a62c3c5617c" + version = "v1.0.0" + [[projects]] name = "github.com/uber/jaeger-client-go" packages = [ @@ -738,6 +731,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "63370741741523b4f8a63db039aab356266c8b413105c9bd7784a3e9812f0dae" + inputs-digest = "eea56bedbd1b188f2b809506f13b09af0c1ce4f4641e41d80001ca73b1c9b1dd" solver-name = "gps-cdcl" solver-version = 1 diff --git a/cmd/mt-kafka-mdm-sniff-out-of-order/main.go b/cmd/mt-kafka-mdm-sniff-out-of-order/main.go index c5da588fd2..9e3c58831e 100644 --- 
a/cmd/mt-kafka-mdm-sniff-out-of-order/main.go +++ b/cmd/mt-kafka-mdm-sniff-out-of-order/main.go @@ -199,8 +199,6 @@ func main() { // config may have had it disabled inKafkaMdm.Enabled = true - // important: we don't want to share the same offset tracker as the mdm input of MT itself - inKafkaMdm.DataDir = "/tmp/" + instance inKafkaMdm.ConfigProcess(instance) diff --git a/cmd/mt-kafka-mdm-sniff/main.go b/cmd/mt-kafka-mdm-sniff/main.go index a74046259c..d5044d304d 100644 --- a/cmd/mt-kafka-mdm-sniff/main.go +++ b/cmd/mt-kafka-mdm-sniff/main.go @@ -115,8 +115,6 @@ func main() { // config may have had it disabled inKafkaMdm.Enabled = true - // important: we don't want to share the same offset tracker as the mdm input of MT itself - inKafkaMdm.DataDir = "/tmp/" + instance inKafkaMdm.ConfigProcess(instance) diff --git a/docker/docker-chaos/docker-compose.yml b/docker/docker-chaos/docker-compose.yml index 8dd1231ffa..d69202deff 100644 --- a/docker/docker-chaos/docker-compose.yml +++ b/docker/docker-chaos/docker-compose.yml @@ -7,7 +7,6 @@ services: ports: - "6060:6060" volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini environment: WAIT_HOSTS: kafka:9092,cassandra:9042 @@ -32,7 +31,6 @@ services: ports: - "6061:6060" volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini environment: WAIT_HOSTS: kafka:9092,cassandra:9042,metrictank0:6060 @@ -57,7 +55,6 @@ services: ports: - "6062:6060" volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini environment: WAIT_HOSTS: kafka:9092,cassandra:9042,metrictank0:6060 @@ -82,7 +79,6 @@ services: ports: - "6063:6060" volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini environment: WAIT_HOSTS: kafka:9092,cassandra:9042,metrictank0:6060 @@ -107,7 +103,6 @@ services: ports: - "6064:6060" volumes: - - 
../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini environment: WAIT_HOSTS: kafka:9092,cassandra:9042,metrictank0:6060 @@ -132,7 +127,6 @@ services: ports: - "6065:6060" volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini environment: WAIT_HOSTS: kafka:9092,cassandra:9042,metrictank0:6060 diff --git a/docker/docker-cluster/docker-compose.yml b/docker/docker-cluster/docker-compose.yml index a17a4a2701..c33913cd1d 100644 --- a/docker/docker-cluster/docker-compose.yml +++ b/docker/docker-cluster/docker-compose.yml @@ -7,7 +7,6 @@ services: expose: - 6060 volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini - ./storage-schemas.conf:/etc/metrictank/storage-schemas.conf - ./storage-aggregation.conf:/etc/metrictank/storage-aggregation.conf @@ -32,7 +31,6 @@ services: expose: - 6060 volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini - ./storage-schemas.conf:/etc/metrictank/storage-schemas.conf - ./storage-aggregation.conf:/etc/metrictank/storage-aggregation.conf @@ -57,7 +55,6 @@ services: expose: - 6060 volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini - ./storage-schemas.conf:/etc/metrictank/storage-schemas.conf - ./storage-aggregation.conf:/etc/metrictank/storage-aggregation.conf @@ -82,7 +79,6 @@ services: expose: - 6060 volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini - ./storage-schemas.conf:/etc/metrictank/storage-schemas.conf - ./storage-aggregation.conf:/etc/metrictank/storage-aggregation.conf diff --git a/docker/docker-cluster/metrictank.ini b/docker/docker-cluster/metrictank.ini index b7cad80faa..5e1c07095c 100644 --- a/docker/docker-cluster/metrictank.ini +++ b/docker/docker-cluster/metrictank.ini @@ -182,10 +182,9 @@ org-id = 0 brokers = kafka:9092 
# kafka topic (may be given multiple times as a comma-separated list) topics = mdm -# offset to start consuming from. Can be one of newest, oldest,last or a time duration -# When using a duration but the offset request fails (e.g. Kafka doesn't have data so far back), metrictank falls back to `oldest`. +# offset to start consuming from. Can be one of newest, oldest or a time duration # the further back in time you go, the more old data you can load into metrictank, but the longer it takes to catch up to realtime data -offset = last +offset = oldest # kafka partitions to consume. use '*' or a comma separated list of id's partitions = * # save interval for offsets @@ -281,10 +280,8 @@ topic = metricpersist partitions = * # method used for partitioning metrics. This should match the settings of tsdb-gw. (byOrg|bySeries) partition-scheme = bySeries -# offset to start consuming from. Can be one of newest, oldest,last or a time duration -# When using a duration but the offset request fails (e.g. Kafka doesn't have data so far back), metrictank falls back to `oldest`. -# Should match your kafka-mdm-in setting -offset = last +# offset to start consuming from. Can be one of newest, oldest or a time duration +offset = oldest # save interval for offsets offset-commit-interval = 5s # Maximum time backlog processing can block during metrictank startup. 
diff --git a/docker/docker-dev-custom-cfg-kafka/docker-compose.yml b/docker/docker-dev-custom-cfg-kafka/docker-compose.yml index 7e1c755f93..41a1bdca43 100644 --- a/docker/docker-dev-custom-cfg-kafka/docker-compose.yml +++ b/docker/docker-dev-custom-cfg-kafka/docker-compose.yml @@ -9,7 +9,6 @@ services: expose: - 6060 volumes: - - ../../build/metrictank:/usr/bin/metrictank - ./metrictank.ini:/etc/metrictank/metrictank.ini - ./storage-schemas.conf:/etc/metrictank/storage-schemas.conf - ./storage-aggregation.conf:/etc/metrictank/storage-aggregation.conf diff --git a/docker/docker-dev/docker-compose.yml b/docker/docker-dev/docker-compose.yml index 345b41cd33..0e2acf0d02 100644 --- a/docker/docker-dev/docker-compose.yml +++ b/docker/docker-dev/docker-compose.yml @@ -8,7 +8,6 @@ services: - "6060:6060" - "2003:2003" volumes: - - ../../build/metrictank:/usr/bin/metrictank - ../../scripts/config/metrictank-docker.ini:/etc/metrictank/metrictank.ini - ../../scripts/config/storage-schemas.conf:/etc/metrictank/storage-schemas.conf - ../../scripts/config/storage-aggregation.conf:/etc/metrictank/storage-aggregation.conf @@ -52,4 +51,4 @@ services: ports: - "8125:8125/udp" volumes: - - "../statsdaemon.ini:/etc/statsdaemon.ini" \ No newline at end of file + - "../statsdaemon.ini:/etc/statsdaemon.ini" diff --git a/input/kafkamdm/kafkamdm.go b/input/kafkamdm/kafkamdm.go index dd69cb3993..6d7f74fa13 100644 --- a/input/kafkamdm/kafkamdm.go +++ b/input/kafkamdm/kafkamdm.go @@ -2,23 +2,18 @@ package kafkamdm import ( "flag" - "fmt" - "strconv" "strings" "sync" "time" - schema "gopkg.in/raintank/schema.v1" - "gopkg.in/raintank/schema.v1/msg" - - "github.com/Shopify/sarama" - "github.com/raintank/worldping-api/pkg/log" - "github.com/rakyll/globalconf" - "github.com/grafana/metrictank/cluster" "github.com/grafana/metrictank/input" "github.com/grafana/metrictank/kafka" "github.com/grafana/metrictank/stats" + 
"github.com/raintank/worldping-api/pkg/log" + "github.com/rakyll/globalconf" + schema "gopkg.in/raintank/schema.v1" + "gopkg.in/raintank/schema.v1/msg" ) // metric input.kafka-mdm.metrics_per_message is how many metrics per message were seen. @@ -29,13 +24,11 @@ var metricsDecodeErr = stats.NewCounter32("input.kafka-mdm.metrics_decode_err") type KafkaMdm struct { input.Handler - consumer sarama.Consumer - client sarama.Client - lagMonitor *LagMonitor - wg sync.WaitGroup + consumer *kafka.Consumer + wg sync.WaitGroup // signal to PartitionConsumers to shutdown - stopConsuming chan struct{} + stopChan chan struct{} // signal to caller that it should shutdown fatal chan struct{} } @@ -44,47 +37,32 @@ func (k *KafkaMdm) Name() string { return "kafka-mdm" } -var LogLevel int +var consumerConf *kafka.ConsumerConf var Enabled bool var orgId uint -var brokerStr string -var brokers []string +var LogLevel int var topicStr string -var topics []string -var partitionStr string -var partitions []int32 -var offsetStr string -var DataDir string -var config *sarama.Config -var channelBufferSize int -var consumerFetchMin int -var consumerFetchDefault int -var consumerMaxWaitTime time.Duration -var consumerMaxProcessingTime time.Duration -var netMaxOpenRequests int -var offsetMgr *kafka.OffsetMgr -var offsetDuration time.Duration -var offsetCommitInterval time.Duration -var partitionOffset map[int32]*stats.Gauge64 -var partitionLogSize map[int32]*stats.Gauge64 -var partitionLag map[int32]*stats.Gauge64 func ConfigSetup() { + consumerConf = kafka.NewConfig() inKafkaMdm := flag.NewFlagSet("kafka-mdm-in", flag.ExitOnError) inKafkaMdm.BoolVar(&Enabled, "enabled", false, "") inKafkaMdm.UintVar(&orgId, "org-id", 0, "For incoming MetricPoint messages without org-id, assume this org id") - inKafkaMdm.StringVar(&brokerStr, "brokers", "kafka:9092", "tcp address for kafka (may be be given multiple times as a comma-separated list)") + 
inKafkaMdm.DurationVar(&consumerConf.OffsetCommitInterval, "offset-commit-interval", time.Second*5, "Interval at which offsets should be saved.") + inKafkaMdm.IntVar(&consumerConf.BatchNumMessages, "batch-num-messages", 10000, "Maximum number of messages batched in one MessageSet") + inKafkaMdm.IntVar(&consumerConf.BufferMaxMs, "metrics-buffer-max-ms", 100, "Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers") + inKafkaMdm.IntVar(&consumerConf.ChannelBufferSize, "channel-buffer-size", 1000, "Maximum number of messages allowed on the producer queue") + inKafkaMdm.IntVar(&consumerConf.FetchMin, "consumer-fetch-min", 1, "Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting") + inKafkaMdm.IntVar(&consumerConf.MaxWaitMs, "consumer-max-wait-ms", 100, "Maximum time the broker may wait to fill the response with fetch.min.bytes") + inKafkaMdm.IntVar(&consumerConf.MetadataBackoffTime, "metadata-backoff-time", 500, "Time to wait between attempts to fetch metadata in ms") + inKafkaMdm.IntVar(&consumerConf.MetadataRetries, "metadata-retries", 5, "Number of retries to fetch metadata in case of failure") + inKafkaMdm.IntVar(&consumerConf.MetadataTimeout, "consumer-metadata-timeout-ms", 10000, "Maximum time to wait for the broker to reply to metadata queries in ms") + inKafkaMdm.IntVar(&consumerConf.NetMaxOpenRequests, "net-max-open-requests", 100, "Maximum number of in-flight requests per broker connection. 
This is a generic property applied to all broker communication, however it is primarily relevant to produce requests.") + inKafkaMdm.IntVar(&consumerConf.SessionTimeout, "consumer-session-timeout", 30000, "Client group session and failure detection timeout in ms") + inKafkaMdm.StringVar(&consumerConf.Broker, "brokers", "kafka:9092", "tcp address for kafka (may be be given multiple times as a comma-separated list)") + inKafkaMdm.StringVar(&consumerConf.StartAtOffset, "offset", "oldest", "Set the offset to start consuming from. Can be one of newest, oldest or a time duration") + inKafkaMdm.StringVar(&consumerConf.Partitions, "partitions", "*", "kafka partitions to consume. use '*' or a comma separated list of id's") inKafkaMdm.StringVar(&topicStr, "topics", "mdm", "kafka topic (may be given multiple times as a comma-separated list)") - inKafkaMdm.StringVar(&offsetStr, "offset", "last", "Set the offset to start consuming from. Can be one of newest, oldest,last or a time duration") - inKafkaMdm.StringVar(&partitionStr, "partitions", "*", "kafka partitions to consume. 
use '*' or a comma separated list of id's") - inKafkaMdm.DurationVar(&offsetCommitInterval, "offset-commit-interval", time.Second*5, "Interval at which offsets should be saved.") - inKafkaMdm.StringVar(&DataDir, "data-dir", "", "Directory to store partition offsets index") - inKafkaMdm.IntVar(&channelBufferSize, "channel-buffer-size", 1000, "The number of metrics to buffer in internal and external channels") - inKafkaMdm.IntVar(&consumerFetchMin, "consumer-fetch-min", 1, "The minimum number of message bytes to fetch in a request") - inKafkaMdm.IntVar(&consumerFetchDefault, "consumer-fetch-default", 32768, "The default number of message bytes to fetch in a request") - inKafkaMdm.DurationVar(&consumerMaxWaitTime, "consumer-max-wait-time", time.Second, "The maximum amount of time the broker will wait for Consumer.Fetch.Min bytes to become available before it returns fewer than that anyway") - inKafkaMdm.DurationVar(&consumerMaxProcessingTime, "consumer-max-processing-time", time.Second, "The maximum amount of time the consumer expects a message takes to process") - inKafkaMdm.IntVar(&netMaxOpenRequests, "net-max-open-requests", 100, "How many outstanding requests a connection is allowed to have before sending on it blocks") globalconf.Register("kafka-mdm-in", inKafkaMdm) } @@ -93,263 +71,56 @@ func ConfigProcess(instance string) { return } - if offsetCommitInterval == 0 { + if consumerConf.OffsetCommitInterval == 0 { log.Fatal(4, "kafkamdm: offset-commit-interval must be greater then 0") } - if consumerMaxWaitTime == 0 { - log.Fatal(4, "kafkamdm: consumer-max-wait-time must be greater then 0") - } - if consumerMaxProcessingTime == 0 { - log.Fatal(4, "kafkamdm: consumer-max-processing-time must be greater then 0") - } - var err error - switch offsetStr { - case "last": - case "oldest": - case "newest": - default: - offsetDuration, err = time.ParseDuration(offsetStr) - if err != nil { - log.Fatal(4, "kafkamdm: invalid offest format. 
%s", err) - } - } - offsetMgr, err = kafka.NewOffsetMgr(DataDir) - if err != nil { - log.Fatal(4, "kafka-mdm couldnt create offsetMgr. %s", err) + if consumerConf.MaxWaitMs == 0 { + log.Fatal(4, "kafkamdm: consumer-max-wait-time must be greater then 0") } - brokers = strings.Split(brokerStr, ",") - topics = strings.Split(topicStr, ",") - - config = sarama.NewConfig() - config.ClientID = instance + "-mdm" - config.ChannelBufferSize = channelBufferSize - config.Consumer.Fetch.Min = int32(consumerFetchMin) - config.Consumer.Fetch.Default = int32(consumerFetchDefault) - config.Consumer.MaxWaitTime = consumerMaxWaitTime - config.Consumer.MaxProcessingTime = consumerMaxProcessingTime - config.Net.MaxOpenRequests = netMaxOpenRequests - config.Version = sarama.V0_10_0_0 - err = config.Validate() - if err != nil { - log.Fatal(2, "kafka-mdm invalid config: %s", err) - } - // validate our partitions - client, err := sarama.NewClient(brokers, config) - if err != nil { - log.Fatal(4, "kafka-mdm failed to create client. %s", err) - } - defer client.Close() + consumerConf.Topics = strings.Split(topicStr, ",") + consumerConf.ClientID = instance + "-mdm" - availParts, err := kafka.GetPartitions(client, topics) - if err != nil { - log.Fatal(4, "kafka-mdm: %s", err.Error()) - } - log.Info("kafka-mdm: available partitions %v", availParts) - if partitionStr == "*" { - partitions = availParts - } else { - parts := strings.Split(partitionStr, ",") - for _, part := range parts { - i, err := strconv.Atoi(part) - if err != nil { - log.Fatal(4, "could not parse partition %q. partitions must be '*' or a comma separated list of id's", part) - } - partitions = append(partitions, int32(i)) - } - missing := kafka.DiffPartitions(partitions, availParts) - if len(missing) > 0 { - log.Fatal(4, "kafka-mdm: configured partitions not in list of available partitions. missing %v", missing) - } - } // record our partitions so others (MetricIdx) can use the partitioning information. 
// but only if the manager has been created (e.g. in metrictank), not when this input plugin is used in other contexts if cluster.Manager != nil { - cluster.Manager.SetPartitions(partitions) - } - - // initialize our offset metrics - partitionOffset = make(map[int32]*stats.Gauge64) - partitionLogSize = make(map[int32]*stats.Gauge64) - partitionLag = make(map[int32]*stats.Gauge64) - for _, part := range partitions { - partitionOffset[part] = stats.NewGauge64(fmt.Sprintf("input.kafka-mdm.partition.%d.offset", part)) - partitionLogSize[part] = stats.NewGauge64(fmt.Sprintf("input.kafka-mdm.partition.%d.log_size", part)) - partitionLag[part] = stats.NewGauge64(fmt.Sprintf("input.kafka-mdm.partition.%d.lag", part)) + consumer, err := kafka.NewConsumer(consumerConf) + if err != nil { + log.Fatal(2, "kafka-cluster failed to initialize consumer: %s", err) + } + if LogLevel < 2 { + log.Debug("kafkamdm: setting partitions on manager: %+v", consumer.Partitions) + } + cluster.Manager.SetPartitions(consumer.Partitions) + consumer.Stop() } } func New() *KafkaMdm { - client, err := sarama.NewClient(brokers, config) - if err != nil { - log.Fatal(4, "kafka-mdm failed to create client. 
%s", err) - } - consumer, err := sarama.NewConsumerFromClient(client) - if err != nil { - log.Fatal(2, "kafka-mdm failed to create consumer: %s", err) - } log.Info("kafka-mdm consumer created without error") k := KafkaMdm{ - consumer: consumer, - client: client, - lagMonitor: NewLagMonitor(10, partitions), - stopConsuming: make(chan struct{}), + stopChan: make(chan struct{}), } - return &k -} + consumerConf.GaugePrefix = "input.kafka-mdm.partition" + consumerConf.MessageHandler = k.handleMsg -func (k *KafkaMdm) Start(handler input.Handler, fatal chan struct{}) error { - k.Handler = handler - k.fatal = fatal var err error - for _, topic := range topics { - for _, partition := range partitions { - var offset int64 - switch offsetStr { - case "oldest": - offset = sarama.OffsetOldest - case "newest": - offset = sarama.OffsetNewest - case "last": - offset, err = offsetMgr.Last(topic, partition) - if err != nil { - log.Error(4, "kafka-mdm: Failed to get %q duration offset for %s:%d. %q", offsetStr, topic, partition, err) - return err - } - default: - offset, err = k.client.GetOffset(topic, partition, time.Now().Add(-1*offsetDuration).UnixNano()/int64(time.Millisecond)) - if err != nil { - offset = sarama.OffsetOldest - log.Warn("kafka-mdm failed to get offset %s: %s -> will use oldest instead", offsetDuration, err) - } - } - k.wg.Add(1) - go k.consumePartition(topic, partition, offset) - } - } - return nil -} - -// tryGetOffset will to query kafka repeatedly for the requested offset and give up after attempts unsuccesfull attempts -// an error is returned when it had to give up -func (k *KafkaMdm) tryGetOffset(topic string, partition int32, offset int64, attempts int, sleep time.Duration) (int64, error) { - - var val int64 - var err error - var offsetStr string - - switch offset { - case sarama.OffsetNewest: - offsetStr = "newest" - case sarama.OffsetOldest: - offsetStr = "oldest" - default: - offsetStr = strconv.FormatInt(offset, 10) - } - - attempt := 1 - for { - val, 
err = k.client.GetOffset(topic, partition, offset) - if err == nil { - break - } - - err = fmt.Errorf("failed to get offset %s of partition %s:%d. %s (attempt %d/%d)", offsetStr, topic, partition, err, attempt, attempts) - if attempt == attempts { - break - } - log.Warn("kafka-mdm %s", err) - attempt += 1 - time.Sleep(sleep) - } - return val, err -} - -// this will continually consume from the topic until k.stopConsuming is triggered. -func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset int64) { - defer k.wg.Done() - - partitionOffsetMetric := partitionOffset[partition] - partitionLogSizeMetric := partitionLogSize[partition] - partitionLagMetric := partitionLag[partition] - - // determine the pos of the topic and the initial offset of our consumer - newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 7, time.Second*10) + k.consumer, err = kafka.NewConsumer(consumerConf) if err != nil { - log.Error(3, "kafka-mdm %s", err) - close(k.fatal) - return - } - if currentOffset == sarama.OffsetNewest { - currentOffset = newest - } else if currentOffset == sarama.OffsetOldest { - currentOffset, err = k.tryGetOffset(topic, partition, sarama.OffsetOldest, 7, time.Second*10) - if err != nil { - log.Error(3, "kafka-mdm %s", err) - close(k.fatal) - return - } + log.Fatal(2, "kafka-cluster failed to initialize consumer: %s", err) } + k.consumer.InitLagMonitor(10) - partitionOffsetMetric.Set(int(currentOffset)) - partitionLogSizeMetric.Set(int(newest)) - partitionLagMetric.Set(int(newest - currentOffset)) + return &k +} - log.Info("kafka-mdm: consuming from %s:%d from offset %d", topic, partition, currentOffset) - pc, err := k.consumer.ConsumePartition(topic, partition, currentOffset) - if err != nil { - log.Error(4, "kafka-mdm: failed to start partitionConsumer for %s:%d. 
%s", topic, partition, err) - close(k.fatal) - return - } - messages := pc.Messages() - ticker := time.NewTicker(offsetCommitInterval) - for { - select { - case msg, ok := <-messages: - // https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-am-i-getting-a-nil-message-from-the-sarama-consumer - if !ok { - log.Error(3, "kafka-mdm: kafka consumer for %s:%d has shutdown. stop consuming", topic, partition) - if err := offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-mdm failed to commit offset for %s:%d, %s", topic, partition, err) - } - close(k.fatal) - return - } - if LogLevel < 2 { - log.Debug("kafka-mdm received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key) - } - k.handleMsg(msg.Value, partition) - currentOffset = msg.Offset - case ts := <-ticker.C: - if err := offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-mdm failed to commit offset for %s:%d, %s", topic, partition, err) - } - k.lagMonitor.StoreOffset(partition, currentOffset, ts) - newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 1, 0) - if err != nil { - log.Error(3, "kafka-mdm %s", err) - } else { - partitionLogSizeMetric.Set(int(newest)) - } +func (k *KafkaMdm) Start(handler input.Handler, fatal chan struct{}) error { + k.Handler = handler + k.fatal = fatal - partitionOffsetMetric.Set(int(currentOffset)) - if err == nil { - lag := int(newest - currentOffset) - partitionLagMetric.Set(lag) - k.lagMonitor.StoreLag(partition, lag) - } - case <-k.stopConsuming: - pc.Close() - if err := offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-mdm failed to commit offset for %s:%d, %s", topic, partition, err) - } - log.Info("kafka-mdm consumer for %s:%d ended.", topic, partition) - return - } - } + return k.consumer.Start(nil) } func (k *KafkaMdm) handleMsg(data []byte, partition int32) { @@ -376,14 +147,10 @@ 
func (k *KafkaMdm) handleMsg(data []byte, partition int32) { k.Handler.ProcessMetricData(&md, partition) } -// Stop will initiate a graceful stop of the Consumer (permanent) -// and block until it stopped. func (k *KafkaMdm) Stop() { - // closes notifications and messages channels, amongst others - close(k.stopConsuming) - k.wg.Wait() - k.client.Close() - offsetMgr.Close() + log.Info("kafka-mdm: stopping kafka input") + close(k.stopChan) + k.consumer.Stop() } func (k *KafkaMdm) MaintainPriority() { @@ -391,10 +158,10 @@ func (k *KafkaMdm) MaintainPriority() { ticker := time.NewTicker(time.Second * 10) for { select { - case <-k.stopConsuming: + case <-k.stopChan: return case <-ticker.C: - cluster.Manager.SetPriority(k.lagMonitor.Metric()) + cluster.Manager.SetPriority(k.consumer.LagMonitor.Metric()) } } }() diff --git a/kafka/consumer.go b/kafka/consumer.go new file mode 100644 index 0000000000..f00e393b3a --- /dev/null +++ b/kafka/consumer.go @@ -0,0 +1,401 @@ +package kafka + +import ( + "fmt" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + confluent "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/grafana/metrictank/stats" + "github.com/raintank/worldping-api/pkg/log" + "github.com/twinj/uuid" +) + +var LogLevel int + +type Consumer struct { + conf ConsumerConf + wg sync.WaitGroup + consumer *confluent.Consumer + Partitions []int32 + currentOffsets map[int32]*int64 + bootTimeOffsets map[int32]int64 + partitionOffset map[int32]*stats.Gauge64 + partitionLogSize map[int32]*stats.Gauge64 + partitionLag map[int32]*stats.Gauge64 + LagMonitor *LagMonitor + stopChan chan struct{} +} + +type ConsumerConf struct { + ClientID string + Broker string + Partitions string + StartAtOffset string + GaugePrefix string + Topics []string + MessageHandler func([]byte, int32) + BatchNumMessages int + BufferMaxMs int + ChannelBufferSize int + FetchMin int + NetMaxOpenRequests int + MaxWaitMs int + SessionTimeout int + 
MetadataRetries int + MetadataBackoffTime int + MetadataTimeout int + OffsetCommitInterval time.Duration +} + +func NewConfig() *ConsumerConf { + return &ConsumerConf{ + GaugePrefix: "default.kafka.partition", + BatchNumMessages: 10000, + BufferMaxMs: 100, + ChannelBufferSize: 1000000, + FetchMin: 1, + NetMaxOpenRequests: 100, + MaxWaitMs: 100, + SessionTimeout: 30000, + MetadataRetries: 5, + MetadataBackoffTime: 500, + MetadataTimeout: 10000, + OffsetCommitInterval: time.Second * 5, + } +} + +func NewConsumer(conf *ConsumerConf) (*Consumer, error) { + if len(conf.Topics) < 1 { + return nil, fmt.Errorf("kafka-consumer: Requiring at least 1 topic") + } + + consumer, err := confluent.NewConsumer(&confluent.ConfigMap{ + "client.id": conf.ClientID, + "bootstrap.servers": conf.Broker, + "compression.codec": "snappy", + "group.id": uuid.NewV4().String(), + "fetch.min.bytes": conf.FetchMin, + "fetch.wait.max.ms": conf.MaxWaitMs, + "max.in.flight.requests.per.connection": conf.NetMaxOpenRequests, + "queue.buffering.max.messages": conf.ChannelBufferSize, + "retries": 10, + "session.timeout.ms": conf.SessionTimeout, + "queue.buffering.max.ms": conf.BufferMaxMs, + "batch.num.messages": conf.BatchNumMessages, + "enable.partition.eof": false, + "enable.auto.offset.store": false, + "enable.auto.commit": false, + "go.events.channel.enable": true, + "go.application.rebalance.enable": true, + }) + if err != nil { + return nil, err + } + + c := Consumer{ + conf: *conf, + consumer: consumer, + currentOffsets: make(map[int32]*int64), + bootTimeOffsets: make(map[int32]int64), + partitionOffset: make(map[int32]*stats.Gauge64), + partitionLogSize: make(map[int32]*stats.Gauge64), + partitionLag: make(map[int32]*stats.Gauge64), + stopChan: make(chan struct{}), + } + + availParts, err := GetPartitions(c.consumer, c.conf.Topics, c.conf.MetadataRetries, c.conf.MetadataBackoffTime, c.conf.MetadataTimeout) + if err != nil { + return nil, err + } + + log.Info("kafka-consumer: Available 
partitions %v", availParts) + if c.conf.Partitions == "*" { + c.Partitions = availParts + } else { + parts := strings.Split(c.conf.Partitions, ",") + for _, part := range parts { + i, err := strconv.Atoi(part) + if err != nil { + return nil, fmt.Errorf("Could not parse partition %q. partitions must be '*' or a comma separated list of id's", part) + } + c.Partitions = append(c.Partitions, int32(i)) + } + missing := DiffPartitions(c.Partitions, availParts) + if len(missing) > 0 { + return nil, fmt.Errorf("Configured partitions not in list of available partitions. Missing %v", missing) + } + } + + for _, part := range c.Partitions { + _, offset, err := c.consumer.QueryWatermarkOffsets(c.conf.Topics[0], part, c.conf.MetadataTimeout) + if err != nil { + return nil, fmt.Errorf("Failed to get newest offset for topic %s part %d: %s", c.conf.Topics[0], part, err) + } + c.bootTimeOffsets[part] = offset + c.partitionOffset[part] = stats.NewGauge64(fmt.Sprintf("%s.%d.offset", c.conf.GaugePrefix, part)) + c.partitionLogSize[part] = stats.NewGauge64(fmt.Sprintf("%s.%d.log_size", c.conf.GaugePrefix, part)) + c.partitionLag[part] = stats.NewGauge64(fmt.Sprintf("%s.%d.lag", c.conf.GaugePrefix, part)) + } + + return &c, nil +} + +// Creates a lag monitor for the given size +// This needs to be called before Start() or StartAndAwaitBacklog() to prevent +// race conditions between initializing the lag monitor and setting lag values +func (c *Consumer) InitLagMonitor(size int) { + c.LagMonitor = NewLagMonitor(size, c.Partitions) +} + +func (c *Consumer) Start(processBacklog *sync.WaitGroup) error { + err := c.startConsumer() + if err != nil { + return fmt.Errorf("Failed to start consumer: %s", err) + } + + go c.monitorLag(processBacklog) + + for range c.Partitions { + go c.consume() + } + + return nil +} + +func (c *Consumer) StartAndAwaitBacklog(backlogProcessTimeout time.Duration) error { + pre := time.Now() + processBacklog := new(sync.WaitGroup) + 
processBacklog.Add(len(c.Partitions)) + + err := c.Start(processBacklog) + if err != nil { + return err + } + + // wait for our backlog to be processed before returning. This will block metrictank from consuming metrics until + // we have processed old metricPersist messages. The end result is that we wont overwrite chunks in cassandra that + // have already been previously written. + // We don't wait more than backlogProcessTimeout for the backlog to be processed. + log.Info("kafka-consumer: Waiting for metricPersist backlog to be processed.") + backlogProcessed := make(chan struct{}, 1) + go func() { + processBacklog.Wait() + backlogProcessed <- struct{}{} + }() + + select { + case <-time.After(backlogProcessTimeout): + log.Warn("kafka-consumer: Processing metricPersist backlog has taken too long, giving up lock after %s.", backlogProcessTimeout) + case <-backlogProcessed: + log.Info("kafka-consumer: MetricPersist backlog processed in %s.", time.Since(pre)) + } + + return nil +} + +func (c *Consumer) consume() { + c.wg.Add(1) + defer c.wg.Done() + + var ok bool + var offsetPtr *int64 + events := c.consumer.Events() + for { + select { + case ev := <-events: + switch e := ev.(type) { + case confluent.AssignedPartitions: + c.consumer.Assign(e.Partitions) + log.Info("kafka-consumer: Assigned partitions: %+v", e) + case confluent.RevokedPartitions: + c.consumer.Unassign() + log.Info("kafka-consumer: Revoked partitions: %+v", e) + case *confluent.Message: + tp := e.TopicPartition + if LogLevel < 2 { + log.Debug("kafka-consumer: Received message: Topic %s, Partition: %d, Offset: %d, Key: %x", tp.Topic, tp.Partition, tp.Offset, e.Key) + } + + if offsetPtr, ok = c.currentOffsets[tp.Partition]; !ok || offsetPtr == nil { + log.Error(3, "kafka-consumer: Received message of unexpected partition: %s:%d", tp.Topic, tp.Partition) + continue + } + + c.conf.MessageHandler(e.Value, tp.Partition) + atomic.StoreInt64(offsetPtr, int64(tp.Offset)) + case *confluent.Error: + 
log.Error(3, "kafka-consumer: Kafka consumer error: %s", e.String()) + return + } + case <-c.stopChan: + log.Info("kafka-consumer: Consumer ended.") + return + } + } +} + +func (c *Consumer) monitorLag(processBacklog *sync.WaitGroup) { + c.wg.Add(1) + defer c.wg.Done() + + completed := make(map[int32]bool, len(c.Partitions)) + for _, partition := range c.Partitions { + completed[partition] = false + } + + storeOffsets := func(ts time.Time) { + for partition := range c.currentOffsets { + offset := atomic.LoadInt64(c.currentOffsets[partition]) + c.partitionOffset[partition].Set(int(offset)) + if c.LagMonitor != nil { + c.LagMonitor.StoreOffset(partition, offset, ts) + } + if !completed[partition] && offset >= c.bootTimeOffsets[partition]-1 { + if processBacklog != nil { + processBacklog.Done() + } + completed[partition] = true + delete(c.bootTimeOffsets, partition) + if len(c.bootTimeOffsets) == 0 { + c.bootTimeOffsets = nil + } + } + + _, newest, err := c.consumer.QueryWatermarkOffsets(c.conf.Topics[0], partition, c.conf.MetadataTimeout) + if err != nil { + log.Error(3, "kafka-consumer: Error when querying for offsets: %s", err) + } else { + c.partitionLogSize[partition].Set(int(newest)) + } + + if err == nil { + lag := int(newest - offset) + c.partitionLag[partition].Set(lag) + if c.LagMonitor != nil { + c.LagMonitor.StoreLag(partition, lag) + } + } + } + } + + ticker := time.NewTicker(c.conf.OffsetCommitInterval) + for { + select { + case ts := <-ticker.C: + storeOffsets(ts) + case <-c.stopChan: + storeOffsets(time.Now()) + return + } + } +} + +func (c *Consumer) startConsumer() error { + var offset confluent.Offset + var err error + var topicPartitions confluent.TopicPartitions + c.currentOffsets = make(map[int32]*int64, len(c.Partitions)) + + for i, topic := range c.conf.Topics { + for _, partition := range c.Partitions { + var currentOffset int64 + switch c.conf.StartAtOffset { + case "oldest": + currentOffset, err = c.tryGetOffset(topic, partition, 
int64(confluent.OffsetBeginning), 3, time.Second) + if err != nil { + return err + } + case "newest": + currentOffset, err = c.tryGetOffset(topic, partition, int64(confluent.OffsetEnd), 3, time.Second) + if err != nil { + return err + } + default: + offsetDuration, err := time.ParseDuration(c.conf.StartAtOffset) + if err != nil { + return fmt.Errorf("invalid offset format %s: %s", c.conf.StartAtOffset, err) + } + currentOffset = time.Now().Add(-1*offsetDuration).UnixNano() / int64(time.Millisecond) + currentOffset, err = c.tryGetOffset(topic, partition, currentOffset, 3, time.Second) + if err != nil { + log.Warn("kafka-consumer: Failed to get specified offset %s, falling back to \"oldest\"", c.conf.StartAtOffset) + currentOffset, err = c.tryGetOffset(topic, partition, int64(confluent.OffsetBeginning), 3, time.Second) + if err != nil { + return err + } + } + } + + offset, err = confluent.NewOffset(currentOffset) + if err != nil { + return err + } + + topicPartitions = append(topicPartitions, confluent.TopicPartition{ + Topic: &topic, + Partition: partition, + Offset: offset, + }) + + if i == 0 { + c.currentOffsets[partition] = &currentOffset + } + } + } + + return c.consumer.Assign(topicPartitions) +} + +func (c *Consumer) tryGetOffset(topic string, partition int32, offsetI int64, attempts int, sleep time.Duration) (int64, error) { + offset, err := confluent.NewOffset(offsetI) + if err != nil { + return 0, err + } + + var beginning, end int64 + + attempt := 1 + for { + if offset == confluent.OffsetBeginning || offset == confluent.OffsetEnd { + beginning, end, err = c.consumer.QueryWatermarkOffsets(topic, partition, c.conf.MetadataTimeout) + if err == nil { + if offset == confluent.OffsetBeginning { + return beginning, nil + } else { + return end, nil + } + } + } else { + times := []confluent.TopicPartition{{Topic: &topic, Partition: partition, Offset: offset}} + times, err = c.consumer.OffsetsForTimes(times, c.conf.MetadataTimeout) + if err != nil { + log.Error(3,
"kafka-consumer: Failed to get offset: %s", err) + } else if len(times) == 0 { + log.Info("kafka-consumer: Falling back to oldest because no offsets were returned") + offset = confluent.OffsetBeginning + } else { + return int64(times[0].Offset), nil + } + } + + if attempt >= attempts { + break + } + + log.Warn("kafka-consumer: Error when querying offsets, %d retries left: %s", attempts-attempt, err) + attempt += 1 + time.Sleep(sleep) + } + + return 0, fmt.Errorf("Failed to get offset %s of partition %s:%d. %s (attempt %d/%d)", offset.String(), topic, partition, err, attempt, attempts) +} + +func (c *Consumer) Stop() { + close(c.stopChan) + c.wg.Wait() + c.consumer.Close() +} diff --git a/input/kafkamdm/lag_monitor.go b/kafka/lag_monitor.go similarity index 99% rename from input/kafkamdm/lag_monitor.go rename to kafka/lag_monitor.go index 2238be6fb6..27ba3eb310 100644 --- a/input/kafkamdm/lag_monitor.go +++ b/kafka/lag_monitor.go @@ -1,4 +1,4 @@ -package kafkamdm +package kafka import ( "sync" diff --git a/input/kafkamdm/lag_monitor_test.go b/kafka/lag_monitor_test.go similarity index 99% rename from input/kafkamdm/lag_monitor_test.go rename to kafka/lag_monitor_test.go index d36ad3e83a..fd26822f39 100644 --- a/input/kafkamdm/lag_monitor_test.go +++ b/kafka/lag_monitor_test.go @@ -1,4 +1,4 @@ -package kafkamdm +package kafka import ( "testing" diff --git a/kafka/offsetMgr.go b/kafka/offsetMgr.go deleted file mode 100644 index 9c6eee4c92..0000000000 --- a/kafka/offsetMgr.go +++ /dev/null @@ -1,131 +0,0 @@ -package kafka - -import ( - "bytes" - "encoding/binary" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/raintank/worldping-api/pkg/log" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" -) - -var ( - managers map[string]*OffsetMgr - mu sync.Mutex -) - -func init() { - managers = make(map[string]*OffsetMgr) -} - -type OffsetMgr struct { - path
string - db *leveldb.DB - sync.Mutex - users int -} - -// Returns an OffsetMgr using a leveldb database in the passed directory. -// directory can be empty for working dir, or any relative or absolute path. -// If there is already a OffsetMgr open using the same dir, then it is -// returned instead of creating a new one. -func NewOffsetMgr(dir string) (*OffsetMgr, error) { - - // note that dir can be anything like '' (working dir), ., .., ./., ./.. etc - dbFile := filepath.Join(dir, "partitionOffsets.db") - - //make sure the needed directory exists. - err := os.MkdirAll(filepath.Base(dbFile), 0755) - if err != nil { - return nil, err - } - - //check if this db is already opened. - mu.Lock() - defer mu.Unlock() - if mgr, ok := managers[dbFile]; ok { - mgr.Open() - return mgr, nil - } - - db, err := leveldb.OpenFile(dbFile, &opt.Options{}) - if err != nil { - if _, ok := err.(*storage.ErrCorrupted); ok { - log.Warn("partitionOffsets.db is corrupt. Recovering.") - db, err = leveldb.RecoverFile(dbFile, &opt.Options{}) - if err != nil { - return nil, err - } - } else { - return nil, err - } - } - log.Info("Opened %s", dbFile) - mgr := &OffsetMgr{ - path: dbFile, - db: db, - users: 1, - } - managers[dbFile] = mgr - return mgr, nil -} - -func (o *OffsetMgr) Open() { - o.Lock() - o.users++ - o.Unlock() -} - -func (o *OffsetMgr) Close() { - // acquire the package lock to prevent a possible deadlock if - // NewOffsetMgr is called at the same time. 
- mu.Lock() - - o.Lock() - o.users-- - if o.users == 0 { - log.Info("Closing partitionsOffset DB.") - o.db.Close() - - // remove the mgr from the registry - delete(managers, o.path) - } - o.Unlock() - mu.Unlock() -} - -func (o *OffsetMgr) Commit(topic string, partition int32, offset int64) error { - key := new(bytes.Buffer) - key.WriteString(fmt.Sprintf("T:%s-P:%d", topic, partition)) - data := new(bytes.Buffer) - if err := binary.Write(data, binary.LittleEndian, offset); err != nil { - return err - } - log.Debug("committing offset %d for %s:%d to partitionsOffset.db", offset, topic, partition) - return o.db.Put(key.Bytes(), data.Bytes(), &opt.WriteOptions{Sync: true}) -} - -func (o *OffsetMgr) Last(topic string, partition int32) (int64, error) { - key := new(bytes.Buffer) - key.WriteString(fmt.Sprintf("T:%s-P:%d", topic, partition)) - data, err := o.db.Get(key.Bytes(), nil) - if err != nil { - if err == leveldb.ErrNotFound { - log.Debug("no offset recorded for %s:%d", topic, partition) - return -1, nil - } - return 0, err - } - var offset int64 - err = binary.Read(bytes.NewBuffer(data), binary.LittleEndian, &offset) - if err != nil { - return 0, err - } - log.Debug("found saved offset %d for %s:%d", offset, topic, partition) - return offset, nil -} diff --git a/kafka/partitions.go b/kafka/partitions.go index f99d43c1b1..7eaf9a174b 100644 --- a/kafka/partitions.go +++ b/kafka/partitions.go @@ -2,8 +2,10 @@ package kafka import ( "fmt" + "time" - "github.com/Shopify/sarama" + confluent "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/raintank/worldping-api/pkg/log" ) // returns elements that are in a but not in b @@ -21,25 +23,45 @@ Iter: return diff } -func GetPartitions(client sarama.Client, topics []string) ([]int32, error) { - partitionCount := 0 - partitions := make([]int32, 0) - var err error +func GetPartitions(client *confluent.Consumer, topics []string, retries, backoff, timeout int) ([]int32, error) { + var 
partitions []int32 for i, topic := range topics { - partitions, err = client.Partitions(topic) - if err != nil { - return nil, fmt.Errorf("Failed to get partitions for topic %s. %s", topic, err) - } - if len(partitions) == 0 { - return nil, fmt.Errorf("No partitions returned for topic %s", topic) - } - if i > 0 { - if len(partitions) != partitionCount { - return nil, fmt.Errorf("Configured topics have different partition counts, this is not supported") + for retry := retries; retry > 0; retry-- { + metadata, err := client.GetMetadata(&topic, false, timeout) + if err != nil { + log.Warn("kafka: failed to get metadata from kafka client. %s, %d retries", err, retry) + time.Sleep(time.Duration(backoff) * time.Millisecond) + continue + } + + // if kafka's auto.create.topics is enabled (default) then a topic will get created with the default + // settings after our first GetMetadata call for it. But because the topic creation can take a moment + // we'll need to retry a fraction of a second later in order to actually get the according metadata. 
+ tm, ok := metadata.Topics[topic] + if !ok || tm.Error.Code() == confluent.ErrUnknownTopic { + log.Warn("kafka: unknown topic %s, %d retries", topic, retry) + time.Sleep(time.Duration(backoff) * time.Millisecond) + continue + } + if len(tm.Partitions) == 0 { + log.Warn("kafka: 0 partitions returned for %s, %d retries left, %d backoffMs", topic, retry, backoff) + time.Sleep(time.Duration(backoff) * time.Millisecond) + continue + } + + if i == 0 { + partitions = make([]int32, 0, len(tm.Partitions)) + for _, partitionMetadata := range tm.Partitions { + partitions = append(partitions, partitionMetadata.ID) + } + } else { + if len(tm.Partitions) != len(partitions) { + return nil, fmt.Errorf("Configured topics have different partition counts, this is not supported") + } } - continue } - partitionCount = len(partitions) } + + log.Info("kafka: partitions for topics %+v: %+v", topics, partitions) return partitions, nil } diff --git a/kafka/utils.go b/kafka/utils.go new file mode 100644 index 0000000000..dd89cbd119 --- /dev/null +++ b/kafka/utils.go @@ -0,0 +1,25 @@ +package kafka + +import ( + confluent "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/twinj/uuid" +) + +// according to this we need to generate a uuid to set as group.id: +// https://github.com/edenhill/librdkafka/issues/1210 + +func GetConfig(broker, compression string, batchNumMessages, bufferMaxMs, bufferSize, fetchMin, maxOpenRequests, maxWait, sessionTimeout int) *confluent.ConfigMap { + return &confluent.ConfigMap{ + "bootstrap.servers": broker, + "compression.codec": "snappy", + "group.id": uuid.NewV4().String(), + "fetch.min.bytes": fetchMin, + "fetch.wait.max.ms": maxWait, + "max.in.flight.requests.per.connection": maxOpenRequests, + "queue.buffering.max.messages": bufferSize, + "retries": 10, + "session.timeout.ms": sessionTimeout, + "queue.buffering.max.ms": bufferMaxMs, + "batch.num.messages": batchNumMessages, + } +} diff --git a/mdata/notifierKafka/cfg.go 
b/mdata/notifierKafka/cfg.go index b45f17b00f..1952a96d85 100644 --- a/mdata/notifierKafka/cfg.go +++ b/mdata/notifierKafka/cfg.go @@ -2,38 +2,20 @@ package notifierKafka import ( "flag" - "fmt" - "strconv" - "strings" "time" - "github.com/Shopify/sarama" - part "github.com/grafana/metrictank/cluster/partitioner" "github.com/grafana/metrictank/kafka" "github.com/grafana/metrictank/stats" "github.com/raintank/worldping-api/pkg/log" "github.com/rakyll/globalconf" ) +var consumerConf *kafka.ConsumerConf var Enabled bool -var brokerStr string -var brokers []string -var topic string -var offsetStr string -var dataDir string -var config *sarama.Config -var offsetDuration time.Duration -var offsetCommitInterval time.Duration -var partitionStr string -var partitions []int32 -var partitioner *part.Kafka -var partitionScheme string -var bootTimeOffsets map[int32]int64 -var backlogProcessTimeout time.Duration var backlogProcessTimeoutStr string -var partitionOffset map[int32]*stats.Gauge64 -var partitionLogSize map[int32]*stats.Gauge64 -var partitionLag map[int32]*stats.Gauge64 +var backlogProcessTimeout time.Duration +var partitionScheme string +var topic string // metric cluster.notifier.kafka.messages-published is a counter of messages published to the kafka cluster notifier var messagesPublished = stats.NewCounter32("cluster.notifier.kafka.messages-published") @@ -42,106 +24,45 @@ var messagesPublished = stats.NewCounter32("cluster.notifier.kafka.messages-publ var messagesSize = stats.NewMeter32("cluster.notifier.kafka.message_size", false) func init() { + consumerConf = kafka.NewConfig() fs := flag.NewFlagSet("kafka-cluster", flag.ExitOnError) fs.BoolVar(&Enabled, "enabled", false, "") - fs.StringVar(&brokerStr, "brokers", "kafka:9092", "tcp address for kafka (may be given multiple times as comma separated list)") - fs.StringVar(&topic, "topic", "metricpersist", "kafka topic") - fs.StringVar(&partitionStr, "partitions", 
"*", "kafka partitions to consume. use '*' or a comma separated list of id's. This should match the partitions used for kafka-mdm-in") - fs.StringVar(&partitionScheme, "partition-scheme", "bySeries", "method used for partitioning metrics. This should match the settings of tsdb-gw. (byOrg|bySeries)") - fs.StringVar(&offsetStr, "offset", "last", "Set the offset to start consuming from. Can be one of newest, oldest,last or a time duration") - fs.StringVar(&dataDir, "data-dir", "", "Directory to store partition offsets index") - fs.DurationVar(&offsetCommitInterval, "offset-commit-interval", time.Second*5, "Interval at which offsets should be saved.") + fs.DurationVar(&consumerConf.OffsetCommitInterval, "offset-commit-interval", time.Second*5, "Interval at which offsets should be saved.") + fs.IntVar(&consumerConf.BatchNumMessages, "batch-num-messages", 10000, "Maximum number of messages batched in one MessageSet") + fs.IntVar(&consumerConf.BufferMaxMs, "metrics-buffer-max-ms", 100, "Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers") + fs.IntVar(&consumerConf.ChannelBufferSize, "channel-buffer-size", 1000000, "Maximum number of messages allowed on the producer queue") + fs.IntVar(&consumerConf.FetchMin, "consumer-fetch-min", 1, "Minimum number of bytes the broker responds with. 
If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting") + fs.IntVar(&consumerConf.MaxWaitMs, "consumer-max-wait-ms", 100, "Maximum time the broker may wait to fill the response with fetch.min.bytes") + fs.IntVar(&consumerConf.MetadataBackoffTime, "metadata-backoff-time", 500, "Time to wait between attempts to fetch metadata in ms") + fs.IntVar(&consumerConf.MetadataRetries, "metadata-retries", 5, "Number of retries to fetch metadata in case of failure") + fs.IntVar(&consumerConf.MetadataTimeout, "consumer-metadata-timeout-ms", 10000, "Maximum time to wait for the broker to send its metadata in ms") + fs.IntVar(&consumerConf.NetMaxOpenRequests, "net-max-open-requests", 100, "Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests.") + fs.IntVar(&consumerConf.SessionTimeout, "consumer-session-timeout", 30000, "Client group session and failure detection timeout in ms") fs.StringVar(&backlogProcessTimeoutStr, "backlog-process-timeout", "60s", "Maximum time backlog processing can block during metrictank startup.") + fs.StringVar(&consumerConf.Broker, "brokers", "kafka:9092", "tcp address for kafka (may be given multiple times as comma separated list)") + fs.StringVar(&consumerConf.StartAtOffset, "offset", "oldest", "Set the offset to start consuming from. Can be one of newest, oldest or a time duration") + fs.StringVar(&partitionScheme, "partition-scheme", "bySeries", "method used for partitioning metrics. This should match the settings of tsdb-gw. (byOrg|bySeries)") + fs.StringVar(&consumerConf.Partitions, "partitions", "*", "kafka partitions to consume. use '*' or a comma separated list of id's. 
This should match the partitions used for kafka-mdm-in") + fs.StringVar(&topic, "topic", "metricpersist", "kafka topic") globalconf.Register("kafka-cluster", fs) } -func ConfigProcess(instance string) { +func ConfigProcess() { if !Enabled { return } - var err error - switch offsetStr { - case "last": - case "oldest": - case "newest": - default: - offsetDuration, err = time.ParseDuration(offsetStr) - if err != nil { - log.Fatal(4, "kafka-cluster: invalid offest format. %s", err) - } + + if consumerConf.OffsetCommitInterval == 0 { + log.Fatal(4, "kafkamdm: offset-commit-interval must be greater than 0") } - brokers = strings.Split(brokerStr, ",") - config = sarama.NewConfig() - config.ClientID = instance + "-cluster" - config.Version = sarama.V0_10_0_0 - config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message - config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message - config.Producer.Compression = sarama.CompressionSnappy - config.Producer.Return.Successes = true - err = config.Validate() - if err != nil { - log.Fatal(2, "kafka-cluster invalid consumer config: %s", err) + if consumerConf.MaxWaitMs == 0 { + log.Fatal(4, "kafkamdm: consumer-max-wait-ms must be greater than 0") } + var err error backlogProcessTimeout, err = time.ParseDuration(backlogProcessTimeoutStr) if err != nil { log.Fatal(4, "kafka-cluster: unable to parse backlog-process-timeout. %s", err) } - - partitioner, err = part.NewKafka(partitionScheme) - if err != nil { - log.Fatal(4, "kafka-cluster: failed to initialize partitioner. %s", err) - } - - if partitionStr != "*" { - parts := strings.Split(partitionStr, ",") - for _, part := range parts { - i, err := strconv.Atoi(part) - if err != nil { - log.Fatal(4, "kafka-cluster: could not parse partition %q. 
partitions must be '*' or a comma separated list of id's", part) - } - partitions = append(partitions, int32(i)) - } - } - // validate our partitions - client, err := sarama.NewClient(brokers, config) - if err != nil { - log.Fatal(4, "kafka-cluster failed to create client. %s", err) - } - defer client.Close() - - availParts, err := kafka.GetPartitions(client, []string{topic}) - if err != nil { - log.Fatal(4, "kafka-cluster: %s", err.Error()) - } - if partitionStr == "*" { - partitions = availParts - } else { - missing := kafka.DiffPartitions(partitions, availParts) - if len(missing) > 0 { - log.Fatal(4, "kafka-cluster: configured partitions not in list of available partitions. missing %v", missing) - } - } - - // initialize our offset metrics - partitionOffset = make(map[int32]*stats.Gauge64) - partitionLogSize = make(map[int32]*stats.Gauge64) - partitionLag = make(map[int32]*stats.Gauge64) - - // get the "newest" offset for all partitions. - // when booting up, we will delay consuming metrics until we have - // caught up to these offsets. 
- bootTimeOffsets = make(map[int32]int64) - for _, part := range partitions { - offset, err := client.GetOffset(topic, part, sarama.OffsetNewest) - if err != nil { - log.Fatal(4, "kakfa-cluster: failed to get newest offset for topic %s part %d: %s", topic, part, err) - } - bootTimeOffsets[part] = offset - partitionOffset[part] = stats.NewGauge64(fmt.Sprintf("cluster.notifier.kafka.partition.%d.offset", part)) - partitionLogSize[part] = stats.NewGauge64(fmt.Sprintf("cluster.notifier.kafka.partition.%d.log_size", part)) - partitionLag[part] = stats.NewGauge64(fmt.Sprintf("cluster.notifier.kafka.partition.%d.lag", part)) - } - log.Info("kafka-cluster: consuming from partitions %v", partitions) } diff --git a/mdata/notifierKafka/notifierKafka.go b/mdata/notifierKafka/notifierKafka.go index a7a5013513..88b74c278c 100644 --- a/mdata/notifierKafka/notifierKafka.go +++ b/mdata/notifierKafka/notifierKafka.go @@ -4,202 +4,85 @@ import ( "bytes" "encoding/binary" "encoding/json" + "hash/fnv" "sync" "time" - schema "gopkg.in/raintank/schema.v1" - - "github.com/Shopify/sarama" + confluent "github.com/confluentinc/confluent-kafka-go/kafka" + part "github.com/grafana/metrictank/cluster/partitioner" "github.com/grafana/metrictank/idx" "github.com/grafana/metrictank/kafka" "github.com/grafana/metrictank/mdata" "github.com/grafana/metrictank/util" "github.com/raintank/worldping-api/pkg/log" + schema "gopkg.in/raintank/schema.v1" ) type NotifierKafka struct { - instance string - in chan mdata.SavedChunk - buf []mdata.SavedChunk - wg sync.WaitGroup - idx idx.MetricIndex - metrics mdata.Metrics - bPool *util.BufferPool - client sarama.Client - consumer sarama.Consumer - producer sarama.SyncProducer - offsetMgr *kafka.OffsetMgr - StopChan chan int - - // signal to PartitionConsumers to shutdown - stopConsuming chan struct{} + instance string + in chan mdata.SavedChunk + buf []mdata.SavedChunk + wg sync.WaitGroup + bPool 
*util.BufferPool + idx idx.MetricIndex + metrics mdata.Metrics + partitioner *part.Kafka + consumer *kafka.Consumer + producer *confluent.Producer + stopChan chan struct{} } func New(instance string, metrics mdata.Metrics, idx idx.MetricIndex) *NotifierKafka { - client, err := sarama.NewClient(brokers, config) - if err != nil { - log.Fatal(2, "kafka-cluster failed to start client: %s", err) - } - consumer, err := sarama.NewConsumerFromClient(client) - if err != nil { - log.Fatal(2, "kafka-cluster failed to initialize consumer: %s", err) - } - log.Info("kafka-cluster consumer initialized without error") + producer, err := confluent.NewProducer(kafka.GetConfig(consumerConf.Broker, "snappy", consumerConf.BatchNumMessages, consumerConf.BufferMaxMs, consumerConf.ChannelBufferSize, consumerConf.FetchMin, consumerConf.NetMaxOpenRequests, consumerConf.MaxWaitMs, consumerConf.SessionTimeout)) - producer, err := sarama.NewSyncProducerFromClient(client) if err != nil { log.Fatal(2, "kafka-cluster failed to initialize producer: %s", err) } - offsetMgr, err := kafka.NewOffsetMgr(dataDir) - if err != nil { - log.Fatal(2, "kafka-cluster couldnt create offsetMgr. 
%s", err) - } - c := NotifierKafka{ - instance: instance, - in: make(chan mdata.SavedChunk), - idx: idx, - metrics: metrics, - bPool: util.NewBufferPool(), - client: client, - consumer: consumer, - producer: producer, - offsetMgr: offsetMgr, - - StopChan: make(chan int), - stopConsuming: make(chan struct{}), + instance: instance, + in: make(chan mdata.SavedChunk), + bPool: util.NewBufferPool(), + producer: producer, + metrics: metrics, + idx: idx, + stopChan: make(chan struct{}), } - c.start() - go c.produce() - return &c -} + consumerConf.ClientID = instance + "-notifier" + consumerConf.GaugePrefix = "cluster.notifier.kafka.partition" + consumerConf.Topics = []string{topic} + consumerConf.MessageHandler = c.handleMessage -func (c *NotifierKafka) start() { - var err error - pre := time.Now() - processBacklog := new(sync.WaitGroup) - for _, partition := range partitions { - var offset int64 - switch offsetStr { - case "oldest": - offset = -2 - case "newest": - offset = -1 - case "last": - offset, err = c.offsetMgr.Last(topic, partition) - if err != nil { - log.Fatal(4, "kafka-cluster: Failed to get %q duration offset for %s:%d. %q", offsetStr, topic, partition, err) - } - default: - offset, err = c.client.GetOffset(topic, partition, time.Now().Add(-1*offsetDuration).UnixNano()/int64(time.Millisecond)) - if err != nil { - offset = sarama.OffsetOldest - log.Warn("kafka-cluster failed to get offset %s: %s -> will use oldest instead", offsetDuration, err) - } - } - partitionLogSize[partition].Set(int(bootTimeOffsets[partition])) - if offset >= 0 { - partitionOffset[partition].Set(int(offset)) - partitionLag[partition].Set(int(bootTimeOffsets[partition] - offset)) - } - processBacklog.Add(1) - go c.consumePartition(topic, partition, offset, processBacklog) + c.consumer, err = kafka.NewConsumer(consumerConf) + if err != nil { + log.Fatal(4, "kafka-cluster failed to initialize consumer: %s", err) } - // wait for our backlog to be processed before returning. 
This will block metrictank from consuming metrics until - // we have processed old metricPersist messages. The end result is that we wont overwrite chunks in cassandra that - // have already been previously written. - // We don't wait more than backlogProcessTimeout for the backlog to be processed. - log.Info("kafka-cluster: waiting for metricPersist backlog to be processed.") - backlogProcessed := make(chan struct{}, 1) - go func() { - processBacklog.Wait() - backlogProcessed <- struct{}{} - }() - select { - case <-time.After(backlogProcessTimeout): - log.Warn("kafka-cluster: Processing metricPersist backlog has taken too long, giving up lock after %s.", backlogProcessTimeout) - case <-backlogProcessed: - log.Info("kafka-cluster: metricPersist backlog processed in %s.", time.Since(pre)) + c.partitioner, err = part.NewKafka(partitionScheme) + if err != nil { + log.Fatal(4, "kafka-cluster: failed to initialize partitioner. %s", err) } -} - -func (c *NotifierKafka) consumePartition(topic string, partition int32, currentOffset int64, processBacklog *sync.WaitGroup) { - c.wg.Add(1) - defer c.wg.Done() - - pc, err := c.consumer.ConsumePartition(topic, partition, currentOffset) + err = c.consumer.StartAndAwaitBacklog(backlogProcessTimeout) if err != nil { - log.Fatal(4, "kafka-cluster: failed to start partitionConsumer for %s:%d. %s", topic, partition, err) + log.Fatal(4, "kafka-cluster: Failed to start consumer: %s", err) } - log.Info("kafka-cluster: consuming from %s:%d from offset %d", topic, partition, currentOffset) - messages := pc.Messages() - ticker := time.NewTicker(offsetCommitInterval) - startingUp := true - // the bootTimeOffset is the next available offset. There may not be a message with that - // offset yet, so we subtract 1 to get the highest offset that we can fetch. 
- bootTimeOffset := bootTimeOffsets[partition] - 1 - partitionOffsetMetric := partitionOffset[partition] - partitionLogSizeMetric := partitionLogSize[partition] - partitionLagMetric := partitionLag[partition] - for { - select { - case msg := <-messages: - if mdata.LogLevel < 2 { - log.Debug("kafka-cluster received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key) - } - mdata.Handle(c.metrics, msg.Value, c.idx) - currentOffset = msg.Offset - case <-ticker.C: - if err := c.offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-cluster failed to commit offset for %s:%d, %s", topic, partition, err) - } - if startingUp && currentOffset >= bootTimeOffset { - processBacklog.Done() - startingUp = false - } - offset, err := c.client.GetOffset(topic, partition, sarama.OffsetNewest) - if err != nil { - log.Error(3, "kafka-mdm failed to get log-size of partition %s:%d. %s", topic, partition, err) - } else { - partitionLogSizeMetric.Set(int(offset)) - } - if currentOffset < 0 { - // we have not yet consumed any messages. 
- continue - } - partitionOffsetMetric.Set(int(currentOffset)) - if err == nil { - partitionLagMetric.Set(int(offset - currentOffset)) - } - case <-c.stopConsuming: - pc.Close() - if err := c.offsetMgr.Commit(topic, partition, currentOffset); err != nil { - log.Error(3, "kafka-cluster failed to commit offset for %s:%d, %s", topic, partition, err) - } - log.Info("kafka-cluster consumer for %s:%d ended.", topic, partition) - return - } - } + go c.produce() + + return &c +} + +func (c *NotifierKafka) handleMessage(data []byte, partition int32) { + mdata.Handle(c.metrics, data, c.idx) } -// Stop will initiate a graceful stop of the Consumer (permanent) -// -// NOTE: receive on StopChan to block until this process completes func (c *NotifierKafka) Stop() { - // closes notifications and messages channels, amongst others - close(c.stopConsuming) + log.Info("kafka-notifier: stopping kafka input") c.producer.Close() - - go func() { - c.wg.Wait() - c.offsetMgr.Close() - close(c.StopChan) - }() + c.consumer.Stop() + close(c.stopChan) } func (c *NotifierKafka) Send(sc mdata.SavedChunk) { @@ -218,6 +101,8 @@ func (c *NotifierKafka) produce() { } case <-ticker.C: c.flush() + case <-c.stopChan: + return } } } @@ -228,9 +113,11 @@ func (c *NotifierKafka) flush() { return } + hasher := fnv.New32a() + // In order to correctly route the saveMessages to the correct partition, // we cant send them in batches anymore. - payload := make([]*sarama.ProducerMessage, 0, len(c.buf)) + payload := make([]*confluent.Message, 0, len(c.buf)) var pMsg mdata.PersistMessageBatch for i, msg := range c.buf { amkey, err := schema.AMKeyFromString(msg.Key) @@ -254,14 +141,27 @@ func (c *NotifierKafka) flush() { } messagesSize.Value(buf.Len()) key := c.bPool.Get() - key, err = partitioner.GetPartitionKey(&def, key) + key, err = c.partitioner.GetPartitionKey(&def, key) if err != nil { log.Fatal(4, "Unable to get partitionKey for metricDef with id %s. 
%s", def.Id, err) } - kafkaMsg := &sarama.ProducerMessage{ - Topic: topic, - Value: sarama.ByteEncoder(buf.Bytes()), - Key: sarama.ByteEncoder(key), + + hasher.Reset() + _, err = hasher.Write(key) + if err != nil { + log.Fatal(4, "Unable to write key %s to hasher: %s", key, err) + } + partition := int32(hasher.Sum32()) % int32(len(c.consumer.Partitions)) + if partition < 0 { + partition = -partition + } + + kafkaMsg := &confluent.Message{ + TopicPartition: confluent.TopicPartition{ + Topic: &topic, Partition: partition, + }, + Value: []byte(buf.Bytes()), + Key: []byte(key), } payload = append(payload, kafkaMsg) } @@ -270,21 +170,38 @@ func (c *NotifierKafka) flush() { go func() { log.Debug("kafka-cluster sending %d batch metricPersist messages", len(payload)) - sent := false - for !sent { - err := c.producer.SendMessages(payload) - if err != nil { - log.Warn("kafka-cluster publisher %s", err) - } else { - sent = true + producerCh := c.producer.ProduceChannel() + for _, msg := range payload { + producerCh <- msg + } + sent := 0 + + EVENTS: + for e := range c.producer.Events() { + switch ev := e.(type) { + case *confluent.Message: + if ev.TopicPartition.Error != nil { + log.Warn("Delivery failed (retrying): %v\n", ev.TopicPartition.Error) + time.Sleep(time.Second) + ev.TopicPartition.Error = nil + producerCh <- ev + } else { + sent++ + } + if sent == len(payload) { + break EVENTS + } + default: + log.Error(3, "Ignored unexpected event: %s\n", ev) } - time.Sleep(time.Second) } - messagesPublished.Add(len(payload)) + + messagesPublished.Add(sent) + // put our buffers back in the bufferPool for _, msg := range payload { - c.bPool.Put([]byte(msg.Key.(sarama.ByteEncoder))) - c.bPool.Put([]byte(msg.Value.(sarama.ByteEncoder))) + c.bPool.Put(msg.Key) + c.bPool.Put(msg.Value) } }() } diff --git a/metrictank.go b/metrictank.go index 061d0edc2f..373dc21477 100644 --- a/metrictank.go +++ b/metrictank.go @@ -27,6 +27,7 @@ import ( inCarbon 
"github.com/grafana/metrictank/input/carbon" inKafkaMdm "github.com/grafana/metrictank/input/kafkamdm" inPrometheus "github.com/grafana/metrictank/input/prometheus" + "github.com/grafana/metrictank/kafka" "github.com/grafana/metrictank/mdata" "github.com/grafana/metrictank/mdata/cache" "github.com/grafana/metrictank/mdata/notifierKafka" @@ -168,6 +169,7 @@ func main() { ***********************************/ mdata.LogLevel = logLevel memory.LogLevel = logLevel + kafka.LogLevel = logLevel inKafkaMdm.LogLevel = logLevel api.LogLevel = logLevel // workaround for https://github.com/grafana/grafana/issues/4055 @@ -220,7 +222,7 @@ func main() { inKafkaMdm.ConfigProcess(*instance) inPrometheus.ConfigProcess() notifierNsq.ConfigProcess() - notifierKafka.ConfigProcess(*instance) + notifierKafka.ConfigProcess() statsConfig.ConfigProcess(*instance) mdata.ConfigProcess() diff --git a/scripts/Dockerfile b/scripts/Dockerfile index 750ca1c44a..f797249503 100644 --- a/scripts/Dockerfile +++ b/scripts/Dockerfile @@ -1,18 +1,36 @@ -FROM alpine -MAINTAINER Dieter Plaetinck dieter@grafana.com +ARG BASE_PATH=/go/src/github.com/grafana/metrictank -RUN apk add -U tzdata +# build binaries inside an alpine3.7 container +FROM golang:1.9.3-alpine3.7 AS build +ARG BASE_PATH + +RUN apk --update add build-base linux-headers openssl-dev cyrus-sasl-dev git bash python + +COPY . 
$BASE_PATH/ +RUN $BASE_PATH/scripts/build.sh + + +# now build the production image based on the same alpine version +# as was used to build the binaries in +FROM alpine:3.7 +ARG BASE_PATH + +LABEL maintainer="Dieter Plaetinck dieter@grafana.com" + +RUN apk add -U ca-certificates openssl cyrus-sasl tzdata RUN mkdir -p /etc/metrictank -COPY config/metrictank-docker.ini /etc/metrictank/metrictank.ini -COPY config/storage-schemas.conf /etc/metrictank/storage-schemas.conf -COPY config/storage-aggregation.conf /etc/metrictank/storage-aggregation.conf +COPY scripts/config/metrictank-docker.ini /etc/metrictank/metrictank.ini +COPY scripts/config/storage-schemas.conf /etc/metrictank/storage-schemas.conf +COPY scripts/config/storage-aggregation.conf /etc/metrictank/storage-aggregation.conf -COPY build/* /usr/bin/ +# copy the built binaries from the build image +COPY --from=build $BASE_PATH/build/* /usr/bin/ -COPY util/wait_for_endpoint.sh /usr/bin/wait_for_endpoint.sh +COPY scripts/util/wait_for_endpoint.sh /usr/bin/wait_for_endpoint.sh EXPOSE 6060 ENTRYPOINT ["/usr/bin/wait_for_endpoint.sh"] CMD ["/usr/bin/metrictank", "-config=/etc/metrictank/metrictank.ini"] + diff --git a/scripts/build.sh b/scripts/build.sh index e78b88294b..b91eb79d90 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -1,7 +1,20 @@ #!/bin/bash + +set -x # Find the directory we exist within -DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -cd ${DIR}/.. +SCRIPTS_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +SOURCE_DIR=$SCRIPTS_DIR/.. +BUILD_DIR=$SOURCE_DIR/build +TMP_DIR=$(mktemp -d) + +cd $SOURCE_DIR + +if ! [ -d $PKG_CONFIG_PATH ] || [ -z $PKG_CONFIG_PATH ] +then + source scripts/build_deps.sh +else + echo "not building librdkafka" +fi # make sure CircleCI gets all tags properly. 
# see https://discuss.circleci.com/t/where-are-my-git-tags/2371 @@ -17,16 +30,16 @@ mkdir -p $BUILDDIR # Clean build bin dir rm -rf $BUILDDIR/* -# disable cgo -export CGO_ENABLED=0 +# enable cgo +export CGO_ENABLED=1 OUTPUT=$BUILDDIR/metrictank if [ "$1" == "-race" ] then set -x - CGO_ENABLED=1 go build -race -ldflags "-X main.gitHash=$GITVERSION" -o $OUTPUT + go build -tags static -race -ldflags "-X main.gitHash=$GITVERSION" -o $OUTPUT else set -x - go build -ldflags "-X main.gitHash=$GITVERSION" -o $OUTPUT + go build -tags static -ldflags "-X main.gitHash=$GITVERSION" -o $OUTPUT fi diff --git a/scripts/build_deps.sh b/scripts/build_deps.sh new file mode 100755 index 0000000000..cb3244abec --- /dev/null +++ b/scripts/build_deps.sh @@ -0,0 +1,43 @@ +# build librdkafka and export the according pkg config so it will be found + +if [ -z $TMP_DIR ] +then + TMP_DIR=$(mktemp -d) +else + if ! [ -d $TMP_DIR ] + then + mkdir -p $TMP_DIR + fi +fi + +SOURCE_DIR=$(dirname ${BASH_SOURCE[0]})/.. +LIB_RDKAFKA_DIR=$SOURCE_DIR/vendor/github.com/edenhill/librdkafka +cd $LIB_RDKAFKA_DIR +./configure --prefix=$TMP_DIR +make +make install +make clean + +export PKG_CONFIG_PATH=$TMP_DIR/lib/pkgconfig + +if [ -z $LD_LIBRARY_PATH ] +then + export LD_LIBRARY_PATH=$TMP_DIR/lib +else + export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$TMP_DIR/lib" +fi + +cd $OLDPWD + +# cleanup +rm \ + vendor/github.com/edenhill/librdkafka/Makefile.config \ + vendor/github.com/edenhill/librdkafka/config.cache \ + vendor/github.com/edenhill/librdkafka/config.h \ + vendor/github.com/edenhill/librdkafka/config.log \ + vendor/github.com/edenhill/librdkafka/config.log.old + +# this file gets modified when configuring the package, which then causes +# tests to fail because it differs from what's committed in git. so we check +# it out to not make the tests fail. 
+git checkout vendor/github.com/edenhill/librdkafka/CONFIGURATION.md diff --git a/scripts/build_docker.sh b/scripts/build_docker.sh index 658ea93234..a6646a569c 100755 --- a/scripts/build_docker.sh +++ b/scripts/build_docker.sh @@ -3,21 +3,16 @@ set -x # Find the directory we exist within DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) -cd ${DIR} +cd ${DIR}/.. VERSION=`git describe --abbrev=7` -# regular image -rm -rf build/* -mkdir -p build -cp ../build/* build/ - -docker build -t grafana/metrictank . +docker build -f scripts/Dockerfile -t grafana/metrictank . docker tag grafana/metrictank grafana/metrictank:latest docker tag grafana/metrictank grafana/metrictank:$VERSION # k8s image cd ${DIR}/k8s -docker build -t us.gcr.io/metrictank-gcr/metrictank . +docker build -f Dockerfile -t us.gcr.io/metrictank-gcr/metrictank . docker tag us.gcr.io/metrictank-gcr/metrictank us.gcr.io/metrictank-gcr/metrictank:latest docker tag us.gcr.io/metrictank-gcr/metrictank us.gcr.io/metrictank-gcr/metrictank:${VERSION} diff --git a/scripts/build_packages.sh b/scripts/build_packages.sh index 4f763492cc..6f7b881336 100755 --- a/scripts/build_packages.sh +++ b/scripts/build_packages.sh @@ -1,14 +1,12 @@ #!/bin/bash +set -e set -x -BASE=$(dirname $0) # points to scripts directory -CODE_DIR=$(readlink -e "$BASE/../") # project root -BUILD_ROOT=$CODE_DIR/build # should have all binaries already inside -BUILD_PKG=$CODE_DIR/build_pkg # will place packages here -BUILD_TMP=$CODE_DIR/build_tmp # used for temporary data used to construct the packages -mkdir $BUILD_TMP -mkdir $BUILD_PKG +BASE=$(dirname ${0}) # points to scripts directory +CODE_DIR=$(readlink -e "${BASE}/../") # project root +BUILD_PKG=${CODE_DIR}/build_pkg # will place packages here -sudo apt-get install rpm # to be able to make rpms +rm -rf ${BUILD_PKG} +mkdir ${BUILD_PKG} ARCH="$(uname -m)" VERSION=$(git describe --long --abbrev=7) @@ -16,119 +14,45 @@ VERSION=$(git describe --long --abbrev=7) ## debian wheezy ## 
-BUILD=${BUILD_TMP}/sysvinit -mkdir -p ${BUILD}/usr/bin -mkdir -p ${BUILD}/etc/metrictank -PKG=${BUILD_PKG}/sysvinit -mkdir -p ${PKG} - -cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini -cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ -cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ - -PACKAGE_NAME="${PKG}/metrictank-${VERSION}_${ARCH}.deb" -fpm -s dir -t deb \ - -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ - --deb-init ${BASE}/config/sysvinit/init.d/metrictank \ - --deb-default ${BASE}/config/sysvinit/default/metrictank \ - --replaces metric-tank --provides metric-tank \ - --conflicts metric-tank \ - --config-files /etc/metrictank/ \ - -C ${BUILD} -p ${PACKAGE_NAME} . - +BUILD_NAME="sysvinit" +PKG_NAME="metrictank-${VERSION}_${ARCH}.deb" +PKG="/tmp/${PKG_NAME}" +docker build --build-arg "PKG=${PKG}" --build-arg "ARCH=${ARCH}" --build-arg "VERSION=${VERSION}" -f scripts/build_packages/${BUILD_NAME}/Dockerfile -t ${BUILD_NAME}:build . 
+mkdir -p ${BUILD_PKG}/${BUILD_NAME} +docker run ${BUILD_NAME}:build cat ${PKG} > ${BUILD_PKG}/${BUILD_NAME}/${PKG_NAME} ## ubuntu 14.04 ## -BUILD=${BUILD_TMP}/upstart -mkdir -p ${BUILD}/usr/bin -mkdir -p ${BUILD}/etc/init -mkdir -p ${BUILD}/etc/metrictank -PKG=${BUILD_PKG}/upstart -mkdir -p ${PKG} - -cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini -cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ -cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ - -PACKAGE_NAME="${PKG}/metrictank-${VERSION}_${ARCH}.deb" -fpm -s dir -t deb \ - -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ - --deb-upstart ${BASE}/config/upstart/metrictank \ - --replaces metric-tank --provides metric-tank \ - --conflicts metric-tank \ - --config-files /etc/metrictank/ \ - -C ${BUILD} -p ${PACKAGE_NAME} . - +BUILD_NAME="upstart" +PKG_NAME="metrictank-${VERSION}_${ARCH}.deb" +PKG="/tmp/${PKG_NAME}" +docker build --build-arg "PKG=${PKG}" --build-arg "ARCH=${ARCH}" --build-arg "VERSION=${VERSION}" -f scripts/build_packages/${BUILD_NAME}/Dockerfile -t ${BUILD_NAME}:build . 
+mkdir -p ${BUILD_PKG}/${BUILD_NAME} +docker run ${BUILD_NAME}:build cat ${PKG} > ${BUILD_PKG}/${BUILD_NAME}/${PKG_NAME} ## ubuntu 16.04, Debian 8, CentOS 7 ## -BUILD=${BUILD_TMP}/systemd -mkdir -p ${BUILD}/usr/bin -mkdir -p ${BUILD}/lib/systemd/system/ -mkdir -p ${BUILD}/etc/metrictank -mkdir -p ${BUILD}/var/run/metrictank -PKG=${BUILD_PKG}/systemd -mkdir -p ${PKG} - -cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini -cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/systemd/metrictank.service $BUILD/lib/systemd/system/ -cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ - -PACKAGE_NAME="${PKG}/metrictank-${VERSION}_${ARCH}.deb" -fpm -s dir -t deb \ - -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ - --config-files /etc/metrictank/ \ - -m "Raintank Inc. " --vendor "grafana.com" \ - --license "Apache2.0" -C ${BUILD} -p ${PACKAGE_NAME} . - +BUILD_NAME="systemd" +PKG_NAME="metrictank-${VERSION}_${ARCH}.deb" +PKG="/tmp/${PKG_NAME}" +docker build --build-arg "PKG=${PKG}" --build-arg "ARCH=${ARCH}" --build-arg "VERSION=${VERSION}" -f scripts/build_packages/${BUILD_NAME}/Dockerfile -t ${BUILD_NAME}:build . 
+mkdir -p ${BUILD_PKG}/${BUILD_NAME} +docker run ${BUILD_NAME}:build cat ${PKG} > ${BUILD_PKG}/${BUILD_NAME}/${PKG_NAME} ## centos 7 ## -BUILD=${BUILD_TMP}/systemd-centos7 -mkdir -p ${BUILD}/usr/bin -mkdir -p ${BUILD}/lib/systemd/system/ -mkdir -p ${BUILD}/etc/metrictank -mkdir -p ${BUILD}/var/run/metrictank -PKG=${BUILD_PKG}/systemd-centos7 -mkdir -p ${PKG} - -cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini -cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/systemd/metrictank.service $BUILD/lib/systemd/system/ -cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ - -PACKAGE_NAME="${PKG}/metrictank-${VERSION}.el7.${ARCH}.rpm" -fpm -s dir -t rpm \ - -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ - --config-files /etc/metrictank/ \ - -m "Raintank Inc. " --vendor "grafana.com" \ - --license "Apache2.0" -C ${BUILD} -p ${PACKAGE_NAME} . - +BUILD_NAME="systemd-centos7" +PKG_NAME="metrictank-${VERSION}_${ARCH}.rpm" +PKG="/tmp/${PKG_NAME}" +docker build --build-arg "PKG=${PKG}" --build-arg "ARCH=${ARCH}" --build-arg "VERSION=${VERSION}" -f scripts/build_packages/${BUILD_NAME}/Dockerfile -t ${BUILD_NAME}:build . 
+mkdir -p ${BUILD_PKG}/${BUILD_NAME} +docker run ${BUILD_NAME}:build cat ${PKG} > ${BUILD_PKG}/${BUILD_NAME}/${PKG_NAME} ## CentOS 6 ## -BUILD=${BUILD_TMP}/upstart-0.6.5 -mkdir -p ${BUILD}/usr/bin -mkdir -p ${BUILD}/etc/init -mkdir -p ${BUILD}/etc/metrictank -PKG=${BUILD_PKG}/upstart-0.6.5 -mkdir -p ${PKG} - -cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini -cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ -cp ${BASE}/config/upstart-0.6.5/metrictank.conf $BUILD/etc/init -cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ - -PACKAGE_NAME="${PKG}/metrictank-${VERSION}.el6.${ARCH}.rpm" -fpm -s dir -t rpm \ - -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ - --replaces metric-tank --provides metric-tank \ - --conflicts metric-tank \ - --config-files /etc/metrictank/ \ - -C ${BUILD} -p ${PACKAGE_NAME} . +BUILD_NAME="upstart-0.6.5" +PKG_NAME="metrictank-${VERSION}_${ARCH}.rpm" +PKG="/tmp/${PKG_NAME}" +docker build --build-arg "PKG=${PKG}" --build-arg "ARCH=${ARCH}" --build-arg "VERSION=${VERSION}" -f scripts/build_packages/${BUILD_NAME}/Dockerfile -t ${BUILD_NAME}:build . +mkdir -p ${BUILD_PKG}/${BUILD_NAME} +docker run ${BUILD_NAME}:build cat ${PKG} > ${BUILD_PKG}/${BUILD_NAME}/${PKG_NAME} diff --git a/scripts/build_packages/systemd-centos7/Dockerfile b/scripts/build_packages/systemd-centos7/Dockerfile new file mode 100644 index 0000000000..7fb20e6ab3 --- /dev/null +++ b/scripts/build_packages/systemd-centos7/Dockerfile @@ -0,0 +1,17 @@ +FROM centos:7 + +ENV BASE_PATH=/go/src/github.com/grafana/metrictank + +ARG PKG +ARG VERSION +ARG ARCH +COPY . 
$BASE_PATH/ +RUN yum install -y git ruby rubygems ruby-devel openssl-devel cyrus-sasl-devel +RUN yum groupinstall -y 'Development Tools' +RUN gem install --no-ri --no-rdoc fpm +RUN $BASE_PATH/scripts/get_go.sh +ENV PATH="${PATH}:/usr/local/go/bin" +ENV GOPATH="/go" +RUN $BASE_PATH/scripts/build.sh +RUN $BASE_PATH/scripts/build_tools.sh +RUN $BASE_PATH/scripts/build_packages/systemd-centos7/build_package.sh diff --git a/scripts/build_packages/systemd-centos7/build_package.sh b/scripts/build_packages/systemd-centos7/build_package.sh new file mode 100755 index 0000000000..52a584586d --- /dev/null +++ b/scripts/build_packages/systemd-centos7/build_package.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e +set -x + +BASE=$(readlink -e $(dirname ${0})/../..) # points to scripts directory +CODE_DIR=$(readlink -e ${BASE}/..) # project root +BUILD_ROOT=$CODE_DIR/build # should have all binaries already inside +BUILD=$CODE_DIR/build_tmp # used for temporary data used to construct the packages + +cd ${CODE_DIR} +VERSION=$(git describe --long --abbrev=7) +ARCH=$(uname -m) + +mkdir -p ${BUILD}/usr/bin +mkdir -p ${BUILD}/lib/systemd/system/ +mkdir -p ${BUILD}/etc/metrictank +mkdir -p ${BUILD}/var/run/metrictank +mkdir -p $(dirname ${PKG}) + +cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini +cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/systemd/metrictank.service $BUILD/lib/systemd/system/ +cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ + +fpm -s dir -t rpm \ + -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ + --depends cyrus-sasl \ + --depends openssl \ + --config-files /etc/metrictank/ \ + -m "Raintank Inc. " --vendor "grafana.com" \ + --license "Apache2.0" -C ${BUILD} -p ${PKG} . 
diff --git a/scripts/build_packages/systemd/Dockerfile b/scripts/build_packages/systemd/Dockerfile new file mode 100644 index 0000000000..2a5a4d7878 --- /dev/null +++ b/scripts/build_packages/systemd/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:16.04 + +ENV BASE_PATH=/go/src/github.com/grafana/metrictank + +ARG PKG +ARG VERSION +ARG ARCH +COPY . $BASE_PATH/ +RUN apt-get update && apt-get install -y curl build-essential libssl-dev libsasl2-dev git bash python pkg-config ruby ruby-dev rubygems +RUN gem install --no-ri --no-rdoc fpm +RUN $BASE_PATH/scripts/get_go.sh +ENV PATH="${PATH}:/usr/local/go/bin" +ENV GOPATH="/go" +RUN $BASE_PATH/scripts/build.sh +RUN $BASE_PATH/scripts/build_tools.sh +RUN $BASE_PATH/scripts/build_packages/sysvinit/build_package.sh diff --git a/scripts/build_packages/systemd/build_package.sh b/scripts/build_packages/systemd/build_package.sh new file mode 100755 index 0000000000..4cb3073cd6 --- /dev/null +++ b/scripts/build_packages/systemd/build_package.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e +set -x + +BASE=$(readlink -e $(dirname ${0})/../..) # points to scripts directory +CODE_DIR=$(readlink -e ${BASE}/..) 
# project root +BUILD_ROOT=$CODE_DIR/build # should have all binaries already inside +BUILD=$CODE_DIR/build_tmp # used for temporary data used to construct the packages + +cd ${CODE_DIR} +VERSION=$(git describe --long --abbrev=7) +ARCH=$(uname -m) + +mkdir -p ${BUILD}/usr/bin +mkdir -p ${BUILD}/lib/systemd/system/ +mkdir -p ${BUILD}/etc/metrictank +mkdir -p ${BUILD}/var/run/metrictank +mkdir -p $(dirname ${PKG}) + +cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini +cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/systemd/metrictank.service $BUILD/lib/systemd/system/ +cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ + +fpm -s dir -t deb \ + -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ + --depends libssl1.0.0 \ + --depends libsasl2-2 \ + --config-files /etc/metrictank/ \ + -m "Raintank Inc. " --vendor "grafana.com" \ + --license "Apache2.0" -C ${BUILD} -p ${PKG} . diff --git a/scripts/build_packages/sysvinit/Dockerfile b/scripts/build_packages/sysvinit/Dockerfile new file mode 100644 index 0000000000..dff8019392 --- /dev/null +++ b/scripts/build_packages/sysvinit/Dockerfile @@ -0,0 +1,16 @@ +FROM debian:wheezy + +ENV BASE_PATH=/go/src/github.com/grafana/metrictank + +ARG PKG +ARG VERSION +ARG ARCH +COPY . 
$BASE_PATH/ +RUN apt-get update && apt-get install -y curl build-essential linux-headers-amd64 libssl-dev libsasl2-dev git bash python pkg-config ruby ruby-dev rubygems +RUN gem install --no-ri --no-rdoc fpm +RUN $BASE_PATH/scripts/get_go.sh +ENV PATH="${PATH}:/usr/local/go/bin" +ENV GOPATH="/go" +RUN $BASE_PATH/scripts/build.sh +RUN $BASE_PATH/scripts/build_tools.sh +RUN $BASE_PATH/scripts/build_packages/sysvinit/build_package.sh diff --git a/scripts/build_packages/sysvinit/build_package.sh b/scripts/build_packages/sysvinit/build_package.sh new file mode 100755 index 0000000000..82ee4c4e4e --- /dev/null +++ b/scripts/build_packages/sysvinit/build_package.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e +set -x + +BASE=$(readlink -e $(dirname ${0})/../..) # points to scripts directory +CODE_DIR=$(readlink -e ${BASE}/..) # project root +BUILD_ROOT=$CODE_DIR/build # should have all binaries already inside +BUILD=$CODE_DIR/build_tmp # used for temporary data used to construct the packages + +cd ${CODE_DIR} + +mkdir -p ${BUILD}/usr/bin +mkdir -p ${BUILD}/etc/metrictank +mkdir -p $(dirname ${PKG}) + +cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini +cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ +cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ + +fpm -s dir -t deb \ + -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ + --depends libssl1.0.0 \ + --depends libsasl2-2 \ + --deb-init ${BASE}/config/sysvinit/init.d/metrictank \ + --deb-default ${BASE}/config/sysvinit/default/metrictank \ + --replaces metric-tank --provides metric-tank \ + --conflicts metric-tank \ + --config-files /etc/metrictank/ \ + -C ${BUILD} -p ${PKG} . 
diff --git a/scripts/build_packages/upstart-0.6.5/Dockerfile b/scripts/build_packages/upstart-0.6.5/Dockerfile new file mode 100644 index 0000000000..28ef4ffde2 --- /dev/null +++ b/scripts/build_packages/upstart-0.6.5/Dockerfile @@ -0,0 +1,18 @@ +FROM centos:6 + +ENV BASE_PATH=/go/src/github.com/grafana/metrictank + +ARG PKG +ARG VERSION +ARG ARCH +COPY . $BASE_PATH/ +RUN yum install -y git libyaml-devel zlib-devel openssl-devel cyrus-sasl-devel +RUN yum groupinstall -y 'Development Tools' +RUN $BASE_PATH/scripts/build_packages/upstart-0.6.5/get_ruby.sh +RUN gem install --no-ri --no-rdoc fpm +RUN $BASE_PATH/scripts/get_go.sh +ENV PATH="${PATH}:/usr/local/go/bin" +ENV GOPATH="/go" +RUN $BASE_PATH/scripts/build.sh +RUN $BASE_PATH/scripts/build_tools.sh +RUN $BASE_PATH/scripts/build_packages/upstart-0.6.5/build_package.sh diff --git a/scripts/build_packages/upstart-0.6.5/build_package.sh b/scripts/build_packages/upstart-0.6.5/build_package.sh new file mode 100755 index 0000000000..4653390a4f --- /dev/null +++ b/scripts/build_packages/upstart-0.6.5/build_package.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e +set -x + +BASE=$(readlink -e $(dirname ${0})/../..) # points to scripts directory +CODE_DIR=$(readlink -e ${BASE}/..) 
# project root +BUILD_ROOT=$CODE_DIR/build # should have all binaries already inside +BUILD=$CODE_DIR/build_tmp # used for temporary data used to construct the packages + +cd ${CODE_DIR} +VERSION=$(git describe --long --abbrev=7) +ARCH=$(uname -m) + +mkdir -p ${BUILD}/usr/bin +mkdir -p ${BUILD}/etc/init +mkdir -p ${BUILD}/etc/metrictank +mkdir -p $(dirname ${PKG}) + +cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini +cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/upstart-0.6.5/metrictank.conf $BUILD/etc/init +cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ + +fpm -s dir -t rpm \ + -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ + --depends cyrus-sasl \ + --depends openssl \ + --replaces metric-tank --provides metric-tank \ + --conflicts metric-tank \ + --config-files /etc/metrictank/ \ + -C ${BUILD} -p ${PKG} . diff --git a/scripts/build_packages/upstart-0.6.5/get_ruby.sh b/scripts/build_packages/upstart-0.6.5/get_ruby.sh new file mode 100755 index 0000000000..7cb445bc41 --- /dev/null +++ b/scripts/build_packages/upstart-0.6.5/get_ruby.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e +set -x + +MAJOR_VERSION="2.5" +MINOR_VERSION="${MAJOR_VERSION}.1" + +cd /tmp +yum install -y wget +wget http://ftp.ruby-lang.org/pub/ruby/${MAJOR_VERSION}/ruby-${MINOR_VERSION}.tar.gz +tar -xvzf /tmp/ruby-${MINOR_VERSION}.tar.gz +cd /tmp/ruby-${MINOR_VERSION} +./configure +make +make install + +cd ${OLD_PWD} diff --git a/scripts/build_packages/upstart/Dockerfile b/scripts/build_packages/upstart/Dockerfile new file mode 100644 index 0000000000..9737bc4fa5 --- /dev/null +++ b/scripts/build_packages/upstart/Dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:14.04 + +ENV BASE_PATH=/go/src/github.com/grafana/metrictank + +ARG PKG +ARG VERSION +ARG ARCH +COPY . 
$BASE_PATH/ +RUN apt-get update && apt-get install -y curl build-essential libssl-dev libsasl2-dev git bash python pkg-config ruby ruby-dev +RUN gem install --no-ri --no-rdoc fpm +RUN $BASE_PATH/scripts/get_go.sh +ENV PATH="${PATH}:/usr/local/go/bin" +ENV GOPATH="/go" +RUN $BASE_PATH/scripts/build.sh +RUN $BASE_PATH/scripts/build_tools.sh +RUN $BASE_PATH/scripts/build_packages/sysvinit/build_package.sh diff --git a/scripts/build_packages/upstart/build_package.sh b/scripts/build_packages/upstart/build_package.sh new file mode 100755 index 0000000000..db56a040ed --- /dev/null +++ b/scripts/build_packages/upstart/build_package.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e +set -x + +BASE=$(readlink -e $(dirname ${0})/../..) # points to scripts directory +CODE_DIR=$(readlink -e ${BASE}/..) # project root +BUILD_ROOT=$CODE_DIR/build # should have all binaries already inside +BUILD=$CODE_DIR/build_tmp # used for temporary data used to construct the packages + +cd ${CODE_DIR} + +mkdir -p ${BUILD}/usr/bin +mkdir -p ${BUILD}/etc/init +mkdir -p ${BUILD}/etc/metrictank +mkdir -p $(dirname ${PKG}) + +cp ${BASE}/config/metrictank-package.ini ${BUILD}/etc/metrictank/metrictank.ini +cp ${BASE}/config/storage-schemas.conf ${BUILD}/etc/metrictank/ +cp ${BASE}/config/storage-aggregation.conf ${BUILD}/etc/metrictank/ +cp ${BUILD_ROOT}/{metrictank,mt-*} ${BUILD}/usr/bin/ + +fpm -s dir -t deb \ + -v ${VERSION} -n metrictank -a ${ARCH} --description "metrictank, the gorilla-inspired timeseries database backend for graphite" \ + --depends libssl1.0.0 \ + --depends libsasl2-2 \ + --deb-upstart ${BASE}/config/upstart/metrictank \ + --replaces metric-tank --provides metric-tank \ + --conflicts metric-tank \ + --config-files /etc/metrictank/ \ + -C ${BUILD} -p ${PKG} . 
diff --git a/scripts/build_tools.sh b/scripts/build_tools.sh index 56093e3a8a..22ff67a9fe 100755 --- a/scripts/build_tools.sh +++ b/scripts/build_tools.sh @@ -9,8 +9,8 @@ BUILDDIR=$(pwd)/build # Make dir mkdir -p $BUILDDIR -# disable cgo -export CGO_ENABLED=0 +# enable cgo +export CGO_ENABLED=1 function fail () { echo "Aborting due to failure." >&2 @@ -24,10 +24,10 @@ for tool in *; do if [ "$1" == "-race" ] then set -x - CGO_ENABLED=1 go build -race -ldflags "-X main.gitHash=$GITVERSION" -o $BUILDDIR/$tool || fail + go build -tags static -race -ldflags "-X main.gitHash=$GITVERSION" -o $BUILDDIR/$tool || fail else set -x - go build -ldflags "-X main.gitHash=$GITVERSION" -o $BUILDDIR/$tool || fail + go build -tags static -ldflags "-X main.gitHash=$GITVERSION" -o $BUILDDIR/$tool || fail fi set +x cd .. diff --git a/scripts/get_go.sh b/scripts/get_go.sh new file mode 100755 index 0000000000..3d308b0d69 --- /dev/null +++ b/scripts/get_go.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +GO_DEFAULT_VERSION="1.10.1" +if [ -z "$GO_VERSION" ] +then + GO_VERSION=$GO_DEFAULT_VERSION +fi +GO_DOWNLOAD_LINK="https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" + +cd /usr/local +curl -fsSL $GO_DOWNLOAD_LINK | tar -xz diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/.travis.yml b/vendor/github.com/confluentinc/confluent-kafka-go/.travis.yml new file mode 100644 index 0000000000..9784f7aeff --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/.travis.yml @@ -0,0 +1,34 @@ +language: go +go: + - 1.7 + - 1.8 + - 1.9 +osx_image: xcode9.2 +os: + - linux + - osx +env: + global: + - PKG_CONFIG_PATH="$HOME/gopath/src/github.com/confluentinc/confluent-kafka-go/tmp-build/lib/pkgconfig" + - LD_LIBRARY_PATH="$HOME/gopath/src/github.com/confluentinc/confluent-kafka-go/tmp-build/lib" + - DYLD_LIBRARY_PATH="$HOME/gopath/src/github.com/confluentinc/confluent-kafka-go/tmp-build/lib" + - PATH="$PATH:$GOPATH/bin" + - 
LIBRDKAFKA_VERSION=master + +# Travis OSX worker has problems running our Go binaries for 1.7 and 1.8, +# workaround for now is to skip exec for those. + +before_install: + - rm -rf tmp-build + - bash mk/bootstrap-librdkafka.sh ${LIBRDKAFKA_VERSION} tmp-build + - go get -u github.com/golang/lint/golint + - if [[ $TRAVIS_OS_NAME == osx && $TRAVIS_GO_VERSION =~ ^1\.[78] ]] ; then touch .no_exec ; fi + +install: + - go get -tags static ./... + - go install -tags static ./... + +script: + - golint -set_exit_status ./... + - if [[ ! -f .no_exec ]]; then go test -timeout 60s -v -tags static ./... ; fi + - if [[ ! -f .no_exec ]]; then go-kafkacat --help ; fi diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/LICENSE b/vendor/github.com/confluentinc/confluent-kafka-go/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/README.md new file mode 100644 index 0000000000..45f84ad209 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/README.md @@ -0,0 +1,278 @@ +Confluent's Golang Client for Apache KafkaTM +===================================================== + +**confluent-kafka-go** is Confluent's Golang client for [Apache Kafka](http://kafka.apache.org/) and the +[Confluent Platform](https://www.confluent.io/product/compare/). + +Features: + +- **High performance** - confluent-kafka-go is a lightweight wrapper around +[librdkafka](https://github.com/edenhill/librdkafka), a finely tuned C +client. + +- **Reliability** - There are a lot of details to get right when writing an Apache Kafka +client. We get them right in one place (librdkafka) and leverage this work +across all of our clients (also [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python) +and [confluent-kafka-dotnet](https://github.com/confluentinc/confluent-kafka-dotnet)). + +- **Supported** - Commercial support is offered by +[Confluent](https://confluent.io/). 
+ +- **Future proof** - Confluent, founded by the +creators of Kafka, is building a [streaming platform](https://www.confluent.io/product/compare/) +with Apache Kafka at its core. It's high priority for us that client features keep +pace with core Apache Kafka and components of the [Confluent Platform](https://www.confluent.io/product/compare/). + + +The Golang bindings provides a high-level Producer and Consumer with support +for the balanced consumer groups of Apache Kafka 0.9 and above. + +See the [API documentation](http://docs.confluent.io/current/clients/confluent-kafka-go/index.html) for more information. + +**License**: [Apache License v2.0](http://www.apache.org/licenses/LICENSE-2.0) + + +Examples +======== + +High-level balanced consumer + +```golang +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" +) + +func main() { + + c, err := kafka.NewConsumer(&kafka.ConfigMap{ + "bootstrap.servers": "localhost", + "group.id": "myGroup", + "auto.offset.reset": "earliest", + }) + + if err != nil { + panic(err) + } + + c.SubscribeTopics([]string{"myTopic", "^aRegex.*[Tt]opic"}, nil) + + for { + msg, err := c.ReadMessage(-1) + if err == nil { + fmt.Printf("Message on %s: %s\n", msg.TopicPartition, string(msg.Value)) + } else { + fmt.Printf("Consumer error: %v (%v)\n", err, msg) + break + } + } + + c.Close() +} +``` + +Producer + +```golang +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" +) + +func main() { + + p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost"}) + if err != nil { + panic(err) + } + + // Delivery report handler for produced messages + go func() { + for e := range p.Events() { + switch ev := e.(type) { + case *kafka.Message: + if ev.TopicPartition.Error != nil { + fmt.Printf("Delivery failed: %v\n", ev.TopicPartition) + } else { + fmt.Printf("Delivered message to %v\n", ev.TopicPartition) + } + } + } + }() + + // Produce messages to topic (asynchronously) + topic := 
"myTopic" + for _, word := range []string{"Welcome", "to", "the", "Confluent", "Kafka", "Golang", "client"} { + p.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, + Value: []byte(word), + }, nil) + } + + // Wait for message deliveries + p.Flush(15 * 1000) +} +``` + +More elaborate examples are available in the [examples](examples) directory, +including [how to configure](examples/confluent_cloud_example) the Go client +for use with [Confluent Cloud](https://www.confluent.io/confluent-cloud/). + + +Getting Started +=============== + +Installing librdkafka +--------------------- + +This client for Go depends on librdkafka v0.11.4 or later, so you either need to install librdkafka through your OS/distributions package manager, +or download and build it from source. + +- For Debian and Ubuntu based distros, install `librdkafka-dev` from the standard +repositories or using [Confluent's Deb repository](http://docs.confluent.io/current/installation.html#installation-apt). +- For Redhat based distros, install `librdkafka-devel` using [Confluent's YUM repository](http://docs.confluent.io/current/installation.html#rpm-packages-via-yum). +- For MacOS X, install `librdkafka` from Homebrew. You may also need to brew install pkg-config if you don't already have it. +- For Windows, see the `librdkafka.redist` NuGet package. + +Build from source: + + git clone https://github.com/edenhill/librdkafka.git + cd librdkafka + ./configure --prefix /usr + make + sudo make install + + +Install the client +------------------- + +``` +go get -u github.com/confluentinc/confluent-kafka-go/kafka +``` + +See the [examples](examples) for usage details. + +Note that the development of librdkafka and the Go client are kept in synch. So +if you use HEAD on master of the Go client, then you need to use HEAD on master of +librdkafka. 
See this [issue](https://github.com/confluentinc/confluent-kafka-go/issues/61#issuecomment-303746159) for more details. + +API Strands +=========== + +There are two main API strands: channel based or function based. + +Channel Based Consumer +---------------------- + +Messages, errors and events are posted on the consumer.Events channel +for the application to read. + +Pros: + + * Possibly more Golang:ish + * Makes reading from multiple channels easy + * Fast + +Cons: + + * Outdated events and messages may be consumed due to the buffering nature + of channels. The extent is limited, but not remedied, by the Events channel + buffer size (`go.events.channel.size`). + +See [examples/consumer_channel_example](examples/consumer_channel_example) + + + +Function Based Consumer +----------------------- + +Messages, errors and events are polled through the consumer.Poll() function. + +Pros: + + * More direct mapping to underlying librdkafka functionality. + +Cons: + + * Makes it harder to read from multiple channels, but a go-routine easily + solves that (see Cons in channel based consumer above about outdated events). + * Slower than the channel consumer. + +See [examples/consumer_example](examples/consumer_example) + + + +Channel Based Producer +---------------------- + +Application writes messages to the producer.ProducerChannel. +Delivery reports are emitted on the producer.Events or specified private channel. + +Pros: + + * Go:ish + * Proper channel backpressure if librdkafka internal queue is full. + +Cons: + + * Double queueing: messages are first queued in the channel (size is configurable) + and then inside librdkafka. + +See [examples/producer_channel_example](examples/producer_channel_example) + + +Function Based Producer +----------------------- + +Application calls producer.Produce() to produce messages. +Delivery reports are emitted on the producer.Events or specified private channel. 
+ +Pros: + + * Go:ish + +Cons: + + * Produce() is a non-blocking call, if the internal librdkafka queue is full + the call will fail. + * Somewhat slower than the channel producer. + +See [examples/producer_example](examples/producer_example) + + +Static Builds +============= + +**NOTE**: Requires pkg-config + +To link your application statically with librdkafka append `-tags static` to +your application's `go build` command, e.g.: + + $ cd kafkatest/go_verifiable_consumer + $ go build -tags static + +This will create a binary with librdkafka statically linked, do note however +that any librdkafka dependencies (such as ssl, sasl2, lz4, etc, depending +on librdkafka build configuration) will be linked dynamically and thus required +on the target system. + +To create a completely static binary append `-tags static_all` instead. +This requires all dependencies to be available as static libraries +(e.g., libsasl2.a). Static libraries are typically not installed +by default but are available in the corresponding `..-dev` or `..-devel` +packages (e.g., libsasl2-dev). + +After a succesful static build verify the dependencies by running +`ldd ./your_program` (or `otool -L ./your_program` on OSX), librdkafka should not be listed. + + +Tests +===== + +See [kafka/README](kafka/README.md) + +Contributing +------------ +Contributions to the code, examples, documentation, et.al, are very much appreciated. + +Make your changes, run gofmt, tests, etc, push your branch, create a PR, and [sign the CLA](http://clabot.confluent.io/cla). 
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/examples/README b/vendor/github.com/confluentinc/confluent-kafka-go/examples/README new file mode 100644 index 0000000000..105d08bf68 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/examples/README @@ -0,0 +1,19 @@ + +Examples: + + consumer_channel_example - Channel based consumer + consumer_example - Function & callback based consumer + + producer_channel_example - Channel based producer + producer_example - Function based producer + + go-kafkacat - Channel based kafkacat Go clone + + +Usage example: + + $ cd consumer_example + $ go build (or 'go install') + $ ./consumer_example # see usage + $ ./consumer_example mybroker mygroup mytopic + diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/examples/confluent_cloud_example/confluent_cloud_example.go b/vendor/github.com/confluentinc/confluent-kafka-go/examples/confluent_cloud_example/confluent_cloud_example.go new file mode 100644 index 0000000000..7b99c4083a --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/examples/confluent_cloud_example/confluent_cloud_example.go @@ -0,0 +1,104 @@ +// This is a simple example demonstrating how to produce a message to +// Confluent Cloud then read it back again. +// +// https://www.confluent.io/confluent-cloud/ +// +// Auto-creation of topics is disabled in Confluent Cloud. You will need to +// use the ccloud cli to create the go-test-topic topic before running this +// example. +// +// $ ccloud topic create go-test-topic +// +// The , and parameters +// are available via the Confluent Cloud web interface. For more information, +// refer to the quick-start: +// +// https://docs.confluent.io/current/cloud-quickstart.html +package main + +/** + * Copyright 2018 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "time" + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" +) + +func main() { + + p, err := kafka.NewProducer(&kafka.ConfigMap{ + "bootstrap.servers": "", + "broker.version.fallback": "0.10.0.0", + "api.version.fallback.ms": 0, + "sasl.mechanisms": "PLAIN", + "security.protocol": "SASL_SSL", + "sasl.username": "", + "sasl.password": "",}) + + if err != nil { + panic(fmt.Sprintf("Failed to create producer: %s", err)) + } + + value := "golang test value" + topic := "go-test-topic" + p.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, + Value: []byte(value), + }, nil) + + // Wait for delivery report + e := <-p.Events() + + m := e.(*kafka.Message) + if m.TopicPartition.Error != nil { + fmt.Printf("failed to deliver message: %v\n", m.TopicPartition) + } else { + fmt.Printf("delivered to topic %s [%d] at offset %v\n", + *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) + } + + p.Close() + + + c, err := kafka.NewConsumer(&kafka.ConfigMap{ + "bootstrap.servers": "", + "broker.version.fallback": "0.10.0.0", + "api.version.fallback.ms": 0, + "sasl.mechanisms": "PLAIN", + "security.protocol": "SASL_SSL", + "sasl.username": "", + "sasl.password": "", + "session.timeout.ms": 6000, + "group.id": "my-group", + "default.topic.config": kafka.ConfigMap{"auto.offset.reset": "earliest"},}) + + if err != nil { + panic(fmt.Sprintf("Failed to create consumer: %s", err)) + } + + topics := []string { topic } + c.SubscribeTopics(topics, nil) + + for { 
+ msg, err := c.ReadMessage(100 * time.Millisecond) + if err == nil { + fmt.Printf("consumed: %s: %s\n", msg.TopicPartition, string(msg.Value)) + } + } + + c.Close() +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_channel_example/consumer_channel_example.go b/vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_channel_example/consumer_channel_example.go new file mode 100644 index 0000000000..1e453af4e6 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_channel_example/consumer_channel_example.go @@ -0,0 +1,90 @@ +// Example channel-based high-level Apache Kafka consumer +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "os" + "os/signal" + "syscall" +) + +func main() { + + if len(os.Args) < 4 { + fmt.Fprintf(os.Stderr, "Usage: %s \n", + os.Args[0]) + os.Exit(1) + } + + broker := os.Args[1] + group := os.Args[2] + topics := os.Args[3:] + + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + + c, err := kafka.NewConsumer(&kafka.ConfigMap{ + "bootstrap.servers": broker, + "group.id": group, + "session.timeout.ms": 6000, + "go.events.channel.enable": true, + "go.application.rebalance.enable": true, + "default.topic.config": kafka.ConfigMap{"auto.offset.reset": "earliest"}}) + + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err) + os.Exit(1) + } + + fmt.Printf("Created Consumer %v\n", c) + + err = c.SubscribeTopics(topics, nil) + + run := true + + for run == true { + select { + case sig := <-sigchan: + fmt.Printf("Caught signal %v: terminating\n", sig) + run = false + + case ev := <-c.Events(): + switch e := ev.(type) { + case kafka.AssignedPartitions: + fmt.Fprintf(os.Stderr, "%% %v\n", e) + c.Assign(e.Partitions) + case kafka.RevokedPartitions: + fmt.Fprintf(os.Stderr, "%% %v\n", e) + c.Unassign() + case *kafka.Message: + fmt.Printf("%% Message on %s:\n%s\n", + e.TopicPartition, string(e.Value)) + case kafka.PartitionEOF: + fmt.Printf("%% Reached %v\n", e) + case kafka.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v\n", e) + run = false + } + } + } + + fmt.Printf("Closing consumer\n") + c.Close() +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_example/consumer_example.go b/vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_example/consumer_example.go new file mode 100644 index 0000000000..f507b5c80a --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/examples/consumer_example/consumer_example.go @@ -0,0 +1,93 @@ +// Example 
function-based high-level Apache Kafka consumer +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// consumer_example implements a consumer using the non-channel Poll() API +// to retrieve messages and events. + +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "os" + "os/signal" + "syscall" +) + +func main() { + + if len(os.Args) < 4 { + fmt.Fprintf(os.Stderr, "Usage: %s \n", + os.Args[0]) + os.Exit(1) + } + + broker := os.Args[1] + group := os.Args[2] + topics := os.Args[3:] + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + + c, err := kafka.NewConsumer(&kafka.ConfigMap{ + "bootstrap.servers": broker, + "group.id": group, + "session.timeout.ms": 6000, + "default.topic.config": kafka.ConfigMap{"auto.offset.reset": "earliest"}}) + + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err) + os.Exit(1) + } + + fmt.Printf("Created Consumer %v\n", c) + + err = c.SubscribeTopics(topics, nil) + + run := true + + for run == true { + select { + case sig := <-sigchan: + fmt.Printf("Caught signal %v: terminating\n", sig) + run = false + default: + ev := c.Poll(100) + if ev == nil { + continue + } + + switch e := ev.(type) { + case *kafka.Message: + fmt.Printf("%% Message on %s:\n%s\n", + e.TopicPartition, string(e.Value)) + if e.Headers != nil { + fmt.Printf("%% Headers: %v\n", e.Headers) + } + case 
kafka.PartitionEOF: + fmt.Printf("%% Reached %v\n", e) + case kafka.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v\n", e) + run = false + default: + fmt.Printf("Ignored %v\n", e) + } + } + } + + fmt.Printf("Closing consumer\n") + c.Close() +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/examples/go-kafkacat/go-kafkacat.go b/vendor/github.com/confluentinc/confluent-kafka-go/examples/go-kafkacat/go-kafkacat.go new file mode 100644 index 0000000000..c0fa806aad --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/examples/go-kafkacat/go-kafkacat.go @@ -0,0 +1,255 @@ +// Example kafkacat clone written in Golang +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "bufio" + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "gopkg.in/alecthomas/kingpin.v2" + "os" + "os/signal" + "strings" + "syscall" +) + +var ( + verbosity = 1 + exitEOF = false + eofCnt = 0 + partitionCnt = 0 + keyDelim = "" + sigs chan os.Signal +) + +func runProducer(config *kafka.ConfigMap, topic string, partition int32) { + p, err := kafka.NewProducer(config) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create producer: %s\n", err) + os.Exit(1) + } + + fmt.Fprintf(os.Stderr, "Created Producer %v, topic %s [%d]\n", p, topic, partition) + + tp := kafka.TopicPartition{Topic: &topic, Partition: partition} + + go func(drs chan kafka.Event) { + for ev := range drs { + m, ok := ev.(*kafka.Message) + if !ok { + continue + } + if m.TopicPartition.Error != nil { + fmt.Fprintf(os.Stderr, "%% Delivery error: %v\n", m.TopicPartition) + } else if verbosity >= 2 { + fmt.Fprintf(os.Stderr, "%% Delivered %v\n", m) + } + } + }(p.Events()) + + reader := bufio.NewReader(os.Stdin) + stdinChan := make(chan string) + + go func() { + for true { + line, err := reader.ReadString('\n') + if err != nil { + break + } + + line = strings.TrimSuffix(line, "\n") + if len(line) == 0 { + continue + } + + stdinChan <- line + } + close(stdinChan) + }() + + run := true + + for run == true { + select { + case sig := <-sigs: + fmt.Fprintf(os.Stderr, "%% Terminating on signal %v\n", sig) + run = false + + case line, ok := <-stdinChan: + if !ok { + run = false + break + } + + msg := kafka.Message{TopicPartition: tp} + + if keyDelim != "" { + vec := strings.SplitN(line, keyDelim, 2) + if len(vec[0]) > 0 { + msg.Key = ([]byte)(vec[0]) + } + if len(vec) == 2 && len(vec[1]) > 0 { + msg.Value = ([]byte)(vec[1]) + } + } else { + msg.Value = ([]byte)(line) + } + + p.ProduceChannel() <- &msg + } + } + + fmt.Fprintf(os.Stderr, "%% Flushing %d message(s)\n", p.Len()) + p.Flush(10000) + fmt.Fprintf(os.Stderr, "%% Closing\n") + p.Close() +} + +func 
runConsumer(config *kafka.ConfigMap, topics []string) { + c, err := kafka.NewConsumer(config) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err) + os.Exit(1) + } + + fmt.Fprintf(os.Stderr, "%% Created Consumer %v\n", c) + + c.SubscribeTopics(topics, nil) + + run := true + + for run == true { + select { + + case sig := <-sigs: + fmt.Fprintf(os.Stderr, "%% Terminating on signal %v\n", sig) + run = false + + case ev := <-c.Events(): + switch e := ev.(type) { + case kafka.AssignedPartitions: + fmt.Fprintf(os.Stderr, "%% %v\n", e) + c.Assign(e.Partitions) + partitionCnt = len(e.Partitions) + eofCnt = 0 + case kafka.RevokedPartitions: + fmt.Fprintf(os.Stderr, "%% %v\n", e) + c.Unassign() + partitionCnt = 0 + eofCnt = 0 + case *kafka.Message: + if verbosity >= 2 { + fmt.Fprintf(os.Stderr, "%% %v:\n", e.TopicPartition) + } + if keyDelim != "" { + if e.Key != nil { + fmt.Printf("%s%s", string(e.Key), keyDelim) + } else { + fmt.Printf("%s", keyDelim) + } + } + fmt.Println(string(e.Value)) + case kafka.PartitionEOF: + fmt.Fprintf(os.Stderr, "%% Reached %v\n", e) + eofCnt++ + if exitEOF && eofCnt >= partitionCnt { + run = false + } + case kafka.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v\n", e) + run = false + case kafka.OffsetsCommitted: + if verbosity >= 2 { + fmt.Fprintf(os.Stderr, "%% %v\n", e) + } + default: + fmt.Fprintf(os.Stderr, "%% Unhandled event %T ignored: %v\n", e, e) + } + } + } + + fmt.Fprintf(os.Stderr, "%% Closing consumer\n") + c.Close() +} + +type configArgs struct { + conf kafka.ConfigMap +} + +func (c *configArgs) String() string { + return "FIXME" +} + +func (c *configArgs) Set(value string) error { + return c.conf.Set(value) +} + +func (c *configArgs) IsCumulative() bool { + return true +} + +func main() { + sigs = make(chan os.Signal) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + _, libver := kafka.LibraryVersion() + kingpin.Version(fmt.Sprintf("confluent-kafka-go (librdkafka v%s)", libver)) + + // Default 
config + var confargs configArgs + confargs.conf = kafka.ConfigMap{"session.timeout.ms": 6000} + + /* General options */ + brokers := kingpin.Flag("broker", "Bootstrap broker(s)").Required().String() + kingpin.Flag("config", "Configuration property (prop=val)").Short('X').PlaceHolder("PROP=VAL").SetValue(&confargs) + keyDelimArg := kingpin.Flag("key-delim", "Key and value delimiter (empty string=dont print/parse key)").Default("").String() + verbosityArg := kingpin.Flag("verbosity", "Output verbosity level").Short('v').Default("1").Int() + + /* Producer mode options */ + modeP := kingpin.Command("produce", "Produce messages") + topic := modeP.Flag("topic", "Topic to produce to").Required().String() + partition := modeP.Flag("partition", "Partition to produce to").Default("-1").Int() + + /* Consumer mode options */ + modeC := kingpin.Command("consume", "Consume messages").Default() + group := modeC.Flag("group", "Consumer group").Required().String() + topics := modeC.Arg("topic", "Topic(s) to subscribe to").Required().Strings() + initialOffset := modeC.Flag("offset", "Initial offset").Short('o').Default(kafka.OffsetBeginning.String()).String() + exitEOFArg := modeC.Flag("eof", "Exit when EOF is reached for all partitions").Bool() + + mode := kingpin.Parse() + + verbosity = *verbosityArg + keyDelim = *keyDelimArg + exitEOF = *exitEOFArg + confargs.conf["bootstrap.servers"] = *brokers + + switch mode { + case "produce": + confargs.conf["default.topic.config"] = kafka.ConfigMap{"produce.offset.report": true} + runProducer((*kafka.ConfigMap)(&confargs.conf), *topic, int32(*partition)) + + case "consume": + confargs.conf["group.id"] = *group + confargs.conf["go.events.channel.enable"] = true + confargs.conf["go.application.rebalance.enable"] = true + confargs.conf["default.topic.config"] = kafka.ConfigMap{"auto.offset.reset": *initialOffset} + runConsumer((*kafka.ConfigMap)(&confargs.conf), *topics) + } + +} diff --git 
a/vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_channel_example/producer_channel_example.go b/vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_channel_example/producer_channel_example.go new file mode 100644 index 0000000000..c06dc92511 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_channel_example/producer_channel_example.go @@ -0,0 +1,75 @@ +// Example channel-based Apache Kafka producer +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "os" +) + +func main() { + + if len(os.Args) != 3 { + fmt.Fprintf(os.Stderr, "Usage: %s \n", + os.Args[0]) + os.Exit(1) + } + + broker := os.Args[1] + topic := os.Args[2] + + p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": broker}) + + if err != nil { + fmt.Printf("Failed to create producer: %s\n", err) + os.Exit(1) + } + + fmt.Printf("Created Producer %v\n", p) + + doneChan := make(chan bool) + + go func() { + defer close(doneChan) + for e := range p.Events() { + switch ev := e.(type) { + case *kafka.Message: + m := ev + if m.TopicPartition.Error != nil { + fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error) + } else { + fmt.Printf("Delivered message to topic %s [%d] at offset %v\n", + *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) + } + return + + default: + fmt.Printf("Ignored event: %s\n", ev) + } + } + }() + + value := "Hello Go!" + p.ProduceChannel() <- &kafka.Message{TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, Value: []byte(value)} + + // wait for delivery report goroutine to finish + _ = <-doneChan + + p.Close() +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_example/producer_example.go b/vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_example/producer_example.go new file mode 100644 index 0000000000..5ca096a903 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/examples/producer_example/producer_example.go @@ -0,0 +1,68 @@ +// Example function-based Apache Kafka producer +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "os" +) + +func main() { + + if len(os.Args) != 3 { + fmt.Fprintf(os.Stderr, "Usage: %s \n", + os.Args[0]) + os.Exit(1) + } + + broker := os.Args[1] + topic := os.Args[2] + + p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": broker}) + + if err != nil { + fmt.Printf("Failed to create producer: %s\n", err) + os.Exit(1) + } + + fmt.Printf("Created Producer %v\n", p) + + // Optional delivery channel, if not specified the Producer object's + // .Events channel is used. + deliveryChan := make(chan kafka.Event) + + value := "Hello Go!" 
+ err = p.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, + Value: []byte(value), + Headers: []kafka.Header{{"myTestHeader", []byte("header values are binary")}}, + }, deliveryChan) + + e := <-deliveryChan + m := e.(*kafka.Message) + + if m.TopicPartition.Error != nil { + fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error) + } else { + fmt.Printf("Delivered message to topic %s [%d] at offset %v\n", + *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) + } + + close(deliveryChan) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go new file mode 100644 index 0000000000..88eff92bb2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go @@ -0,0 +1,60 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" +) + + +/* +#include + +//Minimum required librdkafka version. This is checked both during +//build-time and runtime. +//Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error +//defines and strings in sync. +// + +#define MIN_RD_KAFKA_VERSION 0x0000b0400 + +#ifdef __APPLE__ +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v0.11.4 or later. 
Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#else +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v0.11.4 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#endif + +#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION +#ifdef __APPLE__ +#error "confluent-kafka-go requires librdkafka v0.11.4 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#else +#error "confluent-kafka-go requires librdkafka v0.11.4 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#endif +#endif +*/ +import "C" + + +func versionCheck () error { + ver, verstr := LibraryVersion() + if ver < C.MIN_RD_KAFKA_VERSION { + return newErrorFromString(ErrNotImplemented, + fmt.Sprintf("%s: librdkafka version %s (0x%x) detected", + C.MIN_VER_ERRSTR, verstr, ver)) + } + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md new file mode 100644 index 0000000000..6df4d546f2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md @@ -0,0 +1,69 @@ +# Information for confluent-kafka-go developers + +Whenever librdkafka error codes are updated make sure to run generate before building: + +``` + $ (cd go_rdkafka_generr && go install) && go generate + $ go build +``` + + + + +## Testing + +Some of the tests included in this directory, the benchmark and integration tests in particular, +require an existing Kafka cluster and a testconf.json configuration file to +provide tests with bootstrap brokers, topic name, etc. 
+ +The format of testconf.json is a JSON object: +``` +{ + "Brokers": "", + "Topic": "" +} +``` + +See testconf-example.json for an example and full set of available options. + + +To run unit-tests: +``` +$ go test +``` + +To run benchmark tests: +``` +$ go test -bench . +``` + +For the code coverage: +``` +$ go test -coverprofile=coverage.out -bench=. +$ go tool cover -func=coverage.out +``` + +## Build tags (static linking) + + +Different build types are supported through Go build tags (`-tags ..`), +these tags should be specified on the **application** build command. + + * `static` - Build with librdkafka linked statically (but librdkafka + dependencies linked dynamically). + * `static_all` - Build with all libraries linked statically. + * neither - Build with librdkafka (and its dependencies) linked dynamically. + + + +## Generating HTML documentation + +To generate one-page HTML documentation run the mk/doc-gen.py script from the +top-level directory. This script requires the beautifulsoup4 Python package. + +``` +$ source .../your/virtualenv/bin/activate +$ pip install beautifulsoup4 +... +$ mk/doc-gen.py > kafka.html +``` diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html new file mode 100644 index 0000000000..05c8fed8c2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html @@ -0,0 +1,1632 @@ + + + + + + + + kafka - The Go Programming Language + + + + + + + + + + + +
+ ... +
+ +
+
+

+ Package kafka +

+ + + + +
+
+
+ + import "github.com/confluentinc/confluent-kafka-go/kafka" + +
+
+
+
+ + Overview + +
+
+ + Index + +
+
+
+
+
+ +
+ +
+

+ Overview ▾ +

+

+ Package kafka provides high-level Apache Kafka producer and consumers +using bindings on-top of the librdkafka C library. +

+

+ High-level Consumer +

+

+ * Decide if you want to read messages and events from the `.Events()` channel +(set `"go.events.channel.enable": true`) or by calling `.Poll()`. +

+

+ * Create a Consumer with `kafka.NewConsumer()` providing at +least the `bootstrap.servers` and `group.id` configuration properties. +

+

+ * Call `.Subscribe()` or (`.SubscribeTopics()` to subscribe to multiple topics) +to join the group with the specified subscription set. +Subscriptions are atomic, calling `.Subscribe*()` again will leave +the group and rejoin with the new set of topics. +

+

+ * Start reading events and messages from either the `.Events` channel +or by calling `.Poll()`. +

+

+ * When the group has rebalanced each client member is assigned a +(sub-)set of topic+partitions. +By default the consumer will start fetching messages for its assigned +partitions at this point, but your application may enable rebalance +events to get an insight into what the assigned partitions where +as well as set the initial offsets. To do this you need to pass +`"go.application.rebalance.enable": true` to the `NewConsumer()` call +mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event +with the assigned partition set. You can optionally modify the initial +offsets (they'll default to stored offsets and if there are no previously stored +offsets it will fall back to `"default.topic.config": ConfigMap{"auto.offset.reset": ..}` +which defaults to the `latest` message) and then call `.Assign(partitions)` +to start consuming. If you don't need to modify the initial offsets you will +not need to call `.Assign()`, the client will do so automatically for you if +you dont. +

+

+ * As messages are fetched they will be made available on either the +`.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`. +

+

+ * Handle messages, events and errors to your liking. +

+

+ * When you are done consuming call `.Close()` to commit final offsets +and leave the consumer group. +

+

+ Producer +

+

+ * Create a Producer with `kafka.NewProducer()` providing at least +the `bootstrap.servers` configuration properties. +

+

+ * Messages may now be produced either by sending a `*kafka.Message` +on the `.ProduceChannel` or by calling `.Produce()`. +

+

+ * Producing is an asynchronous operation so the client notifies the application +of per-message produce success or failure through something called delivery reports. +Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message` +and you should check `msg.TopicPartition.Error` for `nil` to find out if the message +was succesfully delivered or not. +It is also possible to direct delivery reports to alternate channels +by providing a non-nil `chan Event` channel to `.Produce()`. +If no delivery reports are wanted they can be completely disabled by +setting configuration property `"go.delivery.reports": false`. +

+

+ * When you are done producing messages you will need to make sure all messages +are indeed delivered to the broker (or failed), remember that this is +an asynchronous client so some of your messages may be lingering in internal +channels or tranmission queues. +To do this you can either keep track of the messages you've produced +and wait for their corresponding delivery reports, or call the convenience +function `.Flush()` that will block until all message deliveries are done +or the provided timeout elapses. +

+

+ * Finally call `.Close()` to decommission the producer. +

+

+ Events +

+

+ Apart from emitting messages and delivery reports the client also communicates +with the application through a number of different event types. +An application may choose to handle or ignore these events. +

+

+ Consumer events +

+

+ * `*kafka.Message` - a fetched message. +

+

+ * `AssignedPartitions` - The assigned partition set for this client following a rebalance. +Requires `go.application.rebalance.enable` +

+

+ * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance. +`AssignedPartitions` and `RevokedPartitions` are symetrical. +Requires `go.application.rebalance.enable` +

+

+ * `PartitionEOF` - Consumer has reached the end of a partition. +NOTE: The consumer will keep trying to fetch new messages for the partition. +

+

+ * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled). +

+

+ Producer events +

+

+ * `*kafka.Message` - delivery report for produced message. +Check `.TopicPartition.Error` for delivery result. +

+

+ Generic events for both Consumer and Producer +

+

+ * `KafkaError` - client (error codes are prefixed with _) or broker error. +These errors are normally just informational since the +client will try its best to automatically recover (eventually). +

+

+ Hint: If your application registers a signal notification +(signal.Notify) makes sure the signals channel is buffered to avoid +possible complications with blocking Poll() calls. +

+
+
+
+ +
+

+ Index ▾ +

+ +
+
+
+ + Constants + +
+
+ + func LibraryVersion() (int, string) + +
+
+ + type AssignedPartitions + +
+
+ + func (e AssignedPartitions) String() string + +
+
+ + type BrokerMetadata + +
+
+ + type ConfigMap + +
+
+ + func (m ConfigMap) Set(kv string) error + +
+
+ + func (m ConfigMap) SetKey(key string, value ConfigValue) error + +
+
+ + type ConfigValue + +
+
+ + type Consumer + +
+
+ + func NewConsumer(conf *ConfigMap) (*Consumer, error) + +
+
+ + func (c *Consumer) Assign(partitions []TopicPartition) (err error) + +
+
+ + func (c *Consumer) Close() (err error) + +
+
+ + func (c *Consumer) Commit() ([]TopicPartition, error) + +
+
+ + func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) + +
+
+ + func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) + +
+
+ + func (c *Consumer) Events() chan Event + +
+
+ + func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) + +
+
+ + func (c *Consumer) Poll(timeoutMs int) (event Event) + +
+
+ + func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) + +
+
+ + func (c *Consumer) String() string + +
+
+ + func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error + +
+
+ + func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) + +
+
+ + func (c *Consumer) Unassign() (err error) + +
+
+ + func (c *Consumer) Unsubscribe() (err error) + +
+
+ + type Error + +
+
+ + func (e Error) Code() ErrorCode + +
+
+ + func (e Error) Error() string + +
+
+ + func (e Error) String() string + +
+
+ + type ErrorCode + +
+
+ + func (c ErrorCode) String() string + +
+
+ + type Event + +
+
+ + type Handle + +
+
+ + type Message + +
+
+ + func (m *Message) String() string + +
+
+ + type Metadata + +
+
+ + type Offset + +
+
+ + func NewOffset(offset interface{}) (Offset, error) + +
+
+ + func OffsetTail(relativeOffset Offset) Offset + +
+
+ + func (o Offset) Set(offset interface{}) error + +
+
+ + func (o Offset) String() string + +
+
+ + type OffsetsCommitted + +
+
+ + func (o OffsetsCommitted) String() string + +
+
+ + type PartitionEOF + +
+
+ + func (p PartitionEOF) String() string + +
+
+ + type PartitionMetadata + +
+
+ + type Producer + +
+
+ + func NewProducer(conf *ConfigMap) (*Producer, error) + +
+
+ + func (p *Producer) Close() + +
+
+ + func (p *Producer) Events() chan Event + +
+
+ + func (p *Producer) Flush(timeoutMs int) int + +
+
+ + func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) + +
+
+ + func (p *Producer) Len() int + +
+
+ + func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error + +
+
+ + func (p *Producer) ProduceChannel() chan *Message + +
+
+ + func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) + +
+
+ + func (p *Producer) String() string + +
+
+ + type RebalanceCb + +
+
+ + type RevokedPartitions + +
+
+ + func (e RevokedPartitions) String() string + +
+
+ + type TimestampType + +
+
+ + func (t TimestampType) String() string + +
+
+ + type TopicMetadata + +
+
+ + type TopicPartition + +
+
+ + func (p TopicPartition) String() string + +
+
+
+ +

+ Package files +

+

+ + + build_dynamic.go + + + config.go + + + consumer.go + + + error.go + + + event.go + + + generated_errors.go + + + handle.go + + + kafka.go + + + message.go + + + metadata.go + + + misc.go + + + producer.go + + + testhelpers.go + + +

+
+ +
+ + + +

+ Constants +

+
const (
+    // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
+    TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
+    // TimestampCreateTime indicates timestamp set by producer (source time)
+    TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
+    // TimestampLogAppendTime indicates timestamp set set by broker (store time)
+    TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+)
+
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
+

+ Earliest offset (logical) +

+
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
+

+ Latest offset (logical) +

+
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
+

+ Invalid/unspecified offset +

+
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
+

+ Use stored offset +

+
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
+

+ Any partition (for partitioning), or unspecified value (for all other cases) +

+

+ func + + LibraryVersion + +

+
func LibraryVersion() (int, string)
+

+ LibraryVersion returns the underlying librdkafka library version as a +(version_int, version_str) tuple. +

+

+ type + + AssignedPartitions + +

+
type AssignedPartitions struct {
+    Partitions []TopicPartition
+}
+

+ AssignedPartitions consumer group rebalance event: assigned partition set +

+

+ func (AssignedPartitions) + + String + +

+
func (e AssignedPartitions) String() string
+

+ type + + BrokerMetadata + +

+
type BrokerMetadata struct {
+    ID   int32
+    Host string
+    Port int
+}
+

+ BrokerMetadata contains per-broker metadata +

+

+ type + + ConfigMap + +

+
type ConfigMap map[string]ConfigValue
+

+ ConfigMap is a map contaning standard librdkafka configuration properties as documented in: + + https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md + +

+

+ The special property "default.topic.config" (optional) is a ConfigMap containing default topic +configuration properties. +

+

+ func (ConfigMap) + + Set + +

+
func (m ConfigMap) Set(kv string) error
+

+ Set implements flag.Set (command line argument parser) as a convenience +for `-X key=value` config. +

+

+ func (ConfigMap) + + SetKey + +

+
func (m ConfigMap) SetKey(key string, value ConfigValue) error
+

+ SetKey sets configuration property key to value. +For user convenience a key prefixed with {topic}. will be +set on the "default.topic.config" sub-map. +

+

+ type + + ConfigValue + +

+
type ConfigValue interface{}
+

+ ConfigValue supports the following types: +

+
bool, int, string, any type with the standard String() interface
+
+

+ type + + Consumer + +

+
type Consumer struct {
+    // contains filtered or unexported fields
+}
+

+ Consumer implements a High-level Apache Kafka Consumer instance +

+

+ func + + NewConsumer + +

+
func NewConsumer(conf *ConfigMap) (*Consumer, error)
+

+ NewConsumer creates a new high-level Consumer instance. +

+

+ Supported special configuration properties: +

+
go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
+                                     If set to true the app must handle the AssignedPartitions and
+                                     RevokedPartitions events and call Assign() and Unassign()
+                                     respectively.
+go.events.channel.enable (bool, false) - Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled. (Experimental)
+go.events.channel.size (int, 1000) - Events() channel size
+
+

+ WARNING: Due to the buffering nature of channels (and queues in general) the +use of the events channel risks receiving outdated events and +messages. Minimizing go.events.channel.size reduces the risk +and number of outdated events and messages but does not eliminate +the factor completely. With a channel size of 1 at most one +event or message may be outdated. +

+

+ func (*Consumer) + + Assign + +

+
func (c *Consumer) Assign(partitions []TopicPartition) (err error)
+

+ Assign an atomic set of partitions to consume. +This replaces the current assignment. +

+

+ func (*Consumer) + + Close + +

+
func (c *Consumer) Close() (err error)
+

+ Close Consumer instance. +The object is no longer usable after this call. +

+

+ func (*Consumer) + + Commit + +

+
func (c *Consumer) Commit() ([]TopicPartition, error)
+

+ Commit offsets for currently assigned partitions +This is a blocking call. +Returns the committed offsets on success. +

+

+ func (*Consumer) + + CommitMessage + +

+
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)
+

+ CommitMessage commits offset based on the provided message. +This is a blocking call. +Returns the committed offsets on success. +

+

+ func (*Consumer) + + CommitOffsets + +

+
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)
+

+ CommitOffsets commits the provided list of offsets +This is a blocking call. +Returns the committed offsets on success. +

+

+ func (*Consumer) + + Events + +

+
func (c *Consumer) Events() chan Event
+

+ Events returns the Events channel (if enabled) +

+

+ func (*Consumer) + + GetMetadata + +

+
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+

+ GetMetadata queries broker for cluster and topic metadata. +If topic is non-nil only information about that topic is returned, else if +allTopics is false only information about locally used topics is returned, +else information about all topics is returned. +

+

+ func (*Consumer) + + Poll + +

+
func (c *Consumer) Poll(timeoutMs int) (event Event)
+

+ Poll the consumer for messages or events. +

+

+ Will block for at most timeoutMs milliseconds +

+

+ The following callbacks may be triggered: +

+
Subscribe()'s rebalanceCb
+
+

+ Returns nil on timeout, else an Event +

+

+ func (*Consumer) + + QueryWatermarkOffsets + +

+
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+

+ QueryWatermarkOffsets returns the broker's low and high offsets for the given topic +and partition. +

+

+ func (*Consumer) + + String + +

+
func (c *Consumer) String() string
+

+ Strings returns a human readable name for a Consumer instance +

+

+ func (*Consumer) + + Subscribe + +

+
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
+

+ Subscribe to a single topic +This replaces the current subscription +

+

+ func (*Consumer) + + SubscribeTopics + +

+
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)
+

+ SubscribeTopics subscribes to the provided list of topics. +This replaces the current subscription. +

+

+ func (*Consumer) + + Unassign + +

+
func (c *Consumer) Unassign() (err error)
+

+ Unassign the current set of partitions to consume. +

+

+ func (*Consumer) + + Unsubscribe + +

+
func (c *Consumer) Unsubscribe() (err error)
+

+ Unsubscribe from the current subscription, if any. +

+

+ type + + Error + +

+
type Error struct {
+    // contains filtered or unexported fields
+}
+

+ Error provides a Kafka-specific error container +

+

+ func (Error) + + Code + +

+
func (e Error) Code() ErrorCode
+

+ Code returns the ErrorCode of an Error +

+

+ func (Error) + + Error + +

+
func (e Error) Error() string
+

+ Error returns a human readable representation of an Error +Same as Error.String() +

+

+ func (Error) + + String + +

+
func (e Error) String() string
+

+ String returns a human readable representation of an Error +

+

+ type + + ErrorCode + +

+
type ErrorCode int
+

+ ErrorCode is the integer representation of local and broker error codes +

+
const (
+    // ErrBadMsg Local: Bad message format
+    ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
+    // ErrBadCompression Local: Invalid compressed data
+    ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
+    // ErrDestroy Local: Broker handle destroyed
+    ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
+    // ErrFail Local: Communication failure with broker
+    ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
+    // ErrTransport Local: Broker transport failure
+    ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
+    // ErrCritSysResource Local: Critical system resource failure
+    ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
+    // ErrResolve Local: Host resolution failure
+    ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
+    // ErrMsgTimedOut Local: Message timed out
+    ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
+    // ErrPartitionEOF Broker: No more messages
+    ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
+    // ErrUnknownPartition Local: Unknown partition
+    ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+    // ErrFs Local: File or filesystem error
+    ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
+    // ErrUnknownTopic Local: Unknown topic
+    ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+    // ErrAllBrokersDown Local: All broker connections are down
+    ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
+    // ErrInvalidArg Local: Invalid argument or configuration
+    ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
+    // ErrTimedOut Local: Timed out
+    ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
+    // ErrQueueFull Local: Queue full
+    ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
+    // ErrIsrInsuff Local: ISR count insufficient
+    ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
+    // ErrNodeUpdate Local: Broker node update
+    ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
+    // ErrSsl Local: SSL error
+    ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
+    // ErrWaitCoord Local: Waiting for coordinator
+    ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
+    // ErrUnknownGroup Local: Unknown group
+    ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
+    // ErrInProgress Local: Operation in progress
+    ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
+    // ErrPrevInProgress Local: Previous operation in progress
+    ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
+    // ErrExistingSubscription Local: Existing subscription
+    ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
+    // ErrAssignPartitions Local: Assign partitions
+    ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+    // ErrRevokePartitions Local: Revoke partitions
+    ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
+    // ErrConflict Local: Conflicting use
+    ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
+    // ErrState Local: Erroneous state
+    ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
+    // ErrUnknownProtocol Local: Unknown protocol
+    ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
+    // ErrNotImplemented Local: Not implemented
+    ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
+    // ErrAuthentication Local: Authentication failure
+    ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
+    // ErrNoOffset Local: No offset stored
+    ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
+    // ErrOutdated Local: Outdated
+    ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
+    // ErrTimedOutQueue Local: Timed out in queue
+    ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
+    // ErrUnknown Unknown broker error
+    ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
+    // ErrNoError Success
+    ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
+    // ErrOffsetOutOfRange Broker: Offset out of range
+    ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
+    // ErrInvalidMsg Broker: Invalid message
+    ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
+    // ErrUnknownTopicOrPart Broker: Unknown topic or partition
+    ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+    // ErrInvalidMsgSize Broker: Invalid message size
+    ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
+    // ErrLeaderNotAvailable Broker: Leader not available
+    ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+    // ErrNotLeaderForPartition Broker: Not leader for partition
+    ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
+    // ErrRequestTimedOut Broker: Request timed out
+    ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
+    // ErrBrokerNotAvailable Broker: Broker not available
+    ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
+    // ErrReplicaNotAvailable Broker: Replica not available
+    ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
+    // ErrMsgSizeTooLarge Broker: Message size too large
+    ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
+    // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
+    ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
+    // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
+    ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
+    // ErrNetworkException Broker: Broker disconnected before response received
+    ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
+    // ErrGroupLoadInProgress Broker: Group coordinator load in progress
+    ErrGroupLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS)
+    // ErrGroupCoordinatorNotAvailable Broker: Group coordinator not available
+    ErrGroupCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE)
+    // ErrNotCoordinatorForGroup Broker: Not coordinator for group
+    ErrNotCoordinatorForGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP)
+    // ErrTopicException Broker: Invalid topic
+    ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
+    // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
+    ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
+    // ErrNotEnoughReplicas Broker: Not enough in-sync replicas
+    ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
+    // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
+    ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
+    // ErrInvalidRequiredAcks Broker: Invalid required acks value
+    ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
+    // ErrIllegalGeneration Broker: Specified group generation id is not valid
+    ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
+    // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
+    ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
+    // ErrInvalidGroupID Broker: Invalid group.id
+    ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
+    // ErrUnknownMemberID Broker: Unknown member
+    ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
+    // ErrInvalidSessionTimeout Broker: Invalid session timeout
+    ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
+    // ErrRebalanceInProgress Broker: Group rebalance in progress
+    ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
+    // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
+    ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
+    // ErrTopicAuthorizationFailed Broker: Topic authorization failed
+    ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
+    // ErrGroupAuthorizationFailed Broker: Group authorization failed
+    ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
+    // ErrClusterAuthorizationFailed Broker: Cluster authorization failed
+    ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
+    // ErrInvalidTimestamp Broker: Invalid timestamp
+    ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
+    // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
+    ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
+    // ErrIllegalSaslState Broker: Request not valid in current SASL state
+    ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
+    // ErrUnsupportedVersion Broker: API version not supported
+    ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
+)
+

+ func (ErrorCode) + + String + +

+
func (c ErrorCode) String() string
+

+ String returns a human readable representation of an error code +

+

+ type + + Event + +

+
type Event interface {
+    // String returns a human-readable representation of the event
+    String() string
+}
+

+ Event generic interface +

+

+ type + + Handle + +

+
type Handle interface {
+    // contains filtered or unexported methods
+}
+

+ Handle represents a generic client handle containing common parts for +both Producer and Consumer. +

+

+ type + + Message + +

+
type Message struct {
+    TopicPartition TopicPartition
+    Value          []byte
+    Key            []byte
+    Timestamp      time.Time
+    TimestampType  TimestampType
+    Opaque         interface{}
+}
+

+ Message represents a Kafka message +

+

+ func (*Message) + + String + +

+
func (m *Message) String() string
+

+ String returns a human readable representation of a Message. +Key and payload are not represented. +

+

+ type + + Metadata + +

+
type Metadata struct {
+    Brokers []BrokerMetadata
+    Topics  map[string]TopicMetadata
+
+    OriginatingBroker BrokerMetadata
+}
+

+ Metadata contains broker and topic metadata for all (matching) topics +

+

+ type + + Offset + +

+
type Offset int64
+

+ Offset type (int64) with support for canonical names +

+

+ func + + NewOffset + +

+
func NewOffset(offset interface{}) (Offset, error)
+

+ NewOffset creates a new Offset using the provided logical string, or an +absolute int64 offset value. +Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored" +

+

+ func + + OffsetTail + +

+
func OffsetTail(relativeOffset Offset) Offset
+

+ OffsetTail returns the logical offset relativeOffset from current end of partition +

+

+ func (Offset) + + Set + +

+
func (o Offset) Set(offset interface{}) error
+

+ Set offset value, see NewOffset() +

+

+ func (Offset) + + String + +

+
func (o Offset) String() string
+

+ type + + OffsetsCommitted + +

+
type OffsetsCommitted struct {
+    Error   error
+    Offsets []TopicPartition
+}
+

+ OffsetsCommitted reports committed offsets +

+

+ func (OffsetsCommitted) + + String + +

+
func (o OffsetsCommitted) String() string
+

+ type + + PartitionEOF + +

+
type PartitionEOF TopicPartition
+

+ PartitionEOF consumer reached end of partition +

+

+ func (PartitionEOF) + + String + +

+
func (p PartitionEOF) String() string
+

+ type + + PartitionMetadata + +

+
type PartitionMetadata struct {
+    ID       int32
+    Error    Error
+    Leader   int32
+    Replicas []int32
+    Isrs     []int32
+}
+

+ PartitionMetadata contains per-partition metadata +

+

+ type + + Producer + +

+
type Producer struct {
+    // contains filtered or unexported fields
+}
+

+ Producer implements a High-level Apache Kafka Producer instance +

+

+ func + + NewProducer + +

+
func NewProducer(conf *ConfigMap) (*Producer, error)
+

+ NewProducer creates a new high-level Producer instance. +

+

+ conf is a *ConfigMap with standard librdkafka configuration properties, see here: +

+

+ Supported special configuration properties: +

+
go.batch.producer (bool, false) - Enable batch producer (experimental for increased performance).
+                                  These batches do not relate to Kafka message batches in any way.
+go.delivery.reports (bool, true) - Forward per-message delivery reports to the
+                                   Events() channel.
+go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
+
+

+ func (*Producer) + + Close + +

+
func (p *Producer) Close()
+

+ Close a Producer instance. +The Producer object or its channels are no longer usable after this call. +

+

+ func (*Producer) + + Events + +

+
func (p *Producer) Events() chan Event
+

+ Events returns the Events channel (read) +

+

+ func (*Producer) + + Flush + +

+
func (p *Producer) Flush(timeoutMs int) int
+

+ Flush and wait for outstanding messages and requests to complete delivery. +Includes messages on ProduceChannel. +Runs until value reaches zero or on timeoutMs. +Returns the number of outstanding events still un-flushed. +

+

+ func (*Producer) + + GetMetadata + +

+
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+

+ GetMetadata queries broker for cluster and topic metadata. +If topic is non-nil only information about that topic is returned, else if +allTopics is false only information about locally used topics is returned, +else information about all topics is returned. +

+

+ func (*Producer) + + Len + +

+
func (p *Producer) Len() int
+

+ Len returns the number of messages and requests waiting to be transmitted to the broker +as well as delivery reports queued for the application. +Includes messages on ProduceChannel. +

+

+ func (*Producer) + + Produce + +

+
func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error
+

+ Produce single message. +This is an asynchronous call that enqueues the message on the internal +transmit queue, thus returning immediately. +The delivery report will be sent on the provided deliveryChan if specified, +or on the Producer object's Events() channel if not. +Returns an error if message could not be enqueued. +

+

+ func (*Producer) + + ProduceChannel + +

+
func (p *Producer) ProduceChannel() chan *Message
+

+ ProduceChannel returns the produce *Message channel (write) +

+

+ func (*Producer) + + QueryWatermarkOffsets + +

+
func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+

+ QueryWatermarkOffsets returns the broker's low and high offsets for the given topic +and partition. +

+

+ func (*Producer) + + String + +

+
func (p *Producer) String() string
+

+ String returns a human readable name for a Producer instance +

+

+ type + + RebalanceCb + +

+
type RebalanceCb func(*Consumer, Event) error
+

+ RebalanceCb provides a per-Subscribe*() rebalance event callback. +The passed Event will be either AssignedPartitions or RevokedPartitions +

+

+ type + + RevokedPartitions + +

+
type RevokedPartitions struct {
+    Partitions []TopicPartition
+}
+

+ RevokedPartitions consumer group rebalance event: revoked partition set +

+

+ func (RevokedPartitions) + + String + +

+
func (e RevokedPartitions) String() string
+

+ type + + TimestampType + +

+
type TimestampType int
+

+ TimestampType is the Message timestamp type or source +

+

+ func (TimestampType) + + String + +

+
func (t TimestampType) String() string
+

+ type + + TopicMetadata + +

+
type TopicMetadata struct {
+    Topic      string
+    Partitions []PartitionMetadata
+    Error      Error
+}
+

+ TopicMetadata contains per-topic metadata +

+

+ type + + TopicPartition + +

+
type TopicPartition struct {
+    Topic     *string
+    Partition int32
+    Offset    Offset
+    Error     error
+}
+

+ TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset. +

+

+ func (TopicPartition) + + String + +

+
func (p TopicPartition) String() string
+ +
+ +
+ + + + + + + + + diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go new file mode 100644 index 0000000000..c14c1f6324 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go @@ -0,0 +1,7 @@ +// +build !static +// +build !static_all + +package kafka + +// #cgo pkg-config: rdkafka +import "C" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static.go new file mode 100644 index 0000000000..3c8799c6ff --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static.go @@ -0,0 +1,7 @@ +// +build static +// +build !static_all + +package kafka + +// #cgo pkg-config: rdkafka-static +import "C" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static_all.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static_all.go new file mode 100644 index 0000000000..8afb8c949d --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static_all.go @@ -0,0 +1,8 @@ +// +build !static +// +build static_all + +package kafka + +// #cgo pkg-config: rdkafka-static +// #cgo LDFLAGS: -static +import "C" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go new file mode 100644 index 0000000000..df90bac207 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go @@ -0,0 +1,225 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" + "reflect" + "strings" + "unsafe" +) + +/* +#include +#include +*/ +import "C" + +// ConfigValue supports the following types: +// bool, int, string, any type with the standard String() interface +type ConfigValue interface{} + +// ConfigMap is a map contaning standard librdkafka configuration properties as documented in: +// https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md +// +// The special property "default.topic.config" (optional) is a ConfigMap containing default topic +// configuration properties. +type ConfigMap map[string]ConfigValue + +// SetKey sets configuration property key to value. +// For user convenience a key prefixed with {topic}. will be +// set on the "default.topic.config" sub-map. +func (m ConfigMap) SetKey(key string, value ConfigValue) error { + if strings.HasPrefix(key, "{topic}.") { + _, found := m["default.topic.config"] + if !found { + m["default.topic.config"] = ConfigMap{} + } + m["default.topic.config"].(ConfigMap)[strings.TrimPrefix(key, "{topic}.")] = value + } else { + m[key] = value + } + + return nil +} + +// Set implements flag.Set (command line argument parser) as a convenience +// for `-X key=value` config. 
+func (m ConfigMap) Set(kv string) error { + i := strings.Index(kv, "=") + if i == -1 { + return Error{ErrInvalidArg, "Expected key=value"} + } + + k := kv[:i] + v := kv[i+1:] + + return m.SetKey(k, v) +} + +func value2string(v ConfigValue) (ret string, errstr string) { + + switch x := v.(type) { + case bool: + if x { + ret = "true" + } else { + ret = "false" + } + case int: + ret = fmt.Sprintf("%d", x) + case string: + ret = x + case fmt.Stringer: + ret = x.String() + default: + return "", fmt.Sprintf("Invalid value type %T", v) + } + + return ret, "" +} + +// rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t +// into a common interface. +type rdkAnyconf interface { + set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t +} + +func anyconfSet(anyconf rdkAnyconf, key string, value string) (err error) { + cKey := C.CString(key) + cVal := C.CString(value) + cErrstr := (*C.char)(C.malloc(C.size_t(128))) + defer C.free(unsafe.Pointer(cErrstr)) + + if anyconf.set(cKey, cVal, cErrstr, 128) != C.RD_KAFKA_CONF_OK { + C.free(unsafe.Pointer(cKey)) + C.free(unsafe.Pointer(cVal)) + return newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + return nil +} + +// we need these typedefs to workaround a crash in golint +// when parsing the set() methods below +type rdkConf C.rd_kafka_conf_t +type rdkTopicConf C.rd_kafka_topic_conf_t + +func (cConf *rdkConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t { + return C.rd_kafka_conf_set((*C.rd_kafka_conf_t)(cConf), cKey, cVal, cErrstr, C.size_t(errstrSize)) +} + +func (ctopicConf *rdkTopicConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t { + return C.rd_kafka_topic_conf_set((*C.rd_kafka_topic_conf_t)(ctopicConf), cKey, cVal, cErrstr, C.size_t(errstrSize)) +} + +func configConvertAnyconf(m ConfigMap, anyconf rdkAnyconf) (err error) { + + for k, v := range m { + switch v.(type) { + case 
ConfigMap: + /* Special sub-ConfigMap, only used for default.topic.config */ + + if k != "default.topic.config" { + return Error{ErrInvalidArg, fmt.Sprintf("Invalid type for key %s", k)} + } + + var cTopicConf = C.rd_kafka_topic_conf_new() + + err = configConvertAnyconf(v.(ConfigMap), + (*rdkTopicConf)(cTopicConf)) + if err != nil { + C.rd_kafka_topic_conf_destroy(cTopicConf) + return err + } + + C.rd_kafka_conf_set_default_topic_conf( + (*C.rd_kafka_conf_t)(anyconf.(*rdkConf)), + (*C.rd_kafka_topic_conf_t)((*rdkTopicConf)(cTopicConf))) + + default: + val, errstr := value2string(v) + if errstr != "" { + return Error{ErrInvalidArg, fmt.Sprintf("%s for key %s (expected string,bool,int,ConfigMap)", errstr, k)} + } + + err = anyconfSet(anyconf, k, val) + if err != nil { + return err + } + } + } + + return nil +} + +// convert ConfigMap to C rd_kafka_conf_t * +func (m ConfigMap) convert() (cConf *C.rd_kafka_conf_t, err error) { + cConf = C.rd_kafka_conf_new() + + err = configConvertAnyconf(m, (*rdkConf)(cConf)) + if err != nil { + C.rd_kafka_conf_destroy(cConf) + return nil, err + } + return cConf, nil +} + +// get finds key in the configmap and returns its value. +// If the key is not found defval is returned. +// If the key is found but the type is mismatched an error is returned. +func (m ConfigMap) get(key string, defval ConfigValue) (ConfigValue, error) { + if strings.HasPrefix(key, "{topic}.") { + defconfCv, found := m["default.topic.config"] + if !found { + return defval, nil + } + return defconfCv.(ConfigMap).get(strings.TrimPrefix(key, "{topic}."), defval) + } + + v, ok := m[key] + if !ok { + return defval, nil + } + + if defval != nil && reflect.TypeOf(defval) != reflect.TypeOf(v) { + return nil, Error{ErrInvalidArg, fmt.Sprintf("%s expects type %T, not %T", key, defval, v)} + } + + return v, nil +} + +// extract performs a get() and if found deletes the key. 
+func (m ConfigMap) extract(key string, defval ConfigValue) (ConfigValue, error) { + + v, err := m.get(key, defval) + if err != nil { + return nil, err + } + + delete(m, key) + + return v, nil +} + +// Get finds the given key in the ConfigMap and returns its value. +// If the key is not found `defval` is returned. +// If the key is found but the type does not match that of `defval` (unless nil) +// an ErrInvalidArg error is returned. +func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) { + return m.get(key, defval) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config_test.go new file mode 100644 index 0000000000..6b414fdf9f --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config_test.go @@ -0,0 +1,126 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka + +import ( + "fmt" + "testing" +) + +// A custom type with Stringer interface to be used to test config map APIs +type HostPortType struct { + Host string + Port int +} + +// implements String() interface +func (hp HostPortType) String() string { + return fmt.Sprintf("%s:%d", hp.Host, hp.Port) +} + +//Test config map APIs +func TestConfigMapAPIs(t *testing.T) { + config := &ConfigMap{} + + // set a good key via SetKey() + err := config.SetKey("bootstrap.servers", testconf.Brokers) + if err != nil { + t.Errorf("Failed to set key via SetKey(). Error: %s\n", err) + } + + // test custom Stringer type + hostPort := HostPortType{Host: "localhost", Port: 9092} + err = config.SetKey("bootstrap.servers", hostPort) + if err != nil { + t.Errorf("Failed to set custom Stringer type via SetKey(). Error: %s\n", err) + } + + // test boolean type + err = config.SetKey("{topic}.produce.offset.report", true) + if err != nil { + t.Errorf("Failed to set key via SetKey(). Error: %s\n", err) + } + + // test offset literal string + err = config.SetKey("{topic}.auto.offset.reset", "earliest") + if err != nil { + t.Errorf("Failed to set key via SetKey(). Error: %s\n", err) + } + + //test offset constant + err = config.SetKey("{topic}.auto.offset.reset", OffsetBeginning) + if err != nil { + t.Errorf("Failed to set key via SetKey(). Error: %s\n", err) + } + + //test integer offset + err = config.SetKey("{topic}.message.timeout.ms", 10) + if err != nil { + t.Errorf("Failed to set integer value via SetKey(). Error: %s\n", err) + } + + // set a good key-value pair via Set() + err = config.Set("group.id=test.id") + if err != nil { + t.Errorf("Failed to set key-value pair via Set(). 
Error: %s\n", err) + } + + // negative test cases + // set a bad key-value pair via Set() + err = config.Set("group.id:test.id2") + if err == nil { + t.Errorf("Expected failure when setting invalid key-value pair via Set()\n") + } + + // get string value + v, err := config.Get("group.id", nil) + if err != nil { + t.Errorf("Expected Get(group.id) to succeed: %s\n", err) + } + if v == nil { + t.Errorf("Expected Get(group.id) to return non-nil value\n") + } + if v.(string) != "test.id" { + t.Errorf("group.id mismatch: %s\n", v) + } + + // get string value but request int + dummyInt := 12 + _, err = config.Get("group.id", dummyInt) + if err == nil { + t.Errorf("Expected Get(group.id) to fail\n") + } + + // get integer value + v, err = config.Get("{topic}.message.timeout.ms", dummyInt) + if err != nil { + t.Errorf("Expected Get(message.timeout.ms) to succeed: %s\n", err) + } + if v == nil { + t.Errorf("Expected Get(message.timeout.ms) to return non-nil value\n") + } + if v.(int) != 10 { + t.Errorf("message.timeout.ms mismatch: %d\n", v.(int)) + } + + // get unknown value + v, err = config.Get("dummy.value.not.found", nil) + if v != nil { + t.Errorf("Expected nil for dummy value, got %v\n", v) + } + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go new file mode 100644 index 0000000000..0ac7e2a1a8 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go @@ -0,0 +1,576 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" + "math" + "time" + "unsafe" +) + +/* +#include +#include + + +static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) { + return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL; +} +*/ +import "C" + +// RebalanceCb provides a per-Subscribe*() rebalance event callback. +// The passed Event will be either AssignedPartitions or RevokedPartitions +type RebalanceCb func(*Consumer, Event) error + +// Consumer implements a High-level Apache Kafka Consumer instance +type Consumer struct { + events chan Event + handle handle + eventsChanEnable bool + readerTermChan chan bool + rebalanceCb RebalanceCb + appReassigned bool + appRebalanceEnable bool // config setting +} + +// Strings returns a human readable name for a Consumer instance +func (c *Consumer) String() string { + return c.handle.String() +} + +// getHandle implements the Handle interface +func (c *Consumer) gethandle() *handle { + return &c.handle +} + +// Subscribe to a single topic +// This replaces the current subscription +func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error { + return c.SubscribeTopics([]string{topic}, rebalanceCb) +} + +// SubscribeTopics subscribes to the provided list of topics. +// This replaces the current subscription. 
+func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) { + ctopics := C.rd_kafka_topic_partition_list_new(C.int(len(topics))) + defer C.rd_kafka_topic_partition_list_destroy(ctopics) + + for _, topic := range topics { + ctopic := C.CString(topic) + defer C.free(unsafe.Pointer(ctopic)) + C.rd_kafka_topic_partition_list_add(ctopics, ctopic, C.RD_KAFKA_PARTITION_UA) + } + + e := C.rd_kafka_subscribe(c.handle.rk, ctopics) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(e) + } + + c.rebalanceCb = rebalanceCb + c.handle.currAppRebalanceEnable = c.rebalanceCb != nil || c.appRebalanceEnable + + return nil +} + +// Unsubscribe from the current subscription, if any. +func (c *Consumer) Unsubscribe() (err error) { + C.rd_kafka_unsubscribe(c.handle.rk) + return nil +} + +// Assign an atomic set of partitions to consume. +// This replaces the current assignment. +func (c *Consumer) Assign(partitions []TopicPartition) (err error) { + c.appReassigned = true + + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + + e := C.rd_kafka_assign(c.handle.rk, cparts) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(e) + } + + return nil +} + +// Unassign the current set of partitions to consume. +func (c *Consumer) Unassign() (err error) { + c.appReassigned = true + + e := C.rd_kafka_assign(c.handle.rk, nil) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(e) + } + + return nil +} + +// commit offsets for specified offsets. +// If offsets is nil the currently assigned partitions' offsets are committed. +// This is a blocking call, caller will need to wrap in go-routine to +// get async or throw-away behaviour. 
+func (c *Consumer) commit(offsets []TopicPartition) (committedOffsets []TopicPartition, err error) { + var rkqu *C.rd_kafka_queue_t + + rkqu = C.rd_kafka_queue_new(c.handle.rk) + defer C.rd_kafka_queue_destroy(rkqu) + + var coffsets *C.rd_kafka_topic_partition_list_t + if offsets != nil { + coffsets = newCPartsFromTopicPartitions(offsets) + defer C.rd_kafka_topic_partition_list_destroy(coffsets) + } + + cErr := C.rd_kafka_commit_queue(c.handle.rk, coffsets, rkqu, nil, nil) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + + rkev := C.rd_kafka_queue_poll(rkqu, C.int(-1)) + if rkev == nil { + // shouldn't happen + return nil, newError(C.RD_KAFKA_RESP_ERR__DESTROY) + } + defer C.rd_kafka_event_destroy(rkev) + + if C.rd_kafka_event_type(rkev) != C.RD_KAFKA_EVENT_OFFSET_COMMIT { + panic(fmt.Sprintf("Expected OFFSET_COMMIT, got %s", + C.GoString(C.rd_kafka_event_name(rkev)))) + } + + cErr = C.rd_kafka_event_error(rkev) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)) + } + + cRetoffsets := C.rd_kafka_event_topic_partition_list(rkev) + if cRetoffsets == nil { + // no offsets, no error + return nil, nil + } + committedOffsets = newTopicPartitionsFromCparts(cRetoffsets) + + return committedOffsets, nil +} + +// Commit offsets for currently assigned partitions +// This is a blocking call. +// Returns the committed offsets on success. +func (c *Consumer) Commit() ([]TopicPartition, error) { + return c.commit(nil) +} + +// CommitMessage commits offset based on the provided message. +// This is a blocking call. +// Returns the committed offsets on success. 
+func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) { + if m.TopicPartition.Error != nil { + return nil, Error{ErrInvalidArg, "Can't commit errored message"} + } + offsets := []TopicPartition{m.TopicPartition} + offsets[0].Offset++ + return c.commit(offsets) +} + +// CommitOffsets commits the provided list of offsets +// This is a blocking call. +// Returns the committed offsets on success. +func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) { + return c.commit(offsets) +} + +// StoreOffsets stores the provided list of offsets that will be committed +// to the offset store according to `auto.commit.interval.ms` or manual +// offset-less Commit(). +// +// Returns the stored offsets on success. If at least one offset couldn't be stored, +// an error and a list of offsets is returned. Each offset can be checked for +// specific errors via its `.Error` member. +func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) { + coffsets := newCPartsFromTopicPartitions(offsets) + defer C.rd_kafka_topic_partition_list_destroy(coffsets) + + cErr := C.rd_kafka_offsets_store(c.handle.rk, coffsets) + + // coffsets might be annotated with an error + storedOffsets = newTopicPartitionsFromCparts(coffsets) + + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return storedOffsets, newError(cErr) + } + + return storedOffsets, nil +} + +// Seek seeks the given topic partitions using the offset from the TopicPartition. +// +// If timeoutMs is not 0 the call will wait this long for the +// seek to be performed. If the timeout is reached the internal state +// will be unknown and this function returns ErrTimedOut. +// If timeoutMs is 0 it will initiate the seek but return +// immediately without any error reporting (e.g., async). +// +// Seek() may only be used for partitions already being consumed +// (through Assign() or implicitly through a self-rebalanced Subscribe()). 
+// To set the starting offset it is preferred to use Assign() and provide +// a starting offset for each partition. +// +// Returns an error on failure or nil otherwise. +func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error { + rkt := c.handle.getRkt(*partition.Topic) + cErr := C.rd_kafka_seek(rkt, + C.int32_t(partition.Partition), + C.int64_t(partition.Offset), + C.int(timeoutMs)) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cErr) + } + return nil +} + +// Poll the consumer for messages or events. +// +// Will block for at most timeoutMs milliseconds +// +// The following callbacks may be triggered: +// Subscribe()'s rebalanceCb +// +// Returns nil on timeout, else an Event +func (c *Consumer) Poll(timeoutMs int) (event Event) { + ev, _ := c.handle.eventPoll(nil, timeoutMs, 1, nil) + return ev +} + +// Events returns the Events channel (if enabled) +func (c *Consumer) Events() chan Event { + return c.events +} + +// ReadMessage polls the consumer for a message. +// +// This is a conveniance API that wraps Poll() and only returns +// messages or errors. All other event types are discarded. +// +// The call will block for at most `timeout` waiting for +// a new message or error. `timeout` may be set to -1 for +// indefinite wait. +// +// Timeout is returned as (nil, err) where err is `kafka.(Error).Code == Kafka.ErrTimedOut`. +// +// Messages are returned as (msg, nil), +// while general errors are returned as (nil, err), +// and partition-specific errors are returned as (msg, err) where +// msg.TopicPartition provides partition-specific information (such as topic, partition and offset). +// +// All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded. 
+// +func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) { + + var absTimeout time.Time + var timeoutMs int + + if timeout > 0 { + absTimeout = time.Now().Add(timeout) + timeoutMs = (int)(timeout.Seconds() * 1000.0) + } else { + timeoutMs = (int)(timeout) + } + + for { + ev := c.Poll(timeoutMs) + + switch e := ev.(type) { + case *Message: + if e.TopicPartition.Error != nil { + return e, e.TopicPartition.Error + } + return e, nil + case Error: + return nil, e + default: + // Ignore other event types + } + + if timeout > 0 { + // Calculate remaining time + timeoutMs = int(math.Max(0.0, absTimeout.Sub(time.Now()).Seconds()*1000.0)) + } + + if timeoutMs == 0 && ev == nil { + return nil, newError(C.RD_KAFKA_RESP_ERR__TIMED_OUT) + } + + } + +} + +// Close Consumer instance. +// The object is no longer usable after this call. +func (c *Consumer) Close() (err error) { + + if c.eventsChanEnable { + // Wait for consumerReader() to terminate (by closing readerTermChan) + close(c.readerTermChan) + c.handle.waitTerminated(1) + close(c.events) + } + + C.rd_kafka_queue_destroy(c.handle.rkq) + c.handle.rkq = nil + + e := C.rd_kafka_consumer_close(c.handle.rk) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(e) + } + + c.handle.cleanup() + + C.rd_kafka_destroy(c.handle.rk) + + return nil +} + +// NewConsumer creates a new high-level Consumer instance. +// +// Supported special configuration properties: +// go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel. +// If set to true the app must handle the AssignedPartitions and +// RevokedPartitions events and call Assign() and Unassign() +// respectively. +// go.events.channel.enable (bool, false) - Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled. 
(Experimental) +// go.events.channel.size (int, 1000) - Events() channel size +// +// WARNING: Due to the buffering nature of channels (and queues in general) the +// use of the events channel risks receiving outdated events and +// messages. Minimizing go.events.channel.size reduces the risk +// and number of outdated events and messages but does not eliminate +// the factor completely. With a channel size of 1 at most one +// event or message may be outdated. +func NewConsumer(conf *ConfigMap) (*Consumer, error) { + + err := versionCheck() + if err != nil { + return nil, err + } + + groupid, _ := conf.get("group.id", nil) + if groupid == nil { + // without a group.id the underlying cgrp subsystem in librdkafka wont get started + // and without it there is no way to consume assigned partitions. + // So for now require the group.id, this might change in the future. + return nil, newErrorFromString(ErrInvalidArg, "Required property group.id not set") + } + + c := &Consumer{} + + v, err := conf.extract("go.application.rebalance.enable", false) + if err != nil { + return nil, err + } + c.appRebalanceEnable = v.(bool) + + v, err = conf.extract("go.events.channel.enable", false) + if err != nil { + return nil, err + } + c.eventsChanEnable = v.(bool) + + v, err = conf.extract("go.events.channel.size", 1000) + if err != nil { + return nil, err + } + eventsChanSize := v.(int) + + cConf, err := conf.convert() + if err != nil { + return nil, err + } + cErrstr := (*C.char)(C.malloc(C.size_t(256))) + defer C.free(unsafe.Pointer(cErrstr)) + + C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_REBALANCE|C.RD_KAFKA_EVENT_OFFSET_COMMIT|C.RD_KAFKA_EVENT_STATS) + + c.handle.rk = C.rd_kafka_new(C.RD_KAFKA_CONSUMER, cConf, cErrstr, 256) + if c.handle.rk == nil { + return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + C.rd_kafka_poll_set_consumer(c.handle.rk) + + c.handle.c = c + c.handle.setup() + c.handle.rkq = C.rd_kafka_queue_get_consumer(c.handle.rk) + 
if c.handle.rkq == nil { + // no cgrp (no group.id configured), revert to main queue. + c.handle.rkq = C.rd_kafka_queue_get_main(c.handle.rk) + } + + if c.eventsChanEnable { + c.events = make(chan Event, eventsChanSize) + c.readerTermChan = make(chan bool) + + /* Start rdkafka consumer queue reader -> events writer goroutine */ + go consumerReader(c, c.readerTermChan) + } + + return c, nil +} + +// rebalance calls the application's rebalance callback, if any. +// Returns true if the underlying assignment was updated, else false. +func (c *Consumer) rebalance(ev Event) bool { + c.appReassigned = false + + if c.rebalanceCb != nil { + c.rebalanceCb(c, ev) + } + + return c.appReassigned +} + +// consumerReader reads messages and events from the librdkafka consumer queue +// and posts them on the consumer channel. +// Runs until termChan closes +func consumerReader(c *Consumer, termChan chan bool) { + +out: + for true { + select { + case _ = <-termChan: + break out + default: + _, term := c.handle.eventPoll(c.events, 100, 1000, termChan) + if term { + break out + } + + } + } + + c.handle.terminatedChan <- "consumerReader" + return + +} + +// GetMetadata queries broker for cluster and topic metadata. +// If topic is non-nil only information about that topic is returned, else if +// allTopics is false only information about locally used topics is returned, +// else information about all topics is returned. +func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + return getMetadata(c, topic, allTopics, timeoutMs) +} + +// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic +// and partition. +func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) { + return queryWatermarkOffsets(c, topic, partition, timeoutMs) +} + +// OffsetsForTimes looks up offsets by timestamp for the given partitions. 
+// +// The returned offset for each partition is the earliest offset whose +// timestamp is greater than or equal to the given timestamp in the +// corresponding partition. +// +// The timestamps to query are represented as `.Offset` in the `times` +// argument and the looked up offsets are represented as `.Offset` in the returned +// `offsets` list. +// +// The function will block for at most timeoutMs milliseconds. +// +// Duplicate Topic+Partitions are not supported. +// Per-partition errors may be returned in the `.Error` field. +func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + return offsetsForTimes(c, times, timeoutMs) +} + +// Subscription returns the current subscription as set by Subscribe() +func (c *Consumer) Subscription() (topics []string, err error) { + var cTopics *C.rd_kafka_topic_partition_list_t + + cErr := C.rd_kafka_subscription(c.handle.rk, &cTopics) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + defer C.rd_kafka_topic_partition_list_destroy(cTopics) + + topicCnt := int(cTopics.cnt) + topics = make([]string, topicCnt) + for i := 0; i < topicCnt; i++ { + crktpar := C._c_rdkafka_topic_partition_list_entry(cTopics, + C.int(i)) + topics[i] = C.GoString(crktpar.topic) + } + + return topics, nil +} + +// Assignment returns the current partition assignments +func (c *Consumer) Assignment() (partitions []TopicPartition, err error) { + var cParts *C.rd_kafka_topic_partition_list_t + + cErr := C.rd_kafka_assignment(c.handle.rk, &cParts) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + defer C.rd_kafka_topic_partition_list_destroy(cParts) + + partitions = newTopicPartitionsFromCparts(cParts) + + return partitions, nil +} + +// Committed retrieves committed offsets for the given set of partitions +func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + cparts := 
newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_committed(c.handle.rk, cparts, C.int(timeoutMs)) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cerr) + } + + return newTopicPartitionsFromCparts(cparts), nil +} + +// Pause consumption for the provided list of partitions +// +// Note that messages already enqueued on the consumer's Event channel +// (if `go.events.channel.enable` has been set) will NOT be purged by +// this call, set `go.events.channel.size` accordingly. +func (c *Consumer) Pause(partitions []TopicPartition) (err error) { + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_pause_partitions(c.handle.rk, cparts) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cerr) + } + return nil +} + +// Resume consumption for the provided list of partitions +func (c *Consumer) Resume(partitions []TopicPartition) (err error) { + cparts := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_resume_partitions(c.handle.rk, cparts) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cerr) + } + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_performance_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_performance_test.go new file mode 100644 index 0000000000..3fd6263847 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_performance_test.go @@ -0,0 +1,177 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "math/rand" + "testing" + "time" +) + +// consumerPerfTest measures the consumer performance using a pre-primed (produced to) topic +func consumerPerfTest(b *testing.B, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, rd *ratedisp, expCnt int), rebalanceCb func(c *Consumer, event Event) error) { + + r := testconsumerInit(b) + if r == -1 { + b.Skipf("Missing testconf.json") + return + } + if msgcnt == 0 { + msgcnt = r + } + + rand.Seed(int64(time.Now().Unix())) + + conf := ConfigMap{"bootstrap.servers": testconf.Brokers, + "go.events.channel.enable": useChannel, + "group.id": fmt.Sprintf("go_cperf_%d", rand.Intn(1000000)), + "session.timeout.ms": 6000, + "api.version.request": "true", + "enable.auto.commit": false, + "debug": ",", + "default.topic.config": ConfigMap{"auto.offset.reset": "earliest"}} + + conf.updateFromTestconf() + + c, err := NewConsumer(&conf) + + if err != nil { + panic(err) + } + + expCnt := msgcnt + b.Logf("%s, expecting %d messages", testname, expCnt) + + c.Subscribe(testconf.Topic, rebalanceCb) + + rd := ratedispStart(b, testname, 10) + + consumeFunc(c, &rd, expCnt) + + rd.print("TOTAL: ") + + c.Close() + + b.SetBytes(rd.size) + +} + +// handleEvent returns false if processing should stop, else true +func handleEvent(c *Consumer, rd *ratedisp, expCnt int, ev Event) bool { + switch e := ev.(type) { + case *Message: + if e.TopicPartition.Error != nil { + rd.b.Logf("Error: %v", e.TopicPartition) + } + + if rd.cnt == 0 { + // start measuring time from first 
message to avoid + // including rebalancing time. + rd.b.ResetTimer() + rd.reset() + } + + rd.tick(1, int64(len(e.Value))) + + if rd.cnt >= int64(expCnt) { + return false + } + case PartitionEOF: + break // silence + default: + rd.b.Fatalf("Consumer error: %v", e) + } + return true + +} + +// consume messages through the Events channel +func eventChannelConsumer(c *Consumer, rd *ratedisp, expCnt int) { + for ev := range c.Events() { + if !handleEvent(c, rd, expCnt, ev) { + break + } + } +} + +// consume messages through the Poll() interface +func eventPollConsumer(c *Consumer, rd *ratedisp, expCnt int) { + for true { + ev := c.Poll(100) + if ev == nil { + // timeout + continue + } + if !handleEvent(c, rd, expCnt, ev) { + break + } + } +} + +var testconsumerInited = false + +// Produce messages to consume (if needed) +// Query watermarks of topic to see if we need to prime it at all. +// NOTE: This wont work for compacted topics.. +// returns the number of messages to consume +func testconsumerInit(b *testing.B) int { + if testconsumerInited { + return testconf.PerfMsgCount + } + + if !testconfRead() { + return -1 + } + + msgcnt := testconf.PerfMsgCount + + currcnt, err := getMessageCountInTopic(testconf.Topic) + if err == nil { + b.Logf("Topic %s has %d messages, need %d", testconf.Topic, currcnt, msgcnt) + } + if currcnt < msgcnt { + producerPerfTest(b, "Priming producer", msgcnt, false, false, + true, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) + } + + testconsumerInited = true + b.ResetTimer() + return msgcnt +} + +func BenchmarkConsumerChannelPerformance(b *testing.B) { + consumerPerfTest(b, "Channel Consumer", + 0, true, eventChannelConsumer, nil) +} + +func BenchmarkConsumerPollPerformance(b *testing.B) { + consumerPerfTest(b, "Poll Consumer", + 0, false, eventPollConsumer, nil) +} + +func BenchmarkConsumerPollRebalancePerformance(b *testing.B) { + consumerPerfTest(b, "Poll Consumer (rebalance callback)", + 0, false, 
eventPollConsumer, + func(c *Consumer, event Event) error { + b.Logf("Rebalanced: %s", event) + return nil + }) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_test.go new file mode 100644 index 0000000000..d3e71ca311 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer_test.go @@ -0,0 +1,247 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "os" + "reflect" + "sort" + "testing" + "time" +) + +// TestConsumerAPIs dry-tests most Consumer APIs, no broker is needed. 
+func TestConsumerAPIs(t *testing.T) { + + c, err := NewConsumer(&ConfigMap{}) + if err == nil { + t.Fatalf("Expected NewConsumer() to fail without group.id") + } + + c, err = NewConsumer(&ConfigMap{ + "group.id": "gotest", + "socket.timeout.ms": 10, + "session.timeout.ms": 10, + "enable.auto.offset.store": false, // permit StoreOffsets() + }) + if err != nil { + t.Fatalf("%s", err) + } + + t.Logf("Consumer %s", c) + + err = c.Subscribe("gotest", nil) + if err != nil { + t.Errorf("Subscribe failed: %s", err) + } + + err = c.SubscribeTopics([]string{"gotest1", "gotest2"}, + func(my_c *Consumer, ev Event) error { + t.Logf("%s", ev) + return nil + }) + if err != nil { + t.Errorf("SubscribeTopics failed: %s", err) + } + + _, err = c.Commit() + if err != nil && err.(Error).Code() != ErrNoOffset { + t.Errorf("Commit() failed: %s", err) + } + + err = c.Unsubscribe() + if err != nil { + t.Errorf("Unsubscribe failed: %s", err) + } + + topic := "gotest" + stored, err := c.StoreOffsets([]TopicPartition{{Topic: &topic, Partition: 0, Offset: 1}}) + if err != nil && err.(Error).Code() != ErrUnknownPartition { + t.Errorf("StoreOffsets() failed: %s", err) + toppar := stored[0] + if toppar.Error != nil && toppar.Error.(Error).Code() == ErrUnknownPartition { + t.Errorf("StoreOffsets() TopicPartition error: %s", toppar.Error) + } + } + var empty []TopicPartition + stored, err = c.StoreOffsets(empty) + if err != nil { + t.Errorf("StoreOffsets(empty) failed: %s", err) + } + + topic1 := "gotest1" + topic2 := "gotest2" + err = c.Assign([]TopicPartition{{Topic: &topic1, Partition: 2}, + {Topic: &topic2, Partition: 1}}) + if err != nil { + t.Errorf("Assign failed: %s", err) + } + + err = c.Seek(TopicPartition{Topic: &topic1, Partition: 2, Offset: -1}, 1000) + if err != nil { + t.Errorf("Seek failed: %s", err) + } + + // Pause & Resume + err = c.Pause([]TopicPartition{{Topic: &topic1, Partition: 2}, + {Topic: &topic2, Partition: 1}}) + if err != nil { + t.Errorf("Pause failed: %s", err) + } 
+ err = c.Resume([]TopicPartition{{Topic: &topic1, Partition: 2}, + {Topic: &topic2, Partition: 1}}) + if err != nil { + t.Errorf("Resume failed: %s", err) + } + + err = c.Unassign() + if err != nil { + t.Errorf("Unassign failed: %s", err) + } + + topic = "mytopic" + // OffsetsForTimes + offsets, err := c.OffsetsForTimes([]TopicPartition{{Topic: &topic, Offset: 12345}}, 100) + t.Logf("OffsetsForTimes() returned Offsets %s and error %s\n", offsets, err) + if err == nil { + t.Errorf("OffsetsForTimes() should have failed\n") + } + if offsets != nil { + t.Errorf("OffsetsForTimes() failed but returned non-nil Offsets: %s\n", offsets) + } + + // Committed + offsets, err = c.Committed([]TopicPartition{{Topic: &topic, Partition: 5}}, 10) + t.Logf("Committed() returned Offsets %s and error %s\n", offsets, err) + if err == nil { + t.Errorf("Committed() should have failed\n") + } + if offsets != nil { + t.Errorf("Committed() failed but returned non-nil Offsets: %s\n", offsets) + } + + err = c.Close() + if err != nil { + t.Errorf("Close failed: %s", err) + } +} + +func TestConsumerSubscription(t *testing.T) { + c, err := NewConsumer(&ConfigMap{"group.id": "gotest"}) + if err != nil { + t.Fatalf("%s", err) + } + + topics := []string{"gotest1", "gotest2", "gotest3"} + sort.Strings(topics) + + err = c.SubscribeTopics(topics, nil) + if err != nil { + t.Fatalf("SubscribeTopics failed: %s", err) + } + + subscription, err := c.Subscription() + if err != nil { + t.Fatalf("Subscription() failed: %s", err) + } + + sort.Strings(subscription) + + t.Logf("Compare Subscription %v to original list of topics %v\n", + subscription, topics) + + r := reflect.DeepEqual(topics, subscription) + if r != true { + t.Fatalf("Subscription() %v does not match original topics %v", + subscription, topics) + } + c.Close() +} + +func TestConsumerAssignment(t *testing.T) { + c, err := NewConsumer(&ConfigMap{"group.id": "gotest"}) + if err != nil { + t.Fatalf("%s", err) + } + + topic0 := "topic0" + topic1 := 
"topic1" + partitions := TopicPartitions{ + {Topic: &topic1, Partition: 1}, + {Topic: &topic1, Partition: 3}, + {Topic: &topic0, Partition: 2}} + sort.Sort(partitions) + + err = c.Assign(partitions) + if err != nil { + t.Fatalf("Assign failed: %s", err) + } + + assignment, err := c.Assignment() + if err != nil { + t.Fatalf("Assignment() failed: %s", err) + } + + sort.Sort(TopicPartitions(assignment)) + + t.Logf("Compare Assignment %v to original list of partitions %v\n", + assignment, partitions) + + // Use Logf instead of Errorf for timeout-checking errors on CI builds + // since CI environments are unreliable timing-wise. + tmoutFunc := t.Errorf + _, onCi := os.LookupEnv("CI") + if onCi { + tmoutFunc = t.Logf + } + + // Test ReadMessage() + for _, tmout := range []time.Duration{0, 200 * time.Millisecond} { + start := time.Now() + m, err := c.ReadMessage(tmout) + duration := time.Since(start) + + t.Logf("ReadMessage(%v) ret %v and %v in %v", tmout, m, err, duration) + if m != nil || err == nil { + t.Errorf("Expected ReadMessage to fail: %v, %v", m, err) + } + if err.(Error).Code() != ErrTimedOut { + t.Errorf("Expected ReadMessage to fail with ErrTimedOut, not %v", err) + } + + if tmout == 0 { + if duration.Seconds() > 0.1 { + tmoutFunc("Expected ReadMessage(%v) to fail after max 100ms, not %v", tmout, duration) + } + } else if tmout > 0 { + if duration.Seconds() < tmout.Seconds()*0.75 || duration.Seconds() > tmout.Seconds()*1.25 { + tmoutFunc("Expected ReadMessage() to fail after %v -+25%%, not %v", tmout, duration) + } + } + } + + // reflect.DeepEqual() can't be used since TopicPartition.Topic + // is a pointer to a string rather than a string and the pointer + // will differ between partitions and assignment. + // Instead do a simple stringification + string compare. 
+ if fmt.Sprintf("%v", assignment) != fmt.Sprintf("%v", partitions) { + t.Fatalf("Assignment() %v does not match original partitions %v", + assignment, partitions) + } + c.Close() +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go new file mode 100644 index 0000000000..d5a1f74003 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go @@ -0,0 +1,69 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Automatically generate error codes from librdkafka +// See README for instructions +//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go + +/* +#include +*/ +import "C" + +// Error provides a Kafka-specific error container +type Error struct { + code ErrorCode + str string +} + +func newError(code C.rd_kafka_resp_err_t) (err Error) { + return Error{ErrorCode(code), ""} +} + +func newErrorFromString(code ErrorCode, str string) (err Error) { + return Error{code, str} +} + +func newErrorFromCString(code C.rd_kafka_resp_err_t, cstr *C.char) (err Error) { + var str string + if cstr != nil { + str = C.GoString(cstr) + } else { + str = "" + } + return Error{ErrorCode(code), str} +} + +// Error returns a human readable representation of an Error +// Same as Error.String() +func (e Error) Error() string { + return e.String() +} + +// String returns a human readable representation of an Error +func (e Error) String() string { + if len(e.str) > 0 { + return e.str + } + return e.code.String() +} + +// Code returns the ErrorCode of an Error +func (e Error) Code() ErrorCode { + return e.code +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go new file mode 100644 index 0000000000..8a89edb1b7 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go @@ -0,0 +1,330 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" + "os" + "unsafe" +) + +/* +#include +#include +#include "glue_rdkafka.h" + + +#ifdef RD_KAFKA_V_HEADERS +void chdrs_to_tmphdrs (rd_kafka_headers_t *chdrs, tmphdr_t *tmphdrs) { + size_t i = 0; + const char *name; + const void *val; + size_t size; + + while (!rd_kafka_header_get_all(chdrs, i, + &tmphdrs[i].key, + &tmphdrs[i].val, + (size_t *)&tmphdrs[i].size)) + i++; +} +#endif + +rd_kafka_event_t *_rk_queue_poll (rd_kafka_queue_t *rkq, int timeoutMs, + rd_kafka_event_type_t *evtype, + fetched_c_msg_t *fcMsg, + rd_kafka_event_t *prev_rkev) { + rd_kafka_event_t *rkev; + + if (prev_rkev) + rd_kafka_event_destroy(prev_rkev); + + rkev = rd_kafka_queue_poll(rkq, timeoutMs); + *evtype = rd_kafka_event_type(rkev); + + if (*evtype == RD_KAFKA_EVENT_FETCH) { +#ifdef RD_KAFKA_V_HEADERS + rd_kafka_headers_t *hdrs; +#endif + + fcMsg->msg = (rd_kafka_message_t *)rd_kafka_event_message_next(rkev); + fcMsg->ts = rd_kafka_message_timestamp(fcMsg->msg, &fcMsg->tstype); + +#ifdef RD_KAFKA_V_HEADERS + if (!rd_kafka_message_headers(fcMsg->msg, &hdrs)) { + fcMsg->tmphdrsCnt = rd_kafka_header_cnt(hdrs); + fcMsg->tmphdrs = malloc(sizeof(*fcMsg->tmphdrs) * fcMsg->tmphdrsCnt); + chdrs_to_tmphdrs(hdrs, fcMsg->tmphdrs); + } else { +#else + if (1) { +#endif + fcMsg->tmphdrs = NULL; + fcMsg->tmphdrsCnt = 0; + } + } + return rkev; +} +*/ +import "C" + +// Event generic interface +type Event interface { + // String returns a human-readable representation of the event + String() string +} + +// Specific event types + +// Stats statistics event +type Stats struct { + statsJSON string +} + +func (e Stats) String() string { + return e.statsJSON +} + +// AssignedPartitions consumer group rebalance event: assigned partition set +type AssignedPartitions struct { + Partitions []TopicPartition +} + +func (e AssignedPartitions) String() string { + return 
fmt.Sprintf("AssignedPartitions: %v", e.Partitions) +} + +// RevokedPartitions consumer group rebalance event: revoked partition set +type RevokedPartitions struct { + Partitions []TopicPartition +} + +func (e RevokedPartitions) String() string { + return fmt.Sprintf("RevokedPartitions: %v", e.Partitions) +} + +// PartitionEOF consumer reached end of partition +type PartitionEOF TopicPartition + +func (p PartitionEOF) String() string { + return fmt.Sprintf("EOF at %s", TopicPartition(p)) +} + +// OffsetsCommitted reports committed offsets +type OffsetsCommitted struct { + Error error + Offsets []TopicPartition +} + +func (o OffsetsCommitted) String() string { + return fmt.Sprintf("OffsetsCommitted (%v, %v)", o.Error, o.Offsets) +} + +// eventPoll polls an event from the handler's C rd_kafka_queue_t, +// translates it into an Event type and then sends on `channel` if non-nil, else returns the Event. +// term_chan is an optional channel to monitor along with producing to channel +// to indicate that `channel` is being terminated. +// returns (event Event, terminate Bool) tuple, where Terminate indicates +// if termChan received a termination event. +func (h *handle) eventPoll(channel chan Event, timeoutMs int, maxEvents int, termChan chan bool) (Event, bool) { + + var prevRkev *C.rd_kafka_event_t + term := false + + var retval Event + + if channel == nil { + maxEvents = 1 + } +out: + for evcnt := 0; evcnt < maxEvents; evcnt++ { + var evtype C.rd_kafka_event_type_t + var fcMsg C.fetched_c_msg_t + rkev := C._rk_queue_poll(h.rkq, C.int(timeoutMs), &evtype, &fcMsg, prevRkev) + prevRkev = rkev + timeoutMs = 0 + + retval = nil + + switch evtype { + case C.RD_KAFKA_EVENT_FETCH: + // Consumer fetch event, new message. 
+ // Extracted into temporary fcMsg for optimization + retval = h.newMessageFromFcMsg(&fcMsg) + + case C.RD_KAFKA_EVENT_REBALANCE: + // Consumer rebalance event + // If the app provided a RebalanceCb to Subscribe*() or + // has go.application.rebalance.enable=true we create an event + // and forward it to the application thru the RebalanceCb or the + // Events channel respectively. + // Since librdkafka requires the rebalance event to be "acked" by + // the application to synchronize state we keep track of if the + // application performed Assign() or Unassign(), but this only works for + // the non-channel case. For the channel case we assume the application + // calls Assign() / Unassign(). + // Failure to do so will "hang" the consumer, e.g., it wont start consuming + // and it wont close cleanly, so this error case should be visible + // immediately to the application developer. + appReassigned := false + if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS { + if h.currAppRebalanceEnable { + // Application must perform Assign() call + var ev AssignedPartitions + ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev)) + if channel != nil || h.c.rebalanceCb == nil { + retval = ev + appReassigned = true + } else { + appReassigned = h.c.rebalance(ev) + } + } + + if !appReassigned { + C.rd_kafka_assign(h.rk, C.rd_kafka_event_topic_partition_list(rkev)) + } + } else { + if h.currAppRebalanceEnable { + // Application must perform Unassign() call + var ev RevokedPartitions + ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev)) + if channel != nil || h.c.rebalanceCb == nil { + retval = ev + appReassigned = true + } else { + appReassigned = h.c.rebalance(ev) + } + } + + if !appReassigned { + C.rd_kafka_assign(h.rk, nil) + } + } + + case C.RD_KAFKA_EVENT_ERROR: + // Error event + cErr := C.rd_kafka_event_error(rkev) + switch cErr { + case C.RD_KAFKA_RESP_ERR__PARTITION_EOF: + 
crktpar := C.rd_kafka_event_topic_partition(rkev) + if crktpar == nil { + break + } + + defer C.rd_kafka_topic_partition_destroy(crktpar) + var peof PartitionEOF + setupTopicPartitionFromCrktpar((*TopicPartition)(&peof), crktpar) + + retval = peof + default: + retval = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)) + } + + case C.RD_KAFKA_EVENT_STATS: + retval = &Stats{C.GoString(C.rd_kafka_event_stats(rkev))} + + case C.RD_KAFKA_EVENT_DR: + // Producer Delivery Report event + // Each such event contains delivery reports for all + // messages in the produced batch. + // Forward delivery reports to per-message's response channel + // or to the global Producer.Events channel, or none. + rkmessages := make([]*C.rd_kafka_message_t, int(C.rd_kafka_event_message_count(rkev))) + + cnt := int(C.rd_kafka_event_message_array(rkev, (**C.rd_kafka_message_t)(unsafe.Pointer(&rkmessages[0])), C.size_t(len(rkmessages)))) + + for _, rkmessage := range rkmessages[:cnt] { + msg := h.newMessageFromC(rkmessage) + var ch *chan Event + + if rkmessage._private != nil { + // Find cgoif by id + cg, found := h.cgoGet((int)((uintptr)(rkmessage._private))) + if found { + cdr := cg.(cgoDr) + + if cdr.deliveryChan != nil { + ch = &cdr.deliveryChan + } + msg.Opaque = cdr.opaque + } + } + + if ch == nil && h.fwdDr { + ch = &channel + } + + if ch != nil { + select { + case *ch <- msg: + case <-termChan: + break out + } + + } else { + retval = msg + break out + } + } + + case C.RD_KAFKA_EVENT_OFFSET_COMMIT: + // Offsets committed + cErr := C.rd_kafka_event_error(rkev) + coffsets := C.rd_kafka_event_topic_partition_list(rkev) + var offsets []TopicPartition + if coffsets != nil { + offsets = newTopicPartitionsFromCparts(coffsets) + } + + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + retval = OffsetsCommitted{newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)), offsets} + } else { + retval = OffsetsCommitted{nil, offsets} + } + + case C.RD_KAFKA_EVENT_NONE: + // poll timed out: 
no events available + break out + + default: + if rkev != nil { + fmt.Fprintf(os.Stderr, "Ignored event %s\n", + C.GoString(C.rd_kafka_event_name(rkev))) + } + + } + + if retval != nil { + if channel != nil { + select { + case channel <- retval: + case <-termChan: + retval = nil + term = true + break out + } + } else { + break out + } + } + } + + if prevRkev != nil { + C.rd_kafka_event_destroy(prevRkev) + } + + return retval, term +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event_test.go new file mode 100644 index 0000000000..57db700cc4 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event_test.go @@ -0,0 +1,43 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "testing" +) + +// TestEventAPIs dry-tests the public event related APIs, no broker is needed. 
+func TestEventAPIs(t *testing.T) { + assignedPartitions := AssignedPartitions{} + t.Logf("%s\n", assignedPartitions.String()) + + revokedPartitions := RevokedPartitions{} + t.Logf("%s\n", revokedPartitions.String()) + + topic := "test" + partition := PartitionEOF{Topic: &topic} + t.Logf("%s\n", partition.String()) + + partition = PartitionEOF{} + t.Logf("%s\n", partition.String()) + + committedOffsets := OffsetsCommitted{} + t.Logf("%s\n", committedOffsets.String()) + + stats := Stats{"{\"name\": \"Producer-1\"}"} + t.Logf("Stats: %s\n", stats.String()) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go new file mode 100644 index 0000000000..3d173a82d5 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go @@ -0,0 +1,223 @@ +package kafka +// Copyright 2016 Confluent Inc. +// AUTOMATICALLY GENERATED BY /Users/magnus/gocode/bin/go_rdkafka_generr ON 2018-01-30 09:13:09.376879939 +0100 CET m=+0.002362671 USING librdkafka 0.11.3-CI1-7-g3fe870-dirty + +/* +#include +*/ +import "C" + +// ErrorCode is the integer representation of local and broker error codes +type ErrorCode int + +// String returns a human readable representation of an error code +func (c ErrorCode) String() string { + return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c))) +} + +const ( + // ErrBadMsg Local: Bad message format + ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG) + // ErrBadCompression Local: Invalid compressed data + ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION) + // ErrDestroy Local: Broker handle destroyed + ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY) + // ErrFail Local: Communication failure with broker + ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL) + // ErrTransport Local: Broker transport failure + ErrTransport 
ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT) + // ErrCritSysResource Local: Critical system resource failure + ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE) + // ErrResolve Local: Host resolution failure + ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE) + // ErrMsgTimedOut Local: Message timed out + ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT) + // ErrPartitionEOF Broker: No more messages + ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF) + // ErrUnknownPartition Local: Unknown partition + ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION) + // ErrFs Local: File or filesystem error + ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS) + // ErrUnknownTopic Local: Unknown topic + ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + // ErrAllBrokersDown Local: All broker connections are down + ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) + // ErrInvalidArg Local: Invalid argument or configuration + ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG) + // ErrTimedOut Local: Timed out + ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT) + // ErrQueueFull Local: Queue full + ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL) + // ErrIsrInsuff Local: ISR count insufficient + ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF) + // ErrNodeUpdate Local: Broker node update + ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE) + // ErrSsl Local: SSL error + ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL) + // ErrWaitCoord Local: Waiting for coordinator + ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD) + // ErrUnknownGroup Local: Unknown group + ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP) + // ErrInProgress Local: Operation in progress + 
ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS) + // ErrPrevInProgress Local: Previous operation in progress + ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS) + // ErrExistingSubscription Local: Existing subscription + ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION) + // ErrAssignPartitions Local: Assign partitions + ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) + // ErrRevokePartitions Local: Revoke partitions + ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) + // ErrConflict Local: Conflicting use + ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT) + // ErrState Local: Erroneous state + ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE) + // ErrUnknownProtocol Local: Unknown protocol + ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL) + // ErrNotImplemented Local: Not implemented + ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) + // ErrAuthentication Local: Authentication failure + ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION) + // ErrNoOffset Local: No offset stored + ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET) + // ErrOutdated Local: Outdated + ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED) + // ErrTimedOutQueue Local: Timed out in queue + ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) + // ErrUnsupportedFeature Local: Required feature not supported by broker + ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE) + // ErrWaitCache Local: Awaiting cache update + ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE) + // ErrIntr Local: Operation interrupted + ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR) + // ErrKeySerialization Local: Key serialization error + 
ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION) + // ErrValueSerialization Local: Value serialization error + ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION) + // ErrKeyDeserialization Local: Key deserialization error + ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION) + // ErrValueDeserialization Local: Value deserialization error + ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION) + // ErrPartial Local: Partial response + ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL) + // ErrReadOnly Local: Read-only object + ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY) + // ErrNoent Local: No such entry + ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT) + // ErrUnderflow Local: Read underflow + ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW) + // ErrUnknown Unknown broker error + ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN) + // ErrNoError Success + ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR) + // ErrOffsetOutOfRange Broker: Offset out of range + ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE) + // ErrInvalidMsg Broker: Invalid message + ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG) + // ErrUnknownTopicOrPart Broker: Unknown topic or partition + ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) + // ErrInvalidMsgSize Broker: Invalid message size + ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE) + // ErrLeaderNotAvailable Broker: Leader not available + ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) + // ErrNotLeaderForPartition Broker: Not leader for partition + ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION) + // 
ErrRequestTimedOut Broker: Request timed out + ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT) + // ErrBrokerNotAvailable Broker: Broker not available + ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE) + // ErrReplicaNotAvailable Broker: Replica not available + ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE) + // ErrMsgSizeTooLarge Broker: Message size too large + ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE) + // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode + ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH) + // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large + ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE) + // ErrNetworkException Broker: Broker disconnected before response received + ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION) + // ErrGroupLoadInProgress Broker: Group coordinator load in progress + ErrGroupLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) + // ErrGroupCoordinatorNotAvailable Broker: Group coordinator not available + ErrGroupCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE) + // ErrNotCoordinatorForGroup Broker: Not coordinator for group + ErrNotCoordinatorForGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP) + // ErrTopicException Broker: Invalid topic + ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION) + // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size + ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE) + // ErrNotEnoughReplicas Broker: Not enough in-sync replicas + ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS) + // 
ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas + ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND) + // ErrInvalidRequiredAcks Broker: Invalid required acks value + ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS) + // ErrIllegalGeneration Broker: Specified group generation id is not valid + ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION) + // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol + ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL) + // ErrInvalidGroupID Broker: Invalid group.id + ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID) + // ErrUnknownMemberID Broker: Unknown member + ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID) + // ErrInvalidSessionTimeout Broker: Invalid session timeout + ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT) + // ErrRebalanceInProgress Broker: Group rebalance in progress + ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS) + // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid + ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE) + // ErrTopicAuthorizationFailed Broker: Topic authorization failed + ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) + // ErrGroupAuthorizationFailed Broker: Group authorization failed + ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED) + // ErrClusterAuthorizationFailed Broker: Cluster authorization failed + ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED) + // ErrInvalidTimestamp Broker: Invalid 
timestamp + ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP) + // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism + ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM) + // ErrIllegalSaslState Broker: Request not valid in current SASL state + ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE) + // ErrUnsupportedVersion Broker: API version not supported + ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) + // ErrTopicAlreadyExists Broker: Topic already exists + ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS) + // ErrInvalidPartitions Broker: Invalid number of partitions + ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS) + // ErrInvalidReplicationFactor Broker: Invalid replication factor + ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR) + // ErrInvalidReplicaAssignment Broker: Invalid replica assignment + ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT) + // ErrInvalidConfig Broker: Configuration is invalid + ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG) + // ErrNotController Broker: Not controller for cluster + ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER) + // ErrInvalidRequest Broker: Invalid request + ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST) + // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request + ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT) + // ErrPolicyViolation Broker: Isolation policy volation + ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION) + // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order 
sequence number + ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER) + // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number + ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER) + // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch + ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) + // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state + ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE) + // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id + ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING) + // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms + ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT) + // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing + ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS) + // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer + ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED) + // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed + ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED) + // ErrSecurityDisabled Broker: Security features are disabled + ErrSecurityDisabled ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED) + // ErrOperationNotAttempted Broker: Operation not attempted + ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED) +) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h new file mode 100644 index 0000000000..adcef9a0b0 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h @@ -0,0 +1,46 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + + +/** + * Glue between Go, Cgo and librdkafka + */ + + +/** + * Temporary C to Go header representation + */ +typedef struct tmphdr_s { + const char *key; + const void *val; // producer: malloc()ed by Go code if size > 0 + // consumer: owned by librdkafka + ssize_t size; +} tmphdr_t; + + + +/** + * Represents a fetched C message, with all extra fields extracted + * to struct fields. 
+ */ +typedef struct fetched_c_msg { + rd_kafka_message_t *msg; + rd_kafka_timestamp_type_t tstype; + int64_t ts; + tmphdr_t *tmphdrs; + size_t tmphdrsCnt; +} fetched_c_msg_t; diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/go_rdkafka_generr/go_rdkafka_generr.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/go_rdkafka_generr/go_rdkafka_generr.go new file mode 100644 index 0000000000..3f37150bd5 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/go_rdkafka_generr/go_rdkafka_generr.go @@ -0,0 +1,115 @@ +// confluent-kafka-go internal tool to generate error constants from librdkafka +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "fmt" + "os" + "strings" + "time" +) + +/* +#cgo pkg-config: --static rdkafka +#cgo LDFLAGS: -lrdkafka +#include + +static const char *errdesc_to_string (const struct rd_kafka_err_desc *ed, int idx) { + return ed[idx].name; +} + +static const char *errdesc_to_desc (const struct rd_kafka_err_desc *ed, int idx) { + return ed[idx].desc; +} + +*/ +import "C" + +func camelCase(s string) string { + ret := "" + for _, v := range strings.Split(s, "_") { + if len(v) == 0 { + continue + } + ret += strings.ToUpper((string)(v[0])) + strings.ToLower(v[1:]) + } + return ret +} + +func main() { + + outfile := os.Args[1] + + f, err := os.Create(outfile) + if err != nil { + panic(err) + } + defer f.Close() + + f.WriteString("package kafka\n") + f.WriteString("// Copyright 2016 Confluent Inc.\n") + f.WriteString(fmt.Sprintf("// AUTOMATICALLY GENERATED BY %s ON %v USING librdkafka %s\n", + os.Args[0], time.Now(), C.GoString(C.rd_kafka_version_str()))) + + var errdescs *C.struct_rd_kafka_err_desc + var csize C.size_t + C.rd_kafka_get_err_descs(&errdescs, &csize) + + f.WriteString(` +/* +#include +*/ +import "C" + +// ErrorCode is the integer representation of local and broker error codes +type ErrorCode int + +// String returns a human readable representation of an error code +func (c ErrorCode) String() string { + return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c))) +} + +const ( +`) + + for i := 0; i < int(csize); i++ { + orig := C.GoString(C.errdesc_to_string(errdescs, C.int(i))) + if len(orig) == 0 { + continue + } + desc := C.GoString(C.errdesc_to_desc(errdescs, C.int(i))) + if len(desc) == 0 { + continue + } + + errname := "Err" + camelCase(orig) + + // Special handling to please golint + // Eof -> EOF + // Id -> ID + errname = strings.Replace(errname, "Eof", "EOF", -1) + errname = strings.Replace(errname, "Id", "ID", -1) + + f.WriteString(fmt.Sprintf(" // %s %s\n", errname, desc)) + f.WriteString(fmt.Sprintf(" %s ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_%s)\n", + errname, orig)) + } + + f.WriteString(")\n") + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go new file mode 100644 index 0000000000..c09e64d8ab --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go @@ -0,0 +1,207 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" + "sync" + "unsafe" +) + +/* +#include +#include +*/ +import "C" + +// Handle represents a generic client handle containing common parts for +// both Producer and Consumer. 
+type Handle interface { + gethandle() *handle +} + +// Common instance handle for both Producer and Consumer +type handle struct { + rk *C.rd_kafka_t + rkq *C.rd_kafka_queue_t + + // Termination of background go-routines + terminatedChan chan string // string is go-routine name + + // Topic <-> rkt caches + rktCacheLock sync.Mutex + // topic name -> rkt cache + rktCache map[string]*C.rd_kafka_topic_t + // rkt -> topic name cache + rktNameCache map[*C.rd_kafka_topic_t]string + + // + // cgo map + // Maps C callbacks based on cgoid back to its Go object + cgoLock sync.Mutex + cgoidNext uintptr + cgomap map[int]cgoif + + // + // producer + // + p *Producer + + // Forward delivery reports on Producer.Events channel + fwdDr bool + + // + // consumer + // + c *Consumer + + // Forward rebalancing ack responsibility to application (current setting) + currAppRebalanceEnable bool +} + +func (h *handle) String() string { + return C.GoString(C.rd_kafka_name(h.rk)) +} + +func (h *handle) setup() { + h.rktCache = make(map[string]*C.rd_kafka_topic_t) + h.rktNameCache = make(map[*C.rd_kafka_topic_t]string) + h.cgomap = make(map[int]cgoif) + h.terminatedChan = make(chan string, 10) +} + +func (h *handle) cleanup() { + for _, crkt := range h.rktCache { + C.rd_kafka_topic_destroy(crkt) + } + + if h.rkq != nil { + C.rd_kafka_queue_destroy(h.rkq) + } +} + +// waitTerminated waits termination of background go-routines. +// termCnt is the number of goroutines expected to signal termination completion +// on h.terminatedChan +func (h *handle) waitTerminated(termCnt int) { + // Wait for termCnt termination-done events from goroutines + for ; termCnt > 0; termCnt-- { + _ = <-h.terminatedChan + } +} + +// getRkt0 finds or creates and returns a C topic_t object from the local cache. 
+func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) { + if doLock { + h.rktCacheLock.Lock() + defer h.rktCacheLock.Unlock() + } + crkt, ok := h.rktCache[topic] + if ok { + return crkt + } + + if ctopic == nil { + ctopic = C.CString(topic) + defer C.free(unsafe.Pointer(ctopic)) + } + + crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil) + if crkt == nil { + panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s", + topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error())))) + } + + h.rktCache[topic] = crkt + h.rktNameCache[crkt] = topic + + return crkt +} + +// getRkt finds or creates and returns a C topic_t object from the local cache. +func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) { + return h.getRkt0(topic, nil, true) +} + +// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably +// using the local cache to avoid a cgo call. +func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) { + h.rktCacheLock.Lock() + defer h.rktCacheLock.Unlock() + + topic, ok := h.rktNameCache[crkt] + if ok { + return topic + } + + // we need our own copy/refcount of the crkt + ctopic := C.rd_kafka_topic_name(crkt) + topic = C.GoString(ctopic) + + crkt = h.getRkt0(topic, ctopic, false /* dont lock */) + + return topic +} + +// cgoif is a generic interface for holding Go state passed as opaque +// value to the C code. +// Since pointers to complex Go types cannot be passed to C we instead create +// a cgoif object, generate a unique id that is added to the cgomap, +// and then pass that id to the C code. When the C code callback is called we +// use the id to look up the cgoif object in the cgomap. +type cgoif interface{} + +// delivery report cgoif container +type cgoDr struct { + deliveryChan chan Event + opaque interface{} +} + +// cgoPut adds object cg to the handle's cgo map and returns a +// unique id for the added entry. +// Thread-safe. 
+// FIXME: the uniquity of the id is questionable over time. +func (h *handle) cgoPut(cg cgoif) (cgoid int) { + h.cgoLock.Lock() + defer h.cgoLock.Unlock() + + h.cgoidNext++ + if h.cgoidNext == 0 { + h.cgoidNext++ + } + cgoid = (int)(h.cgoidNext) + h.cgomap[cgoid] = cg + return cgoid +} + +// cgoGet looks up cgoid in the cgo map, deletes the reference from the map +// and returns the object, if found. Else returns nil, false. +// Thread-safe. +func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) { + if cgoid == 0 { + return nil, false + } + + h.cgoLock.Lock() + defer h.cgoLock.Unlock() + cg, found = h.cgomap[cgoid] + if found { + delete(h.cgomap, cgoid) + } + + return cg, found +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go new file mode 100644 index 0000000000..67d6202ec3 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go @@ -0,0 +1,67 @@ +package kafka + +/** + * Copyright 2018 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "fmt" + "strconv" +) + +/* +#include +#include +#include "glue_rdkafka.h" +*/ +import "C" + +// Header represents a single Kafka message header. +// +// Message headers are made up of a list of Header elements, retaining their original insert +// order and allowing for duplicate Keys. 
+// +// Key is a human readable string identifying the header. +// Value is the key's binary value, Kafka does not put any restrictions on the format of +// of the Value but it should be made relatively compact. +// The value may be a byte array, empty, or nil. +// +// NOTE: Message headers are not available on producer delivery report messages. +type Header struct { + Key string // Header name (utf-8 string) + Value []byte // Header value (nil, empty, or binary) +} + +// String returns the Header Key and data in a human representable possibly truncated form +// suitable for displaying to the user. +func (h Header) String() string { + if h.Value == nil { + return fmt.Sprintf("%s=nil", h.Key) + } + + valueLen := len(h.Value) + if valueLen == 0 { + return fmt.Sprintf("%s=", h.Key) + } + + truncSize := valueLen + trunc := "" + if valueLen > 50+15 { + truncSize = 50 + trunc = fmt.Sprintf("(%d more bytes)", valueLen-truncSize) + } + + return fmt.Sprintf("%s=%s%s", h.Key, strconv.Quote(string(h.Value[:truncSize])), trunc) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header_test.go new file mode 100644 index 0000000000..f365cb9124 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header_test.go @@ -0,0 +1,41 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka + +import ( + "testing" +) + +// TestHeader tests the Header type +func TestHeader(t *testing.T) { + + hdr := Header{"MyHdr1", []byte("a string")} + if hdr.String() != "MyHdr1=\"a string\"" { + t.Errorf("Unexpected: %s", hdr.String()) + } + + hdr = Header{"MyHdr2", []byte("a longer string that will be truncated right here <-- so you wont see this part.")} + if hdr.String() != "MyHdr2=\"a longer string that will be truncated right here \"(30 more bytes)" { + t.Errorf("Unexpected: %s", hdr.String()) + } + + hdr = Header{"MyHdr3", []byte{1, 2, 3, 4}} + if hdr.String() != "MyHdr3=\"\\x01\\x02\\x03\\x04\"" { + t.Errorf("Unexpected: %s", hdr.String()) + } + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/integration_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/integration_test.go new file mode 100644 index 0000000000..ce502070cd --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/integration_test.go @@ -0,0 +1,1086 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka + +import ( + "encoding/binary" + "fmt" + "reflect" + "testing" + "time" +) + +// producer test control +type producerCtrl struct { + silent bool + withDr bool // use delivery channel + batchProducer bool // enable batch producer +} + +// define commitMode with constants +type commitMode string + +const ( + ViaCommitMessageAPI = "CommitMessage" + ViaCommitOffsetsAPI = "CommitOffsets" + ViaCommitAPI = "Commit" +) + +// consumer test control +type consumerCtrl struct { + autoCommit bool // set enable.auto.commit property + useChannel bool + commitMode commitMode // which commit api to use +} + +type testmsgType struct { + msg Message + expectedError Error +} + +// msgtracker tracks messages +type msgtracker struct { + t *testing.T + msgcnt int64 + errcnt int64 // count of failed messages + msgs []*Message +} + +// msgtrackerStart sets up a new message tracker +func msgtrackerStart(t *testing.T, expectedCnt int) (mt msgtracker) { + mt = msgtracker{t: t} + mt.msgs = make([]*Message, expectedCnt) + return mt +} + +var testMsgsInit = false +var p0TestMsgs []*testmsgType // partition 0 test messages +// pAllTestMsgs holds messages for various partitions including PartitionAny and invalid partitions +var pAllTestMsgs []*testmsgType + +// createTestMessages populates p0TestMsgs and pAllTestMsgs +func createTestMessages() { + + if testMsgsInit { + return + } + defer func() { testMsgsInit = true }() + + testmsgs := make([]*testmsgType, 100) + i := 0 + + // a test message with default initialization + testmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}}} + i++ + + // a test message for partition 0 with only Opaque specified + testmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}, + Opaque: fmt.Sprintf("Op%d", i), + }} + i++ + + // a test message for partition 0 with empty Value and Keys + testmsgs[i] = &testmsgType{msg: Message{TopicPartition: 
TopicPartition{Topic: &testconf.Topic, Partition: 0}, + Value: []byte(""), + Key: []byte(""), + Opaque: fmt.Sprintf("Op%d", i), + }} + i++ + + // a test message for partition 0 with Value, Key, and Opaque + testmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}, + Value: []byte(fmt.Sprintf("value%d", i)), + Key: []byte(fmt.Sprintf("key%d", i)), + Opaque: fmt.Sprintf("Op%d", i), + }} + i++ + + // a test message for partition 0 without Value + testmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}, + Key: []byte(fmt.Sprintf("key%d", i)), + Opaque: fmt.Sprintf("Op%d", i), + }} + i++ + + // a test message for partition 0 without Key + testmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}, + Value: []byte(fmt.Sprintf("value%d", i)), + Opaque: fmt.Sprintf("Op%d", i), + }} + i++ + + p0TestMsgs = testmsgs[:i] + + // a test message for PartitonAny with Value, Key, and Opaque + testmsgs[i] = &testmsgType{msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: PartitionAny}, + Value: []byte(fmt.Sprintf("value%d", i)), + Key: []byte(fmt.Sprintf("key%d", i)), + Opaque: fmt.Sprintf("Op%d", i), + }} + i++ + + // a test message for a non-existent partition with Value, Key, and Opaque. 
+ // It should generate ErrUnknownPartition + testmsgs[i] = &testmsgType{expectedError: Error{ErrUnknownPartition, ""}, + msg: Message{TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: int32(10000)}, + Value: []byte(fmt.Sprintf("value%d", i)), + Key: []byte(fmt.Sprintf("key%d", i)), + Opaque: fmt.Sprintf("Op%d", i), + }} + i++ + + pAllTestMsgs = testmsgs[:i] +} + +// consume messages through the Poll() interface +func eventTestPollConsumer(c *Consumer, mt *msgtracker, expCnt int) { + for true { + ev := c.Poll(100) + if ev == nil { + // timeout + continue + } + if !handleTestEvent(c, mt, expCnt, ev) { + break + } + } +} + +// consume messages through the Events channel +func eventTestChannelConsumer(c *Consumer, mt *msgtracker, expCnt int) { + for ev := range c.Events() { + if !handleTestEvent(c, mt, expCnt, ev) { + break + } + } +} + +// handleTestEvent returns false if processing should stop, else true. Tracks the message received +func handleTestEvent(c *Consumer, mt *msgtracker, expCnt int, ev Event) bool { + switch e := ev.(type) { + case *Message: + if e.TopicPartition.Error != nil { + mt.t.Errorf("Error: %v", e.TopicPartition) + } + mt.msgs[mt.msgcnt] = e + mt.msgcnt++ + if mt.msgcnt >= int64(expCnt) { + return false + } + case PartitionEOF: + break // silence + default: + mt.t.Fatalf("Consumer error: %v", e) + } + return true + +} + +// delivery event handler. 
Tracks the message received +func deliveryTestHandler(t *testing.T, expCnt int64, deliveryChan chan Event, mt *msgtracker, doneChan chan int64) { + + for ev := range deliveryChan { + m, ok := ev.(*Message) + if !ok { + continue + } + + mt.msgs[mt.msgcnt] = m + mt.msgcnt++ + + if m.TopicPartition.Error != nil { + mt.errcnt++ + // log it and check it later + t.Logf("Message delivery error: %v", m.TopicPartition) + } + + t.Logf("Delivered %d/%d to %s, error count %d", mt.msgcnt, expCnt, m.TopicPartition, mt.errcnt) + + if mt.msgcnt >= expCnt { + break + } + + } + + doneChan <- mt.msgcnt + close(doneChan) +} + +// producerTest produces messages in to topic. Verifies delivered messages +func producerTest(t *testing.T, testname string, testmsgs []*testmsgType, pc producerCtrl, produceFunc func(p *Producer, m *Message, drChan chan Event)) { + + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + if testmsgs == nil { + createTestMessages() + testmsgs = pAllTestMsgs + } + + //get the number of messages prior to producing more messages + prerunMsgCnt, err := getMessageCountInTopic(testconf.Topic) + if err != nil { + t.Fatalf("Cannot get message count, Error: %s\n", err) + } + + conf := ConfigMap{"bootstrap.servers": testconf.Brokers, + "go.batch.producer": pc.batchProducer, + "go.delivery.reports": pc.withDr, + "queue.buffering.max.messages": len(testmsgs), + "api.version.request": "true", + "broker.version.fallback": "0.9.0.1", + "default.topic.config": ConfigMap{"acks": 1}} + + conf.updateFromTestconf() + + p, err := NewProducer(&conf) + if err != nil { + panic(err) + } + + mt := msgtrackerStart(t, len(testmsgs)) + + var doneChan chan int64 + var drChan chan Event + + if pc.withDr { + doneChan = make(chan int64) + drChan = p.Events() + go deliveryTestHandler(t, int64(len(testmsgs)), p.Events(), &mt, doneChan) + } + + if !pc.silent { + t.Logf("%s: produce %d messages", testname, len(testmsgs)) + } + + for i := 0; i < len(testmsgs); i++ { + t.Logf("producing 
message %d: %v\n", i, testmsgs[i].msg) + produceFunc(p, &testmsgs[i].msg, drChan) + } + + if !pc.silent { + t.Logf("produce done") + } + + // Wait for messages in-flight and in-queue to get delivered. + if !pc.silent { + t.Logf("%s: %d messages in queue", testname, p.Len()) + } + + r := p.Flush(10000) + if r > 0 { + t.Errorf("%s: %d messages remains in queue after Flush()", testname, r) + } + + if pc.withDr { + mt.msgcnt = <-doneChan + } else { + mt.msgcnt = int64(len(testmsgs)) + } + + if !pc.silent { + t.Logf("delivered %d messages\n", mt.msgcnt) + } + + p.Close() + + //get the number of messages afterward + postrunMsgCnt, err := getMessageCountInTopic(testconf.Topic) + if err != nil { + t.Fatalf("Cannot get message count, Error: %s\n", err) + } + + if !pc.silent { + t.Logf("prerun message count: %d, postrun count %d, delta: %d\n", prerunMsgCnt, postrunMsgCnt, postrunMsgCnt-prerunMsgCnt) + t.Logf("deliveried message count: %d, error message count %d\n", mt.msgcnt, mt.errcnt) + + } + + // verify the count and messages only if we get the delivered messages + if pc.withDr { + if int64(postrunMsgCnt-prerunMsgCnt) != (mt.msgcnt - mt.errcnt) { + t.Errorf("Expected topic message count %d, got %d\n", prerunMsgCnt+int(mt.msgcnt-mt.errcnt), postrunMsgCnt) + } + + verifyMessages(t, mt.msgs, testmsgs) + } +} + +// consumerTest consumes messages from a pre-primed (produced to) topic +func consumerTest(t *testing.T, testname string, msgcnt int, cc consumerCtrl, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) { + + if msgcnt == 0 { + createTestMessages() + producerTest(t, "Priming producer", p0TestMsgs, producerCtrl{}, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) + msgcnt = len(p0TestMsgs) + } + + conf := ConfigMap{"bootstrap.servers": testconf.Brokers, + "go.events.channel.enable": cc.useChannel, + "group.id": testconf.GroupID, + "session.timeout.ms": 6000, + 
"api.version.request": "true", + "enable.auto.commit": cc.autoCommit, + "debug": ",", + "default.topic.config": ConfigMap{"auto.offset.reset": "earliest"}} + + conf.updateFromTestconf() + + c, err := NewConsumer(&conf) + + if err != nil { + panic(err) + } + defer c.Close() + + expCnt := msgcnt + mt := msgtrackerStart(t, expCnt) + + t.Logf("%s, expecting %d messages", testname, expCnt) + c.Subscribe(testconf.Topic, rebalanceCb) + + consumeFunc(c, &mt, expCnt) + + //test commits + switch cc.commitMode { + case ViaCommitMessageAPI: + // verify CommitMessage() API + for _, message := range mt.msgs { + _, commitErr := c.CommitMessage(message) + if commitErr != nil { + t.Errorf("Cannot commit message. Error: %s\n", commitErr) + } + } + case ViaCommitOffsetsAPI: + // verify CommitOffset + partitions := make([]TopicPartition, len(mt.msgs)) + for index, message := range mt.msgs { + partitions[index] = message.TopicPartition + } + _, commitErr := c.CommitOffsets(partitions) + if commitErr != nil { + t.Errorf("Failed to commit using CommitOffsets. Error: %s\n", commitErr) + } + case ViaCommitAPI: + // verify Commit() API + _, commitErr := c.Commit() + if commitErr != nil { + t.Errorf("Failed to commit. Error: %s", commitErr) + } + + } + + // Trigger RevokePartitions + c.Unsubscribe() + + // Handle RevokePartitions + c.Poll(500) + +} + +//Test consumer QueryWatermarkOffsets API +func TestConsumerQueryWatermarkOffsets(t *testing.T) { + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + // getMessageCountInTopic() uses consumer QueryWatermarkOffsets() API to + // get the number of messages in a topic + msgcnt, err := getMessageCountInTopic(testconf.Topic) + if err != nil { + t.Errorf("Cannot get message size. 
Error: %s\n", err) + } + + // Prime topic with test messages + createTestMessages() + producerTest(t, "Priming producer", p0TestMsgs, producerCtrl{silent: true}, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) + + // getMessageCountInTopic() uses consumer QueryWatermarkOffsets() API to + // get the number of messages in a topic + newmsgcnt, err := getMessageCountInTopic(testconf.Topic) + if err != nil { + t.Errorf("Cannot get message size. Error: %s\n", err) + } + + if newmsgcnt-msgcnt != len(p0TestMsgs) { + t.Errorf("Incorrect offsets. Expected message count %d, got %d\n", len(p0TestMsgs), newmsgcnt-msgcnt) + } + +} + +//TestConsumerOffsetsForTimes +func TestConsumerOffsetsForTimes(t *testing.T) { + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + conf := ConfigMap{"bootstrap.servers": testconf.Brokers, + "group.id": testconf.GroupID, + "api.version.request": true} + + conf.updateFromTestconf() + + c, err := NewConsumer(&conf) + + if err != nil { + panic(err) + } + defer c.Close() + + // Prime topic with test messages + createTestMessages() + producerTest(t, "Priming producer", p0TestMsgs, producerCtrl{silent: true}, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) + + times := make([]TopicPartition, 1) + times[0] = TopicPartition{Topic: &testconf.Topic, Partition: 0, Offset: 12345} + offsets, err := c.OffsetsForTimes(times, 5000) + if err != nil { + t.Errorf("OffsetsForTimes() failed: %s\n", err) + return + } + + if len(offsets) != 1 { + t.Errorf("OffsetsForTimes() returned wrong length %d, expected 1\n", len(offsets)) + return + } + + if *offsets[0].Topic != testconf.Topic || offsets[0].Partition != 0 { + t.Errorf("OffsetsForTimes() returned wrong topic/partition\n") + return + } + + if offsets[0].Error != nil { + t.Errorf("OffsetsForTimes() returned error for partition 0: %s\n", err) + return + } + + low, _, err := c.QueryWatermarkOffsets(testconf.Topic, 0, 5*1000) + if err != 
nil { + t.Errorf("Failed to query watermark offsets for topic %s. Error: %s\n", testconf.Topic, err) + return + } + + t.Logf("OffsetsForTimes() returned offset %d for timestamp %d\n", offsets[0].Offset, times[0].Offset) + + // Since we're using a phony low timestamp it is assumed that the returned + // offset will be oldest message. + if offsets[0].Offset != Offset(low) { + t.Errorf("OffsetsForTimes() returned invalid offset %d for timestamp %d, expected %d\n", offsets[0].Offset, times[0].Offset, low) + return + } + +} + +// test consumer GetMetadata API +func TestConsumerGetMetadata(t *testing.T) { + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + config := &ConfigMap{"bootstrap.servers": testconf.Brokers, + "group.id": testconf.GroupID} + + // Create consumer + c, err := NewConsumer(config) + if err != nil { + t.Errorf("Failed to create consumer: %s\n", err) + return + } + defer c.Close() + + metaData, err := c.GetMetadata(&testconf.Topic, false, 5*1000) + if err != nil { + t.Errorf("Failed to get meta data for topic %s. Error: %s\n", testconf.Topic, err) + return + } + t.Logf("Meta data for topic %s: %v\n", testconf.Topic, metaData) + + metaData, err = c.GetMetadata(nil, true, 5*1000) + if err != nil { + t.Errorf("Failed to get meta data, Error: %s\n", err) + return + } + t.Logf("Meta data for consumer: %v\n", metaData) +} + +//Test producer QueryWatermarkOffsets API +func TestProducerQueryWatermarkOffsets(t *testing.T) { + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + config := &ConfigMap{"bootstrap.servers": testconf.Brokers} + + // Create producer + p, err := NewProducer(config) + if err != nil { + t.Errorf("Failed to create producer: %s\n", err) + return + } + defer p.Close() + + low, high, err := p.QueryWatermarkOffsets(testconf.Topic, 0, 5*1000) + if err != nil { + t.Errorf("Failed to query watermark offsets for topic %s. 
Error: %s\n", testconf.Topic, err) + return + } + cnt := high - low + t.Logf("Watermark offsets fo topic %s: low=%d, high=%d\n", testconf.Topic, low, high) + + createTestMessages() + producerTest(t, "Priming producer", p0TestMsgs, producerCtrl{silent: true}, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) + + low, high, err = p.QueryWatermarkOffsets(testconf.Topic, 0, 5*1000) + if err != nil { + t.Errorf("Failed to query watermark offsets for topic %s. Error: %s\n", testconf.Topic, err) + return + } + t.Logf("Watermark offsets fo topic %s: low=%d, high=%d\n", testconf.Topic, low, high) + newcnt := high - low + t.Logf("count = %d, New count = %d\n", cnt, newcnt) + if newcnt-cnt != int64(len(p0TestMsgs)) { + t.Errorf("Incorrect offsets. Expected message count %d, got %d\n", len(p0TestMsgs), newcnt-cnt) + } +} + +//Test producer GetMetadata API +func TestProducerGetMetadata(t *testing.T) { + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + config := &ConfigMap{"bootstrap.servers": testconf.Brokers} + + // Create producer + p, err := NewProducer(config) + if err != nil { + t.Errorf("Failed to create producer: %s\n", err) + return + } + defer p.Close() + + metaData, err := p.GetMetadata(&testconf.Topic, false, 5*1000) + if err != nil { + t.Errorf("Failed to get meta data for topic %s. 
Error: %s\n", testconf.Topic, err) + return + } + t.Logf("Meta data for topic %s: %v\n", testconf.Topic, metaData) + + metaData, err = p.GetMetadata(nil, true, 5*1000) + if err != nil { + t.Errorf("Failed to get meta data, Error: %s\n", err) + return + } + t.Logf("Meta data for producer: %v\n", metaData) + +} + +// test producer function-based API without delivery report +func TestProducerFunc(t *testing.T) { + producerTest(t, "Function producer (without DR)", + nil, producerCtrl{}, + func(p *Producer, m *Message, drChan chan Event) { + err := p.Produce(m, drChan) + if err != nil { + t.Errorf("Produce() failed: %v", err) + } + }) +} + +// test producer function-based API with delivery report +func TestProducerFuncDR(t *testing.T) { + producerTest(t, "Function producer (with DR)", + nil, producerCtrl{withDr: true}, + func(p *Producer, m *Message, drChan chan Event) { + err := p.Produce(m, drChan) + if err != nil { + t.Errorf("Produce() failed: %v", err) + } + }) +} + +// test producer with bad messages +func TestProducerWithBadMessages(t *testing.T) { + conf := ConfigMap{"bootstrap.servers": testconf.Brokers} + p, err := NewProducer(&conf) + if err != nil { + panic(err) + } + defer p.Close() + + // producing a nil message should return an error without crash + err = p.Produce(nil, p.Events()) + if err == nil { + t.Errorf("Producing a nil message should return error\n") + } else { + t.Logf("Producing a nil message returns expected error: %s\n", err) + } + + // producing a blank message (with nil Topic) should return an error without crash + err = p.Produce(&Message{}, p.Events()) + if err == nil { + t.Errorf("Producing a blank message should return error\n") + } else { + t.Logf("Producing a blank message returns expected error: %s\n", err) + } +} + +// test producer channel-based API without delivery report +func TestProducerChannel(t *testing.T) { + producerTest(t, "Channel producer (without DR)", + nil, producerCtrl{}, + func(p *Producer, m *Message, drChan chan 
Event) { + p.ProduceChannel() <- m + }) +} + +// test producer channel-based API with delivery report +func TestProducerChannelDR(t *testing.T) { + producerTest(t, "Channel producer (with DR)", + nil, producerCtrl{withDr: true}, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) + +} + +// test batch producer channel-based API without delivery report +func TestProducerBatchChannel(t *testing.T) { + producerTest(t, "Channel producer (without DR, batch channel)", + nil, producerCtrl{batchProducer: true}, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) +} + +// test batch producer channel-based API with delivery report +func TestProducerBatchChannelDR(t *testing.T) { + producerTest(t, "Channel producer (DR, batch channel)", + nil, producerCtrl{withDr: true, batchProducer: true}, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) +} + +// use opaque string to locate the matching test message for message verification +func findExpectedMessage(expected []*testmsgType, opaque string) *testmsgType { + for i, m := range expected { + if expected[i].msg.Opaque != nil && expected[i].msg.Opaque.(string) == opaque { + return m + } + } + return nil +} + +// verify the message content against the expected +func verifyMessages(t *testing.T, msgs []*Message, expected []*testmsgType) { + if len(msgs) != len(expected) { + t.Errorf("Expected %d messages, got %d instead\n", len(expected), len(msgs)) + return + } + for _, m := range msgs { + if m.Opaque == nil { + continue // No way to look up the corresponding expected message, let it go + } + testmsg := findExpectedMessage(expected, m.Opaque.(string)) + if testmsg == nil { + t.Errorf("Cannot find a matching expected message for message %v\n", m) + continue + } + em := testmsg.msg + if m.TopicPartition.Error != nil { + if m.TopicPartition.Error != testmsg.expectedError { + t.Errorf("Expected error %s, but got error %s\n", 
testmsg.expectedError, m.TopicPartition.Error) + } + continue + } + + // check partition + if em.TopicPartition.Partition == PartitionAny { + if m.TopicPartition.Partition < 0 { + t.Errorf("Expected partition %d, got %d\n", em.TopicPartition.Partition, m.TopicPartition.Partition) + } + } else if em.TopicPartition.Partition != m.TopicPartition.Partition { + t.Errorf("Expected partition %d, got %d\n", em.TopicPartition.Partition, m.TopicPartition.Partition) + } + + //check Key, Value, and Opaque + if string(m.Key) != string(em.Key) { + t.Errorf("Expected Key %v, got %v\n", m.Key, em.Key) + } + if string(m.Value) != string(em.Value) { + t.Errorf("Expected Value %v, got %v\n", m.Value, em.Value) + } + if m.Opaque.(string) != em.Opaque.(string) { + t.Errorf("Expected Opaque %v, got %v\n", m.Opaque, em.Opaque) + } + + } +} + +// test consumer APIs with various message commit modes +func consumerTestWithCommits(t *testing.T, testname string, msgcnt int, useChannel bool, consumeFunc func(c *Consumer, mt *msgtracker, expCnt int), rebalanceCb func(c *Consumer, event Event) error) { + consumerTest(t, testname+" auto commit", + msgcnt, consumerCtrl{useChannel: useChannel, autoCommit: true}, consumeFunc, rebalanceCb) + + consumerTest(t, testname+" using CommitMessage() API", + msgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitMessageAPI}, consumeFunc, rebalanceCb) + + consumerTest(t, testname+" using CommitOffsets() API", + msgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitOffsetsAPI}, consumeFunc, rebalanceCb) + + consumerTest(t, testname+" using Commit() API", + msgcnt, consumerCtrl{useChannel: useChannel, commitMode: ViaCommitAPI}, consumeFunc, rebalanceCb) + +} + +// test consumer channel-based API +func TestConsumerChannel(t *testing.T) { + consumerTestWithCommits(t, "Channel Consumer", 0, true, eventTestChannelConsumer, nil) +} + +// test consumer poll-based API +func TestConsumerPoll(t *testing.T) { + consumerTestWithCommits(t, "Poll 
Consumer", 0, false, eventTestPollConsumer, nil) +} + +// test consumer poll-based API with rebalance callback +func TestConsumerPollRebalance(t *testing.T) { + consumerTestWithCommits(t, "Poll Consumer (rebalance callback)", + 0, false, eventTestPollConsumer, + func(c *Consumer, event Event) error { + t.Logf("Rebalanced: %s", event) + return nil + }) +} + +// Test Committed() API +func TestConsumerCommitted(t *testing.T) { + consumerTestWithCommits(t, "Poll Consumer (rebalance callback, verify Committed())", + 0, false, eventTestPollConsumer, + func(c *Consumer, event Event) error { + t.Logf("Rebalanced: %s", event) + rp, ok := event.(RevokedPartitions) + if ok { + offsets, err := c.Committed(rp.Partitions, 5000) + if err != nil { + t.Errorf("Failed to get committed offsets: %s\n", err) + return nil + } + + t.Logf("Retrieved Committed offsets: %s\n", offsets) + + if len(offsets) != len(rp.Partitions) || len(rp.Partitions) == 0 { + t.Errorf("Invalid number of partitions %d, should be %d (and >0)\n", len(offsets), len(rp.Partitions)) + } + + // Verify proper offsets: at least one partition needs + // to have a committed offset. + validCnt := 0 + for _, p := range offsets { + if p.Error != nil { + t.Errorf("Committed() partition error: %v: %v", p, p.Error) + } else if p.Offset >= 0 { + validCnt++ + } + } + + if validCnt == 0 { + t.Errorf("Committed(): no partitions with valid offsets: %v", offsets) + } + } + return nil + }) +} + +// TestProducerConsumerTimestamps produces messages with timestamps +// and verifies them on consumption. 
+// Requires librdkafka >=0.9.4 and Kafka >=0.10.0.0 +func TestProducerConsumerTimestamps(t *testing.T) { + numver, strver := LibraryVersion() + if numver < 0x00090400 { + t.Skipf("Requires librdkafka >=0.9.4 (currently on %s)", strver) + } + + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + conf := ConfigMap{"bootstrap.servers": testconf.Brokers, + "api.version.request": true, + "go.events.channel.enable": true, + "group.id": testconf.Topic, + } + + conf.updateFromTestconf() + + /* Create consumer and find recognizable message, verify timestamp. + * The consumer is started before the producer to make sure + * the message isn't missed. */ + t.Logf("Creating consumer") + c, err := NewConsumer(&conf) + if err != nil { + t.Fatalf("NewConsumer: %v", err) + } + + t.Logf("Assign %s [0]", testconf.Topic) + err = c.Assign([]TopicPartition{{Topic: &testconf.Topic, Partition: 0, + Offset: OffsetEnd}}) + if err != nil { + t.Fatalf("Assign: %v", err) + } + + /* Wait until EOF is reached so we dont miss the produced message */ + for ev := range c.Events() { + t.Logf("Awaiting initial EOF") + _, ok := ev.(PartitionEOF) + if ok { + break + } + } + + /* + * Create producer and produce one recognizable message with timestamp + */ + t.Logf("Creating producer") + conf.SetKey("{topic}.produce.offset.report", true) + p, err := NewProducer(&conf) + if err != nil { + t.Fatalf("NewProducer: %v", err) + } + + drChan := make(chan Event, 1) + + /* Offset the timestamp to avoid comparison with system clock */ + future, _ := time.ParseDuration("87658h") // 10y + timestamp := time.Now().Add(future) + key := fmt.Sprintf("TS: %v", timestamp) + t.Logf("Producing message with timestamp %v", timestamp) + err = p.Produce(&Message{ + TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}, + Key: []byte(key), + Timestamp: timestamp}, + drChan) + + if err != nil { + t.Fatalf("Produce: %v", err) + } + + // Wait for delivery + t.Logf("Awaiting delivery report") + ev := 
<-drChan + m, ok := ev.(*Message) + if !ok { + t.Fatalf("drChan: Expected *Message, got %v", ev) + } + if m.TopicPartition.Error != nil { + t.Fatalf("Delivery failed: %v", m.TopicPartition) + } + t.Logf("Produced message to %v", m.TopicPartition) + producedOffset := m.TopicPartition.Offset + + p.Close() + + /* Now consume messages, waiting for that recognizable one. */ + t.Logf("Consuming messages") +outer: + for ev := range c.Events() { + switch m := ev.(type) { + case *Message: + if m.TopicPartition.Error != nil { + continue + } + if m.Key == nil || string(m.Key) != key { + continue + } + + t.Logf("Found message at %v with timestamp %s %s", + m.TopicPartition, + m.TimestampType, m.Timestamp) + + if m.TopicPartition.Offset != producedOffset { + t.Fatalf("Produced Offset %d does not match consumed offset %d", producedOffset, m.TopicPartition.Offset) + } + + if m.TimestampType != TimestampCreateTime { + t.Fatalf("Expected timestamp CreateTime, not %s", + m.TimestampType) + } + + /* Since Kafka timestamps are milliseconds we need to + * shave off some precision for the comparison */ + if m.Timestamp.UnixNano()/1000000 != + timestamp.UnixNano()/1000000 { + t.Fatalf("Expected timestamp %v (%d), not %v (%d)", + timestamp, timestamp.UnixNano(), + m.Timestamp, m.Timestamp.UnixNano()) + } + break outer + default: + } + } + + c.Close() +} + +// TestProducerConsumerHeaders produces messages with headers +// and verifies them on consumption. 
+// Requires librdkafka >=0.11.4 and Kafka >=0.11.0.0 +func TestProducerConsumerHeaders(t *testing.T) { + numver, strver := LibraryVersion() + if numver < 0x000b0400 { + t.Skipf("Requires librdkafka >=0.11.4 (currently on %s, 0x%x)", strver, numver) + } + + if !testconfRead() { + t.Skipf("Missing testconf.json") + } + + conf := ConfigMap{"bootstrap.servers": testconf.Brokers, + "api.version.request": true, + "enable.auto.commit": false, + "group.id": testconf.Topic, + } + + conf.updateFromTestconf() + + /* + * Create producer and produce a couple of messages with and without + * headers. + */ + t.Logf("Creating producer") + p, err := NewProducer(&conf) + if err != nil { + t.Fatalf("NewProducer: %v", err) + } + + drChan := make(chan Event, 1) + + // prepare some header values + bigBytes := make([]byte, 2500) + for i := 0; i < len(bigBytes); i++ { + bigBytes[i] = byte(i) + } + + myVarint := make([]byte, binary.MaxVarintLen64) + myVarintLen := binary.PutVarint(myVarint, 12345678901234) + + expMsgHeaders := [][]Header{ + { + {"msgid", []byte("1")}, + {"a key with SPACES ", bigBytes[:15]}, + {"BIGONE!", bigBytes}, + }, + { + {"msgid", []byte("2")}, + {"myVarint", myVarint[:myVarintLen]}, + {"empty", []byte("")}, + {"theNullIsNil", nil}, + }, + nil, // no headers + { + {"msgid", []byte("4")}, + {"order", []byte("1")}, + {"order", []byte("2")}, + {"order", nil}, + {"order", []byte("4")}, + }, + } + + t.Logf("Producing %d messages", len(expMsgHeaders)) + for _, hdrs := range expMsgHeaders { + err = p.Produce(&Message{ + TopicPartition: TopicPartition{Topic: &testconf.Topic, Partition: 0}, + Headers: hdrs}, + drChan) + } + + if err != nil { + t.Fatalf("Produce: %v", err) + } + + var firstOffset Offset = OffsetInvalid + for range expMsgHeaders { + ev := <-drChan + m, ok := ev.(*Message) + if !ok { + t.Fatalf("drChan: Expected *Message, got %v", ev) + } + if m.TopicPartition.Error != nil { + t.Fatalf("Delivery failed: %v", m.TopicPartition) + } + t.Logf("Produced message to 
%v", m.TopicPartition) + if firstOffset == OffsetInvalid { + firstOffset = m.TopicPartition.Offset + } + } + + p.Close() + + /* Now consume the produced messages and verify the headers */ + t.Logf("Creating consumer starting at offset %v", firstOffset) + c, err := NewConsumer(&conf) + if err != nil { + t.Fatalf("NewConsumer: %v", err) + } + + err = c.Assign([]TopicPartition{{Topic: &testconf.Topic, Partition: 0, + Offset: firstOffset}}) + if err != nil { + t.Fatalf("Assign: %v", err) + } + + for n, hdrs := range expMsgHeaders { + m, err := c.ReadMessage(-1) + if err != nil { + t.Fatalf("Expected message #%d, not error %v", n, err) + } + + if m.Headers == nil { + if hdrs == nil { + continue + } + t.Fatalf("Expected message #%d to have headers", n) + } + + if hdrs == nil { + t.Fatalf("Expected message #%d not to have headers, but found %v", n, m.Headers) + } + + // Compare headers + if !reflect.DeepEqual(hdrs, m.Headers) { + t.Fatalf("Expected message #%d headers to match %v, but found %v", n, hdrs, m.Headers) + } + + t.Logf("Message #%d headers matched: %v", n, m.Headers) + } + + c.Close() +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go new file mode 100644 index 0000000000..4883ee2035 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go @@ -0,0 +1,242 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package kafka provides high-level Apache Kafka producer and consumers +// using bindings on-top of the librdkafka C library. +// +// +// High-level Consumer +// +// * Decide if you want to read messages and events from the `.Events()` channel +// (set `"go.events.channel.enable": true`) or by calling `.Poll()`. +// +// * Create a Consumer with `kafka.NewConsumer()` providing at +// least the `bootstrap.servers` and `group.id` configuration properties. +// +// * Call `.Subscribe()` or (`.SubscribeTopics()` to subscribe to multiple topics) +// to join the group with the specified subscription set. +// Subscriptions are atomic, calling `.Subscribe*()` again will leave +// the group and rejoin with the new set of topics. +// +// * Start reading events and messages from either the `.Events` channel +// or by calling `.Poll()`. +// +// * When the group has rebalanced each client member is assigned a +// (sub-)set of topic+partitions. +// By default the consumer will start fetching messages for its assigned +// partitions at this point, but your application may enable rebalance +// events to get an insight into what the assigned partitions where +// as well as set the initial offsets. To do this you need to pass +// `"go.application.rebalance.enable": true` to the `NewConsumer()` call +// mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event +// with the assigned partition set. You can optionally modify the initial +// offsets (they'll default to stored offsets and if there are no previously stored +// offsets it will fall back to `"default.topic.config": ConfigMap{"auto.offset.reset": ..}` +// which defaults to the `latest` message) and then call `.Assign(partitions)` +// to start consuming. 
If you don't need to modify the initial offsets you will +// not need to call `.Assign()`, the client will do so automatically for you if +// you dont. +// +// * As messages are fetched they will be made available on either the +// `.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`. +// +// * Handle messages, events and errors to your liking. +// +// * When you are done consuming call `.Close()` to commit final offsets +// and leave the consumer group. +// +// +// +// Producer +// +// * Create a Producer with `kafka.NewProducer()` providing at least +// the `bootstrap.servers` configuration properties. +// +// * Messages may now be produced either by sending a `*kafka.Message` +// on the `.ProduceChannel` or by calling `.Produce()`. +// +// * Producing is an asynchronous operation so the client notifies the application +// of per-message produce success or failure through something called delivery reports. +// Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message` +// and you should check `msg.TopicPartition.Error` for `nil` to find out if the message +// was succesfully delivered or not. +// It is also possible to direct delivery reports to alternate channels +// by providing a non-nil `chan Event` channel to `.Produce()`. +// If no delivery reports are wanted they can be completely disabled by +// setting configuration property `"go.delivery.reports": false`. +// +// * When you are done producing messages you will need to make sure all messages +// are indeed delivered to the broker (or failed), remember that this is +// an asynchronous client so some of your messages may be lingering in internal +// channels or tranmission queues. +// To do this you can either keep track of the messages you've produced +// and wait for their corresponding delivery reports, or call the convenience +// function `.Flush()` that will block until all message deliveries are done +// or the provided timeout elapses. 
+// +// * Finally call `.Close()` to decommission the producer. +// +// +// Events +// +// Apart from emitting messages and delivery reports the client also communicates +// with the application through a number of different event types. +// An application may choose to handle or ignore these events. +// +// Consumer events +// +// * `*kafka.Message` - a fetched message. +// +// * `AssignedPartitions` - The assigned partition set for this client following a rebalance. +// Requires `go.application.rebalance.enable` +// +// * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance. +// `AssignedPartitions` and `RevokedPartitions` are symetrical. +// Requires `go.application.rebalance.enable` +// +// * `PartitionEOF` - Consumer has reached the end of a partition. +// NOTE: The consumer will keep trying to fetch new messages for the partition. +// +// * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled). +// +// +// Producer events +// +// * `*kafka.Message` - delivery report for produced message. +// Check `.TopicPartition.Error` for delivery result. +// +// +// Generic events for both Consumer and Producer +// +// * `KafkaError` - client (error codes are prefixed with _) or broker error. +// These errors are normally just informational since the +// client will try its best to automatically recover (eventually). +// +// +// Hint: If your application registers a signal notification +// (signal.Notify) makes sure the signals channel is buffered to avoid +// possible complications with blocking Poll() calls. +// +// Note: The Confluent Kafka Go client is safe for concurrent use. +package kafka + +import ( + "fmt" + "unsafe" +) + +/* +#include +#include +#include + +static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) { + return idx < rktparlist->cnt ? 
&rktparlist->elems[idx] : NULL; +} +*/ +import "C" + +// PartitionAny represents any partition (for partitioning), +// or unspecified value (for all other cases) +const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA) + +// TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset. +type TopicPartition struct { + Topic *string + Partition int32 + Offset Offset + Error error +} + +func (p TopicPartition) String() string { + topic := "" + if p.Topic != nil { + topic = *p.Topic + } + if p.Error != nil { + return fmt.Sprintf("%s[%d]@%s(%s)", + topic, p.Partition, p.Offset, p.Error) + } + return fmt.Sprintf("%s[%d]@%s", + topic, p.Partition, p.Offset) +} + +// TopicPartitions is a slice of TopicPartitions that also implements +// the sort interface +type TopicPartitions []TopicPartition + +func (tps TopicPartitions) Len() int { + return len(tps) +} + +func (tps TopicPartitions) Less(i, j int) bool { + if *tps[i].Topic < *tps[j].Topic { + return true + } else if *tps[i].Topic > *tps[j].Topic { + return false + } + return tps[i].Partition < tps[j].Partition +} + +func (tps TopicPartitions) Swap(i, j int) { + tps[i], tps[j] = tps[j], tps[i] +} + +// new_cparts_from_TopicPartitions creates a new C rd_kafka_topic_partition_list_t +// from a TopicPartition array. 
+func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) { + cparts = C.rd_kafka_topic_partition_list_new(C.int(len(partitions))) + for _, part := range partitions { + ctopic := C.CString(*part.Topic) + defer C.free(unsafe.Pointer(ctopic)) + rktpar := C.rd_kafka_topic_partition_list_add(cparts, ctopic, C.int32_t(part.Partition)) + rktpar.offset = C.int64_t(part.Offset) + } + + return cparts +} + +func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kafka_topic_partition_t) { + + topic := C.GoString(crktpar.topic) + partition.Topic = &topic + partition.Partition = int32(crktpar.partition) + partition.Offset = Offset(crktpar.offset) + if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR { + partition.Error = newError(crktpar.err) + } +} + +func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) { + + partcnt := int(cparts.cnt) + + partitions = make([]TopicPartition, partcnt) + for i := 0; i < partcnt; i++ { + crktpar := C._c_rdkafka_topic_partition_list_entry(cparts, C.int(i)) + setupTopicPartitionFromCrktpar(&partitions[i], crktpar) + } + + return partitions +} + +// LibraryVersion returns the underlying librdkafka library version as a +// (version_int, version_str) tuple. +func LibraryVersion() (int, string) { + ver := (int)(C.rd_kafka_version()) + verstr := C.GoString(C.rd_kafka_version_str()) + return ver, verstr +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka_test.go new file mode 100644 index 0000000000..e268f7eb68 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka_test.go @@ -0,0 +1,138 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "testing" +) + +//Test LibraryVersion() +func TestLibraryVersion(t *testing.T) { + ver, verstr := LibraryVersion() + if ver >= 0x00090200 { + t.Logf("Library version %d: %s\n", ver, verstr) + } else { + t.Errorf("Unexpected Library version %d: %s\n", ver, verstr) + } +} + +//Test Offset APIs +func TestOffsetAPIs(t *testing.T) { + offsets := []Offset{OffsetBeginning, OffsetEnd, OffsetInvalid, OffsetStored, 1001} + for _, offset := range offsets { + t.Logf("Offset: %s\n", offset.String()) + } + + // test known offset strings + testOffsets := map[string]Offset{"beginning": OffsetBeginning, + "earliest": OffsetBeginning, + "end": OffsetEnd, + "latest": OffsetEnd, + "unset": OffsetInvalid, + "invalid": OffsetInvalid, + "stored": OffsetStored} + + for key, expectedOffset := range testOffsets { + offset, err := NewOffset(key) + if err != nil { + t.Errorf("Cannot create offset for %s, error: %s\n", key, err) + } else { + if offset != expectedOffset { + t.Errorf("Offset does not equal expected: %s != %s\n", offset, expectedOffset) + } + } + } + + // test numeric string conversion + offset, err := NewOffset("10") + if err != nil { + t.Errorf("Cannot create offset for 10, error: %s\n", err) + } else { + if offset != Offset(10) { + t.Errorf("Offset does not equal expected: %s != %s\n", offset, Offset(10)) + } + } + + // test integer offset + var intOffset = 10 + offset, err = NewOffset(intOffset) + if err != nil { + t.Errorf("Cannot create offset for int 10, Error: %s\n", err) + } else { + if offset != Offset(10) { + 
t.Errorf("Offset does not equal expected: %s != %s\n", offset, Offset(10)) + } + } + + // test int64 offset + var int64Offset int64 = 10 + offset, err = NewOffset(int64Offset) + if err != nil { + t.Errorf("Cannot create offset for int64 10, Error: %s \n", err) + } else { + if offset != Offset(10) { + t.Errorf("Offset does not equal expected: %s != %s\n", offset, Offset(10)) + } + } + + // test invalid string offset + invalidOffsetString := "what is this offset" + offset, err = NewOffset(invalidOffsetString) + if err == nil { + t.Errorf("Expected error for this string offset. Error: %s\n", err) + } else if offset != Offset(0) { + t.Errorf("Expected offset (%v), got (%v)\n", Offset(0), offset) + } + t.Logf("Offset for string (%s): %v\n", invalidOffsetString, offset) + + // test double offset + doubleOffset := 12.15 + offset, err = NewOffset(doubleOffset) + if err == nil { + t.Errorf("Expected error for this double offset: %f. Error: %s\n", doubleOffset, err) + } else if offset != OffsetInvalid { + t.Errorf("Expected offset (%v), got (%v)\n", OffsetInvalid, offset) + } + t.Logf("Offset for double (%f): %v\n", doubleOffset, offset) + + // test change offset via Set() + offset, err = NewOffset("beginning") + if err != nil { + t.Errorf("Cannot create offset for 'beginning'. Error: %s\n", err) + } + + // test change to a logical offset + err = offset.Set("latest") + if err != nil { + t.Errorf("Cannot set offset to 'latest'. Error: %s \n", err) + } else if offset != OffsetEnd { + t.Errorf("Failed to change offset. Expect (%v), got (%v)\n", OffsetEnd, offset) + } + + // test change to an integer offset + err = offset.Set(int(10)) + if err != nil { + t.Errorf("Cannot set offset to (%v). Error: %s \n", 10, err) + } else if offset != 10 { + t.Errorf("Failed to change offset. 
Expect (%v), got (%v)\n", 10, offset) + } + + // test OffsetTail() + tail := OffsetTail(offset) + t.Logf("offset tail %v\n", tail) + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go new file mode 100644 index 0000000000..3472d1c7d6 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go @@ -0,0 +1,207 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "fmt" + "time" + "unsafe" +) + +/* +#include +#include +#include +#include "glue_rdkafka.h" + +void setup_rkmessage (rd_kafka_message_t *rkmessage, + rd_kafka_topic_t *rkt, int32_t partition, + const void *payload, size_t len, + void *key, size_t keyLen, void *opaque) { + rkmessage->rkt = rkt; + rkmessage->partition = partition; + rkmessage->payload = (void *)payload; + rkmessage->len = len; + rkmessage->key = (void *)key; + rkmessage->key_len = keyLen; + rkmessage->_private = opaque; +} +*/ +import "C" + +// TimestampType is a the Message timestamp type or source +// +type TimestampType int + +const ( + // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support + TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) + // TimestampCreateTime indicates timestamp set by producer (source time) + TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME) + // TimestampLogAppendTime indicates timestamp set set by broker (store time) + TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) +) + +func (t TimestampType) String() string { + switch t { + case TimestampCreateTime: + return "CreateTime" + case TimestampLogAppendTime: + return "LogAppendTime" + case TimestampNotAvailable: + fallthrough + default: + return "NotAvailable" + } +} + +// Message represents a Kafka message +type Message struct { + TopicPartition TopicPartition + Value []byte + Key []byte + Timestamp time.Time + TimestampType TimestampType + Opaque interface{} + Headers []Header +} + +// String returns a human readable representation of a Message. +// Key and payload are not represented. 
+func (m *Message) String() string { + var topic string + if m.TopicPartition.Topic != nil { + topic = *m.TopicPartition.Topic + } else { + topic = "" + } + return fmt.Sprintf("%s[%d]@%s", topic, m.TopicPartition.Partition, m.TopicPartition.Offset) +} + +func (h *handle) getRktFromMessage(msg *Message) (crkt *C.rd_kafka_topic_t) { + if msg.TopicPartition.Topic == nil { + return nil + } + + return h.getRkt(*msg.TopicPartition.Topic) +} + +func (h *handle) newMessageFromFcMsg(fcMsg *C.fetched_c_msg_t) (msg *Message) { + msg = &Message{} + + if fcMsg.ts != -1 { + ts := int64(fcMsg.ts) + msg.TimestampType = TimestampType(fcMsg.tstype) + msg.Timestamp = time.Unix(ts/1000, (ts%1000)*1000000) + } + + if fcMsg.tmphdrsCnt > 0 { + msg.Headers = make([]Header, fcMsg.tmphdrsCnt) + for n := range msg.Headers { + tmphdr := (*[1 << 30]C.tmphdr_t)(unsafe.Pointer(fcMsg.tmphdrs))[n] + msg.Headers[n].Key = C.GoString(tmphdr.key) + if tmphdr.val != nil { + msg.Headers[n].Value = C.GoBytes(unsafe.Pointer(tmphdr.val), C.int(tmphdr.size)) + } else { + msg.Headers[n].Value = nil + } + } + C.free(unsafe.Pointer(fcMsg.tmphdrs)) + } + + h.setupMessageFromC(msg, fcMsg.msg) + + return msg +} + +// setupMessageFromC sets up a message object from a C rd_kafka_message_t +func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) { + if cmsg.rkt != nil { + topic := h.getTopicNameFromRkt(cmsg.rkt) + msg.TopicPartition.Topic = &topic + } + msg.TopicPartition.Partition = int32(cmsg.partition) + if cmsg.payload != nil { + msg.Value = C.GoBytes(unsafe.Pointer(cmsg.payload), C.int(cmsg.len)) + } + if cmsg.key != nil { + msg.Key = C.GoBytes(unsafe.Pointer(cmsg.key), C.int(cmsg.key_len)) + } + msg.TopicPartition.Offset = Offset(cmsg.offset) + if cmsg.err != 0 { + msg.TopicPartition.Error = newError(cmsg.err) + } +} + +// newMessageFromC creates a new message object from a C rd_kafka_message_t +// NOTE: For use with Producer: does not set message timestamp fields. 
+func (h *handle) newMessageFromC(cmsg *C.rd_kafka_message_t) (msg *Message) { + msg = &Message{} + + h.setupMessageFromC(msg, cmsg) + + return msg +} + +// messageToC sets up cmsg as a clone of msg +func (h *handle) messageToC(msg *Message, cmsg *C.rd_kafka_message_t) { + var valp unsafe.Pointer + var keyp unsafe.Pointer + + // to circumvent Cgo constraints we need to allocate C heap memory + // for both Value and Key (one allocation back to back) + // and copy the bytes from Value and Key to the C memory. + // We later tell librdkafka (in produce()) to free the + // C memory pointer when it is done. + var payload unsafe.Pointer + + valueLen := 0 + keyLen := 0 + if msg.Value != nil { + valueLen = len(msg.Value) + } + if msg.Key != nil { + keyLen = len(msg.Key) + } + + allocLen := valueLen + keyLen + if allocLen > 0 { + payload = C.malloc(C.size_t(allocLen)) + if valueLen > 0 { + copy((*[1 << 30]byte)(payload)[0:valueLen], msg.Value) + valp = payload + } + if keyLen > 0 { + copy((*[1 << 30]byte)(payload)[valueLen:allocLen], msg.Key) + keyp = unsafe.Pointer(&((*[1 << 31]byte)(payload)[valueLen])) + } + } + + cmsg.rkt = h.getRktFromMessage(msg) + cmsg.partition = C.int32_t(msg.TopicPartition.Partition) + cmsg.payload = valp + cmsg.len = C.size_t(valueLen) + cmsg.key = keyp + cmsg.key_len = C.size_t(keyLen) + cmsg._private = nil +} + +// used for testing messageToC performance +func (h *handle) messageToCDummy(msg *Message) { + var cmsg C.rd_kafka_message_t + h.messageToC(msg, &cmsg) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message_test.go new file mode 100644 index 0000000000..e868dcc7a5 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message_test.go @@ -0,0 +1,33 @@ +/** + * Copyright 2016 Confluent Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "testing" +) + +//Test TimestampType +func TestTimestampType(t *testing.T) { + timestampMap := map[TimestampType]string{TimestampCreateTime: "CreateTime", + TimestampLogAppendTime: "LogAppendTime", + TimestampNotAvailable: "NotAvailable"} + for ts, desc := range timestampMap { + if ts.String() != desc { + t.Errorf("Wrong timestamp description for %s, expected %s\n", desc, ts.String()) + } + } +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go new file mode 100644 index 0000000000..a34bc9b763 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go @@ -0,0 +1,157 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka + +import ( + "unsafe" +) + +/* +#include +#include + +struct rd_kafka_metadata_broker *_getMetadata_broker_element(struct rd_kafka_metadata *m, int i) { + return &m->brokers[i]; +} + +struct rd_kafka_metadata_topic *_getMetadata_topic_element(struct rd_kafka_metadata *m, int i) { + return &m->topics[i]; +} + +struct rd_kafka_metadata_partition *_getMetadata_partition_element(struct rd_kafka_metadata *m, int topic_idx, int partition_idx) { + return &m->topics[topic_idx].partitions[partition_idx]; +} + +int32_t _get_int32_element (int32_t *arr, int i) { + return arr[i]; +} + +*/ +import "C" + +// BrokerMetadata contains per-broker metadata +type BrokerMetadata struct { + ID int32 + Host string + Port int +} + +// PartitionMetadata contains per-partition metadata +type PartitionMetadata struct { + ID int32 + Error Error + Leader int32 + Replicas []int32 + Isrs []int32 +} + +// TopicMetadata contains per-topic metadata +type TopicMetadata struct { + Topic string + Partitions []PartitionMetadata + Error Error +} + +// Metadata contains broker and topic metadata for all (matching) topics +type Metadata struct { + Brokers []BrokerMetadata + Topics map[string]TopicMetadata + + OriginatingBroker BrokerMetadata +} + +// getMetadata queries broker for cluster and topic metadata. +// If topic is non-nil only information about that topic is returned, else if +// allTopics is false only information about locally used topics is returned, +// else information about all topics is returned. 
+func getMetadata(H Handle, topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + h := H.gethandle() + + var rkt *C.rd_kafka_topic_t + if topic != nil { + rkt = h.getRkt(*topic) + } + + var cMd *C.struct_rd_kafka_metadata + cErr := C.rd_kafka_metadata(h.rk, bool2cint(allTopics), + rkt, &cMd, C.int(timeoutMs)) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cErr) + } + + m := Metadata{} + + m.Brokers = make([]BrokerMetadata, cMd.broker_cnt) + for i := 0; i < int(cMd.broker_cnt); i++ { + b := C._getMetadata_broker_element(cMd, C.int(i)) + m.Brokers[i] = BrokerMetadata{int32(b.id), C.GoString(b.host), + int(b.port)} + } + + m.Topics = make(map[string]TopicMetadata, int(cMd.topic_cnt)) + for i := 0; i < int(cMd.topic_cnt); i++ { + t := C._getMetadata_topic_element(cMd, C.int(i)) + + thisTopic := C.GoString(t.topic) + m.Topics[thisTopic] = TopicMetadata{Topic: thisTopic, + Error: newError(t.err), + Partitions: make([]PartitionMetadata, int(t.partition_cnt))} + + for j := 0; j < int(t.partition_cnt); j++ { + p := C._getMetadata_partition_element(cMd, C.int(i), C.int(j)) + m.Topics[thisTopic].Partitions[j] = PartitionMetadata{ + ID: int32(p.id), + Error: newError(p.err), + Leader: int32(p.leader)} + m.Topics[thisTopic].Partitions[j].Replicas = make([]int32, int(p.replica_cnt)) + for ir := 0; ir < int(p.replica_cnt); ir++ { + m.Topics[thisTopic].Partitions[j].Replicas[ir] = int32(C._get_int32_element(p.replicas, C.int(ir))) + } + + m.Topics[thisTopic].Partitions[j].Isrs = make([]int32, int(p.isr_cnt)) + for ii := 0; ii < int(p.isr_cnt); ii++ { + m.Topics[thisTopic].Partitions[j].Isrs[ii] = int32(C._get_int32_element(p.isrs, C.int(ii))) + } + } + } + + m.OriginatingBroker = BrokerMetadata{int32(cMd.orig_broker_id), + C.GoString(cMd.orig_broker_name), 0} + + return &m, nil +} + +// queryWatermarkOffsets returns the broker's low and high offsets for the given topic +// and partition. 
+func queryWatermarkOffsets(H Handle, topic string, partition int32, timeoutMs int) (low, high int64, err error) { + h := H.gethandle() + + ctopic := C.CString(topic) + defer C.free(unsafe.Pointer(ctopic)) + + var cLow, cHigh C.int64_t + + e := C.rd_kafka_query_watermark_offsets(h.rk, ctopic, C.int32_t(partition), + &cLow, &cHigh, C.int(timeoutMs)) + if e != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return 0, 0, newError(e) + } + + low = int64(cLow) + high = int64(cHigh) + return low, high, nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata_test.go new file mode 100644 index 0000000000..96e8a91416 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata_test.go @@ -0,0 +1,64 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "testing" +) + +// TestMetadataAPIs dry-tests the Metadata APIs, no broker is needed. 
+func TestMetadataAPIs(t *testing.T) { + + p, err := NewProducer(&ConfigMap{"socket.timeout.ms": 10}) + if err != nil { + t.Fatalf("%s", err) + } + + metadata, err := p.GetMetadata(nil, true, 10) + if err == nil { + t.Errorf("Expected GetMetadata to fail") + } + + topic := "gotest" + metadata, err = p.GetMetadata(&topic, false, 10) + if err == nil { + t.Errorf("Expected GetMetadata to fail") + } + + metadata, err = p.GetMetadata(nil, false, 10) + if err == nil { + t.Errorf("Expected GetMetadata to fail") + } + + p.Close() + + c, err := NewConsumer(&ConfigMap{"group.id": "gotest"}) + if err != nil { + t.Fatalf("%s", err) + } + + metadata, err = c.GetMetadata(nil, true, 10) + if err == nil { + t.Errorf("Expected GetMetadata to fail") + } + if metadata != nil { + t.Errorf("Return value should be nil") + } + + c.Close() + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go new file mode 100644 index 0000000000..2fe871774b --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go @@ -0,0 +1,27 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package kafka
+
+import "C"
+
+// bool2cint converts a bool to a C.int (1 or 0)
+func bool2cint(b bool) C.int {
+	if b {
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go
new file mode 100644
index 0000000000..5dd7fd26c3
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go
@@ -0,0 +1,144 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"fmt"
+	"strconv"
+)
+
+/*
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+
+static int64_t _c_rdkafka_offset_tail(int64_t rel) {
+	return RD_KAFKA_OFFSET_TAIL(rel);
+}
+*/
+import "C"
+
+// Offset type (int64) with support for canonical names
+type Offset int64
+
+// OffsetBeginning represents the earliest offset (logical)
+const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
+
+// OffsetEnd represents the latest offset (logical)
+const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
+
+// OffsetInvalid represents an invalid/unspecified offset
+const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
+
+// OffsetStored represents a stored offset
+const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
+
+func (o Offset) String() string {
+	switch o {
+	case OffsetBeginning:
+		return "beginning"
+	case OffsetEnd:
+		return "end"
+	case OffsetInvalid:
+		return "unset"
+	case OffsetStored:
+		return "stored"
+	default:
+		return fmt.Sprintf("%d", int64(o))
+	}
+}
+
+// Set offset value, see NewOffset()
+func (o *Offset) Set(offset interface{}) error {
+	n, err := NewOffset(offset)
+
+	if err == nil {
+		*o = n
+	}
+
+	return err
+}
+
+// NewOffset creates a new Offset using the provided logical string, or an
+// absolute int64 offset value.
+// Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored" +func NewOffset(offset interface{}) (Offset, error) { + + switch v := offset.(type) { + case string: + switch v { + case "beginning": + fallthrough + case "earliest": + return Offset(OffsetBeginning), nil + + case "end": + fallthrough + case "latest": + return Offset(OffsetEnd), nil + + case "unset": + fallthrough + case "invalid": + return Offset(OffsetInvalid), nil + + case "stored": + return Offset(OffsetStored), nil + + default: + off, err := strconv.Atoi(v) + return Offset(off), err + } + + case int: + return Offset((int64)(v)), nil + case int64: + return Offset(v), nil + default: + return OffsetInvalid, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid offset type: %t", v)) + } +} + +// OffsetTail returns the logical offset relativeOffset from current end of partition +func OffsetTail(relativeOffset Offset) Offset { + return Offset(C._c_rdkafka_offset_tail(C.int64_t(relativeOffset))) +} + +// offsetsForTimes looks up offsets by timestamp for the given partitions. +// +// The returned offset for each partition is the earliest offset whose +// timestamp is greater than or equal to the given timestamp in the +// corresponding partition. +// +// The timestamps to query are represented as `.Offset` in the `times` +// argument and the looked up offsets are represented as `.Offset` in the returned +// `offsets` list. +// +// The function will block for at most timeoutMs milliseconds. +// +// Duplicate Topic+Partitions are not supported. +// Per-partition errors may be returned in the `.Error` field. 
+func offsetsForTimes(H Handle, times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + cparts := newCPartsFromTopicPartitions(times) + defer C.rd_kafka_topic_partition_list_destroy(cparts) + cerr := C.rd_kafka_offsets_for_times(H.gethandle().rk, cparts, C.int(timeoutMs)) + if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return nil, newError(cerr) + } + + return newTopicPartitionsFromCparts(cparts), nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go new file mode 100644 index 0000000000..28a41357cb --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go @@ -0,0 +1,562 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "math" + "time" + "unsafe" +) + +/* +#include +#include +#include "glue_rdkafka.h" + + +#ifdef RD_KAFKA_V_HEADERS +// Convert tmphdrs to chdrs (created by this function). +// If tmphdr.size == -1: value is considered Null +// tmphdr.size == 0: value is considered empty (ignored) +// tmphdr.size > 0: value is considered non-empty +// +// WARNING: The header values will be freed by this function. 
+void tmphdrs_to_chdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt, + rd_kafka_headers_t **chdrs) { + size_t i; + + *chdrs = rd_kafka_headers_new(tmphdrsCnt); + + for (i = 0 ; i < tmphdrsCnt ; i++) { + rd_kafka_header_add(*chdrs, + tmphdrs[i].key, -1, + tmphdrs[i].size == -1 ? NULL : + (tmphdrs[i].size == 0 ? "" : tmphdrs[i].val), + tmphdrs[i].size == -1 ? 0 : tmphdrs[i].size); + if (tmphdrs[i].size > 0) + free((void *)tmphdrs[i].val); + } +} + +#else +void free_tmphdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt) { + size_t i; + for (i = 0 ; i < tmphdrsCnt ; i++) { + if (tmphdrs[i].size > 0) + free((void *)tmphdrs[i].val); + } +} +#endif + + +rd_kafka_resp_err_t do_produce (rd_kafka_t *rk, + rd_kafka_topic_t *rkt, int32_t partition, + int msgflags, + int valIsNull, void *val, size_t val_len, + int keyIsNull, void *key, size_t key_len, + int64_t timestamp, + tmphdr_t *tmphdrs, size_t tmphdrsCnt, + uintptr_t cgoid) { + void *valp = valIsNull ? NULL : val; + void *keyp = keyIsNull ? NULL : key; +#ifdef RD_KAFKA_V_HEADERS + rd_kafka_headers_t *hdrs = NULL; +#endif + + + if (tmphdrsCnt > 0) { +#ifdef RD_KAFKA_V_HEADERS + tmphdrs_to_chdrs(tmphdrs, tmphdrsCnt, &hdrs); +#else + free_tmphdrs(tmphdrs, tmphdrsCnt); + return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; +#endif + } + + +#ifdef RD_KAFKA_V_TIMESTAMP + return rd_kafka_producev(rk, + RD_KAFKA_V_RKT(rkt), + RD_KAFKA_V_PARTITION(partition), + RD_KAFKA_V_MSGFLAGS(msgflags), + RD_KAFKA_V_VALUE(valp, val_len), + RD_KAFKA_V_KEY(keyp, key_len), + RD_KAFKA_V_TIMESTAMP(timestamp), +#ifdef RD_KAFKA_V_HEADERS + RD_KAFKA_V_HEADERS(hdrs), +#endif + RD_KAFKA_V_OPAQUE((void *)cgoid), + RD_KAFKA_V_END); +#else + if (timestamp) + return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED; + if (rd_kafka_produce(rkt, partition, msgflags, + valp, val_len, + keyp, key_len, + (void *)cgoid) == -1) + return rd_kafka_last_error(); + else + return RD_KAFKA_RESP_ERR_NO_ERROR; +#endif +} +*/ +import "C" + +// Producer implements a High-level Apache Kafka Producer instance 
+type Producer struct { + events chan Event + produceChannel chan *Message + handle handle + + // Terminates the poller() goroutine + pollerTermChan chan bool +} + +// String returns a human readable name for a Producer instance +func (p *Producer) String() string { + return p.handle.String() +} + +// get_handle implements the Handle interface +func (p *Producer) gethandle() *handle { + return &p.handle +} + +func (p *Producer) produce(msg *Message, msgFlags int, deliveryChan chan Event) error { + if msg == nil || msg.TopicPartition.Topic == nil || len(*msg.TopicPartition.Topic) == 0 { + return newErrorFromString(ErrInvalidArg, "") + } + + crkt := p.handle.getRkt(*msg.TopicPartition.Topic) + + // Three problems: + // 1) There's a difference between an empty Value or Key (length 0, proper pointer) and + // a null Value or Key (length 0, null pointer). + // 2) we need to be able to send a null Value or Key, but the unsafe.Pointer(&slice[0]) + // dereference can't be performed on a nil slice. + // 3) cgo's pointer checking requires the unsafe.Pointer(slice..) call to be made + // in the call to the C function. + // + // Solution: + // Keep track of whether the Value or Key were nil (1), but let the valp and keyp pointers + // point to a 1-byte slice (but the length to send is still 0) so that the dereference (2) + // works. + // Then perform the unsafe.Pointer() on the valp and keyp pointers (which now either point + // to the original msg.Value and msg.Key or to the 1-byte slices) in the call to C (3). 
+ // + var valp []byte + var keyp []byte + oneByte := []byte{0} + var valIsNull C.int + var keyIsNull C.int + var valLen int + var keyLen int + + if msg.Value == nil { + valIsNull = 1 + valLen = 0 + valp = oneByte + } else { + valLen = len(msg.Value) + if valLen > 0 { + valp = msg.Value + } else { + valp = oneByte + } + } + + if msg.Key == nil { + keyIsNull = 1 + keyLen = 0 + keyp = oneByte + } else { + keyLen = len(msg.Key) + if keyLen > 0 { + keyp = msg.Key + } else { + keyp = oneByte + } + } + + var cgoid int + + // Per-message state that needs to be retained through the C code: + // delivery channel (if specified) + // message opaque (if specified) + // Since these cant be passed as opaque pointers to the C code, + // due to cgo constraints, we add them to a per-producer map for lookup + // when the C code triggers the callbacks or events. + if deliveryChan != nil || msg.Opaque != nil { + cgoid = p.handle.cgoPut(cgoDr{deliveryChan: deliveryChan, opaque: msg.Opaque}) + } + + var timestamp int64 + if !msg.Timestamp.IsZero() { + timestamp = msg.Timestamp.UnixNano() / 1000000 + } + + // Convert headers to C-friendly tmphdrs + var tmphdrs []C.tmphdr_t + tmphdrsCnt := len(msg.Headers) + + if tmphdrsCnt > 0 { + tmphdrs = make([]C.tmphdr_t, tmphdrsCnt) + + for n, hdr := range msg.Headers { + tmphdrs[n].key = C.CString(hdr.Key) + if hdr.Value != nil { + tmphdrs[n].size = C.ssize_t(len(hdr.Value)) + if tmphdrs[n].size > 0 { + // Make a copy of the value + // to avoid runtime panic with + // foreign Go pointers in cgo. + tmphdrs[n].val = C.CBytes(hdr.Value) + } + } else { + // null value + tmphdrs[n].size = C.ssize_t(-1) + } + } + } else { + // no headers, need a dummy tmphdrs of size 1 to avoid index + // out of bounds panic in do_produce() call below. + // tmphdrsCnt will be 0. 
+ tmphdrs = []C.tmphdr_t{{nil, nil, 0}} + } + + cErr := C.do_produce(p.handle.rk, crkt, + C.int32_t(msg.TopicPartition.Partition), + C.int(msgFlags)|C.RD_KAFKA_MSG_F_COPY, + valIsNull, unsafe.Pointer(&valp[0]), C.size_t(valLen), + keyIsNull, unsafe.Pointer(&keyp[0]), C.size_t(keyLen), + C.int64_t(timestamp), + (*C.tmphdr_t)(unsafe.Pointer(&tmphdrs[0])), C.size_t(tmphdrsCnt), + (C.uintptr_t)(cgoid)) + if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { + if cgoid != 0 { + p.handle.cgoGet(cgoid) + } + return newError(cErr) + } + + return nil +} + +// Produce single message. +// This is an asynchronous call that enqueues the message on the internal +// transmit queue, thus returning immediately. +// The delivery report will be sent on the provided deliveryChan if specified, +// or on the Producer object's Events() channel if not. +// msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented), +// api.version.request=true, and broker >= 0.10.0.0. +// msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented), +// api.version.request=true, and broker >= 0.11.0.0. +// Returns an error if message could not be enqueued. +func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error { + return p.produce(msg, 0, deliveryChan) +} + +// Produce a batch of messages. +// These batches do not relate to the message batches sent to the broker, the latter +// are collected on the fly internally in librdkafka. +// WARNING: This is an experimental API. +// NOTE: timestamps and headers are not supported with this API. 
+func (p *Producer) produceBatch(topic string, msgs []*Message, msgFlags int) error { + crkt := p.handle.getRkt(topic) + + cmsgs := make([]C.rd_kafka_message_t, len(msgs)) + for i, m := range msgs { + p.handle.messageToC(m, &cmsgs[i]) + } + r := C.rd_kafka_produce_batch(crkt, C.RD_KAFKA_PARTITION_UA, C.int(msgFlags)|C.RD_KAFKA_MSG_F_FREE, + (*C.rd_kafka_message_t)(&cmsgs[0]), C.int(len(msgs))) + if r == -1 { + return newError(C.rd_kafka_last_error()) + } + + return nil +} + +// Events returns the Events channel (read) +func (p *Producer) Events() chan Event { + return p.events +} + +// ProduceChannel returns the produce *Message channel (write) +func (p *Producer) ProduceChannel() chan *Message { + return p.produceChannel +} + +// Len returns the number of messages and requests waiting to be transmitted to the broker +// as well as delivery reports queued for the application. +// Includes messages on ProduceChannel. +func (p *Producer) Len() int { + return len(p.produceChannel) + len(p.events) + int(C.rd_kafka_outq_len(p.handle.rk)) +} + +// Flush and wait for outstanding messages and requests to complete delivery. +// Includes messages on ProduceChannel. +// Runs until value reaches zero or on timeoutMs. +// Returns the number of outstanding events still un-flushed. +func (p *Producer) Flush(timeoutMs int) int { + termChan := make(chan bool) // unused stand-in termChan + + d, _ := time.ParseDuration(fmt.Sprintf("%dms", timeoutMs)) + tEnd := time.Now().Add(d) + for p.Len() > 0 { + remain := tEnd.Sub(time.Now()).Seconds() + if remain <= 0.0 { + return p.Len() + } + + p.handle.eventPoll(p.events, + int(math.Min(100, remain*1000)), 1000, termChan) + } + + return 0 +} + +// Close a Producer instance. +// The Producer object or its channels are no longer usable after this call. 
+func (p *Producer) Close() { + // Wait for poller() (signaled by closing pollerTermChan) + // and channel_producer() (signaled by closing ProduceChannel) + close(p.pollerTermChan) + close(p.produceChannel) + p.handle.waitTerminated(2) + + close(p.events) + + p.handle.cleanup() + + C.rd_kafka_destroy(p.handle.rk) +} + +// NewProducer creates a new high-level Producer instance. +// +// conf is a *ConfigMap with standard librdkafka configuration properties, see here: +// +// +// +// +// +// Supported special configuration properties: +// go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance). +// These batches do not relate to Kafka message batches in any way. +// Note: timestamps and headers are not supported with this interface. +// go.delivery.reports (bool, true) - Forward per-message delivery reports to the +// Events() channel. +// go.events.channel.size (int, 1000000) - Events() channel size +// go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages) +// +func NewProducer(conf *ConfigMap) (*Producer, error) { + + err := versionCheck() + if err != nil { + return nil, err + } + + p := &Producer{} + + v, err := conf.extract("go.batch.producer", false) + if err != nil { + return nil, err + } + batchProducer := v.(bool) + + v, err = conf.extract("go.delivery.reports", true) + if err != nil { + return nil, err + } + p.handle.fwdDr = v.(bool) + + v, err = conf.extract("go.events.channel.size", 1000000) + if err != nil { + return nil, err + } + eventsChanSize := v.(int) + + v, err = conf.extract("go.produce.channel.size", 1000000) + if err != nil { + return nil, err + } + produceChannelSize := v.(int) + + v, _ = conf.extract("{topic}.produce.offset.report", nil) + if v == nil { + // Enable offset reporting by default, unless overriden. 
+ conf.SetKey("{topic}.produce.offset.report", true) + } + + // Convert ConfigMap to librdkafka conf_t + cConf, err := conf.convert() + if err != nil { + return nil, err + } + + cErrstr := (*C.char)(C.malloc(C.size_t(256))) + defer C.free(unsafe.Pointer(cErrstr)) + + C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_DR|C.RD_KAFKA_EVENT_STATS) + + // Create librdkafka producer instance + p.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256) + if p.handle.rk == nil { + return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) + } + + p.handle.p = p + p.handle.setup() + p.handle.rkq = C.rd_kafka_queue_get_main(p.handle.rk) + p.events = make(chan Event, eventsChanSize) + p.produceChannel = make(chan *Message, produceChannelSize) + p.pollerTermChan = make(chan bool) + + go poller(p, p.pollerTermChan) + + // non-batch or batch producer, only one must be used + if batchProducer { + go channelBatchProducer(p) + } else { + go channelProducer(p) + } + + return p, nil +} + +// channel_producer serves the ProduceChannel channel +func channelProducer(p *Producer) { + + for m := range p.produceChannel { + err := p.produce(m, C.RD_KAFKA_MSG_F_BLOCK, nil) + if err != nil { + m.TopicPartition.Error = err + p.events <- m + } + } + + p.handle.terminatedChan <- "channelProducer" +} + +// channelBatchProducer serves the ProduceChannel channel and attempts to +// improve cgo performance by using the produceBatch() interface. 
+func channelBatchProducer(p *Producer) { + var buffered = make(map[string][]*Message) + bufferedCnt := 0 + const batchSize int = 1000000 + totMsgCnt := 0 + totBatchCnt := 0 + + for m := range p.produceChannel { + buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m) + bufferedCnt++ + + loop2: + for true { + select { + case m, ok := <-p.produceChannel: + if !ok { + break loop2 + } + if m == nil { + panic("nil message received on ProduceChannel") + } + if m.TopicPartition.Topic == nil { + panic(fmt.Sprintf("message without Topic received on ProduceChannel: %v", m)) + } + buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m) + bufferedCnt++ + if bufferedCnt >= batchSize { + break loop2 + } + default: + break loop2 + } + } + + totBatchCnt++ + totMsgCnt += len(buffered) + + for topic, buffered2 := range buffered { + err := p.produceBatch(topic, buffered2, C.RD_KAFKA_MSG_F_BLOCK) + if err != nil { + for _, m = range buffered2 { + m.TopicPartition.Error = err + p.events <- m + } + } + } + + buffered = make(map[string][]*Message) + bufferedCnt = 0 + } + p.handle.terminatedChan <- "channelBatchProducer" +} + +// poller polls the rd_kafka_t handle for events until signalled for termination +func poller(p *Producer, termChan chan bool) { +out: + for true { + select { + case _ = <-termChan: + break out + + default: + _, term := p.handle.eventPoll(p.events, 100, 1000, termChan) + if term { + break out + } + break + } + } + + p.handle.terminatedChan <- "poller" + +} + +// GetMetadata queries broker for cluster and topic metadata. +// If topic is non-nil only information about that topic is returned, else if +// allTopics is false only information about locally used topics is returned, +// else information about all topics is returned. 
+func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + return getMetadata(p, topic, allTopics, timeoutMs) +} + +// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic +// and partition. +func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) { + return queryWatermarkOffsets(p, topic, partition, timeoutMs) +} + +// OffsetsForTimes looks up offsets by timestamp for the given partitions. +// +// The returned offset for each partition is the earliest offset whose +// timestamp is greater than or equal to the given timestamp in the +// corresponding partition. +// +// The timestamps to query are represented as `.Offset` in the `times` +// argument and the looked up offsets are represented as `.Offset` in the returned +// `offsets` list. +// +// The function will block for at most timeoutMs milliseconds. +// +// Duplicate Topic+Partitions are not supported. +// Per-partition errors may be returned in the `.Error` field. +func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + return offsetsForTimes(p, times, timeoutMs) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_performance_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_performance_test.go new file mode 100644 index 0000000000..e6ddc4f0d5 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_performance_test.go @@ -0,0 +1,225 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "fmt" + "strings" + "testing" +) + +func deliveryHandler(b *testing.B, expCnt int64, deliveryChan chan Event, doneChan chan int64) { + + var cnt, size int64 + + for ev := range deliveryChan { + m, ok := ev.(*Message) + if !ok { + continue + } + + if m.TopicPartition.Error != nil { + b.Errorf("Message delivery error: %v", m.TopicPartition) + break + } + + cnt++ + // b.Logf("Delivered %d/%d to %s", cnt, expCnt, m.TopicPartition) + + if m.Value != nil { + size += int64(len(m.Value)) + } + if cnt >= expCnt { + break + } + + } + + doneChan <- cnt + doneChan <- size + close(doneChan) +} + +func producerPerfTest(b *testing.B, testname string, msgcnt int, withDr bool, batchProducer bool, silent bool, produceFunc func(p *Producer, m *Message, drChan chan Event)) { + + if !testconfRead() { + b.Skipf("Missing testconf.json") + } + + if msgcnt == 0 { + msgcnt = testconf.PerfMsgCount + } + + conf := ConfigMap{"bootstrap.servers": testconf.Brokers, + "go.batch.producer": batchProducer, + "go.delivery.reports": withDr, + "queue.buffering.max.messages": msgcnt, + "api.version.request": "true", + "broker.version.fallback": "0.9.0.1", + "default.topic.config": ConfigMap{"acks": 1}} + + conf.updateFromTestconf() + + p, err := NewProducer(&conf) + if err != nil { + panic(err) + } + + topic := testconf.Topic + partition := int32(-1) + size := testconf.PerfMsgSize + pattern := "Hello" + buf := []byte(strings.Repeat(pattern, size/len(pattern))) + + var doneChan chan int64 + var drChan chan Event + + if withDr { + doneChan = make(chan 
int64) + drChan = p.Events() + go deliveryHandler(b, int64(msgcnt), p.Events(), doneChan) + } + + if !silent { + b.Logf("%s: produce %d messages", testname, msgcnt) + } + + displayInterval := 5.0 + if !silent { + displayInterval = 1000.0 + } + rd := ratedispStart(b, fmt.Sprintf("%s: produce", testname), displayInterval) + rdDelivery := ratedispStart(b, fmt.Sprintf("%s: delivery", testname), displayInterval) + + for i := 0; i < msgcnt; i++ { + m := Message{TopicPartition: TopicPartition{Topic: &topic, Partition: partition}, Value: buf} + + produceFunc(p, &m, drChan) + + rd.tick(1, int64(size)) + } + + if !silent { + rd.print("produce done: ") + } + + // Wait for messages in-flight and in-queue to get delivered. + if !silent { + b.Logf("%s: %d messages in queue", testname, p.Len()) + } + r := p.Flush(10000) + if r > 0 { + b.Errorf("%s: %d messages remains in queue after Flush()", testname, r) + } + + // Close producer + p.Close() + + var deliveryCnt, deliverySize int64 + + if withDr { + deliveryCnt = <-doneChan + deliverySize = <-doneChan + } else { + deliveryCnt = int64(msgcnt) + deliverySize = deliveryCnt * int64(size) + } + rdDelivery.tick(deliveryCnt, deliverySize) + + rd.print("TOTAL: ") + + b.SetBytes(deliverySize) +} + +func BenchmarkProducerFunc(b *testing.B) { + producerPerfTest(b, "Function producer (without DR)", + 0, false, false, false, + func(p *Producer, m *Message, drChan chan Event) { + err := p.Produce(m, drChan) + if err != nil { + b.Errorf("Produce() failed: %v", err) + } + }) +} + +func BenchmarkProducerFuncDR(b *testing.B) { + producerPerfTest(b, "Function producer (with DR)", + 0, true, false, false, + func(p *Producer, m *Message, drChan chan Event) { + err := p.Produce(m, drChan) + if err != nil { + b.Errorf("Produce() failed: %v", err) + } + }) +} + +func BenchmarkProducerChannel(b *testing.B) { + producerPerfTest(b, "Channel producer (without DR)", + 0, false, false, false, + func(p *Producer, m *Message, drChan chan Event) { + 
p.ProduceChannel() <- m + }) +} + +func BenchmarkProducerChannelDR(b *testing.B) { + producerPerfTest(b, "Channel producer (with DR)", + testconf.PerfMsgCount, true, false, false, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) + +} + +func BenchmarkProducerBatchChannel(b *testing.B) { + producerPerfTest(b, "Channel producer (without DR, batch channel)", + 0, false, true, false, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) +} + +func BenchmarkProducerBatchChannelDR(b *testing.B) { + producerPerfTest(b, "Channel producer (DR, batch channel)", + 0, true, true, false, + func(p *Producer, m *Message, drChan chan Event) { + p.ProduceChannel() <- m + }) +} + +func BenchmarkProducerInternalMessageInstantiation(b *testing.B) { + topic := "test" + buf := []byte(strings.Repeat("Ten bytes!", 10)) + v := 0 + for i := 0; i < b.N; i++ { + msg := Message{TopicPartition: TopicPartition{Topic: &topic, Partition: 0}, Value: buf} + v += int(msg.TopicPartition.Partition) // avoid msg unused error + } +} + +func BenchmarkProducerInternalMessageToC(b *testing.B) { + p, err := NewProducer(&ConfigMap{}) + if err != nil { + b.Fatalf("NewProducer failed: %s", err) + } + b.ResetTimer() + topic := "test" + buf := []byte(strings.Repeat("Ten bytes!", 10)) + for i := 0; i < b.N; i++ { + msg := Message{TopicPartition: TopicPartition{Topic: &topic, Partition: 0}, Value: buf} + p.handle.messageToCDummy(&msg) + } +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_test.go new file mode 100644 index 0000000000..b1d645bc17 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer_test.go @@ -0,0 +1,216 @@ +/** + * Copyright 2016 Confluent Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "encoding/binary" + "encoding/json" + "reflect" + "testing" + "time" +) + +// TestProducerAPIs dry-tests all Producer APIs, no broker is needed. +func TestProducerAPIs(t *testing.T) { + + // expected message dr count on events channel + expMsgCnt := 0 + p, err := NewProducer(&ConfigMap{ + "socket.timeout.ms": 10, + "default.topic.config": ConfigMap{"message.timeout.ms": 10}}) + if err != nil { + t.Fatalf("%s", err) + } + + t.Logf("Producer %s", p) + + drChan := make(chan Event, 10) + + topic1 := "gotest" + topic2 := "gotest2" + + // Produce with function, DR on passed drChan + err = p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic1, Partition: 0}, + Value: []byte("Own drChan"), Key: []byte("This is my key")}, + drChan) + if err != nil { + t.Errorf("Produce failed: %s", err) + } + + // Produce with function, use default DR channel (Events) + err = p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic2, Partition: 0}, + Value: []byte("Events DR"), Key: []byte("This is my key")}, + nil) + if err != nil { + t.Errorf("Produce failed: %s", err) + } + expMsgCnt++ + + // Produce with function and timestamp, + // success depends on librdkafka version + err = p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic2, Partition: 0}, Timestamp: time.Now()}, nil) + numver, strver := LibraryVersion() + t.Logf("Produce with timestamp on %s returned: %s", 
strver, err) + if numver < 0x00090400 { + if err == nil || err.(Error).Code() != ErrNotImplemented { + t.Errorf("Expected Produce with timestamp to fail with ErrNotImplemented on %s (0x%x), got: %s", strver, numver, err) + } + } else { + if err != nil { + t.Errorf("Produce with timestamp failed on %s: %s", strver, err) + } + } + if err == nil { + expMsgCnt++ + } + + // Produce through ProducerChannel, uses default DR channel (Events), + // pass Opaque object. + myOpq := "My opaque" + p.ProduceChannel() <- &Message{TopicPartition: TopicPartition{Topic: &topic2, Partition: 0}, + Opaque: &myOpq, + Value: []byte("ProducerChannel"), Key: []byte("This is my key")} + expMsgCnt++ + + // Len() will not report messages on private delivery report chans (our drChan for example), + // so expect at least 2 messages, not 3. + // And completely ignore the timestamp message. + if p.Len() < 2 { + t.Errorf("Expected at least 2 messages (+requests) in queue, only %d reported", p.Len()) + } + + // Message Headers + varIntHeader := make([]byte, binary.MaxVarintLen64) + varIntLen := binary.PutVarint(varIntHeader, 123456789) + + myHeaders := []Header{ + {"thisHdrIsNullOrNil", nil}, + {"empty", []byte("")}, + {"MyVarIntHeader", varIntHeader[:varIntLen]}, + {"mystring", []byte("This is a simple string")}, + } + + p.ProduceChannel() <- &Message{TopicPartition: TopicPartition{Topic: &topic2, Partition: 0}, + Value: []byte("Headers"), + Headers: myHeaders} + expMsgCnt++ + + // + // Now wait for messages to time out so that delivery reports are triggered + // + + // drChan (1 message) + ev := <-drChan + m := ev.(*Message) + if string(m.Value) != "Own drChan" { + t.Errorf("DR for wrong message (wanted 'Own drChan'), got %s", + string(m.Value)) + } else if m.TopicPartition.Error == nil { + t.Errorf("Expected error for message") + } else { + t.Logf("Message %s", m.TopicPartition) + } + close(drChan) + + // Events chan (3 messages and possibly events) + for msgCnt := 0; msgCnt < expMsgCnt; { + ev = 
<-p.Events() + switch e := ev.(type) { + case *Message: + msgCnt++ + if (string)(e.Value) == "ProducerChannel" { + s := e.Opaque.(*string) + if s != &myOpq { + t.Errorf("Opaque should point to %v, not %v", &myOpq, s) + } + if *s != myOpq { + t.Errorf("Opaque should be \"%s\", not \"%v\"", + myOpq, *s) + } + t.Logf("Message \"%s\" with opaque \"%s\"\n", + (string)(e.Value), *s) + + } else if (string)(e.Value) == "Headers" { + if e.Opaque != nil { + t.Errorf("Message opaque should be nil, not %v", e.Opaque) + } + if !reflect.DeepEqual(e.Headers, myHeaders) { + // FIXME: Headers are currently not available on the delivery report. + // t.Errorf("Message headers should be %v, not %v", myHeaders, e.Headers) + } + } else { + if e.Opaque != nil { + t.Errorf("Message opaque should be nil, not %v", e.Opaque) + } + } + default: + t.Logf("Ignored event %s", e) + } + } + + r := p.Flush(2000) + if r > 0 { + t.Errorf("Expected empty queue after Flush, still has %d", r) + } + + // OffsetsForTimes + offsets, err := p.OffsetsForTimes([]TopicPartition{{Topic: &topic2, Offset: 12345}}, 100) + t.Logf("OffsetsForTimes() returned Offsets %s and error %s\n", offsets, err) + if err == nil { + t.Errorf("OffsetsForTimes() should have failed\n") + } + if offsets != nil { + t.Errorf("OffsetsForTimes() failed but returned non-nil Offsets: %s\n", offsets) + } +} + +// TestProducerBufferSafety verifies issue #24, passing any type of memory backed buffer +// (JSON in this case) to Produce() +func TestProducerBufferSafety(t *testing.T) { + + p, err := NewProducer(&ConfigMap{ + "socket.timeout.ms": 10, + "default.topic.config": ConfigMap{"message.timeout.ms": 10}}) + if err != nil { + t.Fatalf("%s", err) + } + + topic := "gotest" + value, _ := json.Marshal(struct{ M string }{M: "Hello Go!"}) + empty := []byte("") + + // Try combinations of Value and Key: json value, empty, nil + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: value, Key: nil}, nil) + 
p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: value, Key: value}, nil) + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: nil, Key: value}, nil) + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: nil, Key: nil}, nil) + + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: empty, Key: nil}, nil) + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: empty, Key: empty}, nil) + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: nil, Key: empty}, nil) + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: value, Key: empty}, nil) + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: value, Key: value}, nil) + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: empty, Key: value}, nil) + + // And Headers + p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic}, Value: empty, Key: value, + Headers: []Header{{"hdr", value}, {"hdr2", empty}, {"hdr3", nil}}}, nil) + + p.Flush(100) + + p.Close() +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/stats_event_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/stats_event_test.go new file mode 100644 index 0000000000..bf2923b297 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/stats_event_test.go @@ -0,0 +1,131 @@ +/** + * Copyright 2017 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka + +import ( + "encoding/json" + "testing" + "time" +) + +// handleStatsEvent checks for stats event and signals on statsReceived +func handleStatsEvent(t *testing.T, eventCh chan Event, statsReceived chan bool) { + for ev := range eventCh { + switch e := ev.(type) { + case *Stats: + t.Logf("Stats: %v", e) + + // test if the stats string can be decoded into JSON + var raw map[string]interface{} + err := json.Unmarshal([]byte(e.String()), &raw) // convert string to json + if err != nil { + t.Fatalf("json unmarshall error: %s", err) + } + t.Logf("Stats['name']: %s", raw["name"]) + close(statsReceived) + return + default: + t.Logf("Ignored event: %v", e) + } + } +} + +// TestStatsEventProducerFunc dry-test stats event, no broker is needed. +func TestStatsEventProducerFunc(t *testing.T) { + testProducerFunc(t, false) +} + +func TestStatsEventProducerChannel(t *testing.T) { + testProducerFunc(t, true) +} + +func testProducerFunc(t *testing.T, withProducerChannel bool) { + + p, err := NewProducer(&ConfigMap{ + "statistics.interval.ms": 50, + "socket.timeout.ms": 10, + "default.topic.config": ConfigMap{"message.timeout.ms": 10}}) + if err != nil { + t.Fatalf("%s", err) + } + defer p.Close() + + t.Logf("Producer %s", p) + + topic1 := "gotest" + + // go routine to check for stats event + statsReceived := make(chan bool) + go handleStatsEvent(t, p.Events(), statsReceived) + + if withProducerChannel { + err = p.Produce(&Message{TopicPartition: TopicPartition{Topic: &topic1, Partition: 0}, + Value: []byte("Own drChan"), Key: []byte("This is my key")}, + nil) + if err != nil { + t.Errorf("Produce failed: %s", err) + } + } else { + p.ProduceChannel() <- &Message{TopicPartition: TopicPartition{Topic: &topic1, Partition: 0}, + Value: []byte("Own drChan"), Key: []byte("This is my key")} + + } + + select { + case <-statsReceived: + t.Logf("Stats recevied") 
+ case <-time.After(time.Second * 3): + t.Fatalf("Excepted stats but got none") + } + + return +} + +// TestStatsEventConsumerChannel dry-tests stats event for consumer, no broker is needed. +func TestStatsEventConsumerChannel(t *testing.T) { + + c, err := NewConsumer(&ConfigMap{ + "group.id": "gotest", + "statistics.interval.ms": 50, + "go.events.channel.enable": true, + "socket.timeout.ms": 10, + "session.timeout.ms": 10}) + if err != nil { + t.Fatalf("%s", err) + } + + defer c.Close() + + t.Logf("Consumer %s", c) + + // go routine to check for stats event + statsReceived := make(chan bool) + go handleStatsEvent(t, c.Events(), statsReceived) + + err = c.Subscribe("gotest", nil) + if err != nil { + t.Errorf("Subscribe failed: %s", err) + } + + select { + case <-statsReceived: + t.Logf("Stats recevied") + case <-time.After(time.Second * 3): + t.Fatalf("Excepted stats but got none") + } + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json new file mode 100644 index 0000000000..7024a9c0b2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json @@ -0,0 +1,8 @@ +{ + "Brokers": "mybroker or $BROKERS env", + "Topic": "test", + "GroupID": "testgroup", + "PerfMsgCount": 1000000, + "PerfMsgSize": 100, + "Config": ["api.version.request=true"] +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go new file mode 100644 index 0000000000..916db05a17 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go @@ -0,0 +1,127 @@ +package kafka + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/json" + "fmt" + "os" +) + +/* +#include +*/ +import "C" + +var testconf struct { + Brokers string + Topic string + GroupID string + PerfMsgCount int + PerfMsgSize int + Config []string + conf ConfigMap +} + +// testconf_read reads the test suite config file testconf.json which must +// contain at least Brokers and Topic string properties. +// Returns true if the testconf was found and usable, false if no such file, or panics +// if the file format is wrong. +func testconfRead() bool { + cf, err := os.Open("testconf.json") + if err != nil { + fmt.Fprintf(os.Stderr, "%% testconf.json not found - ignoring test\n") + return false + } + + // Default values + testconf.PerfMsgCount = 2000000 + testconf.PerfMsgSize = 100 + testconf.GroupID = "testgroup" + + jp := json.NewDecoder(cf) + err = jp.Decode(&testconf) + if err != nil { + panic(fmt.Sprintf("Failed to parse testconf: %s", err)) + } + + cf.Close() + + if testconf.Brokers[0] == '$' { + // Read broker list from environment variable + testconf.Brokers = os.Getenv(testconf.Brokers[1:]) + } + + if testconf.Brokers == "" || testconf.Topic == "" { + panic("Missing Brokers or Topic in testconf.json") + } + + return true +} + +// update existing ConfigMap with key=value pairs from testconf.Config +func (cm *ConfigMap) updateFromTestconf() error { + if testconf.Config == nil { + return nil + } + + // Translate "key=value" pairs in Config to ConfigMap + for _, s := range testconf.Config { + err := cm.Set(s) + if err != nil { + return err + } + } + + return nil + +} + +// Return the 
number of messages available in all partitions of a topic. +// WARNING: This uses watermark offsets so it will be incorrect for compacted topics. +func getMessageCountInTopic(topic string) (int, error) { + + // Create consumer + c, err := NewConsumer(&ConfigMap{"bootstrap.servers": testconf.Brokers, + "group.id": testconf.GroupID}) + if err != nil { + return 0, err + } + + // get metadata for the topic to find out number of partitions + + metadata, err := c.GetMetadata(&topic, false, 5*1000) + if err != nil { + return 0, err + } + + t, ok := metadata.Topics[topic] + if !ok { + return 0, newError(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) + } + + cnt := 0 + for _, p := range t.Partitions { + low, high, err := c.QueryWatermarkOffsets(topic, p.ID, 5*1000) + if err != nil { + continue + } + cnt += int(high - low) + } + + return cnt, nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers_test.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers_test.go new file mode 100644 index 0000000000..4fca59e70a --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers_test.go @@ -0,0 +1,67 @@ +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kafka + +import ( + "testing" + "time" +) + +// ratepdisp tracks and prints message & byte rates +type ratedisp struct { + name string + start time.Time + lastPrint time.Time + every float64 + cnt int64 + size int64 + b *testing.B +} + +// ratedisp_start sets up a new rate displayer +func ratedispStart(b *testing.B, name string, every float64) (pf ratedisp) { + now := time.Now() + return ratedisp{name: name, start: now, lastPrint: now, b: b, every: every} +} + +// reset start time and counters +func (rd *ratedisp) reset() { + rd.start = time.Now() + rd.cnt = 0 + rd.size = 0 +} + +// print the current (accumulated) rate +func (rd *ratedisp) print(pfx string) { + elapsed := time.Since(rd.start).Seconds() + + rd.b.Logf("%s: %s%d messages in %fs (%.0f msgs/s), %d bytes (%.3fMb/s)", + rd.name, pfx, rd.cnt, elapsed, float64(rd.cnt)/elapsed, + rd.size, (float64(rd.size)/elapsed)/(1024*1024)) +} + +// tick adds cnt of total size size to the rate displayer and also prints +// running stats every 1s. 
+func (rd *ratedisp) tick(cnt, size int64) { + rd.cnt += cnt + rd.size += size + + if time.Since(rd.lastPrint).Seconds() >= rd.every { + rd.print("") + rd.lastPrint = time.Now() + } +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/README.md new file mode 100644 index 0000000000..0f3eb532e2 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/README.md @@ -0,0 +1,38 @@ +Contains kafkatest compatible clients for plugging in with the official Apache Kafka client tests + + +Instructions +============ + +**Build both clients with statically linked librdkafka:** + + $ mkdir ~/src/kafka/tests/go + + $ cd go_verifiable_consumer + $ go build -tags static + $ cp go_verifiable_producer ~/src/kafka/tests/go + + $ cd go_verifiable_consumer + $ go build -tags static + $ $ cp go_verifiable_consumer ~/src/kafka/tests/go + + +**Install librdkafka's dependencies on kafkatest VMs:** + + $ cd ~/src/kafka # your Kafka git checkout + $ for n in $(vagrant status | grep running | awk '{print $1}') ; do \ + vagrant ssh $n -c 'sudo apt-get install -y libssl1.0.0 libsasl2-modules-gssapi-mit liblz4-1 zlib1g' ; done + +*Note*: There is also a deploy.sh script in this directory that can be + used on the VMs to do the same. 
+ + + +**Run kafkatests using Go client:** + + $ cd ~/src/kafka # your Kafka git checkout + $ source ~/src/venv2.7/bin/activate # your virtualenv containing ducktape + $ vagrant rsync # to copy go_verifiable_* clients to worker instances + $ ducktape --debug tests/kafkatest/tests/client --globals $GOPATH/src/github.com/confluentinc/confluent-kafka-go/kafkatest/globals.json + # Go do something else for 40 minutes + # Come back and look at the results diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/deploy.sh b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/deploy.sh new file mode 100644 index 0000000000..4217258509 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/deploy.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# + +# Per-instance Go kafkatest client dependency deployment. +# Installs required dependencies. + +sudo apt-get install -y libsasl2 libsasl2-modules-gssapi-mit libssl1.1.0 liblz4-1 diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/globals.json b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/globals.json new file mode 100644 index 0000000000..e7f3a62899 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/globals.json @@ -0,0 +1,11 @@ +{"VerifiableConsumer": + { + "class": "kafkatest.services.verifiable_client.VerifiableClientApp", + "exec_cmd": "/vagrant/tests/go/go_verifiable_consumer --debug=cgrp,topic,protocol,broker -X api.version.request=true" + }, + "VerifiableProducer": + { + "class": "kafkatest.services.verifiable_client.VerifiableClientApp", + "exec_cmd": "/vagrant/tests/go/go_verifiable_producer --debug=topic,protocol,broker -X api.version.request=true" + } +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go 
b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go new file mode 100644 index 0000000000..7a7738ca23 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_consumer/go_verifiable_consumer.go @@ -0,0 +1,443 @@ +// Apache Kafka kafkatest VerifiableConsumer implemented in Go +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import ( + "encoding/json" + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "gopkg.in/alecthomas/kingpin.v2" + "os" + "os/signal" + "strings" + "syscall" + "time" +) + +var ( + verbosity = 1 + sigs chan os.Signal +) + +func fatal(why string) { + fmt.Fprintf(os.Stderr, "%% FATAL ERROR: %s", why) + panic(why) +} + +func send(name string, msg map[string]interface{}) { + if msg == nil { + msg = make(map[string]interface{}) + } + msg["name"] = name + msg["_time"] = time.Now().Format("2006-01-02 15:04:05.000") + b, err := json.Marshal(msg) + if err != nil { + fatal(fmt.Sprintf("json.Marshal failed: %v", err)) + } + fmt.Println(string(b)) +} + +func partitionsToMap(partitions []kafka.TopicPartition) []map[string]interface{} { + parts := make([]map[string]interface{}, len(partitions)) + for i, tp := range partitions { + parts[i] = map[string]interface{}{"topic": *tp.Topic, "partition": tp.Partition, "offset": tp.Offset} + } + return parts +} + +func sendOffsetsCommitted(offsets []kafka.TopicPartition, err error) { + if len(state.currAssignment) == 0 { + // Dont emit offsets_committed if there is no current assignment + // This happens when auto_commit is enabled since we also + // force a manual commit on rebalance to make sure + // offsets_committed is emitted prior to partitions_revoked, + // so the builtin auto committer will also kick in and post + // this later OffsetsCommitted event which we simply ignore.. 
+ fmt.Fprintf(os.Stderr, "%% Ignore OffsetsCommitted(%v) without a valid assignment\n", err) + return + } + msg := make(map[string]interface{}) + + if err != nil { + msg["success"] = false + msg["error"] = fmt.Sprintf("%v", err) + + kerr, ok := err.(kafka.Error) + if ok && kerr.Code() == kafka.ErrNoOffset { + fmt.Fprintf(os.Stderr, "%% No offsets to commit\n") + return + } + + fmt.Fprintf(os.Stderr, "%% Commit failed: %v", msg["error"]) + } else { + msg["success"] = true + + } + + if offsets != nil { + msg["offsets"] = partitionsToMap(offsets) + } + + // Make sure we report consumption before commit, + // otherwise tests may fail because of commit > consumed + sendRecordsConsumed(true) + + send("offsets_committed", msg) +} + +func sendPartitions(name string, partitions []kafka.TopicPartition) { + + msg := make(map[string]interface{}) + msg["partitions"] = partitionsToMap(partitions) + + send(name, msg) +} + +type assignedPartition struct { + tp kafka.TopicPartition + consumedMsgs int + minOffset int64 + maxOffset int64 +} + +func assignmentKey(tp kafka.TopicPartition) string { + return fmt.Sprintf("%s-%d", *tp.Topic, tp.Partition) +} + +func findAssignment(tp kafka.TopicPartition) *assignedPartition { + a, ok := state.currAssignment[assignmentKey(tp)] + if !ok { + return nil + } + return a +} + +func addAssignment(tp kafka.TopicPartition) { + state.currAssignment[assignmentKey(tp)] = &assignedPartition{tp: tp, minOffset: -1, maxOffset: -1} +} + +func clearCurrAssignment() { + state.currAssignment = make(map[string]*assignedPartition) +} + +type commState struct { + run bool + consumedMsgs int + consumedMsgsLastReported int + consumedMsgsAtLastCommit int + currAssignment map[string]*assignedPartition + maxMessages int + autoCommit bool + asyncCommit bool + c *kafka.Consumer + termOnRevoke bool +} + +var state commState + +func sendRecordsConsumed(immediate bool) { + if len(state.currAssignment) == 0 || + (!immediate && state.consumedMsgsLastReported+1000 > 
state.consumedMsgs) { + return + } + + msg := map[string]interface{}{} + msg["count"] = state.consumedMsgs - state.consumedMsgsLastReported + parts := make([]map[string]interface{}, len(state.currAssignment)) + i := 0 + for _, a := range state.currAssignment { + if a.minOffset == -1 { + // Skip partitions that havent had any messages since last time. + // This is to circumvent some minOffset checks in kafkatest. + continue + } + parts[i] = map[string]interface{}{"topic": *a.tp.Topic, + "partition": a.tp.Partition, + "consumed_msgs": a.consumedMsgs, + "minOffset": a.minOffset, + "maxOffset": a.maxOffset} + a.minOffset = -1 + i++ + } + msg["partitions"] = parts[0:i] + + send("records_consumed", msg) + + state.consumedMsgsLastReported = state.consumedMsgs +} + +// do_commit commits every 1000 messages or whenever there is a consume timeout, or when immediate==true +func doCommit(immediate bool, async bool) { + if !immediate && + (state.autoCommit || + state.consumedMsgsAtLastCommit+1000 > state.consumedMsgs) { + return + } + + async = state.asyncCommit + + fmt.Fprintf(os.Stderr, "%% Committing %d messages (async=%v)\n", + state.consumedMsgs-state.consumedMsgsAtLastCommit, async) + + state.consumedMsgsAtLastCommit = state.consumedMsgs + + var waitCommitted chan bool + + if !async { + waitCommitted = make(chan bool) + } + + go func() { + offsets, err := state.c.Commit() + + sendOffsetsCommitted(offsets, err) + + if !async { + close(waitCommitted) + } + }() + + if !async { + _, _ = <-waitCommitted + } +} + +// returns false when consumer should terminate, else true to keep running. 
+func handleMsg(m *kafka.Message) bool { + if verbosity >= 2 { + fmt.Fprintf(os.Stderr, "%% Message receved: %v:\n", m.TopicPartition) + } + + a := findAssignment(m.TopicPartition) + if a == nil { + fmt.Fprintf(os.Stderr, "%% Received message on unassigned partition: %v\n", m.TopicPartition) + return true + } + + a.consumedMsgs++ + offset := int64(m.TopicPartition.Offset) + if a.minOffset == -1 { + a.minOffset = offset + } + if a.maxOffset < offset { + a.maxOffset = offset + } + + state.consumedMsgs++ + + sendRecordsConsumed(false) + doCommit(false, state.asyncCommit) + + if state.maxMessages > 0 && state.consumedMsgs >= state.maxMessages { + // ignore extra messages + return false + } + + return true + +} + +// handle_event handles an event as returned by Poll(). +func handleEvent(c *kafka.Consumer, ev kafka.Event) { + switch e := ev.(type) { + case kafka.AssignedPartitions: + if len(state.currAssignment) > 0 { + fatal(fmt.Sprintf("Assign: currAssignment should have been empty: %v", state.currAssignment)) + } + state.currAssignment = make(map[string]*assignedPartition) + for _, tp := range e.Partitions { + addAssignment(tp) + } + sendPartitions("partitions_assigned", e.Partitions) + c.Assign(e.Partitions) + + case kafka.RevokedPartitions: + sendRecordsConsumed(true) + doCommit(true, false) + sendPartitions("partitions_revoked", e.Partitions) + clearCurrAssignment() + c.Unassign() + if state.termOnRevoke { + state.run = false + } + + case kafka.OffsetsCommitted: + sendOffsetsCommitted(e.Offsets, e.Error) + + case *kafka.Message: + state.run = handleMsg(e) + + case kafka.Error: + if e.Code() == kafka.ErrUnknownTopicOrPart { + fmt.Fprintf(os.Stderr, + "%% Ignoring transient error: %v\n", e) + } else { + fatal(fmt.Sprintf("%% Error: %v\n", e)) + } + + default: + fmt.Fprintf(os.Stderr, "%% Unhandled event %T ignored: %v\n", e, e) + } +} + +// main_loop serves consumer events, signals, etc. +// will run for at most (roughly) \p timeout seconds. 
+func mainLoop(c *kafka.Consumer, timeout int) { + tmout := time.NewTicker(time.Duration(timeout) * time.Second) + every1s := time.NewTicker(1 * time.Second) + +out: + for state.run == true { + select { + + case _ = <-tmout.C: + tmout.Stop() + break out + + case sig := <-sigs: + fmt.Fprintf(os.Stderr, "%% Terminating on signal %v\n", sig) + state.run = false + + case _ = <-every1s.C: + // Report consumed messages + sendRecordsConsumed(true) + // Commit on timeout as well (not just every 1000 messages) + doCommit(false, state.asyncCommit) + + default: + //case _ = <-time.After(100000 * time.Microsecond): + for true { + ev := c.Poll(0) + if ev == nil { + break + } + handleEvent(c, ev) + } + } + } +} + +func runConsumer(config *kafka.ConfigMap, topic string) { + c, err := kafka.NewConsumer(config) + if err != nil { + fatal(fmt.Sprintf("Failed to create consumer: %v", err)) + } + + _, verstr := kafka.LibraryVersion() + fmt.Fprintf(os.Stderr, "%% Created Consumer %v (%s)\n", c, verstr) + state.c = c + + c.Subscribe(topic, nil) + + send("startup_complete", nil) + state.run = true + + mainLoop(c, 10*60) + + tTermBegin := time.Now() + fmt.Fprintf(os.Stderr, "%% Consumer shutting down\n") + + sendRecordsConsumed(true) + + // Final commit (if auto commit is disabled) + doCommit(false, false) + + c.Unsubscribe() + + // Wait for rebalance, final offset commits, etc. 
+ state.run = true + state.termOnRevoke = true + mainLoop(c, 10) + + fmt.Fprintf(os.Stderr, "%% Closing consumer\n") + + c.Close() + + msg := make(map[string]interface{}) + msg["_shutdown_duration"] = time.Since(tTermBegin).Seconds() + send("shutdown_complete", msg) +} + +func main() { + sigs = make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + // Default config + conf := kafka.ConfigMap{"default.topic.config": kafka.ConfigMap{"auto.offset.reset": "earliest"}} + + /* Required options */ + group := kingpin.Flag("group-id", "Consumer group").Required().String() + topic := kingpin.Flag("topic", "Topic to consume").Required().String() + brokers := kingpin.Flag("broker-list", "Bootstrap broker(s)").Required().String() + sessionTimeout := kingpin.Flag("session-timeout", "Session timeout").Required().Int() + + /* Optionals */ + enableAutocommit := kingpin.Flag("enable-autocommit", "Enable auto-commit").Default("true").Bool() + maxMessages := kingpin.Flag("max-messages", "Max messages to consume").Default("10000000").Int() + javaAssignmentStrategy := kingpin.Flag("assignment-strategy", "Assignment strategy (Java class name)").String() + configFile := kingpin.Flag("consumer.config", "Config file").File() + debug := kingpin.Flag("debug", "Debug flags").String() + xconf := kingpin.Flag("--property", "CSV separated key=value librdkafka configuration properties").Short('X').String() + + kingpin.Parse() + + conf["bootstrap.servers"] = *brokers + conf["group.id"] = *group + conf["session.timeout.ms"] = *sessionTimeout + conf["enable.auto.commit"] = *enableAutocommit + + if len(*debug) > 0 { + conf["debug"] = *debug + } + + /* Convert Java assignment strategy(s) (CSV) to librdkafka one. + * "[java.class.path.]Strategy[Assignor],.." -> "strategy,.." 
*/ + if javaAssignmentStrategy != nil && len(*javaAssignmentStrategy) > 0 { + var strats []string + for _, jstrat := range strings.Split(*javaAssignmentStrategy, ",") { + s := strings.Split(jstrat, ".") + strats = append(strats, strings.ToLower(strings.TrimSuffix(s[len(s)-1], "Assignor"))) + } + conf["partition.assignment.strategy"] = strings.Join(strats, ",") + fmt.Fprintf(os.Stderr, "%% Mapped %s -> %s\n", + *javaAssignmentStrategy, conf["partition.assignment.strategy"]) + } + + if *configFile != nil { + fmt.Fprintf(os.Stderr, "%% Ignoring config file %v\n", *configFile) + } + + conf["go.events.channel.enable"] = false + conf["go.application.rebalance.enable"] = true + + if len(*xconf) > 0 { + for _, kv := range strings.Split(*xconf, ",") { + x := strings.Split(kv, "=") + if len(x) != 2 { + panic("-X expects a ,-separated list of confprop=val pairs") + } + conf[x[0]] = x[1] + } + } + fmt.Println("Config: ", conf) + + state.autoCommit = *enableAutocommit + state.maxMessages = *maxMessages + runConsumer((*kafka.ConfigMap)(&conf), *topic) + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_producer/go_verifiable_producer.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_producer/go_verifiable_producer.go new file mode 100644 index 0000000000..4f0ac0f03c --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafkatest/go_verifiable_producer/go_verifiable_producer.go @@ -0,0 +1,230 @@ +// Apache Kafka kafkatest VerifiableProducer implemented in Go +package main + +/** + * Copyright 2016 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import ( + "encoding/json" + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "gopkg.in/alecthomas/kingpin.v2" + "os" + "os/signal" + "strings" + "syscall" + "time" +) + +var ( + verbosity = 1 + sigs chan os.Signal +) + +func send(name string, msg map[string]interface{}) { + if msg == nil { + msg = make(map[string]interface{}) + } + msg["name"] = name + msg["_time"] = time.Now().Format("2006-01-02 15:04:05.000") + b, err := json.Marshal(msg) + if err != nil { + panic(err) + } + fmt.Println(string(b)) +} + +func partitionsToMap(partitions []kafka.TopicPartition) []map[string]interface{} { + parts := make([]map[string]interface{}, len(partitions)) + for i, tp := range partitions { + parts[i] = map[string]interface{}{"topic": *tp.Topic, "partition": tp.Partition} + } + return parts +} + +func sendPartitions(name string, partitions []kafka.TopicPartition) { + + msg := make(map[string]interface{}) + msg["partitions"] = partitionsToMap(partitions) + + send(name, msg) +} + +type commState struct { + maxMessages int // messages to send + msgCnt int // messages produced + deliveryCnt int // messages delivered + errCnt int // messages failed to deliver + valuePrefix string + throughput int + p *kafka.Producer +} + +var state commState + +// handle_dr handles delivery reports +// returns false when producer should terminate, else true to keep running. 
+func handleDr(m *kafka.Message) bool { + if verbosity >= 2 { + fmt.Fprintf(os.Stderr, "%% DR: %v:\n", m.TopicPartition) + } + + if m.TopicPartition.Error != nil { + state.errCnt++ + errmsg := make(map[string]interface{}) + errmsg["message"] = m.TopicPartition.Error.Error() + errmsg["topic"] = *m.TopicPartition.Topic + errmsg["partition"] = m.TopicPartition.Partition + errmsg["key"] = (string)(m.Key) + errmsg["value"] = (string)(m.Value) + send("producer_send_error", errmsg) + } else { + state.deliveryCnt++ + drmsg := make(map[string]interface{}) + drmsg["topic"] = *m.TopicPartition.Topic + drmsg["partition"] = m.TopicPartition.Partition + drmsg["offset"] = m.TopicPartition.Offset + drmsg["key"] = (string)(m.Key) + drmsg["value"] = (string)(m.Value) + send("producer_send_success", drmsg) + } + + if state.deliveryCnt+state.errCnt >= state.maxMessages { + // we're done + return false + } + + return true + +} + +func runProducer(config *kafka.ConfigMap, topic string) { + p, err := kafka.NewProducer(config) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create producer: %s\n", err) + os.Exit(1) + } + + _, verstr := kafka.LibraryVersion() + fmt.Fprintf(os.Stderr, "%% Created Producer %v (%s)\n", p, verstr) + state.p = p + + send("startup_complete", nil) + run := true + + throttle := time.NewTicker(time.Second / (time.Duration)(state.throughput)) + for run == true { + select { + case <-throttle.C: + // produce a message (async) on each throttler tick + value := fmt.Sprintf("%s%d", state.valuePrefix, state.msgCnt) + state.msgCnt++ + err := p.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{ + Topic: &topic, + Partition: kafka.PartitionAny}, + Value: []byte(value)}, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "%% Produce failed: %v\n", err) + state.errCnt++ + } + + if state.msgCnt == state.maxMessages { + // all messages sent, now wait for deliveries + throttle.Stop() + } + + case sig := <-sigs: + fmt.Fprintf(os.Stderr, "%% Terminating on signal 
%v\n", sig) + run = false + + case ev := <-p.Events(): + switch e := ev.(type) { + case *kafka.Message: + run = handleDr(e) + case kafka.Error: + fmt.Fprintf(os.Stderr, "%% Error: %v\n", e) + run = false + default: + fmt.Fprintf(os.Stderr, "%% Unhandled event %T ignored: %v\n", e, e) + } + } + } + + fmt.Fprintf(os.Stderr, "%% Closing, %d/%d messages delivered, %d failed\n", state.deliveryCnt, state.msgCnt, state.errCnt) + + p.Close() + + send("shutdown_complete", nil) +} + +func main() { + sigs = make(chan os.Signal) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + // Default config + conf := kafka.ConfigMap{"default.topic.config": kafka.ConfigMap{ + "auto.offset.reset": "earliest", + "produce.offset.report": true}} + + /* Required options */ + topic := kingpin.Flag("topic", "Topic").Required().String() + brokers := kingpin.Flag("broker-list", "Bootstrap broker(s)").Required().String() + + /* Optionals */ + throughput := kingpin.Flag("throughput", "Msgs/s").Default("1000000").Int() + maxMessages := kingpin.Flag("max-messages", "Max message count").Default("1000000").Int() + valuePrefix := kingpin.Flag("value-prefix", "Payload value string prefix").Default("").String() + acks := kingpin.Flag("acks", "Required acks").Default("all").String() + configFile := kingpin.Flag("producer.config", "Config file").File() + debug := kingpin.Flag("debug", "Debug flags").String() + xconf := kingpin.Flag("--property", "CSV separated key=value librdkafka configuration properties").Short('X').String() + + kingpin.Parse() + + conf["bootstrap.servers"] = *brokers + conf["default.topic.config"].(kafka.ConfigMap).SetKey("acks", *acks) + + if len(*debug) > 0 { + conf["debug"] = *debug + } + + if len(*xconf) > 0 { + for _, kv := range strings.Split(*xconf, ",") { + x := strings.Split(kv, "=") + if len(x) != 2 { + panic("-X expects a ,-separated list of confprop=val pairs") + } + conf[x[0]] = x[1] + } + } + fmt.Println("Config: ", conf) + + if *configFile != nil { + 
fmt.Fprintf(os.Stderr, "%% Ignoring config file %v\n", *configFile) + } + + if len(*valuePrefix) > 0 { + state.valuePrefix = fmt.Sprintf("%s.", *valuePrefix) + } else { + state.valuePrefix = "" + } + + state.throughput = *throughput + state.maxMessages = *maxMessages + runProducer((*kafka.ConfigMap)(&conf), *topic) + +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/mk/Makefile b/vendor/github.com/confluentinc/confluent-kafka-go/mk/Makefile new file mode 100644 index 0000000000..a3fdb4a39b --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/mk/Makefile @@ -0,0 +1,16 @@ +# Convenience helper Makefile for simplifying tasks in all sub-dirs +# of this git repo. +# +# Usage (from top-level dir): +# make -f mk/Makefile " " +# +# E.g., to run 'go vet' on all Go code: +# make -f mk/Makefile "go vet" +# +# +# + +DIRS?=$(shell find . -xdev -type f -name '*.go' -exec dirname {} \; | sort | uniq) + +%: + @(for d in $(DIRS) ; do (cd "$$d" && $@) ; done) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/mk/bootstrap-librdkafka.sh b/vendor/github.com/confluentinc/confluent-kafka-go/mk/bootstrap-librdkafka.sh new file mode 100755 index 0000000000..97f9787c15 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/mk/bootstrap-librdkafka.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# +# +# Downloads, builds and installs librdkafka into +# + +set -e + +VERSION=$1 +PREFIXDIR=$2 + +if [[ -z "$VERSION" ]]; then + echo "Usage: $0 []" 1>&2 + exit 1 +fi + +if [[ -z "$PREFIXDIR" ]]; then + PREFIXDIR=tmp-build +fi + +if [[ $PREFIXDIR != /* ]]; then + PREFIXDIR="$PWD/$PREFIXDIR" +fi + +mkdir -p "$PREFIXDIR/librdkafka" +pushd "$PREFIXDIR/librdkafka" + +test -f configure || +curl -sL "https://github.com/edenhill/librdkafka/archive/${VERSION}.tar.gz" | \ + tar -xz --strip-components=1 -f - + +./configure --prefix="$PREFIXDIR" +make -j +make install +popd + diff --git 
a/vendor/github.com/confluentinc/confluent-kafka-go/mk/doc-gen.py b/vendor/github.com/confluentinc/confluent-kafka-go/mk/doc-gen.py new file mode 100755 index 0000000000..94e47a918f --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/mk/doc-gen.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# Extract godoc HTML documentation for our packages, +# remove some nonsense, update some links and make it ready +# for inclusion in Confluent doc tree. + + +import subprocess, re +from bs4 import BeautifulSoup + + +if __name__ == '__main__': + + # Use godoc client to extract our package docs + html_in = subprocess.check_output(["godoc", "-url=/pkg/github.com/confluentinc/confluent-kafka-go/kafka"]) + + # Parse HTML + soup = BeautifulSoup(html_in, 'html.parser') + + # Remove topbar (Blog, Search, etc) + topbar = soup.find(id="topbar").decompose() + + # Remove "Subdirectories" + soup.find(id="pkg-subdirectories").decompose() + soup.find(attrs={"class":"pkg-dir"}).decompose() + for t in soup.find_all(href="#pkg-subdirectories"): + t.decompose() + + # Use golang.org for external resources (such as CSS and JS) + for t in soup.find_all(href=re.compile(r'^/')): + t['href'] = 'http://golang.org' + t['href'] + + for t in soup.find_all(src=re.compile(r'^/')): + t['src'] = 'http://golang.org' + t['src'] + + # Write updated HTML to stdout + print(soup.prettify().encode('utf-8')) diff --git a/vendor/github.com/edenhill/librdkafka/.appveyor.yml b/vendor/github.com/edenhill/librdkafka/.appveyor.yml new file mode 100644 index 0000000000..2cb8722b42 --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/.appveyor.yml @@ -0,0 +1,88 @@ +version: 0.11.4-R-pre{build} +pull_requests: + do_not_increment_build_number: true +image: Visual Studio 2013 +configuration: Release +environment: + matrix: + - platform: x64 + - platform: win32 +install: +- ps: "$OpenSSLVersion = \"1_0_2o\"\n$OpenSSLExe = 
\"OpenSSL-$OpenSSLVersion.exe\"\n\nRemove-Item C:\\OpenSSL-Win32 -recurse\nRemove-Item C:\\OpenSSL-Win64 -recurse\n\nWrite-Host \"Installing OpenSSL v1.0 32-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win32OpenSSL-1_0_2o.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win32OpenSSL-1_0_2o.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C:\\OpenSSL-Win32\nWrite-Host \"Installed\" -ForegroundColor Green\n\nWrite-Host \"Installing OpenSSL v1.0 64-bit ...\" -ForegroundColor Cyan\nWrite-Host \"Downloading...\"\n$exePath = \"$($env:USERPROFILE)\\Win64OpenSSL-1_0_2o.exe\"\n(New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/Win64OpenSSL-1_0_2o.exe', $exePath)\nWrite-Host \"Installing...\"\ncmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=C:\\OpenSSL-Win64\nWrite-Host \"Installed\" -ForegroundColor Green\n\nif (!(Test-Path(\"C:\\OpenSSL-Win32\"))) {\n echo \"Downloading https://slproweb.com/download/Win32$OpenSSLExe\"\n Start-FileDownload 'https://slproweb.com/download/Win32$OpenSSLExe'\n Start-Process \"Win32$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n echo \"OpenSSL-Win32 already exists: not downloading\"\n}\n\nif (!(Test-Path(\"C:\\OpenSSL-Win64\"))) {\n echo \"Downloading https://slproweb.com/download/Win64$OpenSSLExe\"\n Start-FileDownload 'https://slproweb.com/download/Win64$OpenSSLExe' \n Start-Process \"Win64$OpenSSLExe\" -ArgumentList \"/silent /verysilent /sp- /suppressmsgboxes\" -Wait\n} else {\n echo \"OpenSSL-Win64 already exists: not downloading\"\n}\n\n\n\n# Download the CoApp tools.\n$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n(New-Object Net.WebClient).DownloadFile('http://coapp.org/files/CoApp.Tools.Powershell.msi', $msiPath)\n\n# Install the CoApp tools from the downloaded 
.msi.\nStart-Process -FilePath msiexec -ArgumentList /i, $msiPath, /quiet -Wait\n\n# Make the tools available for later PS scripts to use.\n$env:PSModulePath = $env:PSModulePath + ';C:\\Program Files (x86)\\Outercurve Foundation\\Modules'\nImport-Module CoApp\n\n# Install NuGet\n#Install-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n#Import-PackageProvider NuGet -MinimumVersion '2.8.5.201' -Force\n\n# Install CoApp for creating nuget packages\n#$msiPath = \"$($env:USERPROFILE)\\CoApp.Tools.Powershell.msi\"\n#(New-Object #Net.WebClient).DownloadFile('http://downloads.coapp.org/files/CoApp.Tools.Powershell.msi', $msiPath)\n#cmd /c start /wait msiexec /i \"$msiPath\" /quiet\n\n# Install CoApp module\n#Install-Module CoApp -Force" +cache: +- c:\OpenSSL-Win32 +- c:\OpenSSL-Win64 +nuget: + account_feed: true + project_feed: true + disable_publish_on_pr: true +before_build: +- cmd: nuget restore win32/librdkafka.sln +build: + project: win32/librdkafka.sln + publish_nuget: true + publish_nuget_symbols: true + include_nuget_references: true + parallel: true + verbosity: normal +test_script: +- cmd: if exist DISABLED\win32\outdir\v140 ( win32\outdir\v140\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 ) else ( win32\outdir\v120\%PLATFORM%\%CONFIGURATION%\tests.exe -l -p1 ) +artifacts: +- path: test_report*.json + name: Test report +- path: '*.nupkg' + name: Packages +- path: '**\*.dll' + name: Libraries +- path: '**\*.lib' + name: Libraries +- path: '**\*.pdb' + name: Libraries +- path: '**\*.exe' + name: Executables +before_deploy: +- ps: >- + # FIXME: Add to Deployment condition above: + + # APPVEYOR_REPO_TAG = true + + + + # This is the CoApp .autopkg file to create. + + $autopkgFile = "win32/librdkafka.autopkg" + + + # Get the ".autopkg.template" file, replace "@version" with the Appveyor version number, then save to the ".autopkg" file. 
+ + cat ($autopkgFile + ".template") | % { $_ -replace "@version", $env:appveyor_build_version } > $autopkgFile + + + # Use the CoApp tools to create NuGet native packages from the .autopkg. + + Write-NuGetPackage $autopkgFile + + + # Push all newly created .nupkg files as Appveyor artifacts for later deployment. + + Get-ChildItem .\*.nupkg | % { Push-AppveyorArtifact $_.FullName -FileName $_.Name } +deploy: +- provider: S3 + access_key_id: + secure: t+Xo4x1mYVbqzvUDlnuMgFGp8LjQJNOfsDUAMxBsVH4= + secret_access_key: + secure: SNziQPPJs4poCHM7dk6OxufUYcGQhMWiNPx6Y1y6DYuWGjPc3K0APGeousLHsbLv + region: us-west-1 + bucket: librdkafka-ci-packages + folder: librdkafka/p-librdkafka__bld-appveyor__plat-windows__arch-$(platform)__bldtype-$(configuration)__tag-$(APPVEYOR_REPO_TAG_NAME)__sha-$(APPVEYOR_REPO_COMMIT)__bid-$(APPVEYOR_BUILD_ID) + artifact: /.*\.(nupkg)/ + max_error_retry: 3 + on: + APPVEYOR_REPO_TAG: true +notifications: +- provider: Email + to: + - magnus@edenhill.se + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff --git a/vendor/github.com/edenhill/librdkafka/.dir-locals.el b/vendor/github.com/edenhill/librdkafka/.dir-locals.el new file mode 100644 index 0000000000..22ca9223f0 --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/.dir-locals.el @@ -0,0 +1,3 @@ +( (c-mode . ((c-file-style . "linux"))) ) +((nil . ((compile-command . 
"LC_ALL=C make -C $(git rev-parse --show-toplevels) -k")))) + diff --git a/vendor/github.com/edenhill/librdkafka/.doozer.json b/vendor/github.com/edenhill/librdkafka/.doozer.json new file mode 100644 index 0000000000..27252daf65 --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/.doozer.json @@ -0,0 +1,110 @@ +{ + "targets": { + "xenial-amd64": { + + "buildenv": "xenial-amd64", + "builddeps": [ + "build-essential", + "python", + "zlib1g-dev", + "libssl-dev", + "libsasl2-dev" + ], + "buildcmd": [ + "./configure", + "make -j ${PARALLEL}", + "make -C tests build" + ], + "testcmd": [ + "make -C tests run_local" + ], + }, + + "xenial-i386": { + "_comment": "including liblz4-dev here to verify that WITH_LZ4_EXT works", + "buildenv": "xenial-i386", + "builddeps": [ + "build-essential", + "python", + "zlib1g-dev", + "libssl-dev", + "libsasl2-dev", + "liblz4-dev" + ], + "buildcmd": [ + "./configure", + "make -j ${PARALLEL}", + "make -C tests build" + ], + "testcmd": [ + "make -C tests run_local" + ], + }, + + "xenial-armhf": { + + "buildenv": "xenial-armhf", + "builddeps": [ + "build-essential", + "python", + "zlib1g-dev", + "libssl-dev", + "libsasl2-dev" + ], + "buildcmd": [ + "./configure", + "make -j ${PARALLEL}", + "make -j ${PARALLEL} -C tests build", + ], + "testcmd": [ + "cd tests", + "./run-test.sh -p1 -l ./merged", + "cd .." + ], + }, + + "stretch-mips": { + + "buildenv": "stretch-mips", + "builddeps": [ + "build-essential", + "python", + "zlib1g-dev", + "libssl-dev", + "libsasl2-dev" + ], + "buildcmd": [ + "./configure", + "make -j ${PARALLEL}", + "make -j ${PARALLEL} -C tests build", + ], + "testcmd": [ + "cd tests", + "./run-test.sh -p1 -l ./merged", + "cd .." + ], + }, + + "cmake-xenial-amd64": { + + "buildenv": "xenial-amd64", + "builddeps": [ + "build-essential", + "python", + "zlib1g-dev", + "libssl-dev", + "libsasl2-dev", + "cmake" + ], + "buildcmd": [ + "cmake -H. 
-B_builds -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=Debug", + "cmake --build _builds", + ], + "testcmd": [ + "cd _builds", + "ctest -VV -R RdKafkaTestBrokerLess" + ], + } + }, + "artifacts": ["config.log", "Makefile.config", "config.h"] +} diff --git a/vendor/github.com/edenhill/librdkafka/.travis.yml b/vendor/github.com/edenhill/librdkafka/.travis.yml new file mode 100644 index 0000000000..4154de59ab --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/.travis.yml @@ -0,0 +1,42 @@ +language: c +cache: ccache +env: +- ARCH=x64 +compiler: +- gcc +- clang +os: +- linux +- osx +dist: trusty +sudo: false +before_install: + - if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 prepare_ubuntu ; fi +before_script: + - ccache -s || echo "CCache is not available." +script: +- rm -rf artifacts dest +- mkdir dest artifacts +- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CPPFLAGS="-I/usr/local/opt/openssl/include + -L/usr/local/opt/openssl/lib" ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; else ./configure --enable-static --disable-lz4 --prefix="$PWD/dest" ; fi +- make -j2 all examples check && make -C tests run_local +- make install +- (cd dest && tar cvzf ../artifacts/librdkafka.tar.gz .) 
+- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then sudo make -C packaging/rpm MOCK_CONFIG=el7-x86_64 all copy-artifacts ; fi +- if [[ "${TRAVIS_OS_NAME}_$CC" == "linux_gcc" ]]; then docker run -it -v $PWD:/v microsoft/dotnet:2-sdk /v/packaging/tools/build-debian.sh /v /v/artifacts/librdkafka-debian9.tgz; fi +deploy: + provider: s3 + access_key_id: + secure: "nGcknL5JZ5XYCEJ96UeDtnLOOidWsfXrk2x91Z9Ip2AyrUtdfZBc8BX16C7SAQbBeb4PQu/OjRBQWTIRqU64ZEQU1Z0lHjxCiGEt5HO0YlXWvZ8OJGAQ0wSmrQED850lWjGW2z5MpDqqxbZyATE8VksW5dtGiHgNuITinVW8Lok=" + secret_access_key: + secure: "J+LygNeoXQImN9E7EARNmcgLpqm6hoRjxwHJaen9opeuSDowKDpZxP7ixSml3BEn2pJJ4kpsdj5A8t5uius+qC4nu9mqSAZcmdKeSmliCbH7kj4J9MR7LBcXk3Uf515QGm7y4nzw+c1PmpteYL5S06Kgqp+KkPRLKTS2NevVZuY=" + bucket: librdkafka-ci-packages + region: us-west-1 + skip_cleanup: true + local-dir: artifacts + upload-dir: librdkafka/p-librdkafka__bld-travis__plat-${TRAVIS_OS_NAME}__arch-${ARCH}__tag-${TRAVIS_TAG}__sha-${TRAVIS_COMMIT}__bid-${TRAVIS_JOB_NUMBER} + on: + condition: "$CC = gcc" + repo: edenhill/librdkafka + all_branches: true + tags: true diff --git a/vendor/github.com/edenhill/librdkafka/CMakeLists.txt b/vendor/github.com/edenhill/librdkafka/CMakeLists.txt new file mode 100644 index 0000000000..ec37ee5311 --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/CMakeLists.txt @@ -0,0 +1,182 @@ +cmake_minimum_required(VERSION 3.2) +project(RdKafka) + +# Options. No 'RDKAFKA_' prefix to match old C++ code. { + +# This option doesn't affect build in fact, only C code +# (see 'rd_kafka_version_str'). In CMake the build type feature usually used +# (like Debug, Release, etc.). 
+option(WITHOUT_OPTIMIZATION "Disable optimization" OFF) + +option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF) +option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF) +option(ENABLE_SHAREDPTR_DEBUG "Enable sharedptr debugging" OFF) + +set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile") + +# ZLIB { +find_package(ZLIB QUIET) +if(ZLIB_FOUND) + set(with_zlib_default ON) +else() + set(with_zlib_default OFF) +endif() +option(WITH_ZLIB "With ZLIB" ${with_zlib_default}) +# } + +# LibDL { +try_compile( + WITH_LIBDL + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/dlopen_test.c" + LINK_LIBRARIES "${CMAKE_DL_LIBS}" +) +# } + +# WITH_PLUGINS { +if(WITH_LIBDL) + set(with_plugins_default ON) +else() + set(with_plugins_default OFF) +endif() +option(WITH_PLUGINS "With plugin support" ${with_plugins_default}) +# } + +# OpenSSL { +if(WITH_BUNDLED_SSL) # option from 'h2o' parent project + set(with_ssl_default ON) +else() + find_package(OpenSSL QUIET) + if(OpenSSL_FOUND) + set(with_ssl_default ON) + else() + set(with_ssl_default OFF) + endif() +endif() +option(WITH_SSL "With SSL" ${with_ssl_default}) +# } + +# SASL { +if(WIN32) + set(with_sasl_default ON) +else() + include(FindPkgConfig) + pkg_check_modules(SASL libsasl2) + if(SASL_FOUND) + set(with_sasl_default ON) + else() + try_compile( + WITH_SASL_CYRUS_BOOL + "${CMAKE_CURRENT_BINARY_DIR}/try_compile" + "${TRYCOMPILE_SRC_DIR}/libsasl2_test.c" + LINK_LIBRARIES "-lsasl2" + ) + if(WITH_SASL_CYRUS_BOOL) + set(with_sasl_default ON) + set(SASL_LIBRARIES "-lsasl2") + else() + set(with_sasl_default OFF) + endif() + endif() +endif() +option(WITH_SASL "With SASL" ${with_sasl_default}) +if(WITH_SASL) + if(WITH_SSL) + set(WITH_SASL_SCRAM ON) + endif() + if(NOT WIN32) + set(WITH_SASL_CYRUS ON) + endif() +endif() +# } + +# } + +option(RDKAFKA_BUILD_EXAMPLES "Build examples" ON) +option(RDKAFKA_BUILD_TESTS "Build tests" ON) +if(WIN32) + option(WITHOUT_WIN32_CONFIG "Avoid 
including win32_config.h on cmake builds" ON) +endif(WIN32) + +# In: +# * TRYCOMPILE_SRC_DIR +# Out: +# * HAVE_ATOMICS_32 +# * HAVE_ATOMICS_32_SYNC +# * HAVE_ATOMICS_64 +# * HAVE_ATOMICS_64_SYNC +# * HAVE_REGEX +# * HAVE_STRNDUP +# * LINK_ATOMIC +include("packaging/cmake/try_compile/rdkafka_setup.cmake") + +set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated") + +# In: +# * WITHOUT_OPTIMIZATION +# * ENABLE_DEVEL +# * ENABLE_REFCNT_DEBUG +# * ENABLE_SHAREDPTR_DEBUG +# * HAVE_ATOMICS_32 +# * HAVE_ATOMICS_32_SYNC +# * HAVE_ATOMICS_64 +# * HAVE_ATOMICS_64_SYNC +# * WITH_ZLIB +# * WITH_SSL +# * WITH_SASL +# * HAVE_REGEX +# * HAVE_STRNDUP +configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h") + +# Installation (https://github.com/forexample/package-example) { + +include(GNUInstallDirs) + +set(config_install_dir "lib/cmake/${PROJECT_NAME}") + +set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") + +set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") +set(targets_export_name "${PROJECT_NAME}Targets") +set(namespace "${PROJECT_NAME}::") + +include(CMakePackageConfigHelpers) + +# In: +# * targets_export_name +# * PROJECT_NAME +configure_package_config_file( + "packaging/cmake/Config.cmake.in" + "${project_config}" + INSTALL_DESTINATION "${config_install_dir}" +) + +install( + FILES "${project_config}" + DESTINATION "${config_install_dir}" +) + +install( + EXPORT "${targets_export_name}" + NAMESPACE "${namespace}" + DESTINATION "${config_install_dir}" +) + +install( + FILES LICENSES.txt + DESTINATION "share/licenses/librdkafka" +) + +# } + +add_subdirectory(src) +add_subdirectory(src-cpp) + +if(RDKAFKA_BUILD_EXAMPLES) + add_subdirectory(examples) +endif() + +if(RDKAFKA_BUILD_TESTS) + enable_testing() + add_subdirectory(tests) +endif() diff --git a/vendor/github.com/edenhill/librdkafka/CODE_OF_CONDUCT.md b/vendor/github.com/edenhill/librdkafka/CODE_OF_CONDUCT.md new file mode 100644 index 
0000000000..dbbde19c9c --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/edenhill/librdkafka/CONFIGURATION.md b/vendor/github.com/edenhill/librdkafka/CONFIGURATION.md new file mode 100644 index 0000000000..7bc060ffc2 --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/CONFIGURATION.md @@ -0,0 +1,138 @@ +//@file +## Global configuration properties + +Property | C/P | Range | Default | Description +-----------------------------------------|-----|-----------------|--------------:|-------------------------- +builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support.
*Type: CSV flags* +client.id | * | | rdkafka | Client identifier.
*Type: string* +metadata.broker.list | * | | | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
*Type: string* +bootstrap.servers | * | | | Alias for `metadata.broker.list` +message.max.bytes | * | 1000 .. 1000000000 | 1000000 | Maximum Kafka protocol request message size.
*Type: integer* +message.copy.max.bytes | * | 0 .. 1000000000 | 65535 | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.
*Type: integer* +receive.message.max.bytes | * | 1000 .. 2147483647 | 100000000 | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value is automatically adjusted upwards to be at least `fetch.max.bytes` + 512 to allow for protocol overhead.
*Type: integer* +max.in.flight.requests.per.connection | * | 1 .. 1000000 | 1000000 | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one.
*Type: integer* +max.in.flight | * | | | Alias for `max.in.flight.requests.per.connection` +metadata.request.timeout.ms | * | 10 .. 900000 | 60000 | Non-topic request timeout in milliseconds. This is for metadata requests, etc.
*Type: integer* +topic.metadata.refresh.interval.ms | * | -1 .. 3600000 | 300000 | Topic metadata refresh interval in milliseconds. The metadata is automatically refreshed on error and connect. Use -1 to disable the intervalled refresh.
*Type: integer* +metadata.max.age.ms | * | 1 .. 86400000 | -1 | Metadata cache max age. Defaults to metadata.refresh.interval.ms * 3
*Type: integer* +topic.metadata.refresh.fast.interval.ms | * | 1 .. 60000 | 250 | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers.
*Type: integer* +topic.metadata.refresh.fast.cnt | * | 0 .. 1000 | 10 | *Deprecated: No longer used.*
*Type: integer* +topic.metadata.refresh.sparse | * | true, false | true | Sparse metadata requests (consumes less network bandwidth)
*Type: boolean* +topic.blacklist | * | | | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.
*Type: pattern list* +debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, all | | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
*Type: CSV flags* +socket.timeout.ms | * | 10 .. 300000 | 60000 | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of socket.timeout.ms and remaining message.timeout.ms for the first message in the batch. Consumer: FetchRequests will use fetch.wait.max.ms + socket.timeout.ms.
*Type: integer* +socket.blocking.max.ms | * | 1 .. 60000 | 1000 | Maximum time a broker socket operation may block. A lower value improves responsiveness at the expense of slightly higher CPU usage. **Deprecated**
*Type: integer* +socket.send.buffer.bytes | * | 0 .. 100000000 | 0 | Broker socket send buffer size. System default is used if 0.
*Type: integer* +socket.receive.buffer.bytes | * | 0 .. 100000000 | 0 | Broker socket receive buffer size. System default is used if 0.
*Type: integer* +socket.keepalive.enable | * | true, false | false | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets
*Type: boolean* +socket.nagle.disable | * | true, false | false | Disable the Nagle algorithm (TCP_NODELAY).
*Type: boolean* +socket.max.fails | * | 0 .. 1000000 | 1 | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. NOTE: The connection is automatically re-established.
*Type: integer* +broker.address.ttl | * | 0 .. 86400000 | 1000 | How long to cache the broker address resolving results (milliseconds).
*Type: integer* +broker.address.family | * | any, v4, v6 | any | Allowed broker IP address families: any, v4, v6
*Type: enum value* +reconnect.backoff.jitter.ms | * | 0 .. 3600000 | 500 | Throttle broker reconnection attempts by this value +-50%.
*Type: integer* +statistics.interval.ms | * | 0 .. 86400000 | 0 | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.
*Type: integer* +enabled_events | * | 0 .. 2147483647 | 0 | See `rd_kafka_conf_set_events()`
*Type: integer* +error_cb | * | | | Error callback (set with rd_kafka_conf_set_error_cb())
*Type: pointer* +throttle_cb | * | | | Throttle callback (set with rd_kafka_conf_set_throttle_cb())
*Type: pointer* +stats_cb | * | | | Statistics callback (set with rd_kafka_conf_set_stats_cb())
*Type: pointer* +log_cb | * | | | Log callback (set with rd_kafka_conf_set_log_cb())
*Type: pointer* +log_level | * | 0 .. 7 | 6 | Logging level (syslog(3) levels)
*Type: integer* +log.queue | * | true, false | false | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.
*Type: boolean* +log.thread.name | * | true, false | true | Print internal thread name in log messages (useful for debugging librdkafka internals)
*Type: boolean* +log.connection.close | * | true, false | true | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value.
*Type: boolean* +socket_cb | * | | | Socket creation callback to provide race-free CLOEXEC
*Type: pointer* +connect_cb | * | | | Socket connect callback
*Type: pointer* +closesocket_cb | * | | | Socket close callback
*Type: pointer* +open_cb | * | | | File open callback to provide race-free CLOEXEC
*Type: pointer* +opaque | * | | | Application opaque (set with rd_kafka_conf_set_opaque())
*Type: pointer* +default_topic_conf | * | | | Default topic configuration for automatically subscribed topics
*Type: pointer* +internal.termination.signal | * | 0 .. 128 | 0 | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.
*Type: integer* +api.version.request | * | true, false | true | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.
*Type: boolean* +api.version.request.timeout.ms | * | 1 .. 300000 | 10000 | Timeout for broker API version requests.
*Type: integer* +api.version.fallback.ms | * | 0 .. 604800000 | 1200000 | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).
*Type: integer* +broker.version.fallback | * | | 0.9.0 | Older broker versions (<0.10.0) provides no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value, such as 0.10.2.1, enables ApiVersionRequests.
*Type: string* +security.protocol | * | plaintext, ssl, sasl_plaintext, sasl_ssl | plaintext | Protocol used to communicate with brokers.
*Type: enum value* +ssl.cipher.suites | * | | | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`.
*Type: string* +ssl.key.location | * | | | Path to client's private key (PEM) used for authentication.
*Type: string* +ssl.key.password | * | | | Private key passphrase
*Type: string* +ssl.certificate.location | * | | | Path to client's public key (PEM) used for authentication.
*Type: string* +ssl.ca.location | * | | | File or directory path to CA certificate(s) for verifying the broker's key.
*Type: string* +ssl.crl.location | * | | | Path to CRL for verifying broker's certificate validity.
*Type: string* +ssl.keystore.location | * | | | Path to client's keystore (PKCS#12) used for authentication.
*Type: string* +ssl.keystore.password | * | | | Client's keystore (PKCS#12) password.
*Type: string* +sasl.mechanisms | * | | GSSAPI | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name only one mechanism must be configured.
*Type: string* +sasl.mechanism | * | | | Alias for `sasl.mechanisms` +sasl.kerberos.service.name | * | | kafka | Kerberos principal name that Kafka runs as, not including /hostname@REALM
*Type: string* +sasl.kerberos.principal | * | | kafkaclient | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).
*Type: string* +sasl.kerberos.kinit.cmd | * | | kinit -S "%{sasl.kerberos.service.name}/%{broker.name}" -k -t "%{sasl.kerberos.keytab}" %{sasl.kerberos.principal} | Full kerberos kinit command string, %{config.prop.name} is replaced by corresponding config object value, %{broker.name} returns the broker's hostname.
*Type: string* +sasl.kerberos.keytab | * | | | Path to Kerberos keytab file. Uses system default if not set. **NOTE**: This is not automatically used but must be added to the template in sasl.kerberos.kinit.cmd as ` ... -t %{sasl.kerberos.keytab}`.
*Type: string* +sasl.kerberos.min.time.before.relogin | * | 1 .. 86400000 | 60000 | Minimum time in milliseconds between key refresh attempts.
*Type: integer* +sasl.username | * | | | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms
*Type: string* +sasl.password | * | | | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism
*Type: string* +plugin.library.paths | * | | | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
*Type: string* +interceptors | * | | | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors.
*Type: * +group.id | * | | | Client group id string. All clients sharing the same group.id belong to the same group.
*Type: string* +partition.assignment.strategy | * | | range,roundrobin | Name of partition assignment strategy to use when elected group leader assigns partitions to group members.
*Type: string* +session.timeout.ms | * | 1 .. 3600000 | 30000 | Client group session and failure detection timeout.
*Type: integer* +heartbeat.interval.ms | * | 1 .. 3600000 | 1000 | Group session keepalive heartbeat interval.
*Type: integer* +group.protocol.type | * | | consumer | Group protocol type
*Type: string* +coordinator.query.interval.ms | * | 1 .. 3600000 | 600000 | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.
*Type: integer* +enable.auto.commit | C | true, false | true | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().
*Type: boolean* +auto.commit.interval.ms | C | 0 .. 86400000 | 5000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer.
*Type: integer* +enable.auto.offset.store | C | true, false | true | Automatically store offset of last message provided to application.
*Type: boolean* +queued.min.messages | C | 1 .. 10000000 | 100000 | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue.
*Type: integer* +queued.max.messages.kbytes | C | 1 .. 2097151 | 1048576 | Maximum number of kilobytes per topic+partition in the local consumer queue. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.
*Type: integer* +fetch.wait.max.ms | C | 0 .. 300000 | 100 | Maximum time the broker may wait to fill the response with fetch.min.bytes.
*Type: integer* +fetch.message.max.bytes | C | 1 .. 1000000000 | 1048576 | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
*Type: integer* +max.partition.fetch.bytes | C | | | Alias for `fetch.message.max.bytes` +fetch.max.bytes | C | 0 .. 2147483135 | 52428800 | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config).
*Type: integer* +fetch.min.bytes | C | 1 .. 100000000 | 1 | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting.
*Type: integer* +fetch.error.backoff.ms | C | 0 .. 300000 | 500 | How long to postpone the next fetch request for a topic+partition in case of a fetch error.
*Type: integer* +offset.store.method | C | none, file, broker | broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker).
*Type: enum value* +consume_cb | C | | | Message consume callback (set with rd_kafka_conf_set_consume_cb())
*Type: pointer* +rebalance_cb | C | | | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb())
*Type: pointer* +offset_commit_cb | C | | | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb())
*Type: pointer* +enable.partition.eof | C | true, false | true | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.
*Type: boolean* +check.crcs | C | true, false | false | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.
*Type: boolean* +queue.buffering.max.messages | P | 1 .. 10000000 | 100000 | Maximum number of messages allowed on the producer queue.
*Type: integer* +queue.buffering.max.kbytes | P | 1 .. 2097151 | 1048576 | Maximum total message size sum allowed on the producer queue. This property has higher priority than queue.buffering.max.messages.
*Type: integer* +queue.buffering.max.ms | P | 0 .. 900000 | 0 | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
*Type: integer* +linger.ms | P | | | Alias for `queue.buffering.max.ms` +message.send.max.retries | P | 0 .. 10000000 | 2 | How many times to retry sending a failing MessageSet. **Note:** retrying may cause reordering.
*Type: integer* +retries | P | | | Alias for `message.send.max.retries` +retry.backoff.ms | P | 1 .. 300000 | 100 | The backoff time in milliseconds before retrying a protocol request.
*Type: integer* +queue.buffering.backpressure.threshold | P | 0 .. 1000000 | 10 | The threshold of outstanding not yet transmitted requests needed to backpressure the producer's message accumulator. A lower number yields larger and more effective batches.
*Type: integer* +compression.codec | P | none, gzip, snappy, lz4 | none | Compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`.
*Type: enum value* +compression.type | P | | | Alias for `compression.codec` +batch.num.messages | P | 1 .. 1000000 | 10000 | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by message.max.bytes.
*Type: integer* +delivery.report.only.error | P | true, false | false | Only provide delivery reports for failed messages.
*Type: boolean* +dr_cb | P | | | Delivery report callback (set with rd_kafka_conf_set_dr_cb())
*Type: pointer* +dr_msg_cb | P | | | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())
*Type: pointer* + + +## Topic configuration properties + +Property | C/P | Range | Default | Description +-----------------------------------------|-----|-----------------|--------------:|-------------------------- +request.required.acks | P | -1 .. 1000 | 1 | This field indicates how many acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *1*=Only the leader broker will need to ack the message, *-1* or *all*=broker will block until message is committed by all in sync replicas (ISRs) or broker's `min.insync.replicas` setting before sending response.
*Type: integer* +acks | P | | | Alias for `request.required.acks` +request.timeout.ms | P | 1 .. 900000 | 5000 | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.
*Type: integer* +message.timeout.ms | P | 0 .. 900000 | 300000 | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite.
*Type: integer* +queuing.strategy | P | fifo, lifo | fifo | Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages. WARNING: `lifo` is experimental and subject to change or removal.
*Type: enum value* +produce.offset.report | P | true, false | false | Report offset of produced message back to application. The application must use the `dr_msg_cb` to retrieve the offset from `rd_kafka_message_t.offset`.
*Type: boolean* +partitioner | P | | consistent_random | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.).
*Type: string* +partitioner_cb | P | | | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb())
*Type: pointer* +msg_order_cmp | P | | | Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`.
*Type: pointer* +opaque | * | | | Application opaque (set with rd_kafka_topic_conf_set_opaque())
*Type: pointer* +compression.codec | P | none, gzip, snappy, lz4, inherit | inherit | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration.
*Type: enum value* +compression.type | P | | | Alias for `compression.codec` +auto.commit.enable | C | true, false | true | If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). **NOTE:** This property should only be used with the simple legacy consumer, when using the high-level KafkaConsumer the global `enable.auto.commit` property must be used instead. **NOTE:** There is currently no zookeeper integration, offsets will be written to broker or local file according to offset.store.method.
*Type: boolean* +enable.auto.commit | C | | | Alias for `auto.commit.enable` +auto.commit.interval.ms | C | 10 .. 86400000 | 60000 | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. This setting is used by the low-level legacy consumer.
*Type: integer* +auto.offset.reset | C | smallest, earliest, beginning, largest, latest, end, error | largest | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error which is retrieved by consuming messages and checking 'message->err'.
*Type: enum value* +offset.store.path | C | | . | Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition.
*Type: string* +offset.store.sync.interval.ms | C | -1 .. 86400000 | -1 | fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write.
*Type: integer* +offset.store.method | C | file, broker | broker | Offset commit store method: 'file' - local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.).
*Type: enum value* +consume.callback.max.messages | C | 0 .. 1000000 | 0 | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited)
*Type: integer* + +### C/P legend: C = Consumer, P = Producer, * = both diff --git a/vendor/github.com/edenhill/librdkafka/CONTRIBUTING.md b/vendor/github.com/edenhill/librdkafka/CONTRIBUTING.md new file mode 100644 index 0000000000..5da7c77309 --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/CONTRIBUTING.md @@ -0,0 +1,271 @@ +# Contributing to librdkafka + +(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!) + +This document is intended to offer guidelines on how to best contribute to the +librdkafka project. This concerns new features as well as bug fixes and +general improvements. + +### License and copyright + +When contributing with code, you agree to put your changes and new code under +the same license librdkafka is already using unless stated and agreed +otherwise. + +When changing existing source code, you do not alter the copyright of the +original file(s). The copyright will still be owned by the original creator(s) +or those who have been assigned copyright by the original author(s). + +By submitting a patch to the librdkafka, you are assumed to have the right +to the code and to be allowed by your employer or whatever to hand over that +patch/code to us. We will credit you for your changes as far as possible, to +give credit but also to keep a trace back to who made what changes. Please +always provide us with your full real name when contributing! + +Official librdkafka project maintainer(s) assume ownership of all accepted +submissions. + +## Write a good patch + +### Follow code style + +When writing C code, follow the code style already established in +the project. Consistent style makes code easier to read and mistakes less +likely to happen. + +See the end of this document for the C style guide to use in librdkafka. 
+ + +### Write Separate Changes + +It is annoying when you get a huge patch from someone that is said to fix 511 +odd problems, but discussions and opinions don't agree with 510 of them - or +509 of them were already fixed in a different way. Then the person merging +this change needs to extract the single interesting patch from somewhere +within the huge pile of source, and that gives a lot of extra work. + +Preferably, each fix that correct a problem should be in its own patch/commit +with its own description/commit message stating exactly what they correct so +that all changes can be selectively applied by the maintainer or other +interested parties. + +Also, separate changes enable bisecting much better when we track problems +and regression in the future. + +### Patch Against Recent Sources + +Please try to make your patches against latest master branch. + +### Test Cases + +Bugfixes should also include a new test case in the regression test suite +that verifies the bug is fixed. +Create a new tests/00-.c file and +try to reproduce the issue in its most simple form. +Verify that the test case fails for earlier versions and passes with your +bugfix in-place. + +New features and APIs should also result in an added test case. + +Submitted patches must pass all existing tests. +For more information on the test suite see [tests/README] + + + +## How to get your changes into the main sources + +File a [pull request on github](https://github.com/edenhill/librdkafka/pulls) + +Your change will be reviewed and discussed there and you will be +expected to correct flaws pointed out and update accordingly, or the change +risk stalling and eventually just get deleted without action. As a submitter +of a change, you are the owner of that change until it has been merged. + +Make sure to monitor your PR on github and answer questions and/or +fix nits/flaws. This is very important. 
We will take lack of replies as a +sign that you're not very anxious to get your patch accepted and we tend to +simply drop such changes. + +When you adjust your pull requests after review, please squash the +commits so that we can review the full updated version more easily +and keep history cleaner. + +For example: + + # Interactive rebase to let you squash/fixup commits + $ git rebase -i master + + # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the + # first column. These will be silently integrated into the + # previous commit, so make sure to move the fixup-commit to + # the line beneath the parent commit. + + # Since this probably rewrote the history of previously pushed + # commits you will need to make a force push, which is usually + # a bad idea but works good for pull requests. + $ git push --force origin your_feature_branch + + +### Write good commit messages + +A short guide to how to write commit messages in the curl project. + + ---- start ---- + [area]: [short line describing the main effect] [(#issuenumber)] + -- empty line -- + [full description, no wider than 72 columns that describe as much as + possible as to why this change is made, and possibly what things + it fixes and everything else that is related] + ---- stop ---- + +Example: + + cgrp: restart query timer on all heartbeat failures (#10023) + + If unhandled errors were received in HeartbeatResponse + the cgrp could get stuck in a state where it would not + refresh its coordinator. + + + +# librdkafka C style guide + +## Function and globals naming + +Use self-explanatory hierarchical snake-case naming. +Pretty much all symbols should start with `rd_kafka_`, followed by +their subsystem (e.g., `cgrp`, `broker`, `buf`, etc..), followed by an +action (e.g, `find`, `get`, `clear`, ..). + + +## Variable naming + +For existing types use the type prefix as variable name. +The type prefix is typically the first part of struct member fields. 
+Example: + + * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker + variable names should be named `rkb` + + +For other types use reasonably concise but descriptive names. +`i` and `j` are typical int iterators. + +## Variable declaration + +Variables must be declared at the head of a scope, no in-line variable +declarations are allowed. + +## Indenting + +Use 8 spaces indent, same as the Linux kernel. +In emacs, use `c-set-style "linux`. +For C++, use Google's C++ style. + +## Comments + +Use `/* .. */` comments, not `// ..` + +For functions, use doxygen syntax, e.g.: + + /** + * @brief + * .. + * @returns + */ + + +Make sure to comment non-obvious code and situations where the full +context of an operation is not easily graspable. + +Also make sure to update existing comments when the code changes. + + +## Line length + +Try hard to keep line length below 80 characters, when this is not possible +exceed it with reason. + + +## Braces + +Braces go on the same line as their enveloping statement: + + int some_func (..) { + while (1) { + if (1) { + do something; + .. + } else { + do something else; + .. + } + } + + /* Single line scopes should not have braces */ + if (1) + hi(); + else if (2) + /* Say hello */ + hello(); + else + bye(); + + +## Spaces + +All expression parentheses should be prefixed and suffixed with a single space: + + int some_func (int a) { + + if (1) + ....; + + for (i = 0 ; i < 19 ; i++) { + + + } + } + + +Use space around operators: + + int a = 2; + + if (b >= 3) + c += 2; + +Except for these: + + d++; + --e; + + +## New block on new line + +New blocks should be on a new line: + + if (1) + new(); + else + old(); + + +## Parentheses + +Don't assume the reader knows C operator precedence by heart for complex +statements, add parentheses to ease readability. + + +## ifdef hell + +Avoid ifdef's as much as possible. +Platform support checking should be performed in configure.librdkafka. 
+ + + + + +# librdkafka C++ style guide + +Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html) diff --git a/vendor/github.com/edenhill/librdkafka/Doxyfile b/vendor/github.com/edenhill/librdkafka/Doxyfile new file mode 100644 index 0000000000..8e94e129f3 --- /dev/null +++ b/vendor/github.com/edenhill/librdkafka/Doxyfile @@ -0,0 +1,2385 @@ +# Doxyfile 1.8.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "librdkafka" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "The Apache Kafka C/C++ client library" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +#PROJECT_LOGO = kafka_logo.png + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = staging-docs + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. 
Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. 
+ +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. 
+ +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. 
+# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = "locality=@par Thread restriction:" +ALIASES += "locks=@par Lock restriction:" + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. 
+ +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. 
Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. 
By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. 
+# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = YES + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. 
+# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. 
+ +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. 
+# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = NO + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. 
+ +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. 
+ +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). 
Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. 
+ +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). 
+ +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT = mainpage.doxy INTRODUCTION.md CONFIGURATION.md src/rdkafka.h src-cpp/rdkafkacpp.h + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. 
This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. 
+# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). 
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. 
+ +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. 
+# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the +# cost of reduced performance. This can be particularly helpful with template +# rich C++ code for which doxygen's built-in parser lacks the necessary type +# information. +# Note: The availability of this option depends on whether or not doxygen was +# compiled with the --with-libclang option. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +CLANG_OPTIONS = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. 
The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. 
For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. 
+# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. 
Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "librdkafka documentation" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = se.edenhill.librdkafka + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = se.edenhill + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Magnus Edenhill + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. 
The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = se.edenhill.librdkafka + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. 
+# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = se.edenhill.librdkafka + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 1 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. See the MathJax site (see: +# http://docs.mathjax.org/en/latest/output.html) for more details. +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility), NativeMML (i.e. MathML) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from http://www.mathjax.org before deployment. +# The default value is: http://cdn.mathjax.org/mathjax/latest. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. 
For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use <access key> + S +# (what the <access key> is depends on the OS and browser, but it is typically +# <CTRL>, <ALT>/