diff --git a/changelog.md b/changelog.md index d7fc7a061..6a1f566e2 100644 --- a/changelog.md +++ b/changelog.md @@ -1,5 +1,21 @@ # Changelog +See also the [rdkafka-sys changelog](rdkafka-sys/changelog.md). + + +## 0.24.0 (Unreleased) + +* Decouple versioning of rdkafka-sys from rdkafka. rdkafka-sys now has its + own [changelog](rdkafka-sys/changelog.md) and will follow SemVer conventions. + ([#211]) + +[#211]: https://github.com/fede1024/rust-rdkafka/issues/211 + + +## 0.23.1 (2020-01-13) + +* Fix build on docs.rs. + ## 0.23.0 (2019-12-31) diff --git a/rdkafka-sys/Cargo.toml b/rdkafka-sys/Cargo.toml index 048ce8889..f7d91c24e 100644 --- a/rdkafka-sys/Cargo.toml +++ b/rdkafka-sys/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rdkafka-sys" -version = "1.3.1" +version = "1.4.0+1.4.0" authors = ["Federico Giraud "] build = "build.rs" links = "rdkafka" diff --git a/rdkafka-sys/README.md b/rdkafka-sys/README.md index 5c1c06285..9fa75fa12 100644 --- a/rdkafka-sys/README.md +++ b/rdkafka-sys/README.md @@ -14,9 +14,12 @@ cargo install bindgen ## Version -The rdkafka-sys version number is in the format `X.Y.Z-P`, where `X.Y.Z` -corresponds to the librdkafka version, and `P` indicates the version of the -rust bindings. +The rdkafka-sys version number is in the format `X.Y.Z+RX.RY.RZ`, where `X.Y.Z` +is the version of this crate and follows SemVer conventions, while `RX.RY.RZ` +is the version of the bundled librdkafka. + +Note that versions before v1.4.0+1.4.0 did not follow this convention, and +instead directly corresponded to the bundled librdkafka version. ## Build diff --git a/rdkafka-sys/build.rs b/rdkafka-sys/build.rs index cd6260696..78f91835c 100644 --- a/rdkafka-sys/build.rs +++ b/rdkafka-sys/build.rs @@ -193,7 +193,9 @@ fn build_librdkafka() { // want a stable location that we can add to the linker search path. // Since we're not actually installing to /usr or /usr/local, there's no // harm to always using "lib" here. 
- .define("CMAKE_INSTALL_LIBDIR", "lib"); + .define("CMAKE_INSTALL_LIBDIR", "lib") + // Workaround for https://github.com/edenhill/librdkafka/pull/2640. + .define("ENABLE_DEVEL", "0"); if env::var("CARGO_FEATURE_LIBZ").is_ok() { config.define("WITH_ZLIB", "1"); diff --git a/rdkafka-sys/changelog.md b/rdkafka-sys/changelog.md new file mode 100644 index 000000000..ea1558c28 --- /dev/null +++ b/rdkafka-sys/changelog.md @@ -0,0 +1,8 @@ +# Changelog + + +## v1.4.0+1.4.0 (Unreleased) + +* Upgrade to librdkafka v1.4.0. + +* Start separate changelog for rdkafka-sys. \ No newline at end of file diff --git a/rdkafka-sys/librdkafka b/rdkafka-sys/librdkafka index 4ffe54b4f..e4a8c0f62 160000 --- a/rdkafka-sys/librdkafka +++ b/rdkafka-sys/librdkafka @@ -1 +1 @@ -Subproject commit 4ffe54b4f59ee5ae3767f9f25dc14651a3384d62 +Subproject commit e4a8c0f62742789bd4d62accd1497c82c08c4259 diff --git a/rdkafka-sys/src/bindings.rs b/rdkafka-sys/src/bindings.rs index 15610bcde..485184c7e 100644 --- a/rdkafka-sys/src/bindings.rs +++ b/rdkafka-sys/src/bindings.rs @@ -4,7 +4,7 @@ use num_enum::TryFromPrimitive; type FILE = libc::FILE; type sockaddr = libc::sockaddr; -pub const RD_KAFKA_VERSION: u32 = 16974079; +pub const RD_KAFKA_VERSION: u32 = 17039615; pub const RD_KAFKA_DEBUG_CONTEXTS : & 'static [ u8 ; 124usize ] = b"all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp,security,fetch,interceptor,plugin,consumer,admin,eos,mock\0" ; pub const RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE: u32 = 8; pub const RD_KAFKA_OFFSET_BEGINNING: i32 = -2; @@ -97,6 +97,18 @@ pub struct rd_kafka_topic_result_s { _unused: [u8; 0], } pub type rd_kafka_topic_result_t = rd_kafka_topic_result_s; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct rd_kafka_consumer_group_metadata_s { + _unused: [u8; 0], +} +pub type rd_kafka_consumer_group_metadata_t = rd_kafka_consumer_group_metadata_s; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct rd_kafka_error_s { + _unused: [u8; 0], +} +pub type 
rd_kafka_error_t = rd_kafka_error_s; #[repr(i32)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, TryFromPrimitive)] pub enum rd_kafka_resp_err_t { @@ -155,6 +167,9 @@ pub enum rd_kafka_resp_err_t { RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148, RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147, RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146, + RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145, + RD_KAFKA_RESP_ERR__FENCED = -144, + RD_KAFKA_RESP_ERR__APPLICATION = -143, RD_KAFKA_RESP_ERR__END = -100, RD_KAFKA_RESP_ERR_UNKNOWN = -1, RD_KAFKA_RESP_ERR_NO_ERROR = 0, @@ -239,7 +254,8 @@ pub enum rd_kafka_resp_err_t { RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79, RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80, RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81, - RD_KAFKA_RESP_ERR_END_ALL = 82, + RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82, + RD_KAFKA_RESP_ERR_END_ALL = 83, } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -280,6 +296,36 @@ extern "C" { reason: *const ::std::os::raw::c_char, ) -> rd_kafka_resp_err_t; } +extern "C" { + pub fn rd_kafka_error_code(error: *const rd_kafka_error_t) -> rd_kafka_resp_err_t; +} +extern "C" { + pub fn rd_kafka_error_name(error: *const rd_kafka_error_t) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn rd_kafka_error_string(error: *const rd_kafka_error_t) -> *const ::std::os::raw::c_char; +} +extern "C" { + pub fn rd_kafka_error_is_fatal(error: *const rd_kafka_error_t) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn rd_kafka_error_is_retriable(error: *const rd_kafka_error_t) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn rd_kafka_error_txn_requires_abort( + error: *const rd_kafka_error_t, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn rd_kafka_error_destroy(error: *mut rd_kafka_error_t); +} +extern "C" { + pub fn rd_kafka_error_new( + code: rd_kafka_resp_err_t, + fmt: *const ::std::os::raw::c_char, + ... 
+ ) -> *mut rd_kafka_error_t; +} #[repr(C)] pub struct rd_kafka_topic_partition_s { pub topic: *mut ::std::os::raw::c_char, @@ -922,6 +968,26 @@ extern "C" { msg_opaque: *mut ::std::os::raw::c_void, ) -> i32; } +extern "C" { + pub fn rd_kafka_msg_partitioner_fnv1a( + rkt: *const rd_kafka_topic_t, + key: *const ::std::os::raw::c_void, + keylen: usize, + partition_cnt: i32, + rkt_opaque: *mut ::std::os::raw::c_void, + msg_opaque: *mut ::std::os::raw::c_void, + ) -> i32; +} +extern "C" { + pub fn rd_kafka_msg_partitioner_fnv1a_random( + rkt: *const rd_kafka_topic_t, + key: *const ::std::os::raw::c_void, + keylen: usize, + partition_cnt: i32, + rkt_opaque: *mut ::std::os::raw::c_void, + msg_opaque: *mut ::std::os::raw::c_void, + ) -> i32; +} extern "C" { pub fn rd_kafka_new( type_: rd_kafka_type_t, @@ -1249,6 +1315,33 @@ extern "C" { partitions: *mut rd_kafka_topic_partition_list_t, ) -> rd_kafka_resp_err_t; } +extern "C" { + pub fn rd_kafka_consumer_group_metadata( + rk: *mut rd_kafka_t, + ) -> *mut rd_kafka_consumer_group_metadata_t; +} +extern "C" { + pub fn rd_kafka_consumer_group_metadata_new( + group_id: *const ::std::os::raw::c_char, + ) -> *mut rd_kafka_consumer_group_metadata_t; +} +extern "C" { + pub fn rd_kafka_consumer_group_metadata_destroy(arg1: *mut rd_kafka_consumer_group_metadata_t); +} +extern "C" { + pub fn rd_kafka_consumer_group_metadata_write( + cgmd: *const rd_kafka_consumer_group_metadata_t, + bufferp: *mut *mut ::std::os::raw::c_void, + sizep: *mut usize, + ) -> *mut rd_kafka_error_t; +} +extern "C" { + pub fn rd_kafka_consumer_group_metadata_read( + cgmdp: *mut *mut rd_kafka_consumer_group_metadata_t, + buffer: *const ::std::os::raw::c_void, + size: usize, + ) -> *mut rd_kafka_error_t; +} extern "C" { pub fn rd_kafka_produce( rkt: *mut rd_kafka_topic_t, @@ -2138,3 +2231,32 @@ extern "C" { errstr: *const ::std::os::raw::c_char, ) -> rd_kafka_resp_err_t; } +extern "C" { + pub fn rd_kafka_init_transactions( + rk: *mut rd_kafka_t, + timeout_ms: 
::std::os::raw::c_int, + ) -> *mut rd_kafka_error_t; +} +extern "C" { + pub fn rd_kafka_begin_transaction(rk: *mut rd_kafka_t) -> *mut rd_kafka_error_t; +} +extern "C" { + pub fn rd_kafka_send_offsets_to_transaction( + rk: *mut rd_kafka_t, + offsets: *const rd_kafka_topic_partition_list_t, + cgmetadata: *const rd_kafka_consumer_group_metadata_t, + timeout_ms: ::std::os::raw::c_int, + ) -> *mut rd_kafka_error_t; +} +extern "C" { + pub fn rd_kafka_commit_transaction( + rk: *mut rd_kafka_t, + timeout_ms: ::std::os::raw::c_int, + ) -> *mut rd_kafka_error_t; +} +extern "C" { + pub fn rd_kafka_abort_transaction( + rk: *mut rd_kafka_t, + timeout_ms: ::std::os::raw::c_int, + ) -> *mut rd_kafka_error_t; +} diff --git a/rdkafka-sys/src/helpers.rs b/rdkafka-sys/src/helpers.rs index 286d3bd82..7154e80f5 100644 --- a/rdkafka-sys/src/helpers.rs +++ b/rdkafka-sys/src/helpers.rs @@ -60,6 +60,9 @@ pub fn rd_kafka_resp_err_t_to_rdkafka_error(err: RDKafkaRespErr) -> RDKafkaError RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE => GaplessGuarantee, RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED => PollExceeded, RD_KAFKA_RESP_ERR__UNKNOWN_BROKER => UnknownBroker, + RD_KAFKA_RESP_ERR__NOT_CONFIGURED => NotConfigured, + RD_KAFKA_RESP_ERR__FENCED => Fenced, + RD_KAFKA_RESP_ERR__APPLICATION => Application, RD_KAFKA_RESP_ERR__END => End, RD_KAFKA_RESP_ERR_UNKNOWN => Unknown, RD_KAFKA_RESP_ERR_NO_ERROR => NoError, @@ -148,6 +151,7 @@ pub fn rd_kafka_resp_err_t_to_rdkafka_error(err: RDKafkaRespErr) -> RDKafkaError RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED => MemberIdRequired, RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE => PreferredLeaderNotAvailable, RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED => GroupMaxSizeReached, + RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID => FencedInstanceId, RD_KAFKA_RESP_ERR_END_ALL => EndAll, } } diff --git a/rdkafka-sys/src/types.rs b/rdkafka-sys/src/types.rs index 096c3592a..ae93d1e2e 100644 --- a/rdkafka-sys/src/types.rs +++ b/rdkafka-sys/src/types.rs @@ -216,6 +216,12 @@ pub enum RDKafkaError 
{ PollExceeded = -147, /// Unknown broker UnknownBroker = -146, + /// Functionality not configured + NotConfigured, + /// Instance has been fenced + Fenced, + /// Application generated error + Application, #[doc(hidden)] End = -100, /// Unknown broker error @@ -387,6 +393,8 @@ pub enum RDKafkaError { PreferredLeaderNotAvailable = 80, /// Consumer group has reached maximum size GroupMaxSizeReached = 81, + /// Static consumer fenced by other consumer with same group.instance.id + FencedInstanceId = 82, #[doc(hidden)] EndAll, }