rdkafka-sys: upgrade to librdkafka v1.4.0
Also allow rdkafka and rdkafka-sys to be versioned independently by
making rdkafka-sys follow SemVer. This avoids future scenarios like
the one described in issue fede1024#211.
benesch committed Apr 19, 2020
1 parent b18d225 commit aeefceb
Showing 9 changed files with 171 additions and 8 deletions.
16 changes: 16 additions & 0 deletions changelog.md
@@ -1,5 +1,21 @@
# Changelog

See also the [rdkafka-sys changelog](rdkafka-sys/changelog.md).

<a name="0.24.0"></a>
## 0.24.0 (Unreleased)

* Decouple versioning of rdkafka-sys from rdkafka. rdkafka-sys now has its
own [changelog](rdkafka-sys/changelog.md) and will follow SemVer conventions.
([#211])

[#211]: https://github.com/fede1024/rust-rdkafka/issues/211

<a name="0.23.1"></a>
## 0.23.1 (2020-01-13)

* Fix build on docs.rs.

<a name="0.23.0"></a>
## 0.23.0 (2019-12-31)

2 changes: 1 addition & 1 deletion rdkafka-sys/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "rdkafka-sys"
version = "1.3.1"
version = "1.4.0+1.4.0"
authors = ["Federico Giraud <[email protected]>"]
build = "build.rs"
links = "rdkafka"
9 changes: 6 additions & 3 deletions rdkafka-sys/README.md
@@ -14,9 +14,12 @@ cargo install bindgen

## Version

The rdkafka-sys version number is in the format `X.Y.Z-P`, where `X.Y.Z`
corresponds to the librdkafka version, and `P` indicates the version of the
rust bindings.
The rdkafka-sys version number is in the format `X.Y.Z+RX.RY.RZ`, where `X.Y.Z`
is the version of this crate and follows SemVer conventions, while `RX.RY.RZ`
is the version of the bundled librdkafka.

Note that versions before v1.4.0+1.4.0 did not follow this convention, and
instead directly corresponded to the bundled librdkafka version.

## Build

4 changes: 3 additions & 1 deletion rdkafka-sys/build.rs
@@ -193,7 +193,9 @@ fn build_librdkafka() {
// want a stable location that we can add to the linker search path.
// Since we're not actually installing to /usr or /usr/local, there's no
// harm to always using "lib" here.
.define("CMAKE_INSTALL_LIBDIR", "lib");
.define("CMAKE_INSTALL_LIBDIR", "lib")
// Workaround for https://github.com/edenhill/librdkafka/pull/2640.
.define("ENABLE_DEVEL", "0");

if env::var("CARGO_FEATURE_LIBZ").is_ok() {
config.define("WITH_ZLIB", "1");
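For context, rdkafka-sys drives the bundled librdkafka build through the `cmake` crate, and the new `ENABLE_DEVEL` define is simply one more entry in that builder chain. A minimal sketch of the pattern, simplified from the real build script (the source path and link directives here are illustrative assumptions):

```rust
// Illustrative sketch only: the real build.rs configures many more options.
// Requires the `cmake` crate as a build dependency.
use std::env;

fn build_librdkafka() {
    let mut config = cmake::Config::new("librdkafka");
    config
        // Install libraries into a predictable "lib" directory.
        .define("CMAKE_INSTALL_LIBDIR", "lib")
        // Workaround for https://github.com/edenhill/librdkafka/pull/2640.
        .define("ENABLE_DEVEL", "0");

    if env::var("CARGO_FEATURE_LIBZ").is_ok() {
        config.define("WITH_ZLIB", "1");
    }

    // Build and install into OUT_DIR, then tell Cargo where to find the result.
    let dst = config.build();
    println!("cargo:rustc-link-search=native={}/lib", dst.display());
    println!("cargo:rustc-link-lib=static=rdkafka");
}
```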
8 changes: 8 additions & 0 deletions rdkafka-sys/changelog.md
@@ -0,0 +1,8 @@
# Changelog

<a name="1.4.0+1.4.0"></a>
## v1.4.0+1.4.0 (Unreleased)

* Upgrade to librdkafka v1.4.0.

* Start separate changelog for rdkafka-sys.
2 changes: 1 addition & 1 deletion rdkafka-sys/librdkafka
126 changes: 124 additions & 2 deletions rdkafka-sys/src/bindings.rs
@@ -4,7 +4,7 @@ use num_enum::TryFromPrimitive;
type FILE = libc::FILE;
type sockaddr = libc::sockaddr;

pub const RD_KAFKA_VERSION: u32 = 16974079;
pub const RD_KAFKA_VERSION: u32 = 17039615;
pub const RD_KAFKA_DEBUG_CONTEXTS : & 'static [ u8 ; 124usize ] = b"all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp,security,fetch,interceptor,plugin,consumer,admin,eos,mock\0" ;
pub const RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE: u32 = 8;
pub const RD_KAFKA_OFFSET_BEGINNING: i32 = -2;
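For reference, librdkafka packs its version into `RD_KAFKA_VERSION` as hex `0xMMmmrrpp` (major, minor, revision, pre-release id, with `0xff` marking a final release), so the new value above is `0x010400ff`, i.e. v1.4.0. A small sketch of decoding it:

```rust
// Decode librdkafka's packed version constant (hex 0xMMmmrrpp).
// 17039615 == 0x010400ff, i.e. the v1.4.0 final release.
const RD_KAFKA_VERSION: u32 = 17_039_615;

fn main() {
    let major = (RD_KAFKA_VERSION >> 24) & 0xff;
    let minor = (RD_KAFKA_VERSION >> 16) & 0xff;
    let revision = (RD_KAFKA_VERSION >> 8) & 0xff;
    let prerelease = RD_KAFKA_VERSION & 0xff; // 0xff == final release
    println!(
        "librdkafka {}.{}.{} (pre-release id {:#04x})",
        major, minor, revision, prerelease
    );
}
```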
@@ -97,6 +97,18 @@ pub struct rd_kafka_topic_result_s {
_unused: [u8; 0],
}
pub type rd_kafka_topic_result_t = rd_kafka_topic_result_s;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct rd_kafka_consumer_group_metadata_s {
_unused: [u8; 0],
}
pub type rd_kafka_consumer_group_metadata_t = rd_kafka_consumer_group_metadata_s;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct rd_kafka_error_s {
_unused: [u8; 0],
}
pub type rd_kafka_error_t = rd_kafka_error_s;
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, TryFromPrimitive)]
pub enum rd_kafka_resp_err_t {
@@ -155,6 +167,9 @@ pub enum rd_kafka_resp_err_t {
RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148,
RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147,
RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146,
RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145,
RD_KAFKA_RESP_ERR__FENCED = -144,
RD_KAFKA_RESP_ERR__APPLICATION = -143,
RD_KAFKA_RESP_ERR__END = -100,
RD_KAFKA_RESP_ERR_UNKNOWN = -1,
RD_KAFKA_RESP_ERR_NO_ERROR = 0,
@@ -239,7 +254,8 @@ pub enum rd_kafka_resp_err_t {
RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
RD_KAFKA_RESP_ERR_END_ALL = 82,
RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
RD_KAFKA_RESP_ERR_END_ALL = 83,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
@@ -280,6 +296,36 @@ extern "C" {
reason: *const ::std::os::raw::c_char,
) -> rd_kafka_resp_err_t;
}
extern "C" {
pub fn rd_kafka_error_code(error: *const rd_kafka_error_t) -> rd_kafka_resp_err_t;
}
extern "C" {
pub fn rd_kafka_error_name(error: *const rd_kafka_error_t) -> *const ::std::os::raw::c_char;
}
extern "C" {
pub fn rd_kafka_error_string(error: *const rd_kafka_error_t) -> *const ::std::os::raw::c_char;
}
extern "C" {
pub fn rd_kafka_error_is_fatal(error: *const rd_kafka_error_t) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn rd_kafka_error_is_retriable(error: *const rd_kafka_error_t) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn rd_kafka_error_txn_requires_abort(
error: *const rd_kafka_error_t,
) -> ::std::os::raw::c_int;
}
extern "C" {
pub fn rd_kafka_error_destroy(error: *mut rd_kafka_error_t);
}
extern "C" {
pub fn rd_kafka_error_new(
code: rd_kafka_resp_err_t,
fmt: *const ::std::os::raw::c_char,
...
) -> *mut rd_kafka_error_t;
}
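The `rd_kafka_error_t` introduced above is an opaque, caller-owned error object returned by the new transactional APIs. A hedged sketch of a helper (the name `take_error` is illustrative, not part of this crate) that extracts its details and then releases it, assuming the declarations above are in scope:

```rust
use std::ffi::CStr;

// Hypothetical helper: convert a librdkafka error object into a Rust string
// and release it. Assumes the extern declarations above are in scope.
// Returns None for a NULL pointer, which librdkafka uses to signal success.
unsafe fn take_error(err: *mut rd_kafka_error_t) -> Option<String> {
    if err.is_null() {
        return None;
    }
    let code = rd_kafka_error_code(err);
    let msg = CStr::from_ptr(rd_kafka_error_string(err))
        .to_string_lossy()
        .into_owned();
    let retriable = rd_kafka_error_is_retriable(err) != 0;
    // The caller owns the error object and must destroy it when done.
    rd_kafka_error_destroy(err);
    Some(format!("{:?}: {} (retriable: {})", code, msg, retriable))
}
```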
#[repr(C)]
pub struct rd_kafka_topic_partition_s {
pub topic: *mut ::std::os::raw::c_char,
@@ -922,6 +968,26 @@ extern "C" {
msg_opaque: *mut ::std::os::raw::c_void,
) -> i32;
}
extern "C" {
pub fn rd_kafka_msg_partitioner_fnv1a(
rkt: *const rd_kafka_topic_t,
key: *const ::std::os::raw::c_void,
keylen: usize,
partition_cnt: i32,
rkt_opaque: *mut ::std::os::raw::c_void,
msg_opaque: *mut ::std::os::raw::c_void,
) -> i32;
}
extern "C" {
pub fn rd_kafka_msg_partitioner_fnv1a_random(
rkt: *const rd_kafka_topic_t,
key: *const ::std::os::raw::c_void,
keylen: usize,
partition_cnt: i32,
rkt_opaque: *mut ::std::os::raw::c_void,
msg_opaque: *mut ::std::os::raw::c_void,
) -> i32;
}
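These mirror the FNV-1a partitioners added in librdkafka v1.4.0. Rather than being called directly, they are normally selected through the topic-level `partitioner` configuration property. A hedged sketch, assuming the declarations above are in scope (the `fnv1a` value follows librdkafka's configuration documentation, and the error-buffer size is arbitrary):

```rust
use std::ffi::CString;
use std::os::raw::c_char;

// Illustrative: select the FNV-1a partitioner via topic configuration.
unsafe fn fnv1a_topic_conf() -> *mut rd_kafka_topic_conf_t {
    let topic_conf = rd_kafka_topic_conf_new();
    let name = CString::new("partitioner").unwrap();
    let value = CString::new("fnv1a").unwrap();
    let mut errstr = [0 as c_char; 512];
    let res = rd_kafka_topic_conf_set(
        topic_conf,
        name.as_ptr(),
        value.as_ptr(),
        errstr.as_mut_ptr(),
        errstr.len(),
    );
    assert!(matches!(res, rd_kafka_conf_res_t::RD_KAFKA_CONF_OK));
    topic_conf
}
```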
extern "C" {
pub fn rd_kafka_new(
type_: rd_kafka_type_t,
@@ -1249,6 +1315,33 @@ extern "C" {
partitions: *mut rd_kafka_topic_partition_list_t,
) -> rd_kafka_resp_err_t;
}
extern "C" {
pub fn rd_kafka_consumer_group_metadata(
rk: *mut rd_kafka_t,
) -> *mut rd_kafka_consumer_group_metadata_t;
}
extern "C" {
pub fn rd_kafka_consumer_group_metadata_new(
group_id: *const ::std::os::raw::c_char,
) -> *mut rd_kafka_consumer_group_metadata_t;
}
extern "C" {
pub fn rd_kafka_consumer_group_metadata_destroy(arg1: *mut rd_kafka_consumer_group_metadata_t);
}
extern "C" {
pub fn rd_kafka_consumer_group_metadata_write(
cgmd: *const rd_kafka_consumer_group_metadata_t,
bufferp: *mut *mut ::std::os::raw::c_void,
sizep: *mut usize,
) -> *mut rd_kafka_error_t;
}
extern "C" {
pub fn rd_kafka_consumer_group_metadata_read(
cgmdp: *mut *mut rd_kafka_consumer_group_metadata_t,
buffer: *const ::std::os::raw::c_void,
size: usize,
) -> *mut rd_kafka_error_t;
}
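A hedged sketch of round-tripping group metadata through the new read/write functions, assuming the declarations above are in scope; buffer deallocation and real error handling are intentionally omitted (consult the librdkafka documentation for how the serialized buffer must be freed):

```rust
use std::ffi::CString;
use std::os::raw::c_void;
use std::ptr;

// Illustrative round-trip through the serialization API.
unsafe fn roundtrip_group_metadata() {
    let group_id = CString::new("example-group").unwrap();
    let cgmd = rd_kafka_consumer_group_metadata_new(group_id.as_ptr());

    // Serialize into a buffer allocated by librdkafka.
    let mut buf: *mut c_void = ptr::null_mut();
    let mut len: usize = 0;
    let err = rd_kafka_consumer_group_metadata_write(cgmd, &mut buf, &mut len);
    assert!(err.is_null(), "serialization failed");

    // Deserialize the buffer back into a new metadata object.
    let mut copy: *mut rd_kafka_consumer_group_metadata_t = ptr::null_mut();
    let err = rd_kafka_consumer_group_metadata_read(&mut copy, buf, len);
    assert!(err.is_null(), "deserialization failed");

    rd_kafka_consumer_group_metadata_destroy(copy);
    rd_kafka_consumer_group_metadata_destroy(cgmd);
}
```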
extern "C" {
pub fn rd_kafka_produce(
rkt: *mut rd_kafka_topic_t,
@@ -2138,3 +2231,32 @@ extern "C" {
errstr: *const ::std::os::raw::c_char,
) -> rd_kafka_resp_err_t;
}
extern "C" {
pub fn rd_kafka_init_transactions(
rk: *mut rd_kafka_t,
timeout_ms: ::std::os::raw::c_int,
) -> *mut rd_kafka_error_t;
}
extern "C" {
pub fn rd_kafka_begin_transaction(rk: *mut rd_kafka_t) -> *mut rd_kafka_error_t;
}
extern "C" {
pub fn rd_kafka_send_offsets_to_transaction(
rk: *mut rd_kafka_t,
offsets: *const rd_kafka_topic_partition_list_t,
cgmetadata: *const rd_kafka_consumer_group_metadata_t,
timeout_ms: ::std::os::raw::c_int,
) -> *mut rd_kafka_error_t;
}
extern "C" {
pub fn rd_kafka_commit_transaction(
rk: *mut rd_kafka_t,
timeout_ms: ::std::os::raw::c_int,
) -> *mut rd_kafka_error_t;
}
extern "C" {
pub fn rd_kafka_abort_transaction(
rk: *mut rd_kafka_t,
timeout_ms: ::std::os::raw::c_int,
) -> *mut rd_kafka_error_t;
}
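Put together, these declarations enable librdkafka v1.4.0's transactional producer. A hedged sketch of the overall flow over the raw bindings; the handles are assumed to be created and configured elsewhere (e.g. `transactional.id` on the producer), and the asserts stand in for real error handling, which should inspect and destroy each returned error object as sketched earlier:

```rust
// Illustrative transactional flow over the raw bindings. The transactional
// calls below return *mut rd_kafka_error_t (NULL on success), which the
// caller must check and destroy; the asserts here are a simplification.
unsafe fn produce_transactionally(
    producer: *mut rd_kafka_t,
    consumer: *mut rd_kafka_t,
    offsets: *mut rd_kafka_topic_partition_list_t,
) {
    let timeout_ms = 30_000;

    // One-time initialization of the transactional producer.
    let err = rd_kafka_init_transactions(producer, timeout_ms);
    assert!(err.is_null());

    let err = rd_kafka_begin_transaction(producer);
    assert!(err.is_null());

    // ... produce messages on `producer` here ...

    // Attach the consumer's offsets so that consumed input and produced
    // output commit atomically.
    let cgmd = rd_kafka_consumer_group_metadata(consumer);
    let err = rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, timeout_ms);
    assert!(err.is_null());
    rd_kafka_consumer_group_metadata_destroy(cgmd);

    let err = rd_kafka_commit_transaction(producer, timeout_ms);
    assert!(err.is_null());
}
```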
4 changes: 4 additions & 0 deletions rdkafka-sys/src/helpers.rs
@@ -60,6 +60,9 @@ pub fn rd_kafka_resp_err_t_to_rdkafka_error(err: RDKafkaRespErr) -> RDKafkaError
RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE => GaplessGuarantee,
RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED => PollExceeded,
RD_KAFKA_RESP_ERR__UNKNOWN_BROKER => UnknownBroker,
RD_KAFKA_RESP_ERR__NOT_CONFIGURED => NotConfigured,
RD_KAFKA_RESP_ERR__FENCED => Fenced,
RD_KAFKA_RESP_ERR__APPLICATION => Application,
RD_KAFKA_RESP_ERR__END => End,
RD_KAFKA_RESP_ERR_UNKNOWN => Unknown,
RD_KAFKA_RESP_ERR_NO_ERROR => NoError,
@@ -148,6 +151,7 @@ pub fn rd_kafka_resp_err_t_to_rdkafka_error(err: RDKafkaRespErr) -> RDKafkaError
RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED => MemberIdRequired,
RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE => PreferredLeaderNotAvailable,
RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED => GroupMaxSizeReached,
RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID => FencedInstanceId,
RD_KAFKA_RESP_ERR_END_ALL => EndAll,
}
}
8 changes: 8 additions & 0 deletions rdkafka-sys/src/types.rs
@@ -216,6 +216,12 @@ pub enum RDKafkaError {
PollExceeded = -147,
/// Unknown broker
UnknownBroker = -146,
/// Functionality not configured
NotConfigured,
/// Instance has been fenced
Fenced,
/// Application generated error
Application,
#[doc(hidden)]
End = -100,
/// Unknown broker error
@@ -387,6 +393,8 @@ pub enum RDKafkaError {
PreferredLeaderNotAvailable = 80,
/// Consumer group has reached maximum size
GroupMaxSizeReached = 81,
/// Static consumer fenced by other consumer with same group.instance.id
FencedInstanceId = 82,
#[doc(hidden)]
EndAll,
}
