From b558c13566dd9d5f0d658823dd0fdf1a8150262f Mon Sep 17 00:00:00 2001
From: Pranav Rathi <4427674+pranavrth@users.noreply.github.com>
Date: Sun, 8 Sep 2024 00:54:11 +0530
Subject: [PATCH 1/5] [KIP-848] Added support for testing with new 'consumer'
 group protocol.

* [KIP-848] Added support for testing with new 'consumer' group protocol.
* Build fixes
* updated trivup
* Updated trivup install path
* Fixed failing test
* Style fix
* Added more tests to be run with the new protocol
* Fixed failing tests
* Added Test common for common functionalities
* Enabling SR again
* Style fixes
* Some refactoring
* Added consumer protocol integration tests in semaphore
* Ignoring failing admin tests
* Fix typo
* Fixed failing test case
* Added new fixture for single broker and using this fixture for test_serializer tests
* Build fixes
* Fixed transient test failures for proto
* Fixed another test
* Added Test*Consumer classes instead of functions
* Build issue
* Added common TestUtils
* Using specific commit for trivup
* Removed trivup 0.12.5
* PR comments
* Style check
* Skipping one list offsets assert for Zookeeper
* 1) Moved sleep after result and assert. 2) Added a function to create a topic and wait for propagation.
* Using create_topic_and_wait_propogation instead of create_topic function
* Internally using create_topic in create_topic_and_wait_propogation
* Removed kafka single broker cluster fixture
* Removed unnecessary import time
* Using broker version 3.8.0 for classic protocol and enabled test which was failing in 3.7.0
* Changed fixture scope to session
* Style fixes
---
 .semaphore/semaphore.yml                       | 10 ++-
 tests/common/__init__.py                       | 61 +++++++++++++++
 .../admin/test_basic_operations.py             | 31 +++++---
 .../integration/admin/test_delete_records.py   | 31 ++++----
 .../admin/test_describe_operations.py          | 30 ++++---
 .../admin/test_incremental_alter_configs.py    | 30 ++++---
 tests/integration/admin/test_list_offsets.py   | 12 +--
 .../admin/test_user_scram_credentials.py       | 51 ++++++++----
 tests/integration/cluster_fixture.py           | 29 ++++++-
 tests/integration/conftest.py                  | 32 +++++---
 .../consumer/test_consumer_error.py            |  6 +-
 .../consumer/test_consumer_memberid.py         |  2 +-
 .../test_consumer_topicpartition_metadata.py   |  2 +-
 .../consumer/test_cooperative_rebalance_1.py   | 19 +++--
 .../consumer/test_cooperative_rebalance_2.py   |  2 +-
 .../consumer/test_incremental_assign.py        |  4 +-
 tests/integration/integration_test.py          | 23 +++---
 tests/integration/producer/__init__.py         |  0
 .../integration/producer/test_transactions.py  | 18 +++--
 .../schema_registry/test_avro_serializers.py   |  8 +-
 .../schema_registry/test_json_serializers.py   | 12 +--
 .../schema_registry/test_proto_serializers.py  |  8 +-
 .../serialization/test_serializers.py          |  6 +-
 tests/requirements.txt                         |  2 +-
 tests/soak/soakclient.py                       |  5 +-
 tests/test_Consumer.py                         | 73 +++++++++---------
 tests/test_Producer.py                         |  8 +-
 tests/test_log.py                              | 22 +++---
 tests/test_misc.py                             | 21 ++---
 tests/trivup/trivup-0.12.6.tar.gz              | Bin 0 -> 32862 bytes
 tools/source-package-verification.sh           |  6 +-
 31 files changed, 368 insertions(+), 196 deletions(-)
 create mode 100644 tests/common/__init__.py
 create mode 100644 tests/integration/producer/__init__.py
 create mode 100644 tests/trivup/trivup-0.12.6.tar.gz

diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml
index e2d32b255..0c06a3e0b 100644
--- a/.semaphore/semaphore.yml
+++ b/.semaphore/semaphore.yml
@@ -138,12 +138,20 @@ blocks:
           commands:
             - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password
$DOCKERHUB_APIKEY' jobs: - - name: Build + - name: Build and Tests with 'classic' group protocol + commands: + - sem-version python 3.8 + # use a virtualenv + - python3 -m venv _venv && source _venv/bin/activate + - chmod u+r+x tools/source-package-verification.sh + - tools/source-package-verification.sh + - name: Build and Tests with 'consumer' group protocol commands: - sem-version python 3.8 # use a virtualenv - python3 -m venv _venv && source _venv/bin/activate - chmod u+r+x tools/source-package-verification.sh + - export TEST_CONSUMER_GROUP_PROTOCOL=consumer - tools/source-package-verification.sh - name: "Source package verification with Python 3 (Linux arm64)" dependencies: [] diff --git a/tests/common/__init__.py b/tests/common/__init__.py new file mode 100644 index 000000000..a0d54934f --- /dev/null +++ b/tests/common/__init__.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2024 Confluent Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +from confluent_kafka import Consumer, DeserializingConsumer +from confluent_kafka.avro import AvroConsumer + +_GROUP_PROTOCOL_ENV = 'TEST_CONSUMER_GROUP_PROTOCOL' +_TRIVUP_CLUSTER_TYPE_ENV = 'TEST_TRIVUP_CLUSTER_TYPE' + + +def _update_conf_group_protocol(conf=None): + if conf is not None and 'group.id' in conf and TestUtils.use_group_protocol_consumer(): + conf['group.protocol'] = 'consumer' + + +def _trivup_cluster_type_kraft(): + return _TRIVUP_CLUSTER_TYPE_ENV in os.environ and os.environ[_TRIVUP_CLUSTER_TYPE_ENV] == 'kraft' + + +class TestUtils: + @staticmethod + def use_kraft(): + return TestUtils.use_group_protocol_consumer() or _trivup_cluster_type_kraft() + + @staticmethod + def use_group_protocol_consumer(): + return _GROUP_PROTOCOL_ENV in os.environ and os.environ[_GROUP_PROTOCOL_ENV] == 'consumer' + + +class TestConsumer(Consumer): + def __init__(self, conf=None, **kwargs): + _update_conf_group_protocol(conf) + super(TestConsumer, self).__init__(conf, **kwargs) + + +class TestDeserializingConsumer(DeserializingConsumer): + def __init__(self, conf=None, **kwargs): + _update_conf_group_protocol(conf) + super(TestDeserializingConsumer, self).__init__(conf, **kwargs) + + +class TestAvroConsumer(AvroConsumer): + def __init__(self, conf=None, **kwargs): + _update_conf_group_protocol(conf) + super(TestAvroConsumer, self).__init__(conf, **kwargs) diff --git a/tests/integration/admin/test_basic_operations.py b/tests/integration/admin/test_basic_operations.py index 820fd5228..00bd2c045 100644 --- a/tests/integration/admin/test_basic_operations.py +++ b/tests/integration/admin/test_basic_operations.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import confluent_kafka import struct import time -from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition, ConsumerGroupState + +from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition, ConsumerGroupState, KafkaError from confluent_kafka.admin import (NewPartitions, ConfigResource, AclBinding, AclBindingFilter, ResourceType, ResourcePatternType, AclOperation, AclPermissionType) @@ -58,6 +58,8 @@ def verify_admin_acls(admin_client, for acl_binding, f in fs.items(): f.result() # trigger exception if there was an error + time.sleep(1) + acl_binding_filter1 = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.ANY, None, None, AclOperation.ANY, AclPermissionType.ANY) acl_binding_filter2 = AclBindingFilter(ResourceType.ANY, None, ResourcePatternType.PREFIXED, @@ -83,6 +85,8 @@ def verify_admin_acls(admin_client, "Deleted ACL bindings don't match, actual {} expected {}".format(deleted_acl_bindings, expected_acl_bindings) + time.sleep(1) + # # Delete the ACLs with TOPIC and GROUP # @@ -94,6 +98,9 @@ def verify_admin_acls(admin_client, assert deleted_acl_bindings == expected, \ "Deleted ACL bindings don't match, actual {} expected {}".format(deleted_acl_bindings, expected) + + time.sleep(1) + # # All the ACLs should have been deleted # @@ -201,14 +208,14 @@ def test_basic_operations(kafka_cluster): # Second iteration: create topic. # for validate in (True, False): - our_topic = kafka_cluster.create_topic(topic_prefix, - { - "num_partitions": num_partitions, - "config": topic_config, - "replication_factor": 1, - }, - validate_only=validate - ) + our_topic = kafka_cluster.create_topic_and_wait_propogation(topic_prefix, + { + "num_partitions": num_partitions, + "config": topic_config, + "replication_factor": 1, + }, + validate_only=validate + ) admin_client = kafka_cluster.admin() @@ -270,7 +277,7 @@ def consume_messages(group_id, num_messages=None): print('Read all the required messages: exiting') break except ConsumeError as e: - if msg is not None and e.code == confluent_kafka.KafkaError._PARTITION_EOF: + if msg is not None and e.code == KafkaError._PARTITION_EOF: print('Reached end of %s [%d] at offset %d' % ( msg.topic(), msg.partition(), msg.offset())) eof_reached[(msg.topic(), msg.partition())] = True @@ -345,6 +352,8 @@ def verify_config(expconfig, configs): fs = admin_client.alter_configs([resource]) fs[resource].result() # will raise exception on failure + time.sleep(1) + # # Read the config back again and verify. 
# diff --git a/tests/integration/admin/test_delete_records.py b/tests/integration/admin/test_delete_records.py index fdbaca749..2c58e36c4 100644 --- a/tests/integration/admin/test_delete_records.py +++ b/tests/integration/admin/test_delete_records.py @@ -25,11 +25,11 @@ def test_delete_records(kafka_cluster): admin_client = kafka_cluster.admin() # Create a topic with a single partition - topic = kafka_cluster.create_topic("test-del-records", - { - "num_partitions": 1, - "replication_factor": 1, - }) + topic = kafka_cluster.create_topic_and_wait_propogation("test-del-records", + { + "num_partitions": 1, + "replication_factor": 1, + }) # Create Producer instance p = kafka_cluster.producer() @@ -73,16 +73,17 @@ def test_delete_records_multiple_topics_and_partitions(kafka_cluster): admin_client = kafka_cluster.admin() num_partitions = 3 # Create two topics with a single partition - topic = kafka_cluster.create_topic("test-del-records", - { - "num_partitions": num_partitions, - "replication_factor": 1, - }) - topic2 = kafka_cluster.create_topic("test-del-records2", - { - "num_partitions": num_partitions, - "replication_factor": 1, - }) + topic = kafka_cluster.create_topic_and_wait_propogation("test-del-records", + { + "num_partitions": num_partitions, + "replication_factor": 1, + }) + topic2 = kafka_cluster.create_topic_and_wait_propogation("test-del-records2", + { + "num_partitions": num_partitions, + "replication_factor": 1, + }) + topics = [topic, topic2] partitions = list(range(num_partitions)) # Create Producer instance diff --git a/tests/integration/admin/test_describe_operations.py b/tests/integration/admin/test_describe_operations.py index ef5d94987..3bfbfd88a 100644 --- a/tests/integration/admin/test_describe_operations.py +++ b/tests/integration/admin/test_describe_operations.py @@ -13,12 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import time
 import pytest
+
 from confluent_kafka.admin import (AclBinding, AclBindingFilter, ResourceType,
                                    ResourcePatternType, AclOperation, AclPermissionType)
 from confluent_kafka.error import ConsumeError
 from confluent_kafka import ConsumerGroupState, TopicCollection
 
+from tests.common import TestUtils
+
 topic_prefix = "test-topic"
 
 
@@ -82,10 +86,12 @@ def perform_admin_operation_sync(operation, *arg, **kwargs):
 
 def create_acls(admin_client, acl_bindings):
     perform_admin_operation_sync(admin_client.create_acls, acl_bindings)
+    time.sleep(1)
 
 
 def delete_acls(admin_client, acl_binding_filters):
     perform_admin_operation_sync(admin_client.delete_acls, acl_binding_filters)
+    time.sleep(1)
 
 
 def verify_provided_describe_for_authorized_operations(
@@ -115,6 +121,7 @@ def verify_provided_describe_for_authorized_operations(
     acl_binding = AclBinding(restype, resname, ResourcePatternType.LITERAL,
                              "User:sasl_user", "*", operation_to_allow, AclPermissionType.ALLOW)
     create_acls(admin_client, [acl_binding])
+    time.sleep(1)
 
     # Check with updated authorized operations
     desc = perform_admin_operation_sync(describe_fn, *arg, **kwargs)
@@ -126,6 +133,7 @@ def verify_provided_describe_for_authorized_operations(
     acl_binding_filter = AclBindingFilter(restype, resname, ResourcePatternType.ANY,
                                           None, None, AclOperation.ANY, AclPermissionType.ANY)
     delete_acls(admin_client, [acl_binding_filter])
+    time.sleep(1)
 
     return desc
 
@@ -196,20 +204,24 @@ def test_describe_operations(sasl_cluster):
 
     # Create Topic
     topic_config = {"compression.type": "gzip"}
-    our_topic = sasl_cluster.create_topic(topic_prefix,
-                                          {
-                                              "num_partitions": 1,
-                                              "config": topic_config,
-                                              "replication_factor": 1,
-                                          },
-                                          validate_only=False
-                                          )
+    our_topic = sasl_cluster.create_topic_and_wait_propogation(topic_prefix,
+                                                               {
+                                                                   "num_partitions": 1,
+                                                                   "config": topic_config,
+                                                                   "replication_factor": 1,
+                                                               },
+                                                               validate_only=False
+                                                               )
 
     # Verify Authorized Operations in Describe Topics
     verify_describe_topics(admin_client, our_topic)
 
     # Verify Authorized Operations in Describe Groups
-    verify_describe_groups(sasl_cluster, admin_client, our_topic)
+    # Skip this test if using group protocol `consumer`,
+    # as there is a new RPC for describe_groups() in the
+    # group protocol `consumer` case.
+    if not TestUtils.use_group_protocol_consumer():
+        verify_describe_groups(sasl_cluster, admin_client, our_topic)
 
     # Delete Topic
     perform_admin_operation_sync(admin_client.delete_topics, [our_topic],
                                  operation_timeout=0, request_timeout=10)
diff --git a/tests/integration/admin/test_incremental_alter_configs.py b/tests/integration/admin/test_incremental_alter_configs.py
index ad19a1164..24428e6f1 100644
--- a/tests/integration/admin/test_incremental_alter_configs.py
+++ b/tests/integration/admin/test_incremental_alter_configs.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import time + from confluent_kafka.admin import ConfigResource, \ ConfigEntry, ResourceType, \ AlterConfigOpType @@ -52,18 +54,18 @@ def test_incremental_alter_configs(kafka_cluster): num_partitions = 2 topic_config = {"compression.type": "gzip"} - our_topic = kafka_cluster.create_topic(topic_prefix, - { - "num_partitions": num_partitions, - "config": topic_config, - "replication_factor": 1, - }) - our_topic2 = kafka_cluster.create_topic(topic_prefix2, - { - "num_partitions": num_partitions, - "config": topic_config, - "replication_factor": 1, - }) + our_topic = kafka_cluster.create_topic_and_wait_propogation(topic_prefix, + { + "num_partitions": num_partitions, + "config": topic_config, + "replication_factor": 1, + }) + our_topic2 = kafka_cluster.create_topic_and_wait_propogation(topic_prefix2, + { + "num_partitions": num_partitions, + "config": topic_config, + "replication_factor": 1, + }) admin_client = kafka_cluster.admin() @@ -103,6 +105,8 @@ def test_incremental_alter_configs(kafka_cluster): assert_operation_succeeded(fs, 2) + time.sleep(1) + # # Get current topic config # @@ -134,6 +138,8 @@ def test_incremental_alter_configs(kafka_cluster): assert_operation_succeeded(fs, 1) + time.sleep(1) + # # Get current topic config # diff --git a/tests/integration/admin/test_list_offsets.py b/tests/integration/admin/test_list_offsets.py index 6a2e0a46a..f1448b267 100644 --- a/tests/integration/admin/test_list_offsets.py +++ b/tests/integration/admin/test_list_offsets.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from confluent_kafka.admin import ListOffsetsResultInfo, OffsetSpec from confluent_kafka import TopicPartition, IsolationLevel +from confluent_kafka.admin import ListOffsetsResultInfo, OffsetSpec def test_list_offsets(kafka_cluster): @@ -27,11 +27,11 @@ def test_list_offsets(kafka_cluster): admin_client = kafka_cluster.admin() # Create a topic with a single partition - topic = kafka_cluster.create_topic("test-topic-verify-list-offsets", - { - "num_partitions": 1, - "replication_factor": 1, - }) + topic = kafka_cluster.create_topic_and_wait_propogation("test-topic-verify-list-offsets", + { + "num_partitions": 1, + "replication_factor": 1, + }) # Create Producer instance p = kafka_cluster.producer() diff --git a/tests/integration/admin/test_user_scram_credentials.py b/tests/integration/admin/test_user_scram_credentials.py index 21c15bc07..6324ea9d3 100644 --- a/tests/integration/admin/test_user_scram_credentials.py +++ b/tests/integration/admin/test_user_scram_credentials.py @@ -13,13 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import pytest +import time import concurrent +import pytest + from confluent_kafka.admin import UserScramCredentialsDescription, UserScramCredentialUpsertion, \ - UserScramCredentialDeletion, ScramCredentialInfo, \ - ScramMechanism + UserScramCredentialDeletion, ScramCredentialInfo, \ + ScramMechanism from confluent_kafka.error import KafkaException, KafkaError +from tests.common import TestUtils + def test_user_scram_credentials(kafka_cluster): """ @@ -51,31 +55,42 @@ def test_user_scram_credentials(kafka_cluster): result = fut.result() assert result is None + time.sleep(1) + # Try upsertion for newuser,SCRAM_SHA_256 and add newuser,SCRAM_SHA_512 - futmap = admin_client.alter_user_scram_credentials([UserScramCredentialUpsertion( - newuser, - ScramCredentialInfo( - mechanism, iterations), - password, salt), - UserScramCredentialUpsertion( - newuser, - ScramCredentialInfo( - ScramMechanism.SCRAM_SHA_512, 10000), - password) - ]) + request = [UserScramCredentialUpsertion(newuser, + ScramCredentialInfo( + mechanism, iterations), + password, salt), + UserScramCredentialUpsertion(newuser, + ScramCredentialInfo( + ScramMechanism.SCRAM_SHA_512, 10000), + password)] + + if TestUtils.use_kraft(): + futmap = admin_client.alter_user_scram_credentials([request[0]]) + result = futmap[newuser].result() + assert result is None + futmap = admin_client.alter_user_scram_credentials([request[1]]) + else: + futmap = admin_client.alter_user_scram_credentials(request) fut = futmap[newuser] result = fut.result() assert result is None + time.sleep(1) + # Delete newuser,SCRAM_SHA_512 futmap = admin_client.alter_user_scram_credentials([UserScramCredentialDeletion( - newuser, - ScramMechanism.SCRAM_SHA_512) - ]) + newuser, + ScramMechanism.SCRAM_SHA_512) + ]) fut = futmap[newuser] result = fut.result() assert result is None + time.sleep(1) + # Describe all users for args in [[], [None]]: f = admin_client.describe_user_scram_credentials(*args) @@ -108,6 +123,8 @@ def test_user_scram_credentials(kafka_cluster): result = fut.result() assert result is None + time.sleep(1) + # newuser isn't found futmap = admin_client.describe_user_scram_credentials([newuser]) assert isinstance(futmap, dict) diff --git a/tests/integration/cluster_fixture.py b/tests/integration/cluster_fixture.py index e23a0547e..b76b9892e 100644 --- a/tests/integration/cluster_fixture.py +++ b/tests/integration/cluster_fixture.py @@ -16,15 +16,17 @@ # limitations under the License. 
 #
+import time
 from uuid import uuid1
 
 from trivup.clusters.KafkaCluster import KafkaCluster
 
-from confluent_kafka import Consumer, Producer, DeserializingConsumer, \
-    SerializingProducer
+from confluent_kafka import Producer, SerializingProducer
 from confluent_kafka.admin import AdminClient, NewTopic
 from confluent_kafka.schema_registry.schema_registry_client import SchemaRegistryClient
 
+from tests.common import TestDeserializingConsumer, TestConsumer
+
 
 class KafkaClusterFixture(object):
     __slots__ = ['_cluster', '_admin', '_producer']
@@ -105,7 +107,7 @@ def cimpl_consumer(self, conf=None):
         if conf is not None:
             consumer_conf.update(conf)
 
-        return Consumer(consumer_conf)
+        return TestConsumer(consumer_conf)
 
     def consumer(self, conf=None, key_deserializer=None, value_deserializer=None):
         """
@@ -138,7 +140,7 @@ def consumer(self, conf=None, key_deserializer=None, value_deserializer=None):
         if value_deserializer is not None:
             consumer_conf['value.deserializer'] = value_deserializer
 
-        return DeserializingConsumer(consumer_conf)
+        return TestDeserializingConsumer(consumer_conf)
 
     def admin(self, conf=None):
         if conf:
@@ -169,6 +171,25 @@ def create_topic(self, prefix, conf=None, **create_topic_kwargs):
         future_topic.get(name).result()
         return name
 
+    def create_topic_and_wait_propogation(self, prefix, conf=None, **create_topic_kwargs):
+        """
+        Creates a new topic with this cluster. Waits for the topic to be propagated to all brokers.
+
+        :param str prefix: topic name
+        :param dict conf: additions/overrides to topic configuration.
+        :returns: The topic's name
+        :rtype: str
+        """
+        name = self.create_topic(prefix, conf, **create_topic_kwargs)
+
+        # Wait for topic propagation across all the brokers.
+        # FIXME: find a better way to wait for topic creation
+        # for all languages, given the option to send a request to
+        # a specific broker isn't present everywhere.
+        time.sleep(1)
+
+        return name
+
     def seed_topic(self, topic, value_source=None, key_source=None, header_source=None):
         """
         Populates a topic with data asynchronously.
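
A minimal sketch (not part of this patch) of how the new helper and the Test*
wrappers compose in an integration test. It assumes the `kafka_cluster` fixture
from tests/integration/conftest.py below; the topic prefix and group.id here are
hypothetical:

    def test_sketch(kafka_cluster):
        # create_topic_and_wait_propogation() calls create_topic() and then
        # sleeps 1 second so the new topic has propagated to all brokers.
        topic = kafka_cluster.create_topic_and_wait_propogation(
            "sketch-topic", {"num_partitions": 1, "replication_factor": 1})

        # cimpl_consumer() now returns tests.common.TestConsumer, which adds
        # 'group.protocol': 'consumer' to any config carrying a 'group.id'
        # whenever TEST_CONSUMER_GROUP_PROTOCOL=consumer is set.
        consumer = kafka_cluster.cimpl_consumer({"group.id": "sketch-group"})
        consumer.subscribe([topic])
        consumer.poll(10)
        consumer.close()
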
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 3945a2954..e224c59bb 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -17,35 +17,47 @@ # import os - import pytest +from tests.common import TestUtils from tests.integration.cluster_fixture import TrivupFixture from tests.integration.cluster_fixture import ByoFixture work_dir = os.path.dirname(os.path.realpath(__file__)) +def _broker_conf(): + broker_conf = ['transaction.state.log.replication.factor=1', + 'transaction.state.log.min.isr=1'] + if TestUtils.use_group_protocol_consumer(): + broker_conf.append('group.coordinator.rebalance.protocols=classic,consumer') + return broker_conf + + +def _broker_version(): + return 'trunk@f6c9feea76d01a46319b0ca602d70aa855057b07' if TestUtils.use_group_protocol_consumer() else '3.8.0' + + def create_trivup_cluster(conf={}): trivup_fixture_conf = {'with_sr': True, 'debug': True, - 'cp_version': '7.4.0', - 'version': '3.4.0', - 'broker_conf': ['transaction.state.log.replication.factor=1', - 'transaction.state.log.min.isr=1']} + 'cp_version': '7.6.0', + 'kraft': TestUtils.use_kraft(), + 'version': _broker_version(), + 'broker_conf': _broker_conf()} trivup_fixture_conf.update(conf) return TrivupFixture(trivup_fixture_conf) def create_sasl_cluster(conf={}): trivup_fixture_conf = {'with_sr': False, - 'version': '3.4.0', + 'version': _broker_version(), 'sasl_mechanism': "PLAIN", + 'kraft': TestUtils.use_kraft(), 'sasl_users': 'sasl_user=sasl_user', 'debug': True, 'cp_version': 'latest', - 'broker_conf': ['transaction.state.log.replication.factor=1', - 'transaction.state.log.min.isr=1']} + 'broker_conf': _broker_conf()} trivup_fixture_conf.update(conf) return TrivupFixture(trivup_fixture_conf) @@ -106,13 +118,13 @@ def sasl_cluster_fixture( cluster.stop() -@pytest.fixture(scope="package") +@pytest.fixture(scope="session") def kafka_cluster(): for fixture in kafka_cluster_fixture(): yield fixture -@pytest.fixture(scope="package") +@pytest.fixture(scope="session") def sasl_cluster(request): for fixture in sasl_cluster_fixture(request.param): yield fixture diff --git a/tests/integration/consumer/test_consumer_error.py b/tests/integration/consumer/test_consumer_error.py index 4b75ad7b8..0ca1902f4 100644 --- a/tests/integration/consumer/test_consumer_error.py +++ b/tests/integration/consumer/test_consumer_error.py @@ -28,7 +28,7 @@ def test_consume_error(kafka_cluster): Tests to ensure librdkafka errors are propagated as an instance of ConsumeError. """ - topic = kafka_cluster.create_topic("test_commit_transaction") + topic = kafka_cluster.create_topic_and_wait_propogation("test_commit_transaction") consumer_conf = {'group.id': 'pytest', 'enable.partition.eof': True} producer = kafka_cluster.producer() @@ -50,7 +50,7 @@ def test_consume_error_commit(kafka_cluster): """ Tests to ensure that we handle messages with errors when commiting. """ - topic = kafka_cluster.create_topic("test_commit_transaction") + topic = kafka_cluster.create_topic_and_wait_propogation("test_commit_transaction") consumer_conf = {'group.id': 'pytest', 'session.timeout.ms': 100} @@ -74,7 +74,7 @@ def test_consume_error_store_offsets(kafka_cluster): """ Tests to ensure that we handle messages with errors when storing offsets. 
""" - topic = kafka_cluster.create_topic("test_commit_transaction") + topic = kafka_cluster.create_topic_and_wait_propogation("test_commit_transaction") consumer_conf = {'group.id': 'pytest', 'session.timeout.ms': 100, 'enable.auto.offset.store': True, diff --git a/tests/integration/consumer/test_consumer_memberid.py b/tests/integration/consumer/test_consumer_memberid.py index cf002707d..50a686962 100644 --- a/tests/integration/consumer/test_consumer_memberid.py +++ b/tests/integration/consumer/test_consumer_memberid.py @@ -27,7 +27,7 @@ def test_consumer_memberid(kafka_cluster): topic = "testmemberid" - kafka_cluster.create_topic(topic) + kafka_cluster.create_topic_and_wait_propogation(topic) consumer = kafka_cluster.consumer(consumer_conf) diff --git a/tests/integration/consumer/test_consumer_topicpartition_metadata.py b/tests/integration/consumer/test_consumer_topicpartition_metadata.py index 4c01c1df7..0ac2b4466 100644 --- a/tests/integration/consumer/test_consumer_topicpartition_metadata.py +++ b/tests/integration/consumer/test_consumer_topicpartition_metadata.py @@ -30,7 +30,7 @@ def commit_and_check(consumer, topic, metadata): def test_consumer_topicpartition_metadata(kafka_cluster): - topic = kafka_cluster.create_topic("test_topicpartition") + topic = kafka_cluster.create_topic_and_wait_propogation("test_topicpartition") consumer_conf = {'group.id': 'pytest'} c = kafka_cluster.consumer(consumer_conf) diff --git a/tests/integration/consumer/test_cooperative_rebalance_1.py b/tests/integration/consumer/test_cooperative_rebalance_1.py index d9a94a85f..2645c7028 100644 --- a/tests/integration/consumer/test_cooperative_rebalance_1.py +++ b/tests/integration/consumer/test_cooperative_rebalance_1.py @@ -44,7 +44,13 @@ def __init__(self): def on_assign(self, consumer, partitions): self.assign_count += 1 - assert 1 == len(partitions) + if self.assign_count == 3: + # Assigning both partitions again after assignment lost + # due to max poll interval timeout exceeded + assert 2 == len(partitions) + else: + # Incremental assign cases + assert 1 == len(partitions) def on_revoke(self, consumer, partitions): self.revoke_count += 1 @@ -54,8 +60,8 @@ def on_lost(self, consumer, partitions): reb = RebalanceState() - topic1 = kafka_cluster.create_topic("topic1") - topic2 = kafka_cluster.create_topic("topic2") + topic1 = kafka_cluster.create_topic_and_wait_propogation("topic1") + topic2 = kafka_cluster.create_topic_and_wait_propogation("topic2") consumer = kafka_cluster.consumer(consumer_conf) @@ -97,10 +103,11 @@ def on_lost(self, consumer, partitions): assert e.value.args[0].code() == KafkaError._MAX_POLL_EXCEEDED # And poll again to trigger rebalance callbacks - msg4 = consumer.poll(1) - assert msg4 is None + # It will trigger on_lost and then on_assign during rejoin + msg4 = consumer.poll(10) + assert msg4 is not None # Reading messages again after rejoin - assert 2 == reb.assign_count + assert 3 == reb.assign_count assert 1 == reb.lost_count assert 0 == reb.revoke_count diff --git a/tests/integration/consumer/test_cooperative_rebalance_2.py b/tests/integration/consumer/test_cooperative_rebalance_2.py index 8437e7c1f..f3a6df91b 100644 --- a/tests/integration/consumer/test_cooperative_rebalance_2.py +++ b/tests/integration/consumer/test_cooperative_rebalance_2.py @@ -52,7 +52,7 @@ def on_revoke(self, consumer, partitions): reb = RebalanceState() - topic1 = kafka_cluster.create_topic("topic1") + topic1 = kafka_cluster.create_topic_and_wait_propogation("topic1") consumer = 
kafka_cluster.consumer(consumer_conf) diff --git a/tests/integration/consumer/test_incremental_assign.py b/tests/integration/consumer/test_incremental_assign.py index 9703ea82f..e2d9e8941 100644 --- a/tests/integration/consumer/test_incremental_assign.py +++ b/tests/integration/consumer/test_incremental_assign.py @@ -30,8 +30,8 @@ def test_incremental_assign(kafka_cluster): 'enable.auto.commit': 'false', 'auto.offset.reset': 'error'} - topic1 = kafka_cluster.create_topic("topic1") - topic2 = kafka_cluster.create_topic("topic2") + topic1 = kafka_cluster.create_topic_and_wait_propogation("topic1") + topic2 = kafka_cluster.create_topic_and_wait_propogation("topic2") kafka_cluster.seed_topic(topic1, value_source=[b'a']) kafka_cluster.seed_topic(topic2, value_source=[b'b']) diff --git a/tests/integration/integration_test.py b/tests/integration/integration_test.py index e4ae6deca..bc0efa1a7 100755 --- a/tests/integration/integration_test.py +++ b/tests/integration/integration_test.py @@ -20,7 +20,6 @@ """ Test script for confluent_kafka module """ -import confluent_kafka import os import time import uuid @@ -30,6 +29,10 @@ import struct import re +import confluent_kafka + +from tests.common import TestConsumer, TestAvroConsumer + try: # Memory tracker from pympler import tracker @@ -373,7 +376,7 @@ def verify_consumer(): 'enable.partition.eof': True} # Create consumer - c = confluent_kafka.Consumer(conf) + c = TestConsumer(conf) def print_wmark(consumer, topic_parts): # Verify #294: get_watermark_offsets() should not fail on the first call @@ -483,7 +486,7 @@ def print_wmark(consumer, topic_parts): c.close() # Start a new client and get the committed offsets - c = confluent_kafka.Consumer(conf) + c = TestConsumer(conf) offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3)))) for tp in offsets: print(tp) @@ -500,7 +503,7 @@ def verify_consumer_performance(): 'error_cb': error_cb, 'auto.offset.reset': 'earliest'} - c = confluent_kafka.Consumer(conf) + c = TestConsumer(conf) def my_on_assign(consumer, partitions): print('on_assign:', len(partitions), 'partitions:') @@ -608,7 +611,7 @@ def verify_batch_consumer(): 'auto.offset.reset': 'earliest'} # Create consumer - c = confluent_kafka.Consumer(conf) + c = TestConsumer(conf) # Subscribe to a list of topics c.subscribe([topic]) @@ -665,7 +668,7 @@ def verify_batch_consumer(): c.close() # Start a new client and get the committed offsets - c = confluent_kafka.Consumer(conf) + c = TestConsumer(conf) offsets = c.committed(list(map(lambda p: confluent_kafka.TopicPartition(topic, p), range(0, 3)))) for tp in offsets: print(tp) @@ -682,7 +685,7 @@ def verify_batch_consumer_performance(): 'error_cb': error_cb, 'auto.offset.reset': 'earliest'} - c = confluent_kafka.Consumer(conf) + c = TestConsumer(conf) def my_on_assign(consumer, partitions): print('on_assign:', len(partitions), 'partitions:') @@ -877,7 +880,7 @@ def run_avro_loop(producer_conf, consumer_conf): p.produce(**combo) p.flush() - c = avro.AvroConsumer(consumer_conf) + c = TestAvroConsumer(consumer_conf) c.subscribe([(t['topic']) for t in combinations]) msgcount = 0 @@ -989,7 +992,7 @@ def stats_cb(stats_json_str): 'statistics.interval.ms': 200, 'auto.offset.reset': 'earliest'} - c = confluent_kafka.Consumer(conf) + c = TestConsumer(conf) c.subscribe([topic]) max_msgcnt = 1000000 @@ -1116,7 +1119,7 @@ def verify_avro_explicit_read_schema(): p.produce(topic=avro_topic, **combo) p.flush() - c = avro.AvroConsumer(consumer_conf, reader_key_schema=reader_schema, 
reader_value_schema=reader_schema) + c = TestAvroConsumer(consumer_conf, reader_key_schema=reader_schema, reader_value_schema=reader_schema) c.subscribe([avro_topic]) msgcount = 0 diff --git a/tests/integration/producer/__init__.py b/tests/integration/producer/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/integration/producer/test_transactions.py b/tests/integration/producer/test_transactions.py index b43f81437..ff83fea70 100644 --- a/tests/integration/producer/test_transactions.py +++ b/tests/integration/producer/test_transactions.py @@ -19,7 +19,9 @@ import sys from uuid import uuid1 -from confluent_kafka import Consumer, KafkaError +from confluent_kafka import KafkaError + +from tests.common import TestConsumer def called_by(): @@ -49,7 +51,7 @@ def delivery_err(err, msg): def test_commit_transaction(kafka_cluster): - output_topic = kafka_cluster.create_topic("output_topic") + output_topic = kafka_cluster.create_topic_and_wait_propogation("output_topic") producer = kafka_cluster.producer({ 'transactional.id': 'example_transactional_id', @@ -64,7 +66,7 @@ def test_commit_transaction(kafka_cluster): def test_abort_transaction(kafka_cluster): - output_topic = kafka_cluster.create_topic("output_topic") + output_topic = kafka_cluster.create_topic_and_wait_propogation("output_topic") producer = kafka_cluster.producer({ 'transactional.id': 'example_transactional_id', @@ -79,7 +81,7 @@ def test_abort_transaction(kafka_cluster): def test_abort_retry_commit_transaction(kafka_cluster): - output_topic = kafka_cluster.create_topic("output_topic") + output_topic = kafka_cluster.create_topic_and_wait_propogation("output_topic") producer = kafka_cluster.producer({ 'transactional.id': 'example_transactional_id', @@ -97,8 +99,8 @@ def test_abort_retry_commit_transaction(kafka_cluster): def test_send_offsets_committed_transaction(kafka_cluster): - input_topic = kafka_cluster.create_topic("input_topic") - output_topic = kafka_cluster.create_topic("output_topic") + input_topic = kafka_cluster.create_topic_and_wait_propogation("input_topic") + output_topic = kafka_cluster.create_topic_and_wait_propogation("output_topic") error_cb = prefixed_error_cb('test_send_offsets_committed_transaction') producer = kafka_cluster.producer({ 'client.id': 'producer1', @@ -114,7 +116,7 @@ def test_send_offsets_committed_transaction(kafka_cluster): 'error_cb': error_cb } consumer_conf.update(kafka_cluster.client_conf()) - consumer = Consumer(consumer_conf) + consumer = TestConsumer(consumer_conf) kafka_cluster.seed_topic(input_topic) consumer.subscribe([input_topic]) @@ -204,7 +206,7 @@ def consume_committed(conf, topic): 'error_cb': prefixed_error_cb(called_by()), } consumer_conf.update(conf) - consumer = Consumer(consumer_conf) + consumer = TestConsumer(consumer_conf) consumer.subscribe([topic]) msg_cnt = read_all_msgs(consumer) diff --git a/tests/integration/schema_registry/test_avro_serializers.py b/tests/integration/schema_registry/test_avro_serializers.py index 882af40db..4140ad600 100644 --- a/tests/integration/schema_registry/test_avro_serializers.py +++ b/tests/integration/schema_registry/test_avro_serializers.py @@ -151,7 +151,7 @@ def _references_test_common(kafka_cluster, awarded_user, serializer_schema, dese Args: kafka_cluster (KafkaClusterFixture): cluster fixture """ - topic = kafka_cluster.create_topic("reference-avro") + topic = kafka_cluster.create_topic_and_wait_propogation("reference-avro") sr = kafka_cluster.schema_registry() value_serializer = AvroSerializer(sr, 
serializer_schema, @@ -207,7 +207,7 @@ def test_avro_record_serialization(kafka_cluster, load_file, avsc, data, record_ data (object): data to be serialized """ - topic = kafka_cluster.create_topic("serialization-avro") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-avro") sr = kafka_cluster.schema_registry() schema_str = load_file(avsc) @@ -251,7 +251,7 @@ def test_delivery_report_serialization(kafka_cluster, load_file, avsc, data, rec data (object): data to be serialized """ - topic = kafka_cluster.create_topic("serialization-avro-dr") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-avro-dr") sr = kafka_cluster.schema_registry() schema_str = load_file(avsc) @@ -298,7 +298,7 @@ def test_avro_record_serialization_custom(kafka_cluster): kafka_cluster (KafkaClusterFixture): cluster fixture """ - topic = kafka_cluster.create_topic("serialization-avro") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-avro") sr = kafka_cluster.schema_registry() user = User('Bowie', 47, 'purple') diff --git a/tests/integration/schema_registry/test_json_serializers.py b/tests/integration/schema_registry/test_json_serializers.py index 3a60598d4..69b2b0339 100644 --- a/tests/integration/schema_registry/test_json_serializers.py +++ b/tests/integration/schema_registry/test_json_serializers.py @@ -257,7 +257,7 @@ def test_json_record_serialization(kafka_cluster, load_file): load_file (callable(str)): JSON Schema file reader """ - topic = kafka_cluster.create_topic("serialization-json") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-json") sr = kafka_cluster.schema_registry() schema_str = load_file("product.json") @@ -305,7 +305,7 @@ def test_json_record_serialization_incompatible(kafka_cluster, load_file): load_file (callable(str)): JSON Schema file reader """ - topic = kafka_cluster.create_topic("serialization-json") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-json") sr = kafka_cluster.schema_registry() schema_str = load_file("product.json") @@ -350,7 +350,7 @@ def test_json_record_serialization_custom(kafka_cluster, load_file): load_file (callable(str)): JSON Schema file reader """ - topic = kafka_cluster.create_topic("serialization-json") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-json") sr = kafka_cluster.schema_registry() schema_str = load_file("product.json") @@ -394,7 +394,7 @@ def test_json_record_deserialization_mismatch(kafka_cluster, load_file): load_file (callable(str)): JSON Schema file reader """ - topic = kafka_cluster.create_topic("serialization-json") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-json") sr = kafka_cluster.schema_registry() schema_str = load_file("contractor.json") @@ -435,7 +435,7 @@ def _register_referenced_schemas(sr, load_file): def test_json_reference(kafka_cluster, load_file): - topic = kafka_cluster.create_topic("serialization-json") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-json") sr = kafka_cluster.schema_registry() product = {"productId": 1, @@ -474,7 +474,7 @@ def test_json_reference(kafka_cluster, load_file): def test_json_reference_custom(kafka_cluster, load_file): - topic = kafka_cluster.create_topic("serialization-json") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-json") sr = kafka_cluster.schema_registry() product = _TestProduct(product_id=1, diff --git a/tests/integration/schema_registry/test_proto_serializers.py 
b/tests/integration/schema_registry/test_proto_serializers.py index 621beac43..16de4ea6b 100644 --- a/tests/integration/schema_registry/test_proto_serializers.py +++ b/tests/integration/schema_registry/test_proto_serializers.py @@ -52,7 +52,7 @@ def test_protobuf_message_serialization(kafka_cluster, pb2, data): Validates that we get the same message back that we put in. """ - topic = kafka_cluster.create_topic("serialization-proto") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-proto") sr = kafka_cluster.schema_registry() value_serializer = ProtobufSerializer(pb2, sr, {'use.deprecated.format': False}) @@ -85,7 +85,7 @@ def test_protobuf_reference_registration(kafka_cluster, pb2, expected_refs): """ sr = kafka_cluster.schema_registry() - topic = kafka_cluster.create_topic("serialization-proto-refs") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-proto-refs") serializer = ProtobufSerializer(pb2, sr, {'use.deprecated.format': False}) producer = kafka_cluster.producer(key_serializer=serializer) @@ -106,7 +106,7 @@ def test_protobuf_serializer_type_mismatch(kafka_cluster): pb2_2 = NestedTestProto_pb2.NestedMessage sr = kafka_cluster.schema_registry() - topic = kafka_cluster.create_topic("serialization-proto-refs") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-proto-refs") serializer = ProtobufSerializer(pb2_1, sr, {'use.deprecated.format': False}) producer = kafka_cluster.producer(key_serializer=serializer) @@ -127,7 +127,7 @@ def test_protobuf_deserializer_type_mismatch(kafka_cluster): pb2_2 = metadata_proto_pb2.HDFSOptions sr = kafka_cluster.schema_registry() - topic = kafka_cluster.create_topic("serialization-proto-refs") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-proto-refs") serializer = ProtobufSerializer(pb2_1, sr, {'use.deprecated.format': False}) deserializer = ProtobufDeserializer(pb2_2, {'use.deprecated.format': False}) diff --git a/tests/integration/serialization/test_serializers.py b/tests/integration/serialization/test_serializers.py index 7c4f796c2..f8ac10d0e 100644 --- a/tests/integration/serialization/test_serializers.py +++ b/tests/integration/serialization/test_serializers.py @@ -45,7 +45,7 @@ def test_numeric_serialization(kafka_cluster, serializer, deserializer, data): data(object): input data """ - topic = kafka_cluster.create_topic("serialization-numeric") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-numeric") producer = kafka_cluster.producer(value_serializer=serializer) producer.produce(topic, value=data) @@ -77,7 +77,7 @@ def test_string_serialization(kafka_cluster, data, codec): codec (str): encoding type """ - topic = kafka_cluster.create_topic("serialization-string") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-string") producer = kafka_cluster.producer(value_serializer=StringSerializer(codec)) @@ -119,7 +119,7 @@ def test_mixed_serialization(kafka_cluster, key_serializer, value_serializer, value (object): value data """ - topic = kafka_cluster.create_topic("serialization-numeric") + topic = kafka_cluster.create_topic_and_wait_propogation("serialization-numeric") producer = kafka_cluster.producer(key_serializer=key_serializer, value_serializer=value_serializer) diff --git a/tests/requirements.txt b/tests/requirements.txt index b5e20efb2..3b113a623 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -5,7 +5,7 @@ pytest==4.6.9;python_version<="3.0" pytest>=6.0.0;python_version>="3.0" 
pytest-timeout requests-mock -trivup>=0.8.3 +tests/trivup/trivup-0.12.6.tar.gz fastavro<1.8.0;python_version=="3.7" fastavro>=1.8.4;python_version>"3.7" fastavro diff --git a/tests/soak/soakclient.py b/tests/soak/soakclient.py index 453c7e5a5..027cc2e83 100755 --- a/tests/soak/soakclient.py +++ b/tests/soak/soakclient.py @@ -26,11 +26,12 @@ # from confluent_kafka import KafkaError, KafkaException, version -from confluent_kafka import Producer, Consumer +from confluent_kafka import Producer from confluent_kafka.admin import AdminClient, NewTopic from collections import defaultdict from builtins import int from opentelemetry import metrics +from common import TestConsumer import argparse import threading import time @@ -446,7 +447,7 @@ def filter_config(conf, filter_out, strip_prefix): cconf['error_cb'] = self.consumer_error_cb cconf['on_commit'] = self.consumer_commit_cb self.logger.info("consumer: using group.id {}".format(cconf['group.id'])) - self.consumer = Consumer(cconf) + self.consumer = TestConsumer(cconf) # Create and start producer thread self.producer_thread = threading.Thread(target=self.producer_thread_main) diff --git a/tests/test_Consumer.py b/tests/test_Consumer.py index 48a8d3f8e..73cc999e3 100644 --- a/tests/test_Consumer.py +++ b/tests/test_Consumer.py @@ -1,9 +1,12 @@ #!/usr/bin/env python +import pytest + from confluent_kafka import (Consumer, TopicPartition, KafkaError, KafkaException, TIMESTAMP_NOT_AVAILABLE, OFFSET_INVALID, libversion) -import pytest + +from tests.common import TestConsumer def test_basic_api(): @@ -11,15 +14,15 @@ def test_basic_api(): broker configured. """ with pytest.raises(TypeError) as ex: - kc = Consumer() + kc = TestConsumer() assert ex.match('expected configuration dict') def dummy_commit_cb(err, partitions): pass - kc = Consumer({'group.id': 'test', 'socket.timeout.ms': '100', - 'session.timeout.ms': 1000, # Avoid close() blocking too long - 'on_commit': dummy_commit_cb}) + kc = TestConsumer({'group.id': 'test', 'socket.timeout.ms': '100', + 'session.timeout.ms': 1000, # Avoid close() blocking too long + 'on_commit': dummy_commit_cb}) kc.subscribe(["test"]) kc.unsubscribe() @@ -119,11 +122,11 @@ def dummy_assign_revoke(consumer, partitions): def test_store_offsets(): """ Basic store_offsets() tests """ - c = Consumer({'group.id': 'test', - 'enable.auto.commit': True, - 'enable.auto.offset.store': False, - 'socket.timeout.ms': 50, - 'session.timeout.ms': 100}) + c = TestConsumer({'group.id': 'test', + 'enable.auto.commit': True, + 'enable.auto.offset.store': False, + 'socket.timeout.ms': 50, + 'session.timeout.ms': 100}) c.subscribe(["test"]) @@ -161,10 +164,10 @@ def commit_cb(cs, err, ps): cs = CommitState('test', 2) - c = Consumer({'group.id': 'x', - 'enable.auto.commit': False, 'socket.timeout.ms': 50, - 'session.timeout.ms': 100, - 'on_commit': lambda err, ps: commit_cb(cs, err, ps)}) + c = TestConsumer({'group.id': 'x', + 'enable.auto.commit': False, 'socket.timeout.ms': 50, + 'session.timeout.ms': 100, + 'on_commit': lambda err, ps: commit_cb(cs, err, ps)}) c.assign([TopicPartition(cs.topic, cs.partition)]) @@ -196,11 +199,11 @@ def poll(self, somearg): @pytest.mark.skipif(libversion()[1] < 0x000b0000, reason="requires librdkafka >=0.11.0") def test_offsets_for_times(): - c = Consumer({'group.id': 'test', - 'enable.auto.commit': True, - 'enable.auto.offset.store': False, - 'socket.timeout.ms': 50, - 'session.timeout.ms': 100}) + c = TestConsumer({'group.id': 'test', + 'enable.auto.commit': True, + 'enable.auto.offset.store': False, + 
'socket.timeout.ms': 50, + 'session.timeout.ms': 100}) # Query broker for timestamps for partition try: test_topic_partition = TopicPartition("test", 0, 100) @@ -216,11 +219,11 @@ def test_offsets_for_times(): def test_multiple_close_does_not_throw_exception(): """ Calling Consumer.close() multiple times should not throw Runtime Exception """ - c = Consumer({'group.id': 'test', - 'enable.auto.commit': True, - 'enable.auto.offset.store': False, - 'socket.timeout.ms': 50, - 'session.timeout.ms': 100}) + c = TestConsumer({'group.id': 'test', + 'enable.auto.commit': True, + 'enable.auto.offset.store': False, + 'socket.timeout.ms': 50, + 'session.timeout.ms': 100}) c.subscribe(["test"]) @@ -232,11 +235,11 @@ def test_multiple_close_does_not_throw_exception(): def test_any_method_after_close_throws_exception(): """ Calling any consumer method after close should throw a RuntimeError """ - c = Consumer({'group.id': 'test', - 'enable.auto.commit': True, - 'enable.auto.offset.store': False, - 'socket.timeout.ms': 50, - 'session.timeout.ms': 100}) + c = TestConsumer({'group.id': 'test', + 'enable.auto.commit': True, + 'enable.auto.offset.store': False, + 'socket.timeout.ms': 50, + 'session.timeout.ms': 100}) c.subscribe(["test"]) c.unsubscribe() @@ -296,11 +299,11 @@ def test_any_method_after_close_throws_exception(): def test_calling_store_offsets_after_close_throws_erro(): """ calling store_offset after close should throw RuntimeError """ - c = Consumer({'group.id': 'test', - 'enable.auto.commit': True, - 'enable.auto.offset.store': False, - 'socket.timeout.ms': 50, - 'session.timeout.ms': 100}) + c = TestConsumer({'group.id': 'test', + 'enable.auto.commit': True, + 'enable.auto.offset.store': False, + 'socket.timeout.ms': 50, + 'session.timeout.ms': 100}) c.subscribe(["test"]) c.unsubscribe() @@ -319,5 +322,5 @@ def test_consumer_without_groupid(): """ Consumer should raise exception if group.id is not set """ with pytest.raises(ValueError) as ex: - Consumer({'bootstrap.servers': "mybroker:9092"}) + TestConsumer({'bootstrap.servers': "mybroker:9092"}) assert ex.match('group.id must be set') diff --git a/tests/test_Producer.py b/tests/test_Producer.py index 7b81d2125..0f0d69e1d 100644 --- a/tests/test_Producer.py +++ b/tests/test_Producer.py @@ -1,10 +1,12 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- import pytest +from struct import pack -from confluent_kafka import Producer, Consumer, KafkaError, KafkaException, \ +from confluent_kafka import Producer, KafkaError, KafkaException, \ TopicPartition, libversion -from struct import pack + +from tests.common import TestConsumer def error_cb(err): @@ -211,7 +213,7 @@ def test_transaction_api(): assert ex.value.args[0].fatal() is False assert ex.value.args[0].txn_requires_abort() is False - consumer = Consumer({"group.id": "testgroup"}) + consumer = TestConsumer({"group.id": "testgroup"}) group_metadata = consumer.consumer_group_metadata() consumer.close() diff --git a/tests/test_log.py b/tests/test_log.py index b28fd798c..36368a61b 100644 --- a/tests/test_log.py +++ b/tests/test_log.py @@ -1,10 +1,13 @@ #!/usr/bin/env python from io import StringIO +import logging + import confluent_kafka import confluent_kafka.avro import confluent_kafka.admin -import logging + +from tests.common import TestConsumer, TestAvroConsumer class CountingFilter(logging.Filter): @@ -35,10 +38,9 @@ def test_logging_consumer(): logger.setLevel(logging.DEBUG) f = CountingFilter('consumer') logger.addFilter(f) - - kc = confluent_kafka.Consumer({'group.id': 'test', - 'debug': 
'all'},
-                                  logger=logger)
+    kc = TestConsumer({'group.id': 'test',
+                       'debug': 'all'},
+                      logger=logger)
 
     while f.cnt == 0:
         kc.poll(timeout=0.5)
@@ -55,10 +57,10 @@ def test_logging_avro_consumer():
     f = CountingFilter('avroconsumer')
     logger.addFilter(f)
 
-    kc = confluent_kafka.avro.AvroConsumer({'schema.registry.url': 'http://example.com',
-                                            'group.id': 'test',
-                                            'debug': 'all'},
-                                           logger=logger)
+    kc = TestAvroConsumer({'schema.registry.url': 'http://example.com',
+                           'group.id': 'test',
+                           'debug': 'all'},
+                          logger=logger)
 
     while f.cnt == 0:
         kc.poll(timeout=0.5)
@@ -150,7 +152,7 @@ def test_consumer_logger_logging_in_given_format():
 
     stringBuffer, logger = _setup_string_buffer_logger('Consumer')
 
-    c = confluent_kafka.Consumer(
+    c = TestConsumer(
         {"bootstrap.servers": "test", "group.id": "test",
          "logger": logger, "debug": "msg"})
     c.poll(0)
diff --git a/tests/test_misc.py b/tests/test_misc.py
index aca7b5a4f..4db5cff5a 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -1,14 +1,17 @@
 #!/usr/bin/env python
 
-import confluent_kafka
-from confluent_kafka import Consumer, Producer
-from confluent_kafka.admin import AdminClient
 import json
 import pytest
 import os
 import time
 import sys
 
+import confluent_kafka
+from confluent_kafka import Consumer, Producer
+from confluent_kafka.admin import AdminClient
+
+from tests.common import TestConsumer
+
 
 def test_version():
     print('Using confluent_kafka module version %s (0x%x)' % confluent_kafka.version())
@@ -40,7 +43,7 @@ def error_cb(error_msg):
         'error_cb': error_cb
     }
 
-    kc = confluent_kafka.Consumer(**conf)
+    kc = TestConsumer(conf)
 
     kc.subscribe(["test"])
     while not seen_error_cb:
         kc.poll(timeout=0.1)
@@ -64,7 +67,7 @@ def stats_cb(stats_json_str):
         'stats_cb': stats_cb
     }
 
-    kc = confluent_kafka.Consumer(**conf)
+    kc = TestConsumer(conf)
 
     kc.subscribe(["test"])
     while not seen_stats_cb:
@@ -137,7 +140,7 @@ def oauth_cb(oauth_config):
         'oauth_cb': oauth_cb
     }
 
-    kc = confluent_kafka.Consumer(**conf)
+    kc = TestConsumer(conf)
 
     while not seen_oauth_cb:
         kc.poll(timeout=0.1)
@@ -162,7 +165,7 @@ def oauth_cb(oauth_config):
         'oauth_cb': oauth_cb
     }
 
-    kc = confluent_kafka.Consumer(**conf)
+    kc = TestConsumer(conf)
 
     while not seen_oauth_cb:
         kc.poll(timeout=0.1)
@@ -189,7 +192,7 @@ def oauth_cb(oauth_config):
         'oauth_cb': oauth_cb
     }
 
-    kc = confluent_kafka.Consumer(**conf)
+    kc = TestConsumer(conf)
 
     while oauth_cb_count < 2:
         kc.poll(timeout=0.1)
@@ -267,7 +270,7 @@ def on_delivery(err, msg):
 def test_set_sasl_credentials_api():
     clients = [
         AdminClient({}),
-        confluent_kafka.Consumer({"group.id": "dummy"}),
+        TestConsumer({"group.id": "dummy"}),
         confluent_kafka.Producer({})]
 
     for c in clients:
diff --git a/tests/trivup/trivup-0.12.6.tar.gz b/tests/trivup/trivup-0.12.6.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..5417120a69a5cb6cf73cb0d5ecdc8f6ace6fd4d0
GIT binary patch
literal 32862
[base85-encoded binary payload omitted]
z%K}@DWMra9D<^qs$?s{yC-v>5U5IbIHtQ?x=&10jTe^{9@@0Fe4FA>?S&1YYz&9e7 z%#)J_G-8|)|DJ?twx5n7@<1eO3Cy(u$~8Gxp;qWA>_d7hYZXnG%pGk(leQ;rb*xjRHly0|Cq)WmjbTJoKdO_C2k+a2e>s%JuOr-O+al(Xz7~#P!OqO5suIg8xI|M@4U8v==%)@+JP+N6zvZoqTWb;aH7!iMF#%WYhR#V##p*40kvcsdkEK3Oler5%hH zRmy2EI)o=7xW(c9qCeQPNY*G!1H~5bTDr8JW;uka6K@o;ZgGGJDoE#xsHivSw!T5F zvDAI0MX?{AFzAE+uy_ucmP^n<4xPVxe6AygXag@ zs+b2x;0(Q(#fJRkokK@{(^je__S3d0BQ|V2s9s>fPMkBPW{k$G)#VOER7IU*a3@c^ z^<&$%HnzF3ZQC|BwrwXHYh&BCZQJ@i`QNI0pEuu{p6M4;UENbPr_cFx7+#cG1=yrb zTY6R84!Asme>20*w#PU6`UT=BFVeQFEEcD^yYYi+4>CH6a`A;uO!+_pYYhEPiL0O? zUWL<575s8?uAv7%{wY^rG(+;G=yBaYe=b5l@fYx|e5DFe-a(dPu_U#O#R9*yb_do{zX8bZ6%%$s?Mp?%de3c z0S63AFgS9_ou|F&+AvmjPwHs+%w*JHroH%$e+ZaTD_&ZsjwrO9jGgd&;1^s}$y3 zrc9^J{(}ZCI-7J3-%Puz)q&ddos5QhKyoKW$Yf*d;~h;dvd|tN(iY6?6v#+>jL)y zhz`olXUQ2xBy=s-+iCbrD91gb=9J-iGDiMT}4e?$p zsyYROMNADD7M<_1*@iXFqF`ji4aqQiEkhz!NV-luZSXi1OJow9pAu zH3{4EU>h2Zk$3gK~)KaAMr6Z4n-$o z0km_t7ZL#nM7c+kriFZ{1zMCsv#unbkx%sz-^jLH5(j(xNVy<5*K0xw3L$DyU1UPG zBys@n)#~*p`_SslY5{tEc#sL4Z79Ua>OhqRy&z>FD&(gUr}-xMkBZ8bq93t|wMJfed-@qViGuo=lPo1RS2l>esCt#w?W{{5gL z@2@4}=u|mC#P^RjDp=7?EmdbqJ6K)-lmXeAPkd>{8|f**!0jrD-zxGb52g@=j_;=F z5gYgGSm)^2)bUU*fXfP$0R$pC?i|>+LBsFm>w9GT)&;NyY|$@(DDb%Ld-^2wu|?R; z#ZS=Nusw#9;<~EtS|FvD^D^?)cU$)2*n=w{uo)P3YYwmgF7Eo`$lG>hGR|X-W(Y?s zsdFrj4T)h^FMzy|I{O00*Yr5#DT_6mIXDBy_dk91m08RWU=j>7g{pc2^}!0j4rgud9Asl}FW;NxL#{?);@(5~kU%>O~zaa)bu{qEOZ@!cE+m&?Jyi;Yk~Qb20c>Jp|yZ6 z;e`dlJdu+M@vkvZct>}kL|B|vQ=~v`a$FS%J2*Vs^*iO-_MQm=hd3-*CyTcYU94MV z=}M!8!h<7b8kA+Ro(~s2!xS;`$>{z!z!L>M<&o_`LB$9j}Vz6oUFo)h*9TEu3{N6h(NUwk9F=T#9a@3WvGUA+Of$_LIz3^ zoO#kqQN!VkiV+b)|G1wfF{9a=ia#2L)zPl-W(!B5OAUHnOlP zSqKhT9U9FVoeYLSG##xTRx^1YlKu+4@)wl*8z4#f5P*??Q2>pbus3+wzsXW<&6>o| zeJ~vbbXzA}F66AtraZaQDXZV5&D+(#s#d>XS1~hZ!l8jW!Q{E86gs8g9X{6?-`|vN z<8zFiCMnLFP?XiyTi?9#N3n?H$)H*QN#Gsy)s@N@um8pP?hC8?hG2|Z+!Y4Fw1jq0 zkUCqe(HU8?8C85gXV6`#w0&;=}6 zUtyjIkH44G`JFm*ygZm8lKM!-M-N&bO?6*)>Zc8(9odn`9VJn<@$?}zT@Y*_V8Y6T zL#CuR>!y~C8LcZbfeN#i&`pRnWd#oo$r>>tR~3<9oYkUTy;22DWe~5PD@pSm%5Do;9VOu;=DH7=B5Lk$ zlskZB`sw+)mwQqe1baY3S@XLtXE9YNX#q(>NDUm$X+=beJX{kGsl3D`%N%PtaW_KlDIj~HnG#b@PTS%ONp zHGke&%J+mSky)&h8a3{CIabePr4%ZeHZ);r)bQwuO1OSWWgmM2#)A#M2kKtYDV2a> zCoINJ!l1)!4f${LC}Kr7TMu_FQ`+>ZCp>(Se8npFa8BIJ@QqdHe$v^Vc5m@7IXrTq zCo8Q+mk7qpCI%Xt>^tx z%_2DC`D+Y;vDA>{`smU^o0p!s!bUR=^sAOVkcF@pv$65=Iv#1CLu&~DRy@f zq(p401vYcR{T3RyG15@O7WZ>cMu>i!h2k+uaIlIS6zVOJqbYi;c^<4H>i1!L{_=*KeYL*<0KIbLhev8=;ds?D zoSne-$mCI^*B7~2jlb3AAn`R-aOTZh=eZrkTLfXvxLVz#t8?U~?7o7q8;>rG4|uM6 z$Qstn!Q33*5hlROq+s9#&&=?fW2~!VVO!q~&+WfMgYWbBQlM_k*u9#!n3$^-N%Uao zQOJ}Vi44vl@kT8+qxV@y#w2k}KxMPgl9(YPpdv~B64y1Wi zhocEn4fiFl7(B)&XpDsT7V3&Q5(FHRjgVN_oK+sNe8CW=T3saMu)u77TuUY(xm&rp z*1v(d14weLrQ2qVRc*xzRtc5%tfCXP*mP%pf-8N6O{YypZN1Er3$pzm@v*Y)QRe#O z##fSxL`?-&`xm0e=^<{|ngY#}nDVvIlNCQ!G)%g*Bfyg)eG{EqCCLZg0;fGSqcQA& zNOT3+rF@?hC&{Wq0%;$0hwxY^i+_~bynIK~yBKH%>N#HmO-=OGCzVMhCPP9g z*8*7H)Md@s@9OfNZ^tFG3_JYofNk&G1<3&0&oL^GtxbJ77IOU~e zFW)7`9BoSgCJ6t(lt74_C}SC;`w6`G8uN5DfqtevpS=>U)_u1EXXEK!sXJ!{lbE1h zD&AeG%iI_?P4@(5OL61ayT^y+miw^UQcghheXskjik@OSf?t?%+afZRwdi zyj%7Js5sEJEJz)HRHzZ=FH=6&%PAUVXb{q?xe@e$dZg=A@*%~guzsg>e8zwILxCuz zV3>JwM+|(DefXy*r2Q%s<&(7fKEp;YXOukY%;N<_IxPq4X{k=AzwJ5T4QVx91GU9C za#Cuid9b!mVA&J=J*RlakS}NU6)#l}*6m|0c@cW5RzR9<) z*(UuHByDeYPe#yFi4y_n;PTTDqXc}`-}&@Q0lI^JtE-d4ei{r10zOHvh_!{Uh*#BW*5(IKu|M)zLw7@) zu2{fpZ7kr{eWs(n3VVo0=i~f8Y@I3+AYkV0`PdEjdt+^T^BZ*XoBan_XZjh&{4M_r z(4+X7#|#kv1%N#Q)ZT{?>}akCTSxY(Ro6xVKo|V1c~bt{4B4hW0Qf{9nt5(FeEkGV zU#A+sK49GQ2a$$u(cE)hQp`PAjid1hf+2=`EN9Y<O9oSvq)3je{ln>m});C@4xAHeCu#OkqTqlMwJl7|$gF&cMADVOk 
z6Hh%qWu`?J&_fTioA~7CZOoSgsO)N6qhA5o-~BoOq}&0dW61fIW6+x z-b?A{!3zL|B+-;I`Ky28qFYx4-&jRP+DhSjTb!`?2y*24+=nOWnrCh%3PkQTOe!tz z5jf7Gv>JFx{|s!j+fy4mn3yNSh%h;Wo50bXpyd;Y0+h_nJ6Y0s}p z2GOaM#2;tKJ^w~{A-4X3&>3laml0jg zi*6&u#2FD@b(f*xTL^1N*26_1LacAsSN0i$&0d;f8HEUU6S%LmDO0UM zkiD+A=Xp&agGX#7jx$W0wcIu1SF9y$_6!?2YPcFhG;|30Uwzrh-%i_$PX@=UxA{d& z7s;%(O_S9im=%0pr0^lK_G0xHH-y3Ha|ma#l(bNd9!>?(GKEt0%rg>}KkzZXzhAYV zK22Zp6~6j;z$Ou2_J6WI1ynxtt*yN){|mhSKuy%w0a73OmVgiUX298e`U~@RNzayz z?XJL;Ut5XlyIB0|KLIlCt;U!?Y;#@0Uccp{~G>#_?)No?Z@+hmw$ls}4`P+s@NRR}v__L;XJ-px?8PbORYm z{JfvPU#p~jhSQc8Ih*Je%r?;rjw{pgaUX7-Oag#j;t-$R8eDld@FrjsM7axht<(k{ zt6i^ulLz*=lO6Yp2>5bi=MoSi9v1cZVLd-TON`e2tVCLwp7qZA7)gyZ)9oML6;MFR zCl@c*D(*M>SFp_k&)fO`k+tOKtXl2Y(eHXEvGlPqF&>x(gb$N3n&t%XeYyG20yvB+ z25&yX2G&+nBv8eQ zEtW5EXm@mocf~Q1-hMc|yk5=q@!TB@Y+uI@_U`cmA58YYiqa>={rHWx6_d3vS{&lPaJMGX_vw;aJ~& zr2*-3hd@@g50!Z)L(9B^^GzI~CT1y;9F*;%HDOK^*Kwk5ILXvJ6oh5aPNJ&mdvWka z(Y<5q;x-3$mP@~o(Q+1>EeZb3yv^yS1-*Eu>H5nSJfM$Q!aV9gzb;zdy6(ZQVr6$> ztCv(iE|<;4J($Z0ffz>)Q;);V_atu|$3T~Q0c1)V>swS{K_FI$oki4{8{r9g!Cgin zUT1LqUn-ReJE&#^AzzCux4=vr*!w6hwEIYlLwF=t!ve3@O}iW=-(63$K&!ZGTZv`I zmlM2g%t|~`*iIKRCC%s&vg#$^&kDFvg+c*H@1CU?n#7kLApXTPraiF`=vcG_4<%h2 z%g^Nwc80Y&R99pexD9h9wVu9KR*#zrdI9~b)609=!k4(KGex83mbLb^#u&Rs%W0BO zP*Z0fE%Sgm4YB|eyc{Ld-{C>cM>K?^V$`x`F-8M+8`Q>0H`(3&sob%0gJLqqa(2jN z3#T42j@wQDl;`{YXbHY#Q?~6n!iTXHE z_ucxPlwZ%{O}mB4Uzr%$PNNt_?8t7xD*TBA&ekEUw=i9BryL0K1hxunM(m=LL2E)} zDGgiDOyar6AP1EXEQ;8~_smV2{vIs#2$G-ms>n=VYC}JYwx5ljG4sX0|9Ev$?LVqZ zMZkq@w|k^`qiz#B1S6EGA&wvhnd?_zQJ{|{%Px237FMDr`Hg^U2;56j=}lG(R=hRj z7<%+bdBK^xV|ms3VtM;L#eaDymvlXoj89UEGO3C7%T?wqr!2=dvGkQcr=t~N{;}Lr zgk8VJb&L;9xtFaGy+5@fLJaJ+>d#o;<99;|T?`J*sNCG?{WVQ*J5@7`AVNVc1MY>z z+a$gPoZc$BmZg!+(hulj_E>OU!;jUBwuFM7I-K8ZuYTB>upzsD9n;HrMsf=L&WlRF zC{ex0HFbt>mRMm03xv})#pAw^{=`i)11+WRdk3O207-JG@LAnfYcXm)HmgfOJERmz zjJHBERqIzee@`j0uz!J4YQNVPZvrS~X77JV&Uel1@3k%mT9>zxC)gNLC+M<_I<(N= zo~;f2*&f3^`?_eZ=ITU>Fd8uxsHNnJsM^x7W#Ai-u@F=lu0KrLA2@56(i}w7a1`C_ zJPBw6893Yyghx{hb+uy@u&di)K=+ucnTZ8aADSM7y`vgC;s zPnIN-7hoTCEz*hlSTMTr4(N8hbwMohWtIT+SK=z1TZ> z!sL07Iix5;A7C`AB;9 zsQeR;y6Y4FfS9k>`Us2yiWWs}-{)LCpCfpQFLOcKG*fWvi*Wgu#+Hx0H_Sx;7jWyG zWSaIt4a%<0tXewDT)zTblZd&98`nn2fMJG$?B5N1t5=$(pF;YAOUZ217}1QsC5`o^ z{$QJmOPVII%&DIf)e{18D&6|-v5oW+GZg;4z3JkCY&wP}<|O=x9Ku~Ll(JildtnN! 
zZ9MaENhBS{WmixTcAmR{@{WwB(|$eDKg8_69jK6l&lX2x%KqTmb0>L#Hf)`<)`F;D zTfeny=)#x78^;LM;7dQU28rQQ3h~b2TskvHOr&`~t3v=ZT3E0wCsPtiTvWwAr|T92 z)lido%sH}HvG_F7o-yrmKn^FPbz4(=z$06qq`m&BxaI!ytoO}JBku3wUwAbXp$!Gi zQ_ua7yAIv{C*JKz9@}oy{*YH0TqYe@KOkGdv@dn)5#TC_i79olc0|{L!(>!lqg5v} zfecYK8KYw7{``r+{IW`5c%L$xMeec?OdQFvH#Hm0(>&OwvWaoSfW%(g)(u{91b60Y z+a{Rs5B$;@-vl>5AYvvhS^7}g&QMxe6lwnGC2;medhrP>2Nj&tuz4IpV7L&+~kEkmI6A0i56;r#=t7r zw+?mHZH%95vL=^fHgwry0%G2#7~)+bDqJ2_pyQdW>@Y|@z9x}}rM%I0aK8CelDZ_q z;_c14IheK2`-gQd7c5^IC~{PKpr*J*39)WJb~Jfec1cC1A>uCxY+lp!83Nx+N+IQy zv&Qy%_1_#6k>nT|e;%f?2)A4z18g#5GjRhE7)E|9v~*=@TCi;E(?tsX=M#IvR7A!8 z=#pr85E*iHzv;DdBxj3LtLL`*%}9xc=8`$!WHGZoGamW3Z!`@XqYq$BXdg5@5vb39 zVsp^U!+e53h;3WoACf8?D&*Bkm5?*IHV~{CGbUp-se~3K;mxa9nOzZ$50I)c^&ZAc zns!UbR8Z#376gjK0-epln== zimH+6=y&M#eGWFx5?+fiTpDVYX+2A}a}m$)q0dcqg3oWM>F2{1pB^5-Hf~(Tb6i&k zz(Z$;Q0piABF5|8v@qG8M;@V`{?NaK65VAl)}7oq*!7<aM=u$uFi(&!O1=4QtPBWbQZK?-Fsno!U-*^_$yrGP_GL@QeOBWcjx}-8P8GS$t&_s*ae* zms1{A2!z3qxY1Vui8648K*jhpMg1>Kd+WsJYpQRfw2wl-s#6_rAF=(NhuptM!e%D(mSxe1o|3?Gt%Q$3s&&b-pxxGB)25DtPc zykd9@NAI18FX7L_S{u>`%=PE##)Yr#y%B~N?j1hyEr6h9;3MD^$aaJ(bg8R z4-)r4)lw&(s!d5EsfT^gClp=_Arw5D9g2=Vx%MjD_#LX!4_eEc1JGX~X}$^`~}VtG+W z7@9Sv8HZCbTm@|*P)*F)^32@jyneCCO+?@_t^Q%d;oiey-L*2=TBI0d7~$41y#9YR zHc(Jy>%>eXW%<&ABDo{ErUl9mY*x^9+?ToKv)m*|(%J)UIHWPcWmNF|b zQina8Zbs2MrJ`mLmE)e`)vM(0U7V{HxwUx;eeb;oRl7u@IPSQ=@M-D}@ zzrMd#>6Mr*h#V)A&(d~5o5L99y!sG-rNCN{voLciq8?KG$S^=D*}WqdcPFoqTN;7U z6=?d}uT1(ww<^gkB^RTOV|zgGjd~@0=HH@3wDV71nyKjb#EBb_-5^s*94*YXWHNAS z5BNOtt(G92GR3R;0o?9&v2$x!)7H`>?w|ki#l1JxfeX=Cuw}7thHer1_onoVkjteE z0f!8Z)ZY`h*0XxkeMdtTSyVB#EF z1e;1fZN7g#W9WXU+Gm)@9@HEV*k~>Bl2V73EwcC0LU)%q>VJyZG)PjVR07zVm**>5ObeuL7(sJSTSVO5o<&11c*0KyE_8*H_LB$ zP^;uv;(k|@;f_L#Q)!Gs6b$~XDQHN3COPef;_pZv1vOl{z`nUq$;0fxA@djbxTL5# zp(vZ1JAVpUQobSqsMtKJ9loGgrm^73!E027YK6rSZS{#a$#0LWcz=TrJ#iIqur0t0 zcgy02)UfZWGc0L0^z#nx#SaZ30a4D^ zLWO6sSzdb9&DBaj5GJA*#Gn-m)&G@pHLmYj_64}o^>*G6OYG@+`-N1hLSzh6sHYa& zv>8P@(a}zoIN|hMBUfsq2u4Lr0!$C2b=>y3W#4|X#De7?PuIcTVW7ke4R;)pn|qym zt#5xBjT|Wx^ru#&Yh}sx7WuX}2OK6svZ5&7qW2?D9Otsm}d@!lB#qiPP_VufLrz}!ylM^_Q zmYAfrdnV)B>3PX3o9(7AWTjT1L2+GQGcJfcFsgfxw)oi149gHtrz4+Dv;(U5XL@pG^y&2J4+OlHyvq zZuL&;j+hKIj_#--kbn;*Vi!;gl8Xtl;3LI)Uz=5$xfIfw60e@YTS4EyTsdrKuVav6 zcm^HuCy7v%Wh@N!z`!6TSH41GI31Kf9erI`#H^2UxUOUm27yn(N9B@{{<$mG{AZ)@c?{+s5$?u; zu}(xm)HJp}`fpWX=m|O-p;E-3ct-&?+Rry`#NO1|DWc}qAwT;cAa>vF&c>%7-FHU} z;G2~Th~o&rvnfz_54a-0!*6`O0ffKa*nan50J{FCyY}bH)PFZh)xPbI0M8F$NdR!) zPfW^hJ2C(vv!>rlGQd3!;n`jLttSQW{|lh(EpMDld4I!%`NUGqV6AS5>bf%uV}eWD z>~q@+J?tLAM_@P2p2G8SUvO*m_{{_(#$FFkS^H@Sl^ibee)-L2C_v=ysFr7XhH&wnI4w-I`lQV zO!g+RT&B34RtwPCBAF>Ja(;FtB5Qkd;7jP>8IUKBz8}Mg|Ts_*k(w^0u-6q3uK&e?{9Op6zqY5vxtZm=3CmnZjI=Fu8m8O|^&L z6J#-Vp4UdMJ;wn#4lNJPgVprEAMRxewgn0EM~4OrZ0XgyH*qu;DkXqe?RwpHldi(l z>XWa8Lrk})rsobgM7JbwGwlX(#U(9;@nLpn(^E*jWigJ9p*G^v45JV1inT7J&GVI! 
z_YP3+;zHua3s7)y4fB=-@^yQ89f|Ebz_mhEm2AmjBl6J#(|Spvu5XNo5zNfYr9_h`=qElFY+PIn4bYI#1#?_ukWnGiFBDwq zB23SF;BrGDp2HxwAv0lt?HKAiK8NC~gG)$bEs(&Dm(i#efw-1MRA(FTgvPNZGU-HH zDlZ8mt4w|CT{O=LoBSHkU*i~mCi~l$M+M#Cu#pLMpn16ZXV~@DVCM*DIt+9U8CBvp zg*q!U_9~Pqxt{uEbnn^nM49=pMb5MdY47ko)PXLA9%ktaUvZsbr*%_ykx#6|2TH*< z2NN>bV8Ccn%ZSfWq&@8AzdVfFLs0GQ8_MK{e2FJnCMet7NFk- zV7Qd#1RDc=Hh)fQ;X=SJiWtZ8-hDtmahfi`eE-XpeW)t>j5Ru`?%fxKT-NJ+H@mc} z6)zAiqPN-QJE#FBDNKUn-FHStoXr1#5->c5MG{hE1rq{JxkM#Z-WH|86@W8H^!l*$ zbTE2Qu9tvE^g~zy?dREZ@5U`xlBIEL0C~tc41RD@FoRV7IHQQt1P-`_`!Z{3$Cx=yLZkLSL*5_mV(qr&5Y;AE>Agxkl0h&mHGg+XeVOjNfj2(aYt7V z+Q4*)q4G+BSV`m%_a%@u=EjE|!lQ{^KDsJK1vPNlgKOEk&x{^XNO23=$5I!e z3_i*M7u__^#pwqV2> zG2ZBL!(j|8Z)*odVsI23Goc!V;_+z6qa8{&-sb#c13etn zdYRoLSsZ)yVV8>l8d07mzAK_&T!&fJlz^Ezujm|f&JX!B8wL&rhu<@+9vn>3!&(X{ zuY=wIWAbi|69tV{m?iH^3JzLSnfW=%tqhALbb=kV@X}+(^e?y3_0N~oGDF~wa;lr< zJiaUFYbVOC4ui*ov;1ZE)p$H3BI|;F&A5c?!)t!Iv^@7Dq^=vvE@mEKi z{>rrwxf-T69f&i0qj)A^01_Bp#2*{5K!gbe!f3whOwDQ6pd=4DTV2UTLK+0znH;g z>n>>457QyAFrpjYiJ&}`nFV@+_xvBXDoGrIW;u{OL5*rjTqrHmkc9(uSMJO?So5do z@}dPe&74R}1rty0ji40Hqgn$dM@czu*dPFm@QGow4x)iH9_U_BsEHX6FukJ|Z7Ir| z>ig^h=g>L{+vJFpeW#{3nf*<(i`Z;#um;9bQw$-VQPVhT)C-|Y z9JYQYMd-cwK3)wL)6%txuQxQ{v!aT;Iv_bnSS^J6@h^6xLb?IU0W!da2-OcYNZwq*yL;J5YgQe9kD8ipQ|=Rsgy*zAu1?-iqCTKAIgl(LYP55#HlKF=lYHNnS$*J!@LSMa`nTsImlUSRLEPW_lde zHC8I!sek%v_?Xn~_GA&MrkcR9tfasMd8LR1)#$lC4vZxlZazKDO+MWo&6|h=0(qp&)EAzcctgH;79p)UQ7v zuX6iBWs*m%LogIAHgr#1cy#IJii#|l#M){?yURUTnw4v1%}hqZq`LL7?)Qa3u?71> zIDK9%h`nj-=eacKHov~wX~#~ucL6R#wQ02@r6-g(@Aub7Lw5`Z6Ylo%df!?Y0r$HQ zi4!iw9E6Xn#JLdxyOtm-&^k369qV0gDiAlE^<;Dj`?_>)?!QwYB2==QIQ-m)J)+T@ z=3Xl`Qd#=qbn4Crp-qWiDPpFZ{#0DYH85OL)b285g97XZqZ98KQQ^JcPB8A&d2Mr?jNJ!kB|7Lc}l!-P2jt zVq-!0dRr@w%oI<&W99U}sM4ZbXy$+ykm)^u-dQ~=IcYiM-XA!}1jY@6^AhdS2pWq^ zNycZVX049z3pQ?6_3zMs6sW+dVWeu<65fQ}z!$T?*g?efly0n3e!!1!{*g{Vk#Po6 z=Sq2(t@bkiwKpy(RZr8&nS&A1+bhp5>D3YGh+2hj5FQ0hlBjq()=~-9fabqHp}J~E zK|huPt`XPPWHg+j$AsXa!js*@MCPB#s;;K9b->vCuzGK!e#xB#xWpWtu@(o4KuOBB zTxSn`3c~76q0x>aGg{9&M&Q^OKN-gHBt^uNHPqk1g#_bm37NX`42+86ap_G&5pRF3 z`W~}R2(xT(NeK=Zurd98p{$I{1(~E`nk7jst8Z*YMO|N$E6bgb4&7%62<$nHGl(Tf zlp;Fo%QvqCK^X|5J2Ir{~EaZ8aCQr0k{&w29 zHUTFk1XJH(J=O$GH0|1hA_Y#T9m_of{eug-YQqV-{{>UR^1XhjU9TtC6%gYPgmXcm z+X=R!AyfSY1t>KqM4Rx80Q&c9#7@hL@h~TSQ@o00-|f#%9~ct^gpPfBlit%wbcl*l zuKKJ?@F;cIsF**D^O-2mu@Ju=3uZ-lGqs>GPW_@^>)ixBGl+Q8V?BpFws9*e@v{0% zZkZlAj;j3u%W=Fce4}PHi?km*mFm?*sB83Xo**aCN0JZDh#J2*v`=Ky8E}dXysI=s zC^y8$eJfJUW0Ui`fp*$cej#Z|o^$^{Ut;+4F?9}4;`vCG9)8}W-FXlYHD?yCg>MrM ztObm2bKmI~=J;RI(+Qnycx2x0@0PA=sg;MWmoQjPqzi(j`yu(5yB4|hLSaVzYw+H_ z$gNJ=SjwjJLd463iosQ~ZRzdsvA?rj6&){hn^T#fspor9BWw5>-EHBB$UzazLQ*^S z_#R^U%(DV7aS9yLCH|p^>?lM9NbBZ}Xo0}#M{t#&Sh-{_z~;vE35Z$usnvvarC1|F z@ReOP)6Tf%<7$6O{eB$BF27B#xlDH2!n|n1Gyr$}hXTv6JFC0IkhG|$YB}N}ok(?5a1Bg)kZ*eZ!-1_4APpmct z3O?AAI}-Xu}0egVZjY&t|HRNibTHkXwyvX$IKa9GWza~+l&+9*4-J19T5i@cQ7D86zSy?T=BstIH9m~qfFgB_LD z1a*&kgQqrs+1JrcKf8)ysWJYh;E)!x2{La?7=h=cE7lWIZjn1*eDQr1`qwbUcDuq! 
zeFZ_vcOZKno|KSH#`5kQO3-W+T9sCbHl27mcs%al;QroKEz=gwLa?~Fh(Xz`uWY@L zNPrjNsJY_puc7Dr9r!$kZRJvCf953WLGb>Z1p`py}p7dcM`Y}yZ40i7fPv73b4h`8IyFQFk@(+4p-%6 z!E`4PIW`~~ojjP8obgFd$x*Y$&M5W;BhAqR6$!+sl?UUXrB0A>;HMF*RA}WC)3Sfm zfvJ_OX_W9^^F4>8>vtl?5|)RSP^O{R)XZtTRyj?`vk|*iUUi#@aRjS@5}h?1)0+0u zbLQP52`H+lbN@LvXD&dzmlZ$QPEUPwGuA=4PSAS_ocU~DjkR1L2bm-#nN|go<)bcX zuu27@LG%%)7#}W&KLNSA;IE&uz-fQIN;2U*ciGP-W`NdGL=}^bbyMRn{2A z4@CCHcLFJ8O&+`?=u2&es;D*KzbOFWYp}__#(q&EUTe}!Xs`ZCe{btVuT+mgLg+C}E5Hl(XHGFFjWzuvNW+6!Fxp z-W1-qO=vU_{C(!|K%R7?4oMJosung~GcV+lmR{OwZicB^n|P|wO#8F+{mGgK->2LU zq9!W-sQ1{-Ng$H88dj$9sE>YshplXlXZ)Vdl~h4F@LXF3%g-k zptLUBlvQtjo2yw@6(M_oP`1dZ!>ffysjw58O}f2DUff<{dt7p(EAX#SNu4(={8iau z5Bs+tEDcF9JSGHn6Ds;@3+j*i71Fs&5e2}8P8ySM9PgLh)}|;V8^W(ROlHDa4Kwk< zB}RH5Z+(R)h;3n0LRwQ!Vl1Xs(-zz__jigZyV8jPqXg+XfnLllOeKVjPX=-RC+ zq0nLD=+f|K@_i+)f*19u^S_Onk$L!TI3oe$UeTP}dke#V<~b(}>VD1F<5-B0M8{*x zb6u+Y1BLs8N5qFw)=mnest`;s?5ijTa2lhl777$~pZOtMfxgIV165Iic?SAHJAu2H zsMy8eO-{Mqe`B>m?z2PvH5AaHBQ8$O6Mg0dx$JGmHs_>^6IH7*#FC*gvSh;$nPF|S z6a@Df#@=r2+@CgGq1VZts*!q1!10=-2}av(ig4d_|f2l%0W z0tm^!0dv=T1`dsY8}YuqsG_c-AWT~fe?P@f08x9_R&0`H`-95`;PkeR8h}~zwiSfg zaoJOZ3CREi@UyoE>~s!b0D7hX^Y;MzWPs=cfE^$`0pJ4oJxu5~%g^ZR`%HZ@*fb%! zK?BS`2Dc6T92SrL-Z1j?i;O_Fr#Ac!{-p`qGpt@^2VwS%lou3TJCK~|{LBhyWz8=4 zOJ81KgQ1;Npi|OKn0f5%lPxpU(c{G068C&oIWULLXc&NXdw6qNlJ@eXt|INj-t@Gs z&Ste;pk@1NgoqWkP<&2WFU8FYlbmzlKML)(J{SUYiJ zcpaw~5uarq*?uUzmcruG@R4SH7Te4wM=u{IU(fjj&FdEZAx~hnrJmZt&e5xj9s!w# zr+SI23*M=j*O#zD(V@@vyK$nRXM%dDhA2`F6gNWPt2;UuJN~n6njWKjB&m#AOWm9jjP~%9y!a zJ>e6p?!hmaDAsLUntrKrhL?k5M7x!AKkveqJ2Lj?nl>4lc+|5&BJIHQkv}u3g9mA% zIcVUMOZuiLdd$TH?$a)LOGGmhfFg(Q&OjmayDNf1Gg&vgo7*k9HG7-s?upLp+00r! zz~{R4UBD7%L}r?vt7L;I9^cmm=g{3XgAcFu z-$1PY-Cp-w|LWNSZ2VUyJ_MH1dosVw8CMzB{PZrHJ^&nVWl5y2DvP=BO&@+8fR+!x z56c(70jlq(XFy*<7l07|y7>dp;tjY8v=_K4{c6m6UhrL)|FHbO#&?DQ0FmwSfEI`d H6zKl}VPD7A literal 0 HcmV?d00001 diff --git a/tools/source-package-verification.sh b/tools/source-package-verification.sh index eb7506061..58ccefddc 100755 --- a/tools/source-package-verification.sh +++ b/tools/source-package-verification.sh @@ -18,8 +18,10 @@ export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/$lib_dir" python setup.py build && python setup.py install if [[ $OS_NAME == linux && $ARCH == x64 ]]; then - flake8 --exclude ./_venv,*_pb2.py - make docs + if [[ -z $TEST_CONSUMER_GROUP_PROTOCOL ]]; then + flake8 --exclude ./_venv,*_pb2.py + make docs + fi python -m pytest --timeout 1200 --ignore=dest else python -m pytest --timeout 1200 --ignore=dest --ignore=tests/integration From 7c8abdf7d82a1da969e0da435fce360608ea8028 Mon Sep 17 00:00:00 2001 From: Pranav Rathi <4427674+pranavrth@users.noreply.github.com> Date: Sat, 5 Oct 2024 01:22:25 +0530 Subject: [PATCH 2/5] Fixing style check from master --- .../schema_registry/schema_registry_client.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/confluent_kafka/schema_registry/schema_registry_client.py b/src/confluent_kafka/schema_registry/schema_registry_client.py index ae6c55116..021e06427 100644 --- a/src/confluent_kafka/schema_registry/schema_registry_client.py +++ b/src/confluent_kafka/schema_registry/schema_registry_client.py @@ -222,8 +222,7 @@ def remove_by_subject(self, subject_name): Args: subject_name (str): Subject name the schema is registered under. 
""" - - + with self.lock: if subject_name in self.subject_schemas: for schema in self.subject_schemas[subject_name]: @@ -234,7 +233,6 @@ def remove_by_subject(self, subject_name): del self.subject_schemas[subject_name] - def get_schema(self, schema_id): """ Get the schema instance associated with schema_id from the cache. @@ -566,9 +564,9 @@ def delete_subject(self, subject_name, permanent=False): if permanent: self._rest_client.delete('subjects/{}?permanent=true' .format(_urlencode(subject_name))) - + self._cache.remove_by_subject(subject_name) - + return list def get_latest_version(self, subject_name): From 00c362dd30bcc70bb8f0dd7b450bda3eaed3bdc0 Mon Sep 17 00:00:00 2001 From: mahajanadhitya <115617755+mahajanadhitya@users.noreply.github.com> Date: Fri, 9 Aug 2024 18:25:19 +0530 Subject: [PATCH 3/5] feature/ListGroupsAPI KIP 848 (#2) * rebase commit * cherry-picked --- examples/adminapi.py | 63 ++++++++++++++++--- src/confluent_kafka/__init__.py | 2 +- src/confluent_kafka/_model/__init__.py | 18 ++++++ src/confluent_kafka/admin/__init__.py | 14 ++++- src/confluent_kafka/admin/_group.py | 15 ++++- src/confluent_kafka/src/Admin.c | 56 +++++++++++++++-- src/confluent_kafka/src/AdminTypes.c | 9 ++- .../admin/test_basic_operations.py | 14 ++++- tests/test_Admin.py | 14 ++++- tests/test_log.py | 2 +- 10 files changed, 185 insertions(+), 22 deletions(-) diff --git a/examples/adminapi.py b/examples/adminapi.py index 8442d6453..880496269 100755 --- a/examples/adminapi.py +++ b/examples/adminapi.py @@ -18,8 +18,8 @@ # Example use of AdminClient operations. from confluent_kafka import (KafkaException, ConsumerGroupTopicPartitions, - TopicPartition, ConsumerGroupState, TopicCollection, - IsolationLevel) + TopicPartition, ConsumerGroupState, + TopicCollection, IsolationLevel) from confluent_kafka.admin import (AdminClient, NewTopic, NewPartitions, ConfigResource, ConfigEntry, ConfigSource, AclBinding, AclBindingFilter, ResourceType, ResourcePatternType, @@ -27,6 +27,8 @@ ScramMechanism, ScramCredentialInfo, UserScramCredentialUpsertion, UserScramCredentialDeletion, OffsetSpec) +from confluent_kafka._model import ConsumerGroupType + import sys import threading import logging @@ -471,18 +473,64 @@ def example_list(a, args): print("id {} client_id: {} client_host: {}".format(m.id, m.client_id, m.client_host)) +def getConsumerGroupState(state_string): + if state_string == "STABLE": + return ConsumerGroupState.STABLE + elif state_string == "DEAD": + return ConsumerGroupState.DEAD + elif state_string == "PREPARING_REBALANCING": + return ConsumerGroupState.PREPARING_REBALANCING + elif state_string == "COMPLETING_REBALANCING": + return ConsumerGroupState.COMPLETING_REBALANCING + elif state_string == "EMPTY": + return ConsumerGroupState.EMPTY + return ConsumerGroupState.UNKNOWN + + +def getConsumerGroupType(type_string): + if type_string == "CONSUMER": + return ConsumerGroupType.CONSUMER + elif type_string == "CLASSIC": + return ConsumerGroupType.CLASSIC + return ConsumerGroupType.UNKNOWN + + def example_list_consumer_groups(a, args): """ List Consumer Groups """ - states = {ConsumerGroupState[state] for state in args} - future = a.list_consumer_groups(request_timeout=10, states=states) + states = set() + group_types = set() + if len(args) > 0: + isType = False + isState = False + for i in range(0, len(args)): + if (args[i] == "-states"): + if (isState): + raise Exception("Invalid Arguments\n Usage: list_consumer_groups [-states ..] 
" + + "[-types ..]") + isState = True + elif (args[i] == "-types"): + if (isType): + raise Exception("Invalid Arguments\n Usage: list_consumer_groups [-states ..] " + + "[-types ..]") + isType = True + else: + if (isType): + group_types.add(getConsumerGroupType(args[i])) + elif (isState): + states.add(getConsumerGroupState(args[i])) + else: + raise Exception("Invalid Arguments\n Usage: list_consumer_groups [-states ..] " + + "[-types ..]") + + future = a.list_consumer_groups(request_timeout=10, states=states, group_types=group_types) try: list_consumer_groups_result = future.result() print("{} consumer groups".format(len(list_consumer_groups_result.valid))) for valid in list_consumer_groups_result.valid: - print(" id: {} is_simple: {} state: {}".format( - valid.group_id, valid.is_simple_consumer_group, valid.state)) + print(" id: {} is_simple: {} state: {} group_type: {}".format( + valid.group_id, valid.is_simple_consumer_group, valid.state, valid.group_type)) print("{} errors".format(len(list_consumer_groups_result.errors))) for error in list_consumer_groups_result.errors: print(" error: {}".format(error)) @@ -900,7 +948,8 @@ def example_delete_records(a, args): sys.stderr.write(' delete_acls ' + ' ..\n') sys.stderr.write(' list []\n') - sys.stderr.write(' list_consumer_groups [ ..]\n') + sys.stderr.write(' list_consumer_groups [-states ..] ' + + '[-types ..]\n') sys.stderr.write(' describe_consumer_groups ..\n') sys.stderr.write(' describe_topics ..\n') sys.stderr.write(' describe_cluster \n') diff --git a/src/confluent_kafka/__init__.py b/src/confluent_kafka/__init__.py index 6a24d6e9e..4d908b822 100644 --- a/src/confluent_kafka/__init__.py +++ b/src/confluent_kafka/__init__.py @@ -48,7 +48,7 @@ 'Producer', 'DeserializingConsumer', 'SerializingProducer', 'TIMESTAMP_CREATE_TIME', 'TIMESTAMP_LOG_APPEND_TIME', 'TIMESTAMP_NOT_AVAILABLE', 'TopicPartition', 'Node', - 'ConsumerGroupTopicPartitions', 'ConsumerGroupState', 'Uuid', + 'ConsumerGroupTopicPartitions', 'ConsumerGroupState', 'ConsumerGroupType', 'Uuid', 'IsolationLevel', 'TopicCollection', 'TopicPartitionInfo'] __version__ = version()[0] diff --git a/src/confluent_kafka/_model/__init__.py b/src/confluent_kafka/_model/__init__.py index 1c2ec89f0..792033622 100644 --- a/src/confluent_kafka/_model/__init__.py +++ b/src/confluent_kafka/_model/__init__.py @@ -95,6 +95,24 @@ def __lt__(self, other): return self.value < other.value +class ConsumerGroupType(Enum): + """ + Enumerates the different types of Consumer Group Type. + + """ + #: Type is not known or not set + UNKNOWN = cimpl.CONSUMER_GROUP_TYPE_UNKNOWN + #: Consumer Type + CONSUMER = cimpl.CONSUMER_GROUP_TYPE_CONSUMER + #: Classic Type + CLASSIC = cimpl.CONSUMER_GROUP_TYPE_CLASSIC + + def __lt__(self, other): + if self.__class__ != other.__class__: + return NotImplemented + return self.value < other.value + + class TopicCollection: """ Represents collection of topics in the form of different identifiers diff --git a/src/confluent_kafka/admin/__init__.py b/src/confluent_kafka/admin/__init__.py index 9101b651f..12ac85dba 100644 --- a/src/confluent_kafka/admin/__init__.py +++ b/src/confluent_kafka/admin/__init__.py @@ -56,7 +56,7 @@ from ._records import DeletedRecords # noqa: F401 -from .._model import TopicCollection as _TopicCollection +from .._model import TopicCollection as _TopicCollection, ConsumerGroupType as _ConsumerGroupType from ..cimpl import (KafkaException, # noqa: F401 KafkaError, @@ -881,6 +881,8 @@ def list_consumer_groups(self, **kwargs): on broker, and response. 
Default: `socket.timeout.ms/1000.0` :param set(ConsumerGroupState) states: only list consumer groups which are currently in these states. + :param set(ConsumerGroupType) group_types: only list consumer groups which are currently of + these types. :returns: a future. Result method of the future returns :class:`ListConsumerGroupsResult`. @@ -900,6 +902,16 @@ def list_consumer_groups(self, **kwargs): raise TypeError("All elements of states must be of type ConsumerGroupState") kwargs["states_int"] = [state.value for state in states] kwargs.pop("states") + if "group_types" in kwargs: + group_types = kwargs["group_types"] + if group_types is not None: + if not isinstance(group_types, set): + raise TypeError("'group_types' must be a set") + for group_type in group_types: + if not isinstance(group_type, _ConsumerGroupType): + raise TypeError("All elements of group_types must be of type ConsumerGroupType") + kwargs["group_types_int"] = [group_type.value for group_type in group_types] + kwargs.pop("group_types") f, _ = AdminClient._make_futures([], None, AdminClient._make_list_consumer_groups_result) diff --git a/src/confluent_kafka/admin/_group.py b/src/confluent_kafka/admin/_group.py index 82ab98f1d..06a97c91a 100644 --- a/src/confluent_kafka/admin/_group.py +++ b/src/confluent_kafka/admin/_group.py @@ -14,7 +14,7 @@ from .._util import ConversionUtil -from .._model import ConsumerGroupState +from .._model import ConsumerGroupState, ConsumerGroupType from ._acl import AclOperation @@ -31,13 +31,17 @@ class ConsumerGroupListing: Whether a consumer group is simple or not. state : ConsumerGroupState Current state of the consumer group. + group_type : ConsumerGroupType + Current type of the consumer group. """ - def __init__(self, group_id, is_simple_consumer_group, state=None): + def __init__(self, group_id, is_simple_consumer_group, state=None, group_type=None): self.group_id = group_id self.is_simple_consumer_group = is_simple_consumer_group if state is not None: self.state = ConversionUtil.convert_to_enum(state, ConsumerGroupState) + if group_type is not None: + self.group_type = ConversionUtil.convert_to_enum(group_type, ConsumerGroupType) class ListConsumerGroupsResult: @@ -119,6 +123,8 @@ class ConsumerGroupDescription: Partition assignor. state : ConsumerGroupState Current state of the consumer group. + group_type : ConsumerGroupType + Current type of the consumer group. coordinator: Node Consumer group coordinator. 
authorized_operations: list(AclOperation) @@ -126,7 +132,7 @@ class ConsumerGroupDescription: """ def __init__(self, group_id, is_simple_consumer_group, members, partition_assignor, state, - coordinator, authorized_operations=None): + coordinator, authorized_operations=None, group_type=None): self.group_id = group_id self.is_simple_consumer_group = is_simple_consumer_group self.members = members @@ -139,4 +145,7 @@ def __init__(self, group_id, is_simple_consumer_group, members, partition_assign self.partition_assignor = partition_assignor if state is not None: self.state = ConversionUtil.convert_to_enum(state, ConsumerGroupState) + if group_type is not None: + self.group_type = ConversionUtil.convert_to_enum(group_type, ConsumerGroupType) + self.coordinator = coordinator diff --git a/src/confluent_kafka/src/Admin.c b/src/confluent_kafka/src/Admin.c index c58166d6e..fe572bb9f 100644 --- a/src/confluent_kafka/src/Admin.c +++ b/src/confluent_kafka/src/Admin.c @@ -82,6 +82,8 @@ struct Admin_options { rd_kafka_IsolationLevel_t isolation_level; rd_kafka_consumer_group_state_t* states; int states_cnt; + rd_kafka_consumer_group_type_t* group_types; + int group_types_cnt; }; /**@brief "unset" value initializers for Admin_options @@ -185,6 +187,13 @@ Admin_options_to_c (Handle *self, rd_kafka_admin_op_t for_api, goto err; } + if (Admin_options_is_set_ptr(options->group_types) && + (err_obj = rd_kafka_AdminOptions_set_match_consumer_group_types( + c_options, options->group_types, options->group_types_cnt))) { + snprintf(errstr, sizeof(errstr), "%s", rd_kafka_error_string(err_obj)); + goto err; + } + return c_options; err: @@ -1698,24 +1707,28 @@ static const char Admin_delete_acls_doc[] = PyDoc_STR( * @brief List consumer groups */ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kwargs) { - PyObject *future, *states_int = NULL; + PyObject *future, *states_int, *group_types_int = NULL; struct Admin_options options = Admin_options_INITIALIZER; rd_kafka_AdminOptions_t *c_options = NULL; CallState cs; rd_kafka_queue_t *rkqu; rd_kafka_consumer_group_state_t *c_states = NULL; + rd_kafka_consumer_group_type_t *c_group_types = NULL; int states_cnt = 0; + int group_types_cnt = 0; int i = 0; static char *kws[] = {"future", /* options */ "states_int", + "group_types_int", "request_timeout", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Of", kws, + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OOf", kws, &future, &states_int, + &group_types_int, &options.request_timeout)) { goto err; } @@ -1746,6 +1759,32 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw } } + if(group_types_int != NULL && group_types_int != Py_None) { + if(!PyList_Check(group_types_int)) { + PyErr_SetString(PyExc_ValueError, + "group_types must of type list"); + goto err; + } + + group_types_cnt = (int)PyList_Size(group_types_int); + + if(group_types_cnt > 0) { + c_group_types = (rd_kafka_consumer_group_type_t *) + malloc(group_types_cnt*sizeof(rd_kafka_consumer_group_type_t)); + for(i = 0 ; i < group_types_cnt ; i++) { + PyObject *group_type = PyList_GET_ITEM(group_types_int, i); + if(!cfl_PyInt_Check(group_type)) { + PyErr_SetString(PyExc_ValueError, + "Element of group_types must be a valid group type"); + goto err; + } + c_group_types[i] = (rd_kafka_consumer_group_type_t) cfl_PyInt_AsInt(group_type); + } + options.group_types = c_group_types; + options.group_types_cnt = group_types_cnt; + } + } + c_options = Admin_options_to_c(self, 
RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, &options, future); if (!c_options) { @@ -1760,7 +1799,6 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw /* Use librdkafka's background thread queue to automatically dispatch * Admin_background_event_cb() when the admin operation is finished. */ rkqu = rd_kafka_queue_get_background(self->rk); - /* * Call ListConsumerGroupOffsets * @@ -1774,14 +1812,19 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw if(c_states) { free(c_states); } + if(c_group_types) { + free(c_group_types); + } rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */ rd_kafka_AdminOptions_destroy(c_options); - Py_RETURN_NONE; err: if(c_states) { free(c_states); } + if(c_group_types) { + free(c_group_types); + } if (c_options) { rd_kafka_AdminOptions_destroy(c_options); Py_DECREF(future); @@ -1789,7 +1832,7 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw return NULL; } const char Admin_list_consumer_groups_doc[] = PyDoc_STR( - ".. py:function:: list_consumer_groups(future, [states_int], [request_timeout])\n" + ".. py:function:: list_consumer_groups(future, [states_int], [group_types_int], [request_timeout])\n" "\n" " List all the consumer groups.\n" "\n" @@ -3566,7 +3609,6 @@ static PyObject *Admin_c_ListConsumerGroupsResults_to_py( size_t valid_cnt, const rd_kafka_error_t **c_errors_responses, size_t errors_cnt) { - PyObject *result = NULL; PyObject *ListConsumerGroupsResult_type = NULL; PyObject *ConsumerGroupListing_type = NULL; @@ -3609,6 +3651,8 @@ static PyObject *Admin_c_ListConsumerGroupsResults_to_py( cfl_PyDict_SetInt(kwargs, "state", rd_kafka_ConsumerGroupListing_state(c_valid_responses[i])); + cfl_PyDict_SetInt(kwargs, "group_type", rd_kafka_ConsumerGroupListing_type(c_valid_responses[i])); + args = PyTuple_New(0); valid_result = PyObject_Call(ConsumerGroupListing_type, args, kwargs); diff --git a/src/confluent_kafka/src/AdminTypes.c b/src/confluent_kafka/src/AdminTypes.c index 43ca665ba..ef9affb30 100644 --- a/src/confluent_kafka/src/AdminTypes.c +++ b/src/confluent_kafka/src/AdminTypes.c @@ -570,8 +570,14 @@ static void AdminTypes_AddObjectsConsumerGroupStates (PyObject *m) { PyModule_AddIntConstant(m, "CONSUMER_GROUP_STATE_EMPTY", RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY); } +static void AdminTypes_AddObjectsConsumerGroupTypes (PyObject *m) { + /* rd_kafka_consumer_group_type_t */ + PyModule_AddIntConstant(m, "CONSUMER_GROUP_TYPE_UNKNOWN", RD_KAFKA_CONSUMER_GROUP_TYPE_UNKNOWN); + PyModule_AddIntConstant(m, "CONSUMER_GROUP_TYPE_CONSUMER", RD_KAFKA_CONSUMER_GROUP_TYPE_CONSUMER); + PyModule_AddIntConstant(m, "CONSUMER_GROUP_TYPE_CLASSIC", RD_KAFKA_CONSUMER_GROUP_TYPE_CLASSIC); +} + static void AdminTypes_AddObjectsAlterConfigOpType (PyObject *m) { - /* rd_kafka_consumer_group_state_t */ PyModule_AddIntConstant(m, "ALTER_CONFIG_OP_TYPE_SET", RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET); PyModule_AddIntConstant(m, "ALTER_CONFIG_OP_TYPE_DELETE", RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE); PyModule_AddIntConstant(m, "ALTER_CONFIG_OP_TYPE_APPEND", RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND); @@ -612,6 +618,7 @@ void AdminTypes_AddObjects (PyObject *m) { AdminTypes_AddObjectsAclOperation(m); AdminTypes_AddObjectsAclPermissionType(m); AdminTypes_AddObjectsConsumerGroupStates(m); + AdminTypes_AddObjectsConsumerGroupTypes(m); AdminTypes_AddObjectsAlterConfigOpType(m); AdminTypes_AddObjectsScramMechanismType(m); AdminTypes_AddObjectsIsolationLevel(m); diff --git 
a/tests/integration/admin/test_basic_operations.py b/tests/integration/admin/test_basic_operations.py index 00bd2c045..64509892c 100644 --- a/tests/integration/admin/test_basic_operations.py +++ b/tests/integration/admin/test_basic_operations.py @@ -17,11 +17,12 @@ import time from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition, ConsumerGroupState, KafkaError +from confluent_kafka._model import ConsumerGroupType from confluent_kafka.admin import (NewPartitions, ConfigResource, AclBinding, AclBindingFilter, ResourceType, ResourcePatternType, AclOperation, AclPermissionType) from confluent_kafka.error import ConsumeError - +from tests.common import TestUtils topic_prefix = "test-topic" @@ -318,6 +319,17 @@ def consume_messages(group_id, num_messages=None): assert isinstance(result.valid, list) assert not result.valid + # List Consumer Groups with Group Type Option Test + if TestUtils.use_group_protocol_consumer(): + future = admin_client.list_consumer_groups(request_timeout=10, types={ConsumerGroupType.CLASSIC}) + result = future.result() + group_ids = [group.group_id for group in result.valid] + assert group1 not in group_ids, "Consumer group {} was found despite passing Classic Group Type".format(group1) + assert group2 not in group_ids, "Consumer group {} was found despite passing Classic Group Type".format(group2) + for group in group_ids: + assert group.group_type == ConsumerGroupType.CLASSIC + + def verify_config(expconfig, configs): """ Verify that the config key,values in expconfig are found diff --git a/tests/test_Admin.py b/tests/test_Admin.py index 5e8ff2e6c..320fac16d 100644 --- a/tests/test_Admin.py +++ b/tests/test_Admin.py @@ -6,7 +6,7 @@ ResourcePatternType, AclOperation, AclPermissionType, AlterConfigOpType, \ ScramCredentialInfo, ScramMechanism, \ UserScramCredentialAlteration, UserScramCredentialDeletion, \ - UserScramCredentialUpsertion, OffsetSpec + UserScramCredentialUpsertion, OffsetSpec, _ConsumerGroupType from confluent_kafka import KafkaException, KafkaError, libversion, \ TopicPartition, ConsumerGroupTopicPartitions, ConsumerGroupState, \ IsolationLevel, TopicCollection @@ -616,6 +616,18 @@ def test_list_consumer_groups_api(): with pytest.raises(TypeError): a.list_consumer_groups(states=["EMPTY"]) + with pytest.raises(TypeError): + a.list_consumer_groups(group_types=["UNKNOWN"]) + + with pytest.raises(TypeError): + a.list_consumer_groups(group_types="UNKNOWN") + + with pytest.raises(TypeError): + a.list_consumer_groups(group_types=[_ConsumerGroupType.UNKNOWN]) + + with pytest.raises(TypeError): + a.list_consumer_groups(group_types=[_ConsumerGroupType.CLASSIC, _ConsumerGroupType.CLASSIC]) + with pytest.raises(TypeError): a.list_consumer_groups(states=[ConsumerGroupState.EMPTY, ConsumerGroupState.STABLE]) diff --git a/tests/test_log.py b/tests/test_log.py index 36368a61b..ab56f5952 100644 --- a/tests/test_log.py +++ b/tests/test_log.py @@ -8,7 +8,7 @@ import confluent_kafka.admin from tests.common import TestConsumer, TestAvroConsumer - +import confluent_kafka.admin class CountingFilter(logging.Filter): def __init__(self, name): From 9a1124dfe8ab9e42effeec2e93207e6cb23c567c Mon Sep 17 00:00:00 2001 From: Emanuele Sabellico Date: Fri, 4 Oct 2024 18:43:18 +0200 Subject: [PATCH 4/5] Reviewed implementation and tests --- CHANGELOG.md | 16 +++- examples/adminapi.py | 84 ++++++++----------- src/confluent_kafka/__init__.py | 4 +- src/confluent_kafka/_model/__init__.py | 1 - src/confluent_kafka/admin/__init__.py | 22 ++--- 
src/confluent_kafka/admin/_group.py | 17 ++-- src/confluent_kafka/src/Admin.c | 65 +++++++------- .../admin/test_basic_operations.py | 22 +++-- tests/test_Admin.py | 29 ++++--- tests/test_log.py | 2 +- 10 files changed, 138 insertions(+), 124 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9857786d..d65b3de2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Confluent's Python client for Apache Kafka + +## v2.6.0 + +v2.6.0 is a feature release with the following features, fixes and enhancements: + +- [KIP-848 EA](https://cwiki.apache.org/confluence/display/KAFKA/KIP-848%3A+The+Next+Generation+of+the+Consumer+Rebalance+Protocol): Admin API for listing consumer groups now has an optional filter to return only groups of given types (#). + +confluent-kafka-python is based on librdkafka v2.6.0, see the +[librdkafka release notes](https://github.com/confluentinc/librdkafka/releases/tag/v2.6.0) +for a complete list of changes, enhancements, fixes and upgrade considerations. + + ## v2.5.3 v2.5.3 is a maintenance release with the following fixes and enhancements: @@ -18,7 +30,7 @@ for a complete list of changes, enhancements, fixes and upgrade considerations. ## v2.5.0 > [!WARNING] -This version has introduced a regression in which an assert is triggered during **PushTelemetry** call. This happens when no metric is matched on the client side among those requested by broker subscription. +This version has introduced a regression in which an assert is triggered during **PushTelemetry** call. This happens when no metric is matched on the client side among those requested by broker subscription. > > You won't face any problem if: > * Broker doesn't support [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability). @@ -26,7 +38,7 @@ This version has introduced a regression in which an assert is triggered during > * [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) feature is disabled on the client side. This is enabled by default. Set configuration `enable.metrics.push` to `false`. > * If [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) is enabled on the broker side and there is no subscription configured there. > * If [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) is enabled on the broker side with subscriptions that match the [KIP-714](https://cwiki.apache.org/confluence/display/KAFKA/KIP-714%3A+Client+metrics+and+observability) metrics defined on the client. -> +> > Having said this, we strongly recommend using `v2.5.3` and above to not face this regression at all. 
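To make the client-side workaround above concrete, here is a minimal sketch that opts out of KIP-714 metrics push; the broker address and group id are illustrative placeholders, not values taken from this patch:

```python
# Minimal sketch: disable KIP-714 telemetry on the client to avoid the
# PushTelemetry assert described above. "bootstrap.servers" and "group.id"
# are placeholder values for illustration only.
from confluent_kafka import Consumer

consumer = Consumer({
    "bootstrap.servers": "localhost:9092",
    "group.id": "example-group",
    "enable.metrics.push": False,  # telemetry is enabled by default
})
```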
v2.5.0 is a feature release with the following features, fixes and enhancements: diff --git a/examples/adminapi.py b/examples/adminapi.py index 880496269..720cc09cf 100755 --- a/examples/adminapi.py +++ b/examples/adminapi.py @@ -19,7 +19,8 @@ from confluent_kafka import (KafkaException, ConsumerGroupTopicPartitions, TopicPartition, ConsumerGroupState, - TopicCollection, IsolationLevel) + TopicCollection, IsolationLevel, + ConsumerGroupType) from confluent_kafka.admin import (AdminClient, NewTopic, NewPartitions, ConfigResource, ConfigEntry, ConfigSource, AclBinding, AclBindingFilter, ResourceType, ResourcePatternType, @@ -27,8 +28,6 @@ ScramMechanism, ScramCredentialInfo, UserScramCredentialUpsertion, UserScramCredentialDeletion, OffsetSpec) -from confluent_kafka._model import ConsumerGroupType - import sys import threading import logging @@ -473,64 +472,51 @@ def example_list(a, args): print("id {} client_id: {} client_host: {}".format(m.id, m.client_id, m.client_host)) -def getConsumerGroupState(state_string): - if state_string == "STABLE": - return ConsumerGroupState.STABLE - elif state_string == "DEAD": - return ConsumerGroupState.DEAD - elif state_string == "PREPARING_REBALANCING": - return ConsumerGroupState.PREPARING_REBALANCING - elif state_string == "COMPLETING_REBALANCING": - return ConsumerGroupState.COMPLETING_REBALANCING - elif state_string == "EMPTY": - return ConsumerGroupState.EMPTY - return ConsumerGroupState.UNKNOWN - +def _parse_list_consumer_groups_args(args, states, types): + def usage(message): + raise Exception(f"{message}\nUsage: list_consumer_groups [-states ..] " + "[-types ..]") -def getConsumerGroupType(type_string): - if type_string == "CONSUMER": - return ConsumerGroupType.CONSUMER - elif type_string == "CLASSIC": - return ConsumerGroupType.CLASSIC - return ConsumerGroupType.UNKNOWN + if len(args) > 0: + typeArray = False + stateArray = False + lastArray = 0 + for i in range(0, len(args)): + if (args[i] == "-states"): + if (stateArray): + usage("Cannot pass the states flag (-states) more than once") + lastArray = 1 + stateArray = True + elif (args[i] == "-types"): + if (typeArray): + usage("Cannot pass the types flag (-types) more than once") + lastArray = 2 + typeArray = True + else: + if (lastArray == 1): + states.add(ConsumerGroupState[args[i]]) + elif (lastArray == 2): + types.add(ConsumerGroupType[args[i]]) + else: + usage(f"Unknown argument: {args[i]}") def example_list_consumer_groups(a, args): """ List Consumer Groups """ + states = set() - group_types = set() - if len(args) > 0: - isType = False - isState = False - for i in range(0, len(args)): - if (args[i] == "-states"): - if (isState): - raise Exception("Invalid Arguments\n Usage: list_consumer_groups [-states ..] " + - "[-types ..]") - isState = True - elif (args[i] == "-types"): - if (isType): - raise Exception("Invalid Arguments\n Usage: list_consumer_groups [-states ..] " + - "[-types ..]") - isType = True - else: - if (isType): - group_types.add(getConsumerGroupType(args[i])) - elif (isState): - states.add(getConsumerGroupState(args[i])) - else: - raise Exception("Invalid Arguments\n Usage: list_consumer_groups [-states ..] 
" + - "[-types ..]") + types = set() + _parse_list_consumer_groups_args(args, states, types) - future = a.list_consumer_groups(request_timeout=10, states=states, group_types=group_types) + future = a.list_consumer_groups(request_timeout=10, states=states, types=types) try: list_consumer_groups_result = future.result() print("{} consumer groups".format(len(list_consumer_groups_result.valid))) for valid in list_consumer_groups_result.valid: - print(" id: {} is_simple: {} state: {} group_type: {}".format( - valid.group_id, valid.is_simple_consumer_group, valid.state, valid.group_type)) + print(" id: {} is_simple: {} state: {} type: {}".format( + valid.group_id, valid.is_simple_consumer_group, valid.state, valid.type)) print("{} errors".format(len(list_consumer_groups_result.errors))) for error in list_consumer_groups_result.errors: print(" error: {}".format(error)) @@ -949,7 +935,7 @@ def example_delete_records(a, args): ' ..\n') sys.stderr.write(' list []\n') sys.stderr.write(' list_consumer_groups [-states ..] ' + - '[-types ..]\n') + '[-types ..]\n') sys.stderr.write(' describe_consumer_groups ..\n') sys.stderr.write(' describe_topics ..\n') sys.stderr.write(' describe_cluster \n') diff --git a/src/confluent_kafka/__init__.py b/src/confluent_kafka/__init__.py index 4d908b822..f3a8d1778 100644 --- a/src/confluent_kafka/__init__.py +++ b/src/confluent_kafka/__init__.py @@ -22,6 +22,7 @@ from ._model import (Node, # noqa: F401 ConsumerGroupTopicPartitions, ConsumerGroupState, + ConsumerGroupType, TopicCollection, TopicPartitionInfo, IsolationLevel) @@ -48,7 +49,8 @@ 'Producer', 'DeserializingConsumer', 'SerializingProducer', 'TIMESTAMP_CREATE_TIME', 'TIMESTAMP_LOG_APPEND_TIME', 'TIMESTAMP_NOT_AVAILABLE', 'TopicPartition', 'Node', - 'ConsumerGroupTopicPartitions', 'ConsumerGroupState', 'ConsumerGroupType', 'Uuid', + 'ConsumerGroupTopicPartitions', 'ConsumerGroupState', + 'ConsumerGroupType', 'Uuid', 'IsolationLevel', 'TopicCollection', 'TopicPartitionInfo'] __version__ = version()[0] diff --git a/src/confluent_kafka/_model/__init__.py b/src/confluent_kafka/_model/__init__.py index 792033622..93c810757 100644 --- a/src/confluent_kafka/_model/__init__.py +++ b/src/confluent_kafka/_model/__init__.py @@ -98,7 +98,6 @@ def __lt__(self, other): class ConsumerGroupType(Enum): """ Enumerates the different types of Consumer Group Type. - """ #: Type is not known or not set UNKNOWN = cimpl.CONSUMER_GROUP_TYPE_UNKNOWN diff --git a/src/confluent_kafka/admin/__init__.py b/src/confluent_kafka/admin/__init__.py index 12ac85dba..5271a6101 100644 --- a/src/confluent_kafka/admin/__init__.py +++ b/src/confluent_kafka/admin/__init__.py @@ -881,7 +881,7 @@ def list_consumer_groups(self, **kwargs): on broker, and response. Default: `socket.timeout.ms/1000.0` :param set(ConsumerGroupState) states: only list consumer groups which are currently in these states. - :param set(ConsumerGroupType) group_types: only list consumer groups which are currently of + :param set(ConsumerGroupType) types: only list consumer groups of these types. :returns: a future. Result method of the future returns :class:`ListConsumerGroupsResult`. 
@@ -902,16 +902,16 @@ def list_consumer_groups(self, **kwargs): raise TypeError("All elements of states must be of type ConsumerGroupState") kwargs["states_int"] = [state.value for state in states] kwargs.pop("states") - if "group_types" in kwargs: - group_types = kwargs["group_types"] - if group_types is not None: - if not isinstance(group_types, set): - raise TypeError("'group_types' must be a set") - for group_type in group_types: - if not isinstance(group_type, _ConsumerGroupType): - raise TypeError("All elements of group_types must be of type ConsumerGroupType") - kwargs["group_types_int"] = [group_type.value for group_type in group_types] - kwargs.pop("group_types") + if "types" in kwargs: + types = kwargs["types"] + if types is not None: + if not isinstance(types, set): + raise TypeError("'types' must be a set") + for type in types: + if not isinstance(type, _ConsumerGroupType): + raise TypeError("All elements of types must be of type ConsumerGroupType") + kwargs["types_int"] = [type.value for type in types] + kwargs.pop("types") f, _ = AdminClient._make_futures([], None, AdminClient._make_list_consumer_groups_result) diff --git a/src/confluent_kafka/admin/_group.py b/src/confluent_kafka/admin/_group.py index 06a97c91a..964d62b2f 100644 --- a/src/confluent_kafka/admin/_group.py +++ b/src/confluent_kafka/admin/_group.py @@ -31,17 +31,17 @@ class ConsumerGroupListing: Whether a consumer group is simple or not. state : ConsumerGroupState Current state of the consumer group. - group_type : ConsumerGroupType - Current type of the consumer group. + type : ConsumerGroupType + Type of the consumer group. """ - def __init__(self, group_id, is_simple_consumer_group, state=None, group_type=None): + def __init__(self, group_id, is_simple_consumer_group, state=None, type=None): self.group_id = group_id self.is_simple_consumer_group = is_simple_consumer_group if state is not None: self.state = ConversionUtil.convert_to_enum(state, ConsumerGroupState) - if group_type is not None: - self.group_type = ConversionUtil.convert_to_enum(group_type, ConsumerGroupType) + if type is not None: + self.type = ConversionUtil.convert_to_enum(type, ConsumerGroupType) class ListConsumerGroupsResult: @@ -123,8 +123,6 @@ class ConsumerGroupDescription: Partition assignor. state : ConsumerGroupState Current state of the consumer group. - group_type : ConsumerGroupType - Current type of the consumer group. coordinator: Node Consumer group coordinator. 
authorized_operations: list(AclOperation) @@ -132,7 +130,7 @@ class ConsumerGroupDescription: """ def __init__(self, group_id, is_simple_consumer_group, members, partition_assignor, state, - coordinator, authorized_operations=None, group_type=None): + coordinator, authorized_operations=None): self.group_id = group_id self.is_simple_consumer_group = is_simple_consumer_group self.members = members @@ -145,7 +143,4 @@ def __init__(self, group_id, is_simple_consumer_group, members, partition_assign self.partition_assignor = partition_assignor if state is not None: self.state = ConversionUtil.convert_to_enum(state, ConsumerGroupState) - if group_type is not None: - self.group_type = ConversionUtil.convert_to_enum(group_type, ConsumerGroupType) - self.coordinator = coordinator diff --git a/src/confluent_kafka/src/Admin.c b/src/confluent_kafka/src/Admin.c index fe572bb9f..5fbeb780f 100644 --- a/src/confluent_kafka/src/Admin.c +++ b/src/confluent_kafka/src/Admin.c @@ -82,8 +82,8 @@ struct Admin_options { rd_kafka_IsolationLevel_t isolation_level; rd_kafka_consumer_group_state_t* states; int states_cnt; - rd_kafka_consumer_group_type_t* group_types; - int group_types_cnt; + rd_kafka_consumer_group_type_t* types; + int types_cnt; }; /**@brief "unset" value initializers for Admin_options @@ -98,6 +98,8 @@ struct Admin_options { Admin_options_def_int, \ Admin_options_def_ptr, \ Admin_options_def_cnt, \ + Admin_options_def_ptr, \ + Admin_options_def_cnt, \ } #define Admin_options_is_set_int(v) ((v) != Admin_options_def_int) @@ -187,9 +189,9 @@ Admin_options_to_c (Handle *self, rd_kafka_admin_op_t for_api, goto err; } - if (Admin_options_is_set_ptr(options->group_types) && + if (Admin_options_is_set_ptr(options->types) && (err_obj = rd_kafka_AdminOptions_set_match_consumer_group_types( - c_options, options->group_types, options->group_types_cnt))) { + c_options, options->types, options->types_cnt))) { snprintf(errstr, sizeof(errstr), "%s", rd_kafka_error_string(err_obj)); goto err; } @@ -1707,28 +1709,28 @@ static const char Admin_delete_acls_doc[] = PyDoc_STR( * @brief List consumer groups */ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kwargs) { - PyObject *future, *states_int, *group_types_int = NULL; + PyObject *future, *states_int = NULL, *types_int = NULL; struct Admin_options options = Admin_options_INITIALIZER; rd_kafka_AdminOptions_t *c_options = NULL; CallState cs; rd_kafka_queue_t *rkqu; rd_kafka_consumer_group_state_t *c_states = NULL; - rd_kafka_consumer_group_type_t *c_group_types = NULL; + rd_kafka_consumer_group_type_t *c_types = NULL; int states_cnt = 0; - int group_types_cnt = 0; + int types_cnt = 0; int i = 0; static char *kws[] = {"future", /* options */ "states_int", - "group_types_int", + "types_int", "request_timeout", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OOf", kws, &future, &states_int, - &group_types_int, + &types_int, &options.request_timeout)) { goto err; } @@ -1749,7 +1751,7 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw PyObject *state = PyList_GET_ITEM(states_int, i); if(!cfl_PyInt_Check(state)) { PyErr_SetString(PyExc_ValueError, - "Element of states must be a valid state"); + "Element of states must be valid states"); goto err; } c_states[i] = (rd_kafka_consumer_group_state_t) cfl_PyInt_AsInt(state); @@ -1759,29 +1761,30 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw } } - if(group_types_int != NULL && group_types_int != Py_None) { - 
-                if(!PyList_Check(group_types_int)) {
+        if(types_int != NULL && types_int != Py_None) {
+                if(!PyList_Check(types_int)) {
                         PyErr_SetString(PyExc_ValueError,
-                                "group_types must of type list");
+                                "types must be of type list");
                         goto err;
                 }
 
-                group_types_cnt = (int)PyList_Size(group_types_int);
+                types_cnt = (int)PyList_Size(types_int);
 
-                if(group_types_cnt > 0) {
-                        c_group_types = (rd_kafka_consumer_group_type_t *)
-                                malloc(group_types_cnt*sizeof(rd_kafka_consumer_group_type_t));
-                        for(i = 0 ; i < group_types_cnt ; i++) {
-                                PyObject *group_type = PyList_GET_ITEM(group_types_int, i);
-                                if(!cfl_PyInt_Check(group_type)) {
+                if(types_cnt > 0) {
+                        c_types = (rd_kafka_consumer_group_type_t *)
+                                malloc(types_cnt *
+                                       sizeof(rd_kafka_consumer_group_type_t));
+                        for(i = 0 ; i < types_cnt ; i++) {
+                                PyObject *type = PyList_GET_ITEM(types_int, i);
+                                if(!cfl_PyInt_Check(type)) {
                                         PyErr_SetString(PyExc_ValueError,
-                                                "Element of group_types must be a valid group type");
+                                                "Elements of types must be valid group types");
                                         goto err;
                                 }
-                                c_group_types[i] = (rd_kafka_consumer_group_type_t) cfl_PyInt_AsInt(group_type);
+                                c_types[i] = (rd_kafka_consumer_group_type_t) cfl_PyInt_AsInt(type);
                         }
-                        options.group_types = c_group_types;
-                        options.group_types_cnt = group_types_cnt;
+                        options.types = c_types;
+                        options.types_cnt = types_cnt;
                 }
         }
 
@@ -1799,6 +1802,7 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw
         /* Use librdkafka's background thread queue to automatically dispatch
          * Admin_background_event_cb() when the admin operation is finished. */
         rkqu = rd_kafka_queue_get_background(self->rk);
+
         /*
          * Call ListConsumerGroupOffsets
          *
@@ -1812,8 +1816,8 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw
         if(c_states) {
                 free(c_states);
         }
-        if(c_group_types) {
-                free(c_group_types);
+        if(c_types) {
+                free(c_types);
         }
         rd_kafka_queue_destroy(rkqu); /* drop reference from get_background */
         rd_kafka_AdminOptions_destroy(c_options);
@@ -1822,8 +1826,8 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw
         if(c_states) {
                 free(c_states);
         }
-        if(c_group_types) {
-                free(c_group_types);
+        if(c_types) {
+                free(c_types);
         }
         if (c_options) {
                 rd_kafka_AdminOptions_destroy(c_options);
@@ -1832,7 +1836,7 @@ PyObject *Admin_list_consumer_groups (Handle *self, PyObject *args, PyObject *kw
         return NULL;
 }
 const char Admin_list_consumer_groups_doc[] = PyDoc_STR(
-        ".. py:function:: list_consumer_groups(future, [states_int], [group_types_int], [request_timeout])\n"
+        ".. py:function:: list_consumer_groups(future, [states_int], [types_int], [request_timeout])\n"
         "\n"
         "  List all the consumer groups.\n"
         "\n"
@@ -3609,6 +3613,7 @@ static PyObject *Admin_c_ListConsumerGroupsResults_to_py(
         size_t valid_cnt,
         const rd_kafka_error_t **c_errors_responses,
         size_t errors_cnt) {
+        PyObject *result = NULL;
         PyObject *ListConsumerGroupsResult_type = NULL;
         PyObject *ConsumerGroupListing_type = NULL;
@@ -3651,7 +3656,7 @@ static PyObject *Admin_c_ListConsumerGroupsResults_to_py(
                                 cfl_PyDict_SetInt(kwargs, "state",
                                         rd_kafka_ConsumerGroupListing_state(c_valid_responses[i]));
-                                cfl_PyDict_SetInt(kwargs, "group_type", rd_kafka_ConsumerGroupListing_type(c_valid_responses[i]));
+                                cfl_PyDict_SetInt(kwargs, "type", rd_kafka_ConsumerGroupListing_type(c_valid_responses[i]));
 
                                 args = PyTuple_New(0);
diff --git a/tests/integration/admin/test_basic_operations.py b/tests/integration/admin/test_basic_operations.py
index 64509892c..eb7f41a39 100644
--- a/tests/integration/admin/test_basic_operations.py
+++ b/tests/integration/admin/test_basic_operations.py
@@ -17,7 +17,7 @@
 import time
 
 from confluent_kafka import ConsumerGroupTopicPartitions, TopicPartition, ConsumerGroupState, KafkaError
-from confluent_kafka._model import ConsumerGroupType
+from confluent_kafka import ConsumerGroupType
 from confluent_kafka.admin import (NewPartitions, ConfigResource,
                                    AclBinding, AclBindingFilter, ResourceType,
                                    ResourcePatternType, AclOperation, AclPermissionType)
@@ -308,27 +308,33 @@ def consume_messages(group_id, num_messages=None):
     assert group2 in groups, "Consumer group {} not found".format(group2)
 
     # List Consumer Groups new API test
+    current_type = ConsumerGroupType.CONSUMER \
+        if TestUtils.use_group_protocol_consumer() else ConsumerGroupType.CLASSIC
+    opposite_type = ConsumerGroupType.CLASSIC \
+        if current_type == ConsumerGroupType.CONSUMER else ConsumerGroupType.CONSUMER
     future = admin_client.list_consumer_groups(request_timeout=10)
     result = future.result()
     group_ids = [group.group_id for group in result.valid]
     assert group1 in group_ids, "Consumer group {} not found".format(group1)
     assert group2 in group_ids, "Consumer group {} not found".format(group2)
 
-    future = admin_client.list_consumer_groups(request_timeout=10, states={ConsumerGroupState.STABLE})
+    future = admin_client.list_consumer_groups(request_timeout=10,
+                                               states={ConsumerGroupState.STABLE},
+                                               types={current_type})
     result = future.result()
     assert isinstance(result.valid, list)
    assert not result.valid
 
-    # List Consumer Groups with Group Type Option Test
+    # List Consumer Groups with the opposite group type option; it should not
+    # return any of the groups under test.
     if TestUtils.use_group_protocol_consumer():
-        future = admin_client.list_consumer_groups(request_timeout=10, types={ConsumerGroupType.CLASSIC})
+        future = admin_client.list_consumer_groups(request_timeout=10, types={opposite_type})
         result = future.result()
         group_ids = [group.group_id for group in result.valid]
-        assert group1 not in group_ids, "Consumer group {} was found despite passing Classic Group Type".format(group1)
-        assert group2 not in group_ids, "Consumer group {} was found despite passing Classic Group Type".format(group2)
+        assert group1 not in group_ids, f"Consumer group {group1} was found despite passing {opposite_type}"
+        assert group2 not in group_ids, f"Consumer group {group2} was found despite passing {opposite_type}"
         for group in group_ids:
-            assert group.group_type == ConsumerGroupType.CLASSIC
-
+            assert group.type == opposite_type
 
 
 def verify_config(expconfig, configs):
     """
diff --git a/tests/test_Admin.py b/tests/test_Admin.py
index 320fac16d..ca9832f87 100644
--- a/tests/test_Admin.py
+++ b/tests/test_Admin.py
@@ -6,10 +6,11 @@
     ResourcePatternType, AclOperation, AclPermissionType, AlterConfigOpType, \
     ScramCredentialInfo, ScramMechanism, \
     UserScramCredentialAlteration, UserScramCredentialDeletion, \
-    UserScramCredentialUpsertion, OffsetSpec, _ConsumerGroupType
+    UserScramCredentialUpsertion, OffsetSpec, \
+    ListConsumerGroupsResult
 from confluent_kafka import KafkaException, KafkaError, libversion, \
     TopicPartition, ConsumerGroupTopicPartitions, ConsumerGroupState, \
-    IsolationLevel, TopicCollection
+    IsolationLevel, TopicCollection, ConsumerGroupType
 
 import concurrent.futures
 
@@ -617,19 +618,27 @@ def test_list_consumer_groups_api():
         a.list_consumer_groups(states=["EMPTY"])
 
     with pytest.raises(TypeError):
-        a.list_consumer_groups(group_types=["UNKNOWN"])
-
-    with pytest.raises(TypeError):
-        a.list_consumer_groups(group_types="UNKNOWN")
+        a.list_consumer_groups(states=[ConsumerGroupState.EMPTY, ConsumerGroupState.STABLE])
 
     with pytest.raises(TypeError):
-        a.list_consumer_groups(group_types=[_ConsumerGroupType.UNKNOWN])
+        a.list_consumer_groups(types=[ConsumerGroupType.CLASSIC])
 
     with pytest.raises(TypeError):
-        a.list_consumer_groups(group_types=[_ConsumerGroupType.CLASSIC, _ConsumerGroupType.CLASSIC])
+        a.list_consumer_groups(types="UNKNOWN")
 
-    with pytest.raises(TypeError):
-        a.list_consumer_groups(states=[ConsumerGroupState.EMPTY, ConsumerGroupState.STABLE])
+    with pytest.raises(ValueError):
+        a.list_consumer_groups(types={ConsumerGroupType.UNKNOWN})
+
+    f = a.list_consumer_groups(types={ConsumerGroupType.CLASSIC,
+                                      ConsumerGroupType.CLASSIC},
+                               request_timeout=0.5)
+    r = f.result(timeout=1)
+    assert isinstance(r, ListConsumerGroupsResult)
+    assert len(r.errors) == 1
+    assert len(r.valid) == 0
+    e = r.errors[0]
+    assert isinstance(e, KafkaError)
+    assert e.code() == KafkaError._TIMED_OUT
 
 
 def test_describe_consumer_groups_api():
diff --git a/tests/test_log.py b/tests/test_log.py
index ab56f5952..36368a61b 100644
--- a/tests/test_log.py
+++ b/tests/test_log.py
@@ -8,7 +8,7 @@
 import confluent_kafka.admin
 from tests.common import TestConsumer, TestAvroConsumer
 
-import confluent_kafka.admin
+
 
 class CountingFilter(logging.Filter):
     def __init__(self, name):

From 715fb34f0203cda525d5316bc9fd6935798a5167 Mon Sep 17 00:00:00 2001
From: Emanuele Sabellico
Date: Wed, 9 Oct 2024 19:32:17 +0200
Subject: [PATCH 5/5] Address comment

---
 examples/adminapi.py | 55 +++++++++++++++++++++-----------------------
 1 file changed, 26 insertions(+), 29 deletions(-)
diff --git a/examples/adminapi.py b/examples/adminapi.py
index 720cc09cf..e2b075c5b 100755
--- a/examples/adminapi.py
+++ b/examples/adminapi.py
@@ -31,6 +31,7 @@
 import sys
 import threading
 import logging
+import argparse
 
 logging.basicConfig()
 
@@ -472,33 +473,29 @@ def example_list(a, args):
             print("id {} client_id: {} client_host: {}".format(m.id, m.client_id, m.client_host))
 
 
-def _parse_list_consumer_groups_args(args, states, types):
+def parse_list_consumer_groups_args(args, states, types):
+    parser = argparse.ArgumentParser(prog='list_consumer_groups')
+    parser.add_argument('-states')
+    parser.add_argument('-types')
+    parsed_args = parser.parse_args(args)
+
     def usage(message):
-        raise Exception(f"{message}\nUsage: list_consumer_groups [-states <state1> <state2> ..] "
-                        "[-types <type1> <type2> ..]")
-
-    if len(args) > 0:
-        typeArray = False
-        stateArray = False
-        lastArray = 0
-        for i in range(0, len(args)):
-            if (args[i] == "-states"):
-                if (stateArray):
-                    usage("Cannot pass the states flag (-states) more than once")
-                lastArray = 1
-                stateArray = True
-            elif (args[i] == "-types"):
-                if (typeArray):
-                    usage("Cannot pass the types flag (-types) more than once")
-                lastArray = 2
-                typeArray = True
-            else:
-                if (lastArray == 1):
-                    states.add(ConsumerGroupState[args[i]])
-                elif (lastArray == 2):
-                    types.add(ConsumerGroupType[args[i]])
-                else:
-                    usage(f"Unknown argument: {args[i]}")
+        print(message)
+        parser.print_usage()
+        sys.exit(1)
+
+    if parsed_args.states:
+        for arg in parsed_args.states.split(","):
+            try:
+                states.add(ConsumerGroupState[arg])
+            except KeyError:
+                usage(f"Invalid state: {arg}")
+    if parsed_args.types:
+        for arg in parsed_args.types.split(","):
+            try:
+                types.add(ConsumerGroupType[arg])
+            except KeyError:
+                usage(f"Invalid type: {arg}")
 
 
 def example_list_consumer_groups(a, args):
@@ -508,7 +505,7 @@ def example_list_consumer_groups(a, args):
     states = set()
     types = set()
-    _parse_list_consumer_groups_args(args, states, types)
+    parse_list_consumer_groups_args(args, states, types)
 
     future = a.list_consumer_groups(request_timeout=10, states=states, types=types)
     try:
@@ -934,8 +931,8 @@ def example_delete_records(a, args):
         sys.stderr.write(' delete_acls <resource_type1> <resource_name1> <resource_pattern_type1> ' +
                         '<principal1> <host1> <operation1> <permission_type1> ..\n')
         sys.stderr.write(' list [<all|topics|brokers|groups>]\n')
-        sys.stderr.write(' list_consumer_groups [-states <state1> <state2> ..] ' +
-                         '[-types <type1> <type2> ..]\n')
+        sys.stderr.write(' list_consumer_groups [-states <state1>,<state2>,..] ' +
+                         '[-types <type1>,<type2>,..]\n')
         sys.stderr.write(' describe_consumer_groups <group1> <group2> ..\n')
         sys.stderr.write(' describe_topics <topic1> <topic2> ..\n')
        sys.stderr.write(' describe_cluster\n')
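
For reviewers, a minimal sketch of how the renamed option reads from the Python Admin API after this series. It is illustrative only: the broker address is a placeholder, and it simply exercises the `types`/`type` names introduced above.

    from confluent_kafka import ConsumerGroupState, ConsumerGroupType
    from confluent_kafka.admin import AdminClient

    admin_client = AdminClient({"bootstrap.servers": "localhost:9092"})

    # 'types' (formerly 'group_types') takes a set of ConsumerGroupType values,
    # analogous to the existing 'states' set.
    future = admin_client.list_consumer_groups(
        request_timeout=10,
        states={ConsumerGroupState.STABLE},
        types={ConsumerGroupType.CLASSIC})
    result = future.result()
    for group in result.valid:
        # ConsumerGroupListing now exposes 'type' instead of 'group_type'.
        print(group.group_id, group.state, group.type)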
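
Likewise, the reworked argparse-based parsing in examples/adminapi.py would be exercised like this (hypothetical bootstrap server; state and type names must match the enum member names, comma-separated):

    $ python examples/adminapi.py localhost:9092 list_consumer_groups -states STABLE,EMPTY -types CLASSIC,CONSUMER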