@point3/node-rdkafka 3.6.0-1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +20 -0
- package/README.md +636 -0
- package/binding.gyp +154 -0
- package/deps/librdkafka/.clang-format +136 -0
- package/deps/librdkafka/.clang-format-cpp +103 -0
- package/deps/librdkafka/.dir-locals.el +10 -0
- package/deps/librdkafka/.formatignore +33 -0
- package/deps/librdkafka/.gdbmacros +19 -0
- package/deps/librdkafka/.github/CODEOWNERS +1 -0
- package/deps/librdkafka/.github/ISSUE_TEMPLATE +34 -0
- package/deps/librdkafka/.semaphore/run-all-tests.yml +77 -0
- package/deps/librdkafka/.semaphore/semaphore-integration.yml +250 -0
- package/deps/librdkafka/.semaphore/semaphore.yml +378 -0
- package/deps/librdkafka/.semaphore/verify-linux-packages.yml +41 -0
- package/deps/librdkafka/CHANGELOG.md +2208 -0
- package/deps/librdkafka/CMakeLists.txt +291 -0
- package/deps/librdkafka/CODE_OF_CONDUCT.md +46 -0
- package/deps/librdkafka/CONFIGURATION.md +209 -0
- package/deps/librdkafka/CONTRIBUTING.md +431 -0
- package/deps/librdkafka/Doxyfile +2375 -0
- package/deps/librdkafka/INTRODUCTION.md +2481 -0
- package/deps/librdkafka/LICENSE +26 -0
- package/deps/librdkafka/LICENSE.cjson +22 -0
- package/deps/librdkafka/LICENSE.crc32c +28 -0
- package/deps/librdkafka/LICENSE.fnv1a +18 -0
- package/deps/librdkafka/LICENSE.hdrhistogram +27 -0
- package/deps/librdkafka/LICENSE.lz4 +26 -0
- package/deps/librdkafka/LICENSE.murmur2 +25 -0
- package/deps/librdkafka/LICENSE.nanopb +22 -0
- package/deps/librdkafka/LICENSE.opentelemetry +203 -0
- package/deps/librdkafka/LICENSE.pycrc +23 -0
- package/deps/librdkafka/LICENSE.queue +31 -0
- package/deps/librdkafka/LICENSE.regexp +5 -0
- package/deps/librdkafka/LICENSE.snappy +36 -0
- package/deps/librdkafka/LICENSE.tinycthread +26 -0
- package/deps/librdkafka/LICENSE.wingetopt +49 -0
- package/deps/librdkafka/LICENSES.txt +625 -0
- package/deps/librdkafka/Makefile +125 -0
- package/deps/librdkafka/README.md +199 -0
- package/deps/librdkafka/README.win32 +26 -0
- package/deps/librdkafka/STATISTICS.md +624 -0
- package/deps/librdkafka/configure +214 -0
- package/deps/librdkafka/configure.self +331 -0
- package/deps/librdkafka/debian/changelog +111 -0
- package/deps/librdkafka/debian/compat +1 -0
- package/deps/librdkafka/debian/control +71 -0
- package/deps/librdkafka/debian/copyright +99 -0
- package/deps/librdkafka/debian/gbp.conf +9 -0
- package/deps/librdkafka/debian/librdkafka++1.install +1 -0
- package/deps/librdkafka/debian/librdkafka-dev.examples +2 -0
- package/deps/librdkafka/debian/librdkafka-dev.install +9 -0
- package/deps/librdkafka/debian/librdkafka1.docs +5 -0
- package/deps/librdkafka/debian/librdkafka1.install +1 -0
- package/deps/librdkafka/debian/librdkafka1.symbols +135 -0
- package/deps/librdkafka/debian/rules +19 -0
- package/deps/librdkafka/debian/source/format +1 -0
- package/deps/librdkafka/debian/watch +2 -0
- package/deps/librdkafka/dev-conf.sh +123 -0
- package/deps/librdkafka/examples/CMakeLists.txt +79 -0
- package/deps/librdkafka/examples/Makefile +167 -0
- package/deps/librdkafka/examples/README.md +42 -0
- package/deps/librdkafka/examples/alter_consumer_group_offsets.c +338 -0
- package/deps/librdkafka/examples/consumer.c +271 -0
- package/deps/librdkafka/examples/delete_records.c +233 -0
- package/deps/librdkafka/examples/describe_cluster.c +322 -0
- package/deps/librdkafka/examples/describe_consumer_groups.c +455 -0
- package/deps/librdkafka/examples/describe_topics.c +427 -0
- package/deps/librdkafka/examples/elect_leaders.c +317 -0
- package/deps/librdkafka/examples/globals.json +11 -0
- package/deps/librdkafka/examples/idempotent_producer.c +344 -0
- package/deps/librdkafka/examples/incremental_alter_configs.c +347 -0
- package/deps/librdkafka/examples/kafkatest_verifiable_client.cpp +945 -0
- package/deps/librdkafka/examples/list_consumer_group_offsets.c +359 -0
- package/deps/librdkafka/examples/list_consumer_groups.c +365 -0
- package/deps/librdkafka/examples/list_offsets.c +327 -0
- package/deps/librdkafka/examples/misc.c +287 -0
- package/deps/librdkafka/examples/openssl_engine_example.cpp +248 -0
- package/deps/librdkafka/examples/producer.c +251 -0
- package/deps/librdkafka/examples/producer.cpp +228 -0
- package/deps/librdkafka/examples/rdkafka_complex_consumer_example.c +617 -0
- package/deps/librdkafka/examples/rdkafka_complex_consumer_example.cpp +467 -0
- package/deps/librdkafka/examples/rdkafka_consume_batch.cpp +264 -0
- package/deps/librdkafka/examples/rdkafka_example.c +853 -0
- package/deps/librdkafka/examples/rdkafka_example.cpp +679 -0
- package/deps/librdkafka/examples/rdkafka_performance.c +1781 -0
- package/deps/librdkafka/examples/transactions-older-broker.c +668 -0
- package/deps/librdkafka/examples/transactions.c +665 -0
- package/deps/librdkafka/examples/user_scram.c +491 -0
- package/deps/librdkafka/examples/win_ssl_cert_store.cpp +396 -0
- package/deps/librdkafka/lds-gen.py +73 -0
- package/deps/librdkafka/mainpage.doxy +40 -0
- package/deps/librdkafka/mklove/Makefile.base +329 -0
- package/deps/librdkafka/mklove/modules/configure.atomics +144 -0
- package/deps/librdkafka/mklove/modules/configure.base +2484 -0
- package/deps/librdkafka/mklove/modules/configure.builtin +70 -0
- package/deps/librdkafka/mklove/modules/configure.cc +186 -0
- package/deps/librdkafka/mklove/modules/configure.cxx +8 -0
- package/deps/librdkafka/mklove/modules/configure.fileversion +65 -0
- package/deps/librdkafka/mklove/modules/configure.gitversion +29 -0
- package/deps/librdkafka/mklove/modules/configure.good_cflags +18 -0
- package/deps/librdkafka/mklove/modules/configure.host +132 -0
- package/deps/librdkafka/mklove/modules/configure.lib +49 -0
- package/deps/librdkafka/mklove/modules/configure.libcurl +99 -0
- package/deps/librdkafka/mklove/modules/configure.libsasl2 +36 -0
- package/deps/librdkafka/mklove/modules/configure.libssl +147 -0
- package/deps/librdkafka/mklove/modules/configure.libzstd +58 -0
- package/deps/librdkafka/mklove/modules/configure.parseversion +95 -0
- package/deps/librdkafka/mklove/modules/configure.pic +16 -0
- package/deps/librdkafka/mklove/modules/configure.socket +20 -0
- package/deps/librdkafka/mklove/modules/configure.zlib +61 -0
- package/deps/librdkafka/mklove/modules/patches/README.md +8 -0
- package/deps/librdkafka/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch +11 -0
- package/deps/librdkafka/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch +56 -0
- package/deps/librdkafka/packaging/RELEASE.md +319 -0
- package/deps/librdkafka/packaging/alpine/build-alpine.sh +38 -0
- package/deps/librdkafka/packaging/archlinux/PKGBUILD +30 -0
- package/deps/librdkafka/packaging/cmake/Config.cmake.in +37 -0
- package/deps/librdkafka/packaging/cmake/Modules/FindLZ4.cmake +38 -0
- package/deps/librdkafka/packaging/cmake/Modules/FindZSTD.cmake +27 -0
- package/deps/librdkafka/packaging/cmake/Modules/LICENSE.FindZstd +178 -0
- package/deps/librdkafka/packaging/cmake/README.md +38 -0
- package/deps/librdkafka/packaging/cmake/config.h.in +52 -0
- package/deps/librdkafka/packaging/cmake/parseversion.cmake +60 -0
- package/deps/librdkafka/packaging/cmake/rdkafka.pc.in +12 -0
- package/deps/librdkafka/packaging/cmake/try_compile/atomic_32_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/atomic_64_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/c11threads_test.c +14 -0
- package/deps/librdkafka/packaging/cmake/try_compile/crc32c_hw_test.c +27 -0
- package/deps/librdkafka/packaging/cmake/try_compile/dlopen_test.c +11 -0
- package/deps/librdkafka/packaging/cmake/try_compile/libsasl2_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_darwin_test.c +6 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_freebsd_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_gnu_test.c +5 -0
- package/deps/librdkafka/packaging/cmake/try_compile/rand_r_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/rdkafka_setup.cmake +122 -0
- package/deps/librdkafka/packaging/cmake/try_compile/regex_test.c +10 -0
- package/deps/librdkafka/packaging/cmake/try_compile/strndup_test.c +5 -0
- package/deps/librdkafka/packaging/cmake/try_compile/sync_32_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/sync_64_test.c +8 -0
- package/deps/librdkafka/packaging/cp/README.md +16 -0
- package/deps/librdkafka/packaging/cp/check_features.c +72 -0
- package/deps/librdkafka/packaging/cp/verify-deb.sh +33 -0
- package/deps/librdkafka/packaging/cp/verify-packages.sh +69 -0
- package/deps/librdkafka/packaging/cp/verify-rpm.sh +32 -0
- package/deps/librdkafka/packaging/debian/changelog +66 -0
- package/deps/librdkafka/packaging/debian/compat +1 -0
- package/deps/librdkafka/packaging/debian/control +49 -0
- package/deps/librdkafka/packaging/debian/copyright +84 -0
- package/deps/librdkafka/packaging/debian/docs +5 -0
- package/deps/librdkafka/packaging/debian/gbp.conf +9 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.dirs +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.examples +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.install +6 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.substvars +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka.dsc +16 -0
- package/deps/librdkafka/packaging/debian/librdkafka1-dbg.substvars +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.dirs +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.install +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.postinst.debhelper +5 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.postrm.debhelper +5 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.symbols +64 -0
- package/deps/librdkafka/packaging/debian/rules +19 -0
- package/deps/librdkafka/packaging/debian/source/format +1 -0
- package/deps/librdkafka/packaging/debian/watch +2 -0
- package/deps/librdkafka/packaging/get_version.py +21 -0
- package/deps/librdkafka/packaging/homebrew/README.md +15 -0
- package/deps/librdkafka/packaging/homebrew/brew-update-pr.sh +31 -0
- package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw-static.sh +52 -0
- package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw.sh +21 -0
- package/deps/librdkafka/packaging/mingw-w64/export-variables.sh +13 -0
- package/deps/librdkafka/packaging/mingw-w64/run-tests.sh +6 -0
- package/deps/librdkafka/packaging/mingw-w64/semaphoreci-build.sh +38 -0
- package/deps/librdkafka/packaging/nuget/README.md +84 -0
- package/deps/librdkafka/packaging/nuget/artifact.py +177 -0
- package/deps/librdkafka/packaging/nuget/cleanup-s3.py +143 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip +0 -0
- package/deps/librdkafka/packaging/nuget/nuget.sh +21 -0
- package/deps/librdkafka/packaging/nuget/nugetpackage.py +278 -0
- package/deps/librdkafka/packaging/nuget/packaging.py +448 -0
- package/deps/librdkafka/packaging/nuget/push-to-nuget.sh +21 -0
- package/deps/librdkafka/packaging/nuget/release.py +167 -0
- package/deps/librdkafka/packaging/nuget/requirements.txt +3 -0
- package/deps/librdkafka/packaging/nuget/staticpackage.py +178 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.nuspec +21 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.props +18 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.targets +19 -0
- package/deps/librdkafka/packaging/nuget/zfile/__init__.py +0 -0
- package/deps/librdkafka/packaging/nuget/zfile/zfile.py +98 -0
- package/deps/librdkafka/packaging/rpm/Makefile +92 -0
- package/deps/librdkafka/packaging/rpm/README.md +23 -0
- package/deps/librdkafka/packaging/rpm/el7-x86_64.cfg +40 -0
- package/deps/librdkafka/packaging/rpm/librdkafka.spec +118 -0
- package/deps/librdkafka/packaging/rpm/mock-on-docker.sh +96 -0
- package/deps/librdkafka/packaging/rpm/tests/Makefile +25 -0
- package/deps/librdkafka/packaging/rpm/tests/README.md +8 -0
- package/deps/librdkafka/packaging/rpm/tests/run-test.sh +42 -0
- package/deps/librdkafka/packaging/rpm/tests/test-on-docker.sh +56 -0
- package/deps/librdkafka/packaging/rpm/tests/test.c +77 -0
- package/deps/librdkafka/packaging/rpm/tests/test.cpp +34 -0
- package/deps/librdkafka/packaging/tools/Dockerfile +31 -0
- package/deps/librdkafka/packaging/tools/build-configurations-checks.sh +12 -0
- package/deps/librdkafka/packaging/tools/build-deb-package.sh +64 -0
- package/deps/librdkafka/packaging/tools/build-debian.sh +65 -0
- package/deps/librdkafka/packaging/tools/build-manylinux.sh +68 -0
- package/deps/librdkafka/packaging/tools/build-release-artifacts.sh +139 -0
- package/deps/librdkafka/packaging/tools/distro-build.sh +38 -0
- package/deps/librdkafka/packaging/tools/gh-release-checksums.py +39 -0
- package/deps/librdkafka/packaging/tools/rdutcoverage.sh +25 -0
- package/deps/librdkafka/packaging/tools/requirements.txt +2 -0
- package/deps/librdkafka/packaging/tools/run-in-docker.sh +28 -0
- package/deps/librdkafka/packaging/tools/run-integration-tests.sh +31 -0
- package/deps/librdkafka/packaging/tools/run-style-check.sh +4 -0
- package/deps/librdkafka/packaging/tools/style-format.sh +149 -0
- package/deps/librdkafka/packaging/tools/update_rpcs_max_versions.py +100 -0
- package/deps/librdkafka/service.yml +172 -0
- package/deps/librdkafka/src/CMakeLists.txt +374 -0
- package/deps/librdkafka/src/Makefile +103 -0
- package/deps/librdkafka/src/README.lz4.md +30 -0
- package/deps/librdkafka/src/cJSON.c +2834 -0
- package/deps/librdkafka/src/cJSON.h +398 -0
- package/deps/librdkafka/src/crc32c.c +430 -0
- package/deps/librdkafka/src/crc32c.h +38 -0
- package/deps/librdkafka/src/generate_proto.sh +66 -0
- package/deps/librdkafka/src/librdkafka_cgrp_synch.png +0 -0
- package/deps/librdkafka/src/lz4.c +2727 -0
- package/deps/librdkafka/src/lz4.h +842 -0
- package/deps/librdkafka/src/lz4frame.c +2078 -0
- package/deps/librdkafka/src/lz4frame.h +692 -0
- package/deps/librdkafka/src/lz4frame_static.h +47 -0
- package/deps/librdkafka/src/lz4hc.c +1631 -0
- package/deps/librdkafka/src/lz4hc.h +413 -0
- package/deps/librdkafka/src/nanopb/pb.h +917 -0
- package/deps/librdkafka/src/nanopb/pb_common.c +388 -0
- package/deps/librdkafka/src/nanopb/pb_common.h +49 -0
- package/deps/librdkafka/src/nanopb/pb_decode.c +1727 -0
- package/deps/librdkafka/src/nanopb/pb_decode.h +193 -0
- package/deps/librdkafka/src/nanopb/pb_encode.c +1000 -0
- package/deps/librdkafka/src/nanopb/pb_encode.h +185 -0
- package/deps/librdkafka/src/opentelemetry/common.pb.c +32 -0
- package/deps/librdkafka/src/opentelemetry/common.pb.h +170 -0
- package/deps/librdkafka/src/opentelemetry/metrics.options +2 -0
- package/deps/librdkafka/src/opentelemetry/metrics.pb.c +67 -0
- package/deps/librdkafka/src/opentelemetry/metrics.pb.h +966 -0
- package/deps/librdkafka/src/opentelemetry/resource.pb.c +12 -0
- package/deps/librdkafka/src/opentelemetry/resource.pb.h +58 -0
- package/deps/librdkafka/src/queue.h +850 -0
- package/deps/librdkafka/src/rd.h +584 -0
- package/deps/librdkafka/src/rdaddr.c +255 -0
- package/deps/librdkafka/src/rdaddr.h +202 -0
- package/deps/librdkafka/src/rdatomic.h +230 -0
- package/deps/librdkafka/src/rdavg.h +260 -0
- package/deps/librdkafka/src/rdavl.c +210 -0
- package/deps/librdkafka/src/rdavl.h +250 -0
- package/deps/librdkafka/src/rdbase64.c +200 -0
- package/deps/librdkafka/src/rdbase64.h +43 -0
- package/deps/librdkafka/src/rdbuf.c +1884 -0
- package/deps/librdkafka/src/rdbuf.h +375 -0
- package/deps/librdkafka/src/rdcrc32.c +114 -0
- package/deps/librdkafka/src/rdcrc32.h +170 -0
- package/deps/librdkafka/src/rddl.c +179 -0
- package/deps/librdkafka/src/rddl.h +43 -0
- package/deps/librdkafka/src/rdendian.h +175 -0
- package/deps/librdkafka/src/rdfloat.h +67 -0
- package/deps/librdkafka/src/rdfnv1a.c +113 -0
- package/deps/librdkafka/src/rdfnv1a.h +35 -0
- package/deps/librdkafka/src/rdgz.c +120 -0
- package/deps/librdkafka/src/rdgz.h +46 -0
- package/deps/librdkafka/src/rdhdrhistogram.c +721 -0
- package/deps/librdkafka/src/rdhdrhistogram.h +87 -0
- package/deps/librdkafka/src/rdhttp.c +830 -0
- package/deps/librdkafka/src/rdhttp.h +101 -0
- package/deps/librdkafka/src/rdinterval.h +177 -0
- package/deps/librdkafka/src/rdkafka.c +5505 -0
- package/deps/librdkafka/src/rdkafka.h +10686 -0
- package/deps/librdkafka/src/rdkafka_admin.c +9794 -0
- package/deps/librdkafka/src/rdkafka_admin.h +661 -0
- package/deps/librdkafka/src/rdkafka_assignment.c +1010 -0
- package/deps/librdkafka/src/rdkafka_assignment.h +73 -0
- package/deps/librdkafka/src/rdkafka_assignor.c +1786 -0
- package/deps/librdkafka/src/rdkafka_assignor.h +402 -0
- package/deps/librdkafka/src/rdkafka_aux.c +409 -0
- package/deps/librdkafka/src/rdkafka_aux.h +174 -0
- package/deps/librdkafka/src/rdkafka_background.c +221 -0
- package/deps/librdkafka/src/rdkafka_broker.c +6337 -0
- package/deps/librdkafka/src/rdkafka_broker.h +744 -0
- package/deps/librdkafka/src/rdkafka_buf.c +543 -0
- package/deps/librdkafka/src/rdkafka_buf.h +1525 -0
- package/deps/librdkafka/src/rdkafka_cert.c +576 -0
- package/deps/librdkafka/src/rdkafka_cert.h +62 -0
- package/deps/librdkafka/src/rdkafka_cgrp.c +7587 -0
- package/deps/librdkafka/src/rdkafka_cgrp.h +477 -0
- package/deps/librdkafka/src/rdkafka_conf.c +4880 -0
- package/deps/librdkafka/src/rdkafka_conf.h +732 -0
- package/deps/librdkafka/src/rdkafka_confval.h +97 -0
- package/deps/librdkafka/src/rdkafka_coord.c +623 -0
- package/deps/librdkafka/src/rdkafka_coord.h +132 -0
- package/deps/librdkafka/src/rdkafka_error.c +228 -0
- package/deps/librdkafka/src/rdkafka_error.h +80 -0
- package/deps/librdkafka/src/rdkafka_event.c +502 -0
- package/deps/librdkafka/src/rdkafka_event.h +126 -0
- package/deps/librdkafka/src/rdkafka_feature.c +898 -0
- package/deps/librdkafka/src/rdkafka_feature.h +104 -0
- package/deps/librdkafka/src/rdkafka_fetcher.c +1422 -0
- package/deps/librdkafka/src/rdkafka_fetcher.h +44 -0
- package/deps/librdkafka/src/rdkafka_header.c +220 -0
- package/deps/librdkafka/src/rdkafka_header.h +76 -0
- package/deps/librdkafka/src/rdkafka_idempotence.c +807 -0
- package/deps/librdkafka/src/rdkafka_idempotence.h +144 -0
- package/deps/librdkafka/src/rdkafka_int.h +1260 -0
- package/deps/librdkafka/src/rdkafka_interceptor.c +819 -0
- package/deps/librdkafka/src/rdkafka_interceptor.h +104 -0
- package/deps/librdkafka/src/rdkafka_lz4.c +450 -0
- package/deps/librdkafka/src/rdkafka_lz4.h +49 -0
- package/deps/librdkafka/src/rdkafka_metadata.c +2209 -0
- package/deps/librdkafka/src/rdkafka_metadata.h +345 -0
- package/deps/librdkafka/src/rdkafka_metadata_cache.c +1183 -0
- package/deps/librdkafka/src/rdkafka_mock.c +3661 -0
- package/deps/librdkafka/src/rdkafka_mock.h +610 -0
- package/deps/librdkafka/src/rdkafka_mock_cgrp.c +1876 -0
- package/deps/librdkafka/src/rdkafka_mock_handlers.c +3113 -0
- package/deps/librdkafka/src/rdkafka_mock_int.h +710 -0
- package/deps/librdkafka/src/rdkafka_msg.c +2589 -0
- package/deps/librdkafka/src/rdkafka_msg.h +614 -0
- package/deps/librdkafka/src/rdkafka_msgbatch.h +62 -0
- package/deps/librdkafka/src/rdkafka_msgset.h +98 -0
- package/deps/librdkafka/src/rdkafka_msgset_reader.c +1806 -0
- package/deps/librdkafka/src/rdkafka_msgset_writer.c +1474 -0
- package/deps/librdkafka/src/rdkafka_offset.c +1565 -0
- package/deps/librdkafka/src/rdkafka_offset.h +150 -0
- package/deps/librdkafka/src/rdkafka_op.c +997 -0
- package/deps/librdkafka/src/rdkafka_op.h +858 -0
- package/deps/librdkafka/src/rdkafka_partition.c +4896 -0
- package/deps/librdkafka/src/rdkafka_partition.h +1182 -0
- package/deps/librdkafka/src/rdkafka_pattern.c +228 -0
- package/deps/librdkafka/src/rdkafka_pattern.h +70 -0
- package/deps/librdkafka/src/rdkafka_plugin.c +213 -0
- package/deps/librdkafka/src/rdkafka_plugin.h +41 -0
- package/deps/librdkafka/src/rdkafka_proto.h +736 -0
- package/deps/librdkafka/src/rdkafka_protocol.h +128 -0
- package/deps/librdkafka/src/rdkafka_queue.c +1230 -0
- package/deps/librdkafka/src/rdkafka_queue.h +1220 -0
- package/deps/librdkafka/src/rdkafka_range_assignor.c +1748 -0
- package/deps/librdkafka/src/rdkafka_request.c +7089 -0
- package/deps/librdkafka/src/rdkafka_request.h +732 -0
- package/deps/librdkafka/src/rdkafka_roundrobin_assignor.c +123 -0
- package/deps/librdkafka/src/rdkafka_sasl.c +530 -0
- package/deps/librdkafka/src/rdkafka_sasl.h +63 -0
- package/deps/librdkafka/src/rdkafka_sasl_cyrus.c +722 -0
- package/deps/librdkafka/src/rdkafka_sasl_int.h +89 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.c +1833 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.h +52 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.c +1666 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.h +47 -0
- package/deps/librdkafka/src/rdkafka_sasl_plain.c +142 -0
- package/deps/librdkafka/src/rdkafka_sasl_scram.c +858 -0
- package/deps/librdkafka/src/rdkafka_sasl_win32.c +550 -0
- package/deps/librdkafka/src/rdkafka_ssl.c +2129 -0
- package/deps/librdkafka/src/rdkafka_ssl.h +86 -0
- package/deps/librdkafka/src/rdkafka_sticky_assignor.c +4785 -0
- package/deps/librdkafka/src/rdkafka_subscription.c +278 -0
- package/deps/librdkafka/src/rdkafka_telemetry.c +760 -0
- package/deps/librdkafka/src/rdkafka_telemetry.h +52 -0
- package/deps/librdkafka/src/rdkafka_telemetry_decode.c +1053 -0
- package/deps/librdkafka/src/rdkafka_telemetry_decode.h +59 -0
- package/deps/librdkafka/src/rdkafka_telemetry_encode.c +997 -0
- package/deps/librdkafka/src/rdkafka_telemetry_encode.h +301 -0
- package/deps/librdkafka/src/rdkafka_timer.c +402 -0
- package/deps/librdkafka/src/rdkafka_timer.h +117 -0
- package/deps/librdkafka/src/rdkafka_topic.c +2161 -0
- package/deps/librdkafka/src/rdkafka_topic.h +334 -0
- package/deps/librdkafka/src/rdkafka_transport.c +1309 -0
- package/deps/librdkafka/src/rdkafka_transport.h +99 -0
- package/deps/librdkafka/src/rdkafka_transport_int.h +100 -0
- package/deps/librdkafka/src/rdkafka_txnmgr.c +3256 -0
- package/deps/librdkafka/src/rdkafka_txnmgr.h +171 -0
- package/deps/librdkafka/src/rdkafka_zstd.c +226 -0
- package/deps/librdkafka/src/rdkafka_zstd.h +57 -0
- package/deps/librdkafka/src/rdlist.c +576 -0
- package/deps/librdkafka/src/rdlist.h +434 -0
- package/deps/librdkafka/src/rdlog.c +89 -0
- package/deps/librdkafka/src/rdlog.h +41 -0
- package/deps/librdkafka/src/rdmap.c +508 -0
- package/deps/librdkafka/src/rdmap.h +492 -0
- package/deps/librdkafka/src/rdmurmur2.c +167 -0
- package/deps/librdkafka/src/rdmurmur2.h +35 -0
- package/deps/librdkafka/src/rdports.c +61 -0
- package/deps/librdkafka/src/rdports.h +38 -0
- package/deps/librdkafka/src/rdposix.h +250 -0
- package/deps/librdkafka/src/rdrand.c +80 -0
- package/deps/librdkafka/src/rdrand.h +43 -0
- package/deps/librdkafka/src/rdregex.c +156 -0
- package/deps/librdkafka/src/rdregex.h +43 -0
- package/deps/librdkafka/src/rdsignal.h +57 -0
- package/deps/librdkafka/src/rdstring.c +645 -0
- package/deps/librdkafka/src/rdstring.h +98 -0
- package/deps/librdkafka/src/rdsysqueue.h +404 -0
- package/deps/librdkafka/src/rdtime.h +356 -0
- package/deps/librdkafka/src/rdtypes.h +86 -0
- package/deps/librdkafka/src/rdunittest.c +549 -0
- package/deps/librdkafka/src/rdunittest.h +232 -0
- package/deps/librdkafka/src/rdvarint.c +134 -0
- package/deps/librdkafka/src/rdvarint.h +165 -0
- package/deps/librdkafka/src/rdwin32.h +382 -0
- package/deps/librdkafka/src/rdxxhash.c +1030 -0
- package/deps/librdkafka/src/rdxxhash.h +328 -0
- package/deps/librdkafka/src/regexp.c +1352 -0
- package/deps/librdkafka/src/regexp.h +41 -0
- package/deps/librdkafka/src/snappy.c +1866 -0
- package/deps/librdkafka/src/snappy.h +62 -0
- package/deps/librdkafka/src/snappy_compat.h +138 -0
- package/deps/librdkafka/src/statistics_schema.json +444 -0
- package/deps/librdkafka/src/tinycthread.c +932 -0
- package/deps/librdkafka/src/tinycthread.h +503 -0
- package/deps/librdkafka/src/tinycthread_extra.c +199 -0
- package/deps/librdkafka/src/tinycthread_extra.h +212 -0
- package/deps/librdkafka/src/win32_config.h +58 -0
- package/deps/librdkafka/src-cpp/CMakeLists.txt +90 -0
- package/deps/librdkafka/src-cpp/ConfImpl.cpp +84 -0
- package/deps/librdkafka/src-cpp/ConsumerImpl.cpp +244 -0
- package/deps/librdkafka/src-cpp/HandleImpl.cpp +436 -0
- package/deps/librdkafka/src-cpp/HeadersImpl.cpp +48 -0
- package/deps/librdkafka/src-cpp/KafkaConsumerImpl.cpp +296 -0
- package/deps/librdkafka/src-cpp/Makefile +55 -0
- package/deps/librdkafka/src-cpp/MessageImpl.cpp +38 -0
- package/deps/librdkafka/src-cpp/MetadataImpl.cpp +170 -0
- package/deps/librdkafka/src-cpp/ProducerImpl.cpp +197 -0
- package/deps/librdkafka/src-cpp/QueueImpl.cpp +70 -0
- package/deps/librdkafka/src-cpp/README.md +16 -0
- package/deps/librdkafka/src-cpp/RdKafka.cpp +59 -0
- package/deps/librdkafka/src-cpp/TopicImpl.cpp +124 -0
- package/deps/librdkafka/src-cpp/TopicPartitionImpl.cpp +57 -0
- package/deps/librdkafka/src-cpp/rdkafkacpp.h +3797 -0
- package/deps/librdkafka/src-cpp/rdkafkacpp_int.h +1641 -0
- package/deps/librdkafka/tests/0000-unittests.c +72 -0
- package/deps/librdkafka/tests/0001-multiobj.c +102 -0
- package/deps/librdkafka/tests/0002-unkpart.c +244 -0
- package/deps/librdkafka/tests/0003-msgmaxsize.c +173 -0
- package/deps/librdkafka/tests/0004-conf.c +934 -0
- package/deps/librdkafka/tests/0005-order.c +133 -0
- package/deps/librdkafka/tests/0006-symbols.c +163 -0
- package/deps/librdkafka/tests/0007-autotopic.c +136 -0
- package/deps/librdkafka/tests/0008-reqacks.c +179 -0
- package/deps/librdkafka/tests/0009-mock_cluster.c +97 -0
- package/deps/librdkafka/tests/0011-produce_batch.c +753 -0
- package/deps/librdkafka/tests/0012-produce_consume.c +537 -0
- package/deps/librdkafka/tests/0013-null-msgs.c +473 -0
- package/deps/librdkafka/tests/0014-reconsume-191.c +512 -0
- package/deps/librdkafka/tests/0015-offset_seeks.c +172 -0
- package/deps/librdkafka/tests/0016-client_swname.c +181 -0
- package/deps/librdkafka/tests/0017-compression.c +140 -0
- package/deps/librdkafka/tests/0018-cgrp_term.c +338 -0
- package/deps/librdkafka/tests/0019-list_groups.c +289 -0
- package/deps/librdkafka/tests/0020-destroy_hang.c +162 -0
- package/deps/librdkafka/tests/0021-rkt_destroy.c +72 -0
- package/deps/librdkafka/tests/0022-consume_batch.c +279 -0
- package/deps/librdkafka/tests/0025-timers.c +147 -0
- package/deps/librdkafka/tests/0026-consume_pause.c +547 -0
- package/deps/librdkafka/tests/0028-long_topicnames.c +79 -0
- package/deps/librdkafka/tests/0029-assign_offset.c +202 -0
- package/deps/librdkafka/tests/0030-offset_commit.c +589 -0
- package/deps/librdkafka/tests/0031-get_offsets.c +235 -0
- package/deps/librdkafka/tests/0033-regex_subscribe.c +536 -0
- package/deps/librdkafka/tests/0034-offset_reset.c +398 -0
- package/deps/librdkafka/tests/0035-api_version.c +73 -0
- package/deps/librdkafka/tests/0036-partial_fetch.c +87 -0
- package/deps/librdkafka/tests/0037-destroy_hang_local.c +85 -0
- package/deps/librdkafka/tests/0038-performance.c +121 -0
- package/deps/librdkafka/tests/0039-event.c +284 -0
- package/deps/librdkafka/tests/0040-io_event.c +257 -0
- package/deps/librdkafka/tests/0041-fetch_max_bytes.c +97 -0
- package/deps/librdkafka/tests/0042-many_topics.c +252 -0
- package/deps/librdkafka/tests/0043-no_connection.c +77 -0
- package/deps/librdkafka/tests/0044-partition_cnt.c +94 -0
- package/deps/librdkafka/tests/0045-subscribe_update.c +1010 -0
- package/deps/librdkafka/tests/0046-rkt_cache.c +65 -0
- package/deps/librdkafka/tests/0047-partial_buf_tmout.c +98 -0
- package/deps/librdkafka/tests/0048-partitioner.c +283 -0
- package/deps/librdkafka/tests/0049-consume_conn_close.c +162 -0
- package/deps/librdkafka/tests/0050-subscribe_adds.c +145 -0
- package/deps/librdkafka/tests/0051-assign_adds.c +126 -0
- package/deps/librdkafka/tests/0052-msg_timestamps.c +238 -0
- package/deps/librdkafka/tests/0053-stats_cb.cpp +527 -0
- package/deps/librdkafka/tests/0054-offset_time.cpp +236 -0
- package/deps/librdkafka/tests/0055-producer_latency.c +539 -0
- package/deps/librdkafka/tests/0056-balanced_group_mt.c +315 -0
- package/deps/librdkafka/tests/0057-invalid_topic.cpp +112 -0
- package/deps/librdkafka/tests/0058-log.cpp +123 -0
- package/deps/librdkafka/tests/0059-bsearch.cpp +241 -0
- package/deps/librdkafka/tests/0060-op_prio.cpp +163 -0
- package/deps/librdkafka/tests/0061-consumer_lag.cpp +295 -0
- package/deps/librdkafka/tests/0062-stats_event.c +126 -0
- package/deps/librdkafka/tests/0063-clusterid.cpp +180 -0
- package/deps/librdkafka/tests/0064-interceptors.c +481 -0
- package/deps/librdkafka/tests/0065-yield.cpp +140 -0
- package/deps/librdkafka/tests/0066-plugins.cpp +129 -0
- package/deps/librdkafka/tests/0067-empty_topic.cpp +151 -0
- package/deps/librdkafka/tests/0068-produce_timeout.c +136 -0
- package/deps/librdkafka/tests/0069-consumer_add_parts.c +119 -0
- package/deps/librdkafka/tests/0070-null_empty.cpp +197 -0
- package/deps/librdkafka/tests/0072-headers_ut.c +448 -0
- package/deps/librdkafka/tests/0073-headers.c +381 -0
- package/deps/librdkafka/tests/0074-producev.c +87 -0
- package/deps/librdkafka/tests/0075-retry.c +290 -0
- package/deps/librdkafka/tests/0076-produce_retry.c +452 -0
- package/deps/librdkafka/tests/0077-compaction.c +363 -0
- package/deps/librdkafka/tests/0078-c_from_cpp.cpp +96 -0
- package/deps/librdkafka/tests/0079-fork.c +93 -0
- package/deps/librdkafka/tests/0080-admin_ut.c +3095 -0
- package/deps/librdkafka/tests/0081-admin.c +5633 -0
- package/deps/librdkafka/tests/0082-fetch_max_bytes.cpp +137 -0
- package/deps/librdkafka/tests/0083-cb_event.c +233 -0
- package/deps/librdkafka/tests/0084-destroy_flags.c +208 -0
- package/deps/librdkafka/tests/0085-headers.cpp +392 -0
- package/deps/librdkafka/tests/0086-purge.c +368 -0
- package/deps/librdkafka/tests/0088-produce_metadata_timeout.c +162 -0
- package/deps/librdkafka/tests/0089-max_poll_interval.c +511 -0
- package/deps/librdkafka/tests/0090-idempotence.c +171 -0
- package/deps/librdkafka/tests/0091-max_poll_interval_timeout.c +295 -0
- package/deps/librdkafka/tests/0092-mixed_msgver.c +103 -0
- package/deps/librdkafka/tests/0093-holb.c +200 -0
- package/deps/librdkafka/tests/0094-idempotence_msg_timeout.c +231 -0
- package/deps/librdkafka/tests/0095-all_brokers_down.cpp +122 -0
- package/deps/librdkafka/tests/0097-ssl_verify.cpp +658 -0
- package/deps/librdkafka/tests/0098-consumer-txn.cpp +1218 -0
- package/deps/librdkafka/tests/0099-commit_metadata.c +194 -0
- package/deps/librdkafka/tests/0100-thread_interceptors.cpp +195 -0
- package/deps/librdkafka/tests/0101-fetch-from-follower.cpp +446 -0
- package/deps/librdkafka/tests/0102-static_group_rebalance.c +836 -0
- package/deps/librdkafka/tests/0103-transactions.c +1383 -0
- package/deps/librdkafka/tests/0104-fetch_from_follower_mock.c +625 -0
- package/deps/librdkafka/tests/0105-transactions_mock.c +3930 -0
- package/deps/librdkafka/tests/0106-cgrp_sess_timeout.c +318 -0
- package/deps/librdkafka/tests/0107-topic_recreate.c +259 -0
- package/deps/librdkafka/tests/0109-auto_create_topics.cpp +278 -0
- package/deps/librdkafka/tests/0110-batch_size.cpp +182 -0
- package/deps/librdkafka/tests/0111-delay_create_topics.cpp +127 -0
- package/deps/librdkafka/tests/0112-assign_unknown_part.c +87 -0
- package/deps/librdkafka/tests/0113-cooperative_rebalance.cpp +3473 -0
- package/deps/librdkafka/tests/0114-sticky_partitioning.cpp +176 -0
- package/deps/librdkafka/tests/0115-producer_auth.cpp +182 -0
- package/deps/librdkafka/tests/0116-kafkaconsumer_close.cpp +216 -0
- package/deps/librdkafka/tests/0117-mock_errors.c +331 -0
- package/deps/librdkafka/tests/0118-commit_rebalance.c +154 -0
- package/deps/librdkafka/tests/0119-consumer_auth.cpp +167 -0
- package/deps/librdkafka/tests/0120-asymmetric_subscription.c +185 -0
- package/deps/librdkafka/tests/0121-clusterid.c +115 -0
- package/deps/librdkafka/tests/0122-buffer_cleaning_after_rebalance.c +227 -0
- package/deps/librdkafka/tests/0123-connections_max_idle.c +98 -0
- package/deps/librdkafka/tests/0124-openssl_invalid_engine.c +69 -0
- package/deps/librdkafka/tests/0125-immediate_flush.c +144 -0
- package/deps/librdkafka/tests/0126-oauthbearer_oidc.c +528 -0
- package/deps/librdkafka/tests/0127-fetch_queue_backoff.cpp +165 -0
- package/deps/librdkafka/tests/0128-sasl_callback_queue.cpp +125 -0
- package/deps/librdkafka/tests/0129-fetch_aborted_msgs.c +79 -0
- package/deps/librdkafka/tests/0130-store_offsets.c +178 -0
- package/deps/librdkafka/tests/0131-connect_timeout.c +81 -0
- package/deps/librdkafka/tests/0132-strategy_ordering.c +179 -0
- package/deps/librdkafka/tests/0133-ssl_keys.c +150 -0
- package/deps/librdkafka/tests/0134-ssl_provider.c +92 -0
- package/deps/librdkafka/tests/0135-sasl_credentials.cpp +143 -0
- package/deps/librdkafka/tests/0136-resolve_cb.c +181 -0
- package/deps/librdkafka/tests/0137-barrier_batch_consume.c +619 -0
- package/deps/librdkafka/tests/0138-admin_mock.c +281 -0
- package/deps/librdkafka/tests/0139-offset_validation_mock.c +950 -0
- package/deps/librdkafka/tests/0140-commit_metadata.cpp +108 -0
- package/deps/librdkafka/tests/0142-reauthentication.c +515 -0
- package/deps/librdkafka/tests/0143-exponential_backoff_mock.c +552 -0
- package/deps/librdkafka/tests/0144-idempotence_mock.c +373 -0
- package/deps/librdkafka/tests/0145-pause_resume_mock.c +119 -0
- package/deps/librdkafka/tests/0146-metadata_mock.c +505 -0
- package/deps/librdkafka/tests/0147-consumer_group_consumer_mock.c +952 -0
- package/deps/librdkafka/tests/0148-offset_fetch_commit_error_mock.c +563 -0
- package/deps/librdkafka/tests/0149-broker-same-host-port.c +140 -0
- package/deps/librdkafka/tests/0150-telemetry_mock.c +651 -0
- package/deps/librdkafka/tests/0151-purge-brokers.c +566 -0
- package/deps/librdkafka/tests/0152-rebootstrap.c +59 -0
- package/deps/librdkafka/tests/0153-memberid.c +128 -0
- package/deps/librdkafka/tests/1000-unktopic.c +164 -0
- package/deps/librdkafka/tests/8000-idle.cpp +60 -0
- package/deps/librdkafka/tests/8001-fetch_from_follower_mock_manual.c +113 -0
- package/deps/librdkafka/tests/CMakeLists.txt +170 -0
- package/deps/librdkafka/tests/LibrdkafkaTestApp.py +291 -0
- package/deps/librdkafka/tests/Makefile +182 -0
- package/deps/librdkafka/tests/README.md +509 -0
- package/deps/librdkafka/tests/autotest.sh +33 -0
- package/deps/librdkafka/tests/backtrace.gdb +30 -0
- package/deps/librdkafka/tests/broker_version_tests.py +315 -0
- package/deps/librdkafka/tests/buildbox.sh +17 -0
- package/deps/librdkafka/tests/cleanup-checker-tests.sh +20 -0
- package/deps/librdkafka/tests/cluster_testing.py +191 -0
- package/deps/librdkafka/tests/delete-test-topics.sh +56 -0
- package/deps/librdkafka/tests/fixtures/oauthbearer/jwt_assertion_template.json +10 -0
- package/deps/librdkafka/tests/fixtures/ssl/Makefile +8 -0
- package/deps/librdkafka/tests/fixtures/ssl/README.md +13 -0
- package/deps/librdkafka/tests/fixtures/ssl/client.keystore.intermediate.p12 +0 -0
- package/deps/librdkafka/tests/fixtures/ssl/client.keystore.p12 +0 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.intermediate.pem +72 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.pem +50 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.intermediate.key +46 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.key +46 -0
- package/deps/librdkafka/tests/fixtures/ssl/create_keys.sh +168 -0
- package/deps/librdkafka/tests/fuzzers/Makefile +12 -0
- package/deps/librdkafka/tests/fuzzers/README.md +31 -0
- package/deps/librdkafka/tests/fuzzers/fuzz_regex.c +74 -0
- package/deps/librdkafka/tests/fuzzers/helpers.h +90 -0
- package/deps/librdkafka/tests/gen-ssl-certs.sh +165 -0
- package/deps/librdkafka/tests/interactive_broker_version.py +170 -0
- package/deps/librdkafka/tests/interceptor_test/CMakeLists.txt +16 -0
- package/deps/librdkafka/tests/interceptor_test/Makefile +22 -0
- package/deps/librdkafka/tests/interceptor_test/interceptor_test.c +314 -0
- package/deps/librdkafka/tests/interceptor_test/interceptor_test.h +54 -0
- package/deps/librdkafka/tests/java/IncrementalRebalanceCli.java +97 -0
- package/deps/librdkafka/tests/java/Makefile +13 -0
- package/deps/librdkafka/tests/java/Murmur2Cli.java +46 -0
- package/deps/librdkafka/tests/java/README.md +14 -0
- package/deps/librdkafka/tests/java/TransactionProducerCli.java +162 -0
- package/deps/librdkafka/tests/java/run-class.sh +11 -0
- package/deps/librdkafka/tests/librdkafka.suppressions +483 -0
- package/deps/librdkafka/tests/lz4_manual_test.sh +59 -0
- package/deps/librdkafka/tests/multi-broker-version-test.sh +50 -0
- package/deps/librdkafka/tests/parse-refcnt.sh +43 -0
- package/deps/librdkafka/tests/performance_plot.py +115 -0
- package/deps/librdkafka/tests/plugin_test/Makefile +19 -0
- package/deps/librdkafka/tests/plugin_test/plugin_test.c +58 -0
- package/deps/librdkafka/tests/requirements.txt +2 -0
- package/deps/librdkafka/tests/run-all-tests.sh +79 -0
- package/deps/librdkafka/tests/run-consumer-tests.sh +16 -0
- package/deps/librdkafka/tests/run-producer-tests.sh +16 -0
- package/deps/librdkafka/tests/run-test-batches.py +157 -0
- package/deps/librdkafka/tests/run-test.sh +140 -0
- package/deps/librdkafka/tests/rusage.c +249 -0
- package/deps/librdkafka/tests/sasl_test.py +289 -0
- package/deps/librdkafka/tests/scenarios/README.md +6 -0
- package/deps/librdkafka/tests/scenarios/ak23.json +6 -0
- package/deps/librdkafka/tests/scenarios/default.json +5 -0
- package/deps/librdkafka/tests/scenarios/noautocreate.json +5 -0
- package/deps/librdkafka/tests/sockem.c +801 -0
- package/deps/librdkafka/tests/sockem.h +85 -0
- package/deps/librdkafka/tests/sockem_ctrl.c +145 -0
- package/deps/librdkafka/tests/sockem_ctrl.h +61 -0
- package/deps/librdkafka/tests/test.c +7778 -0
- package/deps/librdkafka/tests/test.conf.example +27 -0
- package/deps/librdkafka/tests/test.h +1028 -0
- package/deps/librdkafka/tests/testcpp.cpp +131 -0
- package/deps/librdkafka/tests/testcpp.h +388 -0
- package/deps/librdkafka/tests/testshared.h +416 -0
- package/deps/librdkafka/tests/tools/README.md +4 -0
- package/deps/librdkafka/tests/tools/stats/README.md +21 -0
- package/deps/librdkafka/tests/tools/stats/filter.jq +42 -0
- package/deps/librdkafka/tests/tools/stats/graph.py +150 -0
- package/deps/librdkafka/tests/tools/stats/requirements.txt +3 -0
- package/deps/librdkafka/tests/tools/stats/to_csv.py +124 -0
- package/deps/librdkafka/tests/trivup/trivup-0.14.0.tar.gz +0 -0
- package/deps/librdkafka/tests/until-fail.sh +87 -0
- package/deps/librdkafka/tests/xxxx-assign_partition.c +122 -0
- package/deps/librdkafka/tests/xxxx-metadata.cpp +159 -0
- package/deps/librdkafka/vcpkg.json +23 -0
- package/deps/librdkafka/win32/README.md +5 -0
- package/deps/librdkafka/win32/build-package.bat +3 -0
- package/deps/librdkafka/win32/build.bat +19 -0
- package/deps/librdkafka/win32/common.vcxproj +84 -0
- package/deps/librdkafka/win32/interceptor_test/interceptor_test.vcxproj +87 -0
- package/deps/librdkafka/win32/librdkafka.autopkg.template +54 -0
- package/deps/librdkafka/win32/librdkafka.master.testing.targets +13 -0
- package/deps/librdkafka/win32/librdkafka.sln +226 -0
- package/deps/librdkafka/win32/librdkafka.vcxproj +276 -0
- package/deps/librdkafka/win32/librdkafkacpp/librdkafkacpp.vcxproj +104 -0
- package/deps/librdkafka/win32/msbuild.ps1 +15 -0
- package/deps/librdkafka/win32/openssl_engine_example/openssl_engine_example.vcxproj +132 -0
- package/deps/librdkafka/win32/package-zip.ps1 +46 -0
- package/deps/librdkafka/win32/packages/repositories.config +4 -0
- package/deps/librdkafka/win32/push-package.bat +4 -0
- package/deps/librdkafka/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj +67 -0
- package/deps/librdkafka/win32/rdkafka_example/rdkafka_example.vcxproj +97 -0
- package/deps/librdkafka/win32/rdkafka_performance/rdkafka_performance.vcxproj +97 -0
- package/deps/librdkafka/win32/setup-msys2.ps1 +47 -0
- package/deps/librdkafka/win32/setup-vcpkg.ps1 +34 -0
- package/deps/librdkafka/win32/tests/test.conf.example +25 -0
- package/deps/librdkafka/win32/tests/tests.vcxproj +253 -0
- package/deps/librdkafka/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj +132 -0
- package/deps/librdkafka/win32/wingetopt.c +564 -0
- package/deps/librdkafka/win32/wingetopt.h +101 -0
- package/deps/librdkafka/win32/wintime.h +33 -0
- package/deps/librdkafka.gyp +62 -0
- package/lib/admin.js +233 -0
- package/lib/client.js +573 -0
- package/lib/error.js +500 -0
- package/lib/index.js +34 -0
- package/lib/kafka-consumer-stream.js +397 -0
- package/lib/kafka-consumer.js +698 -0
- package/lib/producer/high-level-producer.js +323 -0
- package/lib/producer-stream.js +307 -0
- package/lib/producer.js +375 -0
- package/lib/tools/ref-counter.js +52 -0
- package/lib/topic-partition.js +88 -0
- package/lib/topic.js +42 -0
- package/lib/util.js +29 -0
- package/package.json +61 -0
- package/prebuilds/darwin-arm64/@point3+node-rdkafka.node +0 -0
- package/prebuilds/linux-x64/@point3+node-rdkafka.node +0 -0
- package/util/configure.js +30 -0
- package/util/get-env.js +6 -0
- package/util/test-compile.js +11 -0
- package/util/test-producer-delivery.js +100 -0
|
@@ -0,0 +1,3473 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* librdkafka - Apache Kafka C library
|
|
3
|
+
*
|
|
4
|
+
* Copyright (c) 2020-2022, Magnus Edenhill
|
|
5
|
+
* 2023, Confluent Inc.
|
|
6
|
+
* All rights reserved.
|
|
7
|
+
*
|
|
8
|
+
* Redistribution and use in source and binary forms, with or without
|
|
9
|
+
* modification, are permitted provided that the following conditions are met:
|
|
10
|
+
*
|
|
11
|
+
* 1. Redistributions of source code must retain the above copyright notice,
|
|
12
|
+
* this list of conditions and the following disclaimer.
|
|
13
|
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
14
|
+
* this list of conditions and the following disclaimer in the documentation
|
|
15
|
+
* and/or other materials provided with the distribution.
|
|
16
|
+
*
|
|
17
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
18
|
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
19
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
20
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
21
|
+
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
22
|
+
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
23
|
+
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
24
|
+
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
25
|
+
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
26
|
+
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
27
|
+
* POSSIBILITY OF SUCH DAMAGE.
|
|
28
|
+
*/
|
|
29
|
+
|
|
30
|
+
#include <iostream>
|
|
31
|
+
#include <map>
|
|
32
|
+
#include <set>
|
|
33
|
+
#include <algorithm>
|
|
34
|
+
#include <cstring>
|
|
35
|
+
#include <cstdlib>
|
|
36
|
+
#include <assert.h>
|
|
37
|
+
#include "testcpp.h"
|
|
38
|
+
#include <fstream>
|
|
39
|
+
extern "C" {
|
|
40
|
+
#include "../src/rdkafka_protocol.h"
|
|
41
|
+
#include "test.h"
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
using namespace std;
|
|
45
|
+
|
|
46
|
+
/** Topic+Partition helper class */
|
|
47
|
+
class Toppar {
|
|
48
|
+
public:
|
|
49
|
+
Toppar(const string &topic, int32_t partition) :
|
|
50
|
+
topic(topic), partition(partition) {
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
Toppar(const RdKafka::TopicPartition *tp) :
|
|
54
|
+
topic(tp->topic()), partition(tp->partition()) {
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
friend bool operator==(const Toppar &a, const Toppar &b) {
|
|
58
|
+
return a.partition == b.partition && a.topic == b.topic;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
friend bool operator<(const Toppar &a, const Toppar &b) {
|
|
62
|
+
if (a.topic < b.topic)
|
|
63
|
+
return true;
|
|
64
|
+
if (a.topic > b.topic)
|
|
65
|
+
return false;
|
|
66
|
+
return a.partition < b.partition;
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
string str() const {
|
|
70
|
+
return tostr() << topic << "[" << partition << "]";
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
std::string topic;
|
|
74
|
+
int32_t partition;
|
|
75
|
+
};
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
/**
 * @brief Read the configured bootstrap.servers from a default test config.
 */
static std::string get_bootstrap_servers() {
  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, 0);

  std::string servers;
  conf->get("bootstrap.servers", servers);
  delete conf;

  return servers;
}
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
/** Delivery report callback that fails the test on any delivery error. */
class DrCb : public RdKafka::DeliveryReportCb {
 public:
  void dr_cb(RdKafka::Message &msg) {
    if (!msg.err())
      return;
    Test::Fail("Delivery failed: " + RdKafka::err2str(msg.err()));
  }
};
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
/**
|
|
99
|
+
* @brief Produce messages to partitions.
|
|
100
|
+
*
|
|
101
|
+
* The pair is Toppar,msg_cnt_per_partition.
|
|
102
|
+
* The Toppar is topic,partition_cnt.
|
|
103
|
+
*/
|
|
104
|
+
static void produce_msgs(vector<pair<Toppar, int> > partitions) {
|
|
105
|
+
RdKafka::Conf *conf;
|
|
106
|
+
Test::conf_init(&conf, NULL, 0);
|
|
107
|
+
|
|
108
|
+
string errstr;
|
|
109
|
+
DrCb dr;
|
|
110
|
+
conf->set("dr_cb", &dr, errstr);
|
|
111
|
+
RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
|
|
112
|
+
if (!p)
|
|
113
|
+
Test::Fail("Failed to create producer: " + errstr);
|
|
114
|
+
delete conf;
|
|
115
|
+
|
|
116
|
+
for (vector<pair<Toppar, int> >::iterator it = partitions.begin();
|
|
117
|
+
it != partitions.end(); it++) {
|
|
118
|
+
for (int part = 0; part < it->first.partition; part++) {
|
|
119
|
+
for (int i = 0; i < it->second; i++) {
|
|
120
|
+
RdKafka::ErrorCode err =
|
|
121
|
+
p->produce(it->first.topic, part, RdKafka::Producer::RK_MSG_COPY,
|
|
122
|
+
(void *)"Hello there", 11, NULL, 0, 0, NULL);
|
|
123
|
+
TEST_ASSERT(!err, "produce(%s, %d) failed: %s", it->first.topic.c_str(),
|
|
124
|
+
part, RdKafka::err2str(err).c_str());
|
|
125
|
+
|
|
126
|
+
p->poll(0);
|
|
127
|
+
}
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
p->flush(10000);
|
|
132
|
+
|
|
133
|
+
delete p;
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
/**
 * @brief Create a KafkaConsumer with manual commits, earliest offset reset
 *        and the given assignment strategy.
 *
 * @param additional_conf Optional extra key/value config pairs, may be NULL.
 * @param rebalance_cb Optional rebalance callback, may be NULL.
 * @param timeout_s Test timeout passed to Test::conf_init().
 *
 * Fails the test if the consumer cannot be created.
 */
static RdKafka::KafkaConsumer *make_consumer(
    string client_id,
    string group_id,
    string assignment_strategy,
    vector<pair<string, string> > *additional_conf,
    RdKafka::RebalanceCb *rebalance_cb,
    int timeout_s) {
  std::string errstr;

  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, timeout_s);
  Test::conf_set(conf, "client.id", client_id);
  Test::conf_set(conf, "group.id", group_id);
  Test::conf_set(conf, "auto.offset.reset", "earliest");
  Test::conf_set(conf, "enable.auto.commit", "false");
  Test::conf_set(conf, "partition.assignment.strategy", assignment_strategy);

  if (test_consumer_group_protocol()) {
    Test::conf_set(conf, "group.protocol", test_consumer_group_protocol());
  }

  if (additional_conf != NULL) {
    std::vector<std::pair<std::string, std::string> >::iterator itr;
    for (itr = (*additional_conf).begin(); itr != (*additional_conf).end();
         itr++)
      Test::conf_set(conf, itr->first, itr->second);
  }

  if (rebalance_cb) {
    if (conf->set("rebalance_cb", rebalance_cb, errstr))
      Test::Fail("Failed to set rebalance_cb: " + errstr);
  }
  RdKafka::KafkaConsumer *consumer =
      RdKafka::KafkaConsumer::create(conf, errstr);
  if (!consumer)
    Test::Fail("Failed to create KafkaConsumer: " + errstr);
  delete conf;

  return consumer;
}
|
|
179
|
+
|
|
180
|
+
/**
 * @returns a CSV string of the vector
 */
static std::string string_vec_to_str(const std::vector<std::string> &v) {
  std::ostringstream ss;
  const char *sep = "";
  for (std::vector<std::string>::const_iterator it = v.begin(); it != v.end();
       it++) {
    ss << sep << *it;
    sep = ", ";
  }
  return ss.str();
}
|
|
189
|
+
|
|
190
|
+
/**
 * @brief Fail the test unless \p consumer currently has exactly \p count
 *        assigned partitions.
 */
void expect_assignment(RdKafka::KafkaConsumer *consumer, size_t count) {
  std::vector<RdKafka::TopicPartition *> partitions;
  RdKafka::ErrorCode err = consumer->assignment(partitions);
  if (err)
    Test::Fail(consumer->name() +
               " assignment() failed: " + RdKafka::err2str(err));
  if (partitions.size() != count)
    Test::Fail(tostr() << "Expecting consumer " << consumer->name()
                       << " to have " << count
                       << " assigned partition(s), not: " << partitions.size());
  RdKafka::TopicPartition::destroy(partitions);
}
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
/** Strict weak ordering of TopicPartitions: by topic, then by partition. */
static bool TopicPartition_cmp(const RdKafka::TopicPartition *a,
                               const RdKafka::TopicPartition *b) {
  if (a->topic() != b->topic())
    return a->topic() < b->topic();
  return a->partition() < b->partition();
}
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
/**
 * @brief Fail the test unless the consumer's current assignment is exactly
 *        \p expected (both lists are sorted before element-wise comparison).
 *
 * @remark \p expected is sorted in place.
 */
void expect_assignment(RdKafka::KafkaConsumer *consumer,
                       vector<RdKafka::TopicPartition *> &expected) {
  vector<RdKafka::TopicPartition *> partitions;
  RdKafka::ErrorCode err;
  err = consumer->assignment(partitions);
  if (err)
    Test::Fail(consumer->name() +
               " assignment() failed: " + RdKafka::err2str(err));

  if (partitions.size() != expected.size())
    Test::Fail(tostr() << "Expecting consumer " << consumer->name()
                       << " to have " << expected.size()
                       << " assigned partition(s), not " << partitions.size());

  sort(partitions.begin(), partitions.end(), TopicPartition_cmp);
  sort(expected.begin(), expected.end(), TopicPartition_cmp);

  int fails = 0;
  for (int i = 0; i < (int)partitions.size(); i++) {
    /* Under a strict weak ordering two elements are equal only when
     * neither orders before the other.  Checking a single direction
     * (as `!cmp(a,b)`) would wrongly accept partitions[i] > expected[i]
     * as a match. */
    if (!TopicPartition_cmp(partitions[i], expected[i]) &&
        !TopicPartition_cmp(expected[i], partitions[i]))
      continue;

    Test::Say(tostr() << _C_RED << consumer->name() << ": expected assignment #"
                      << i << " " << expected[i]->topic() << " ["
                      << expected[i]->partition() << "], not "
                      << partitions[i]->topic() << " ["
                      << partitions[i]->partition() << "]\n");
    fails++;
  }

  if (fails)
    Test::Fail(consumer->name() + ": Expected assignment mismatch, see above");

  RdKafka::TopicPartition::destroy(partitions);
}
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
/**
 * @brief Rebalance callback used by most tests in this file.
 *
 * Performs incremental (un)assignment, asserts the COOPERATIVE rebalance
 * protocol is in use, and keeps per-event counters plus a per-partition
 * consumed-message counter that tests inspect.
 */
class DefaultRebalanceCb : public RdKafka::RebalanceCb {
 private:
  /** @returns "topic [p], topic [p], ..." for log output. */
  static string part_list_print(
      const vector<RdKafka::TopicPartition *> &partitions) {
    ostringstream ss;
    for (unsigned int i = 0; i < partitions.size(); i++)
      ss << (i == 0 ? "" : ", ") << partitions[i]->topic() << " ["
         << partitions[i]->partition() << "]";
    return ss.str();
  }

 public:
  int assign_call_cnt;         /**< Total ASSIGN_PARTITIONS callbacks seen. */
  int revoke_call_cnt;         /**< Total REVOKE_PARTITIONS callbacks seen. */
  int nonempty_assign_call_cnt; /**< ASSIGN_PARTITIONS with partitions */
  int lost_call_cnt;           /**< Revocations where the assignment was lost. */
  int partitions_assigned_net; /**< Assigned minus revoked partition count. */
  bool wait_rebalance;         /**< Set by tests; cleared on any rebalance. */
  int64_t ts_last_assign;      /**< Timestamp of last rebalance assignment */
  map<Toppar, int> msg_cnt;    /**< Number of consumed messages per partition. */

  ~DefaultRebalanceCb() {
    reset_msg_cnt();
  }

  DefaultRebalanceCb() :
      assign_call_cnt(0),
      revoke_call_cnt(0),
      nonempty_assign_call_cnt(0),
      lost_call_cnt(0),
      partitions_assigned_net(0),
      wait_rebalance(false),
      ts_last_assign(0) {
  }


  void rebalance_cb(RdKafka::KafkaConsumer *consumer,
                    RdKafka::ErrorCode err,
                    std::vector<RdKafka::TopicPartition *> &partitions) {
    wait_rebalance = false;

    std::string protocol = consumer->rebalance_protocol();

    if (protocol != "") {
      /* Consumer hasn't been closed */
      TEST_ASSERT(protocol == "COOPERATIVE",
                  "%s: Expected rebalance_protocol \"COOPERATIVE\", not %s",
                  consumer->name().c_str(), protocol.c_str());
    }

    const char *lost_str = consumer->assignment_lost() ? " (LOST)" : "";
    Test::Say(tostr() << _C_YEL "RebalanceCb " << protocol << ": "
                      << consumer->name() << " " << RdKafka::err2str(err)
                      << lost_str << ": " << part_list_print(partitions)
                      << "\n");

    if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
      /* An assignment can never be "lost"; that only applies to
       * revocations. */
      if (consumer->assignment_lost())
        Test::Fail("unexpected lost assignment during ASSIGN rebalance");
      RdKafka::Error *error = consumer->incremental_assign(partitions);
      if (error)
        Test::Fail(tostr() << "consumer->incremental_assign() failed: "
                           << error->str());
      if (partitions.size() > 0)
        nonempty_assign_call_cnt++;
      assign_call_cnt += 1;
      partitions_assigned_net += (int)partitions.size();
      ts_last_assign = test_clock();

    } else {
      /* REVOKE_PARTITIONS */
      if (consumer->assignment_lost())
        lost_call_cnt += 1;
      RdKafka::Error *error = consumer->incremental_unassign(partitions);
      if (error)
        Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
                           << error->str());
      if (partitions.size() == 0)
        Test::Fail("revoked partitions size should never be 0");
      revoke_call_cnt += 1;
      partitions_assigned_net -= (int)partitions.size();
    }

    /* Reset message counters for the given partitions. */
    Test::Say(consumer->name() + ": resetting message counters:\n");
    reset_msg_cnt(partitions);
  }

  /**
   * @brief Consume a single message (or event) from \p c, counting it
   *        per partition.
   * @returns false if the consume() call timed out, else true.
   */
  bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) {
    RdKafka::Message *msg = c->consume(timeout_ms);
    bool ret = msg->err() != RdKafka::ERR__TIMED_OUT;
    if (!msg->err())
      msg_cnt[Toppar(msg->topic_name(), msg->partition())]++;
    delete msg;
    return ret;
  }

  /** @brief Clear all per-partition message counters. */
  void reset_msg_cnt() {
    msg_cnt.clear();
  }

  /** @brief Clear the message counter for a single partition. */
  void reset_msg_cnt(Toppar &tp) {
    int msgcnt = get_msg_cnt(tp);
    Test::Say(tostr() << " RESET " << tp.topic << " [" << tp.partition << "]"
                      << " with " << msgcnt << " messages\n");
    /* erase() returning 0 is only an error if a non-zero count was
     * recorded for the partition. */
    if (!msg_cnt.erase(tp) && msgcnt)
      Test::Fail("erase failed!");
  }

  /** @brief Clear the message counters for all \p partitions. */
  void reset_msg_cnt(const vector<RdKafka::TopicPartition *> &partitions) {
    for (unsigned int i = 0; i < partitions.size(); i++) {
      Toppar tp(partitions[i]->topic(), partitions[i]->partition());
      reset_msg_cnt(tp);
    }
  }

  /** @returns the number of messages consumed from \p tp, or 0 if none. */
  int get_msg_cnt(const Toppar &tp) {
    map<Toppar, int>::iterator it = msg_cnt.find(tp);
    if (it == msg_cnt.end())
      return 0;
    return it->second;
  }
};
|
|
374
|
+
|
|
375
|
+
|
|
376
|
+
|
|
377
|
+
/**
 * @brief Verify that the consumer's assignment is a subset of the
 *        subscribed topics.
 *
 * @param allow_mismatch Allow assignment of not subscribed topics.
 *                       This can happen when the subscription is updated
 *                       but a rebalance callback hasn't been seen yet.
 * @param all_assignments Accumulated assignments for all consumers.
 *                        If an assigned partition already exists it means
 *                        the partition is assigned to multiple consumers and
 *                        the test will fail.
 * @param exp_msg_cnt Expected message count per assigned partition, or -1
 *                    if not to check.
 *
 * @returns the number of assigned partitions, or fails if the
 *          assignment is empty or there is an assignment for
 *          topic that is not subscribed.
 */
static int verify_consumer_assignment(
    RdKafka::KafkaConsumer *consumer,
    DefaultRebalanceCb &rebalance_cb,
    const vector<string> &topics,
    bool allow_empty,
    bool allow_mismatch,
    map<Toppar, RdKafka::KafkaConsumer *> *all_assignments,
    int exp_msg_cnt) {
  vector<RdKafka::TopicPartition *> partitions;
  RdKafka::ErrorCode err;
  int fails = 0;
  int count;
  ostringstream ss;

  err = consumer->assignment(partitions);
  TEST_ASSERT(!err, "Failed to get assignment for consumer %s: %s",
              consumer->name().c_str(), RdKafka::err2str(err).c_str());

  count = (int)partitions.size();

  for (vector<RdKafka::TopicPartition *>::iterator it = partitions.begin();
       it != partitions.end(); it++) {
    RdKafka::TopicPartition *p = *it;

    /* The assigned topic must be in the subscribed set, unless
     * allow_mismatch permits a transient mismatch. */
    if (find(topics.begin(), topics.end(), p->topic()) == topics.end()) {
      Test::Say(tostr() << (allow_mismatch ? _C_YEL "Warning (allowed)"
                                           : _C_RED "Error")
                        << ": " << consumer->name() << " is assigned "
                        << p->topic() << " [" << p->partition() << "] which is "
                        << "not in the list of subscribed topics: "
                        << string_vec_to_str(topics) << "\n");
      if (!allow_mismatch)
        fails++;
    }

    /* Record the partition in the accumulated map; a failed insert
     * means another consumer already holds this partition. */
    Toppar tp(p);
    pair<map<Toppar, RdKafka::KafkaConsumer *>::iterator, bool> ret;
    ret = all_assignments->insert(
        pair<Toppar, RdKafka::KafkaConsumer *>(tp, consumer));
    if (!ret.second) {
      Test::Say(tostr() << _C_RED << "Error: " << consumer->name()
                        << " is assigned " << p->topic() << " ["
                        << p->partition()
                        << "] which is "
                           "already assigned to consumer "
                        << ret.first->second->name() << "\n");
      fails++;
    }


    /* Optionally verify the per-partition consumed message count. */
    int msg_cnt = rebalance_cb.get_msg_cnt(tp);

    if (exp_msg_cnt != -1 && msg_cnt != exp_msg_cnt) {
      Test::Say(tostr() << _C_RED << "Error: " << consumer->name()
                        << " expected " << exp_msg_cnt << " messages on "
                        << p->topic() << " [" << p->partition() << "], not "
                        << msg_cnt << "\n");
      fails++;
    }

    ss << (it == partitions.begin() ? "" : ", ") << p->topic() << " ["
       << p->partition() << "] (" << msg_cnt << "msgs)";
  }

  RdKafka::TopicPartition::destroy(partitions);

  Test::Say(tostr() << "Consumer " << consumer->name() << " assignment ("
                    << count << "): " << ss.str() << "\n");

  if (count == 0 && !allow_empty)
    Test::Fail("Consumer " + consumer->name() +
               " has unexpected empty assignment");

  if (fails)
    Test::Fail(
        tostr() << "Consumer " + consumer->name()
                << " assignment verification failed (see previous error)");

  return count;
}
|
|
475
|
+
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
/* -------- a_assign_tests
|
|
479
|
+
*
|
|
480
|
+
* check behavior incremental assign / unassign outside the context of a
|
|
481
|
+
* rebalance.
|
|
482
|
+
*/
|
|
483
|
+
|
|
484
|
+
|
|
485
|
+
/** Incremental assign, then assign(NULL).
|
|
486
|
+
*/
|
|
487
|
+
static void assign_test_1(RdKafka::KafkaConsumer *consumer,
|
|
488
|
+
std::vector<RdKafka::TopicPartition *> toppars1,
|
|
489
|
+
std::vector<RdKafka::TopicPartition *> toppars2) {
|
|
490
|
+
RdKafka::ErrorCode err;
|
|
491
|
+
RdKafka::Error *error;
|
|
492
|
+
|
|
493
|
+
Test::Say("Incremental assign, then assign(NULL)\n");
|
|
494
|
+
|
|
495
|
+
if ((error = consumer->incremental_assign(toppars1)))
|
|
496
|
+
Test::Fail(tostr() << "Incremental assign failed: " << error->str());
|
|
497
|
+
Test::check_assignment(consumer, 1, &toppars1[0]->topic());
|
|
498
|
+
|
|
499
|
+
if ((err = consumer->unassign()))
|
|
500
|
+
Test::Fail("Unassign failed: " + RdKafka::err2str(err));
|
|
501
|
+
Test::check_assignment(consumer, 0, NULL);
|
|
502
|
+
}
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
/** Assign, then incremental unassign.
|
|
506
|
+
*/
|
|
507
|
+
static void assign_test_2(RdKafka::KafkaConsumer *consumer,
|
|
508
|
+
std::vector<RdKafka::TopicPartition *> toppars1,
|
|
509
|
+
std::vector<RdKafka::TopicPartition *> toppars2) {
|
|
510
|
+
RdKafka::ErrorCode err;
|
|
511
|
+
RdKafka::Error *error;
|
|
512
|
+
|
|
513
|
+
Test::Say("Assign, then incremental unassign\n");
|
|
514
|
+
|
|
515
|
+
if ((err = consumer->assign(toppars1)))
|
|
516
|
+
Test::Fail("Assign failed: " + RdKafka::err2str(err));
|
|
517
|
+
Test::check_assignment(consumer, 1, &toppars1[0]->topic());
|
|
518
|
+
|
|
519
|
+
if ((error = consumer->incremental_unassign(toppars1)))
|
|
520
|
+
Test::Fail("Incremental unassign failed: " + error->str());
|
|
521
|
+
Test::check_assignment(consumer, 0, NULL);
|
|
522
|
+
}
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
/** Incremental assign, then incremental unassign.
|
|
526
|
+
*/
|
|
527
|
+
static void assign_test_3(RdKafka::KafkaConsumer *consumer,
|
|
528
|
+
std::vector<RdKafka::TopicPartition *> toppars1,
|
|
529
|
+
std::vector<RdKafka::TopicPartition *> toppars2) {
|
|
530
|
+
RdKafka::Error *error;
|
|
531
|
+
|
|
532
|
+
Test::Say("Incremental assign, then incremental unassign\n");
|
|
533
|
+
|
|
534
|
+
if ((error = consumer->incremental_assign(toppars1)))
|
|
535
|
+
Test::Fail("Incremental assign failed: " + error->str());
|
|
536
|
+
Test::check_assignment(consumer, 1, &toppars1[0]->topic());
|
|
537
|
+
|
|
538
|
+
if ((error = consumer->incremental_unassign(toppars1)))
|
|
539
|
+
Test::Fail("Incremental unassign failed: " + error->str());
|
|
540
|
+
Test::check_assignment(consumer, 0, NULL);
|
|
541
|
+
}
|
|
542
|
+
|
|
543
|
+
|
|
544
|
+
/** Multi-topic incremental assign and unassign + message consumption.
|
|
545
|
+
*/
|
|
546
|
+
static void assign_test_4(RdKafka::KafkaConsumer *consumer,
|
|
547
|
+
std::vector<RdKafka::TopicPartition *> toppars1,
|
|
548
|
+
std::vector<RdKafka::TopicPartition *> toppars2) {
|
|
549
|
+
RdKafka::Error *error;
|
|
550
|
+
|
|
551
|
+
Test::Say(
|
|
552
|
+
"Multi-topic incremental assign and unassign + message consumption\n");
|
|
553
|
+
|
|
554
|
+
if ((error = consumer->incremental_assign(toppars1)))
|
|
555
|
+
Test::Fail("Incremental assign failed: " + error->str());
|
|
556
|
+
Test::check_assignment(consumer, 1, &toppars1[0]->topic());
|
|
557
|
+
|
|
558
|
+
RdKafka::Message *m = consumer->consume(5000);
|
|
559
|
+
if (m->err() != RdKafka::ERR_NO_ERROR)
|
|
560
|
+
Test::Fail("Expecting a consumed message.");
|
|
561
|
+
if (m->len() != 100)
|
|
562
|
+
Test::Fail(tostr() << "Expecting msg len to be 100, not: "
|
|
563
|
+
<< m->len()); /* implies read from topic 1. */
|
|
564
|
+
delete m;
|
|
565
|
+
|
|
566
|
+
if ((error = consumer->incremental_unassign(toppars1)))
|
|
567
|
+
Test::Fail("Incremental unassign failed: " + error->str());
|
|
568
|
+
Test::check_assignment(consumer, 0, NULL);
|
|
569
|
+
|
|
570
|
+
m = consumer->consume(100);
|
|
571
|
+
if (m->err() != RdKafka::ERR__TIMED_OUT)
|
|
572
|
+
Test::Fail("Not expecting a consumed message.");
|
|
573
|
+
delete m;
|
|
574
|
+
|
|
575
|
+
if ((error = consumer->incremental_assign(toppars2)))
|
|
576
|
+
Test::Fail("Incremental assign failed: " + error->str());
|
|
577
|
+
Test::check_assignment(consumer, 1, &toppars2[0]->topic());
|
|
578
|
+
|
|
579
|
+
m = consumer->consume(5000);
|
|
580
|
+
if (m->err() != RdKafka::ERR_NO_ERROR)
|
|
581
|
+
Test::Fail("Expecting a consumed message.");
|
|
582
|
+
if (m->len() != 200)
|
|
583
|
+
Test::Fail(tostr() << "Expecting msg len to be 200, not: "
|
|
584
|
+
<< m->len()); /* implies read from topic 2. */
|
|
585
|
+
delete m;
|
|
586
|
+
|
|
587
|
+
if ((error = consumer->incremental_assign(toppars1)))
|
|
588
|
+
Test::Fail("Incremental assign failed: " + error->str());
|
|
589
|
+
if (Test::assignment_partition_count(consumer, NULL) != 2)
|
|
590
|
+
Test::Fail(tostr() << "Expecting current assignment to have size 2, not: "
|
|
591
|
+
<< Test::assignment_partition_count(consumer, NULL));
|
|
592
|
+
|
|
593
|
+
m = consumer->consume(5000);
|
|
594
|
+
if (m->err() != RdKafka::ERR_NO_ERROR)
|
|
595
|
+
Test::Fail("Expecting a consumed message.");
|
|
596
|
+
delete m;
|
|
597
|
+
|
|
598
|
+
if ((error = consumer->incremental_unassign(toppars2)))
|
|
599
|
+
Test::Fail("Incremental unassign failed: " + error->str());
|
|
600
|
+
if ((error = consumer->incremental_unassign(toppars1)))
|
|
601
|
+
Test::Fail("Incremental unassign failed: " + error->str());
|
|
602
|
+
Test::check_assignment(consumer, 0, NULL);
|
|
603
|
+
}
|
|
604
|
+
|
|
605
|
+
|
|
606
|
+
/** Incremental assign and unassign of empty collection.
|
|
607
|
+
*/
|
|
608
|
+
static void assign_test_5(RdKafka::KafkaConsumer *consumer,
|
|
609
|
+
std::vector<RdKafka::TopicPartition *> toppars1,
|
|
610
|
+
std::vector<RdKafka::TopicPartition *> toppars2) {
|
|
611
|
+
RdKafka::Error *error;
|
|
612
|
+
std::vector<RdKafka::TopicPartition *> toppars3;
|
|
613
|
+
|
|
614
|
+
Test::Say("Incremental assign and unassign of empty collection\n");
|
|
615
|
+
|
|
616
|
+
if ((error = consumer->incremental_assign(toppars3)))
|
|
617
|
+
Test::Fail("Incremental assign failed: " + error->str());
|
|
618
|
+
Test::check_assignment(consumer, 0, NULL);
|
|
619
|
+
|
|
620
|
+
if ((error = consumer->incremental_unassign(toppars3)))
|
|
621
|
+
Test::Fail("Incremental unassign failed: " + error->str());
|
|
622
|
+
Test::check_assignment(consumer, 0, NULL);
|
|
623
|
+
}
|
|
624
|
+
|
|
625
|
+
|
|
626
|
+
|
|
627
|
+
static void run_test(
|
|
628
|
+
const std::string &t1,
|
|
629
|
+
const std::string &t2,
|
|
630
|
+
void (*test)(RdKafka::KafkaConsumer *consumer,
|
|
631
|
+
std::vector<RdKafka::TopicPartition *> toppars1,
|
|
632
|
+
std::vector<RdKafka::TopicPartition *> toppars2)) {
|
|
633
|
+
std::vector<RdKafka::TopicPartition *> toppars1;
|
|
634
|
+
toppars1.push_back(RdKafka::TopicPartition::create(t1, 0));
|
|
635
|
+
std::vector<RdKafka::TopicPartition *> toppars2;
|
|
636
|
+
toppars2.push_back(RdKafka::TopicPartition::create(t2, 0));
|
|
637
|
+
|
|
638
|
+
RdKafka::KafkaConsumer *consumer =
|
|
639
|
+
make_consumer("C_1", t1, "cooperative-sticky", NULL, NULL, 10);
|
|
640
|
+
|
|
641
|
+
test(consumer, toppars1, toppars2);
|
|
642
|
+
|
|
643
|
+
RdKafka::TopicPartition::destroy(toppars1);
|
|
644
|
+
RdKafka::TopicPartition::destroy(toppars2);
|
|
645
|
+
|
|
646
|
+
consumer->close();
|
|
647
|
+
delete consumer;
|
|
648
|
+
}
|
|
649
|
+
|
|
650
|
+
|
|
651
|
+
static void a_assign_tests() {
|
|
652
|
+
SUB_TEST_QUICK();
|
|
653
|
+
|
|
654
|
+
int msgcnt = 1000;
|
|
655
|
+
const int msgsize1 = 100;
|
|
656
|
+
const int msgsize2 = 200;
|
|
657
|
+
|
|
658
|
+
std::string topic1_str = Test::mk_topic_name("0113-a1", 1);
|
|
659
|
+
test_create_topic(NULL, topic1_str.c_str(), 1, 1);
|
|
660
|
+
std::string topic2_str = Test::mk_topic_name("0113-a2", 1);
|
|
661
|
+
test_create_topic(NULL, topic2_str.c_str(), 1, 1);
|
|
662
|
+
|
|
663
|
+
test_wait_topic_exists(NULL, topic1_str.c_str(), 10 * 1000);
|
|
664
|
+
test_wait_topic_exists(NULL, topic2_str.c_str(), 10 * 1000);
|
|
665
|
+
|
|
666
|
+
test_produce_msgs_easy_size(topic1_str.c_str(), 0, 0, msgcnt, msgsize1);
|
|
667
|
+
test_produce_msgs_easy_size(topic2_str.c_str(), 0, 0, msgcnt, msgsize2);
|
|
668
|
+
|
|
669
|
+
run_test(topic1_str, topic2_str, assign_test_1);
|
|
670
|
+
run_test(topic1_str, topic2_str, assign_test_2);
|
|
671
|
+
run_test(topic1_str, topic2_str, assign_test_3);
|
|
672
|
+
run_test(topic1_str, topic2_str, assign_test_4);
|
|
673
|
+
run_test(topic1_str, topic2_str, assign_test_5);
|
|
674
|
+
|
|
675
|
+
SUB_TEST_PASS();
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
|
|
679
|
+
|
|
680
|
+
/**
 * @brief Quick Assign 1,2, Assign 2,3, Assign 1,2,3 test to verify
 *        that the correct OffsetFetch response is used.
 *        See note in rdkafka_assignment.c for details.
 *
 * Makes use of the mock cluster to induce latency.
 */
static void a_assign_rapid() {
  SUB_TEST_QUICK();

  std::string group_id = __FUNCTION__;

  rd_kafka_mock_cluster_t *mcluster;
  const char *bootstraps;

  /* 3-broker mock cluster; broker 1 is pinned as group coordinator so
   * latency can later be injected on the coordinator specifically. */
  mcluster = test_mock_cluster_new(3, &bootstraps);
  int32_t coord_id = 1;
  rd_kafka_mock_coordinator_set(mcluster, "group", group_id.c_str(), coord_id);

  rd_kafka_mock_topic_create(mcluster, "topic1", 1, 1);
  rd_kafka_mock_topic_create(mcluster, "topic2", 1, 1);
  rd_kafka_mock_topic_create(mcluster, "topic3", 1, 1);

  /*
   * Produce messages to topics
   */
  const int msgs_per_partition = 1000;

  RdKafka::Conf *pconf;
  Test::conf_init(&pconf, NULL, 10);
  Test::conf_set(pconf, "bootstrap.servers", bootstraps);
  Test::conf_set(pconf, "security.protocol", "plaintext");
  std::string errstr;
  RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr);
  if (!p)
    Test::Fail(tostr() << __FUNCTION__
                       << ": Failed to create producer: " << errstr);
  delete pconf;

  Test::produce_msgs(p, "topic1", 0, msgs_per_partition, 10,
                     false /*no flush*/);
  Test::produce_msgs(p, "topic2", 0, msgs_per_partition, 10,
                     false /*no flush*/);
  Test::produce_msgs(p, "topic3", 0, msgs_per_partition, 10,
                     false /*no flush*/);
  p->flush(10 * 1000);

  delete p;

  vector<RdKafka::TopicPartition *> toppars1;
  toppars1.push_back(RdKafka::TopicPartition::create("topic1", 0));
  vector<RdKafka::TopicPartition *> toppars2;
  toppars2.push_back(RdKafka::TopicPartition::create("topic2", 0));
  vector<RdKafka::TopicPartition *> toppars3;
  toppars3.push_back(RdKafka::TopicPartition::create("topic3", 0));


  /* Consumer with auto-commit disabled so only the explicit commits
   * below determine the fetched offsets. */
  RdKafka::Conf *conf;
  Test::conf_init(&conf, NULL, 20);
  Test::conf_set(conf, "bootstrap.servers", bootstraps);
  Test::conf_set(conf, "security.protocol", "plaintext");
  Test::conf_set(conf, "client.id", __FUNCTION__);
  Test::conf_set(conf, "group.id", group_id);
  Test::conf_set(conf, "auto.offset.reset", "earliest");
  Test::conf_set(conf, "enable.auto.commit", "false");
  if (test_consumer_group_protocol()) {
    Test::conf_set(conf, "group.protocol", test_consumer_group_protocol());
  }

  RdKafka::KafkaConsumer *consumer;
  consumer = RdKafka::KafkaConsumer::create(conf, errstr);
  if (!consumer)
    Test::Fail(tostr() << __FUNCTION__
                       << ": Failed to create consumer: " << errstr);
  delete conf;

  /* toppars: the partitions passed to each incremental call;
   * expected: the accumulated assignment we expect after each call. */
  vector<RdKafka::TopicPartition *> toppars;
  vector<RdKafka::TopicPartition *> expected;

  map<Toppar, int64_t> pos; /* Expected consume position per partition */
  pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 0;
  pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 0;
  pos[Toppar(toppars3[0]->topic(), toppars3[0]->partition())] = 0;

  /* To make sure offset commits are fetched in proper assign sequence
   * we commit an offset that should not be used in the final consume loop.
   * This commit will be overwritten below with another commit. */
  vector<RdKafka::TopicPartition *> offsets;
  offsets.push_back(RdKafka::TopicPartition::create(
      toppars1[0]->topic(), toppars1[0]->partition(), 11));
  /* This partition should start at this position even though
   * there will be a sub-sequent commit to overwrite it, that should not
   * be used since this partition is never unassigned. */
  offsets.push_back(RdKafka::TopicPartition::create(
      toppars2[0]->topic(), toppars2[0]->partition(), 22));
  pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 22;

  Test::print_TopicPartitions("pre-commit", offsets);

  RdKafka::ErrorCode err;
  err = consumer->commitSync(offsets);
  if (err)
    Test::Fail(tostr() << __FUNCTION__ << ": pre-commit failed: "
                       << RdKafka::err2str(err) << "\n");

  /* Add coordinator delay so that the OffsetFetchRequest originating
   * from the coming incremental_assign() will not finish before
   * we call incremental_unassign() and incremental_assign() again, resulting
   * in a situation where the initial OffsetFetchResponse will contain
   * an older offset for a previous assignment of one partition. */
  rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 5000);


  /* Assign 1,2 == 1,2 */
  toppars.push_back(toppars1[0]);
  toppars.push_back(toppars2[0]);
  expected.push_back(toppars1[0]);
  expected.push_back(toppars2[0]);
  Test::incremental_assign(consumer, toppars);
  expect_assignment(consumer, expected);

  /* Unassign -1 == 2 */
  toppars.clear();
  toppars.push_back(toppars1[0]);
  vector<RdKafka::TopicPartition *>::iterator it =
      find(expected.begin(), expected.end(), toppars1[0]);
  expected.erase(it);

  Test::incremental_unassign(consumer, toppars);
  expect_assignment(consumer, expected);


  /* Commit offset for the removed partition and the partition that is
   * unchanged in the assignment. */
  RdKafka::TopicPartition::destroy(offsets);
  offsets.push_back(RdKafka::TopicPartition::create(
      toppars1[0]->topic(), toppars1[0]->partition(), 55));
  offsets.push_back(RdKafka::TopicPartition::create(
      toppars2[0]->topic(), toppars2[0]->partition(), 33)); /* should not be
                                                             * used. */
  pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 55;
  Test::print_TopicPartitions("commit", offsets);

  err = consumer->commitAsync(offsets);
  if (err)
    Test::Fail(tostr() << __FUNCTION__
                       << ": commit failed: " << RdKafka::err2str(err) << "\n");

  /* Assign +3 == 2,3 */
  toppars.clear();
  toppars.push_back(toppars3[0]);
  expected.push_back(toppars3[0]);
  Test::incremental_assign(consumer, toppars);
  expect_assignment(consumer, expected);

  /* Now remove the latency */
  Test::Say(_C_MAG "Clearing rtt\n");
  rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 0);

  /* Assign +1 == 1,2,3 */
  toppars.clear();
  toppars.push_back(toppars1[0]);
  expected.push_back(toppars1[0]);
  Test::incremental_assign(consumer, toppars);
  expect_assignment(consumer, expected);

  /*
   * Verify consumed messages
   */
  /* wait_end counts partitions that have not yet reached
   * msgs_per_partition; consume until all assigned partitions have. */
  int wait_end = (int)expected.size();
  while (wait_end > 0) {
    RdKafka::Message *msg = consumer->consume(10 * 1000);
    if (msg->err() == RdKafka::ERR__TIMED_OUT)
      Test::Fail(tostr() << __FUNCTION__
                         << ": Consume timed out waiting "
                            "for "
                         << wait_end << " more partitions");

    Toppar tp = Toppar(msg->topic_name(), msg->partition());
    /* Each message must arrive exactly at the tracked position:
     * this verifies the correct committed offset was fetched. */
    int64_t *exp_pos = &pos[tp];

    Test::Say(3, tostr() << __FUNCTION__ << ": Received " << tp.topic << " ["
                         << tp.partition << "] at offset " << msg->offset()
                         << " (expected offset " << *exp_pos << ")\n");

    if (*exp_pos != msg->offset())
      Test::Fail(tostr() << __FUNCTION__ << ": expected message offset "
                         << *exp_pos << " for " << msg->topic_name() << " ["
                         << msg->partition() << "], not " << msg->offset()
                         << "\n");
    (*exp_pos)++;
    if (*exp_pos == msgs_per_partition) {
      TEST_ASSERT(wait_end > 0, "");
      wait_end--;
    } else if (msg->offset() > msgs_per_partition)
      Test::Fail(tostr() << __FUNCTION__ << ": unexpected message with "
                         << "offset " << msg->offset() << " on " << tp.topic
                         << " [" << tp.partition << "]\n");

    delete msg;
  }

  RdKafka::TopicPartition::destroy(offsets);
  RdKafka::TopicPartition::destroy(toppars1);
  RdKafka::TopicPartition::destroy(toppars2);
  RdKafka::TopicPartition::destroy(toppars3);

  delete consumer;

  test_mock_cluster_destroy(mcluster);

  SUB_TEST_PASS();
}
|
|
893
|
+
|
|
894
|
+
|
|
895
|
+
/* Check behavior when:
|
|
896
|
+
* 1. single topic with 2 partitions.
|
|
897
|
+
* 2. consumer 1 (with rebalance_cb) subscribes to it.
|
|
898
|
+
* 3. consumer 2 (with rebalance_cb) subscribes to it.
|
|
899
|
+
* 4. close.
|
|
900
|
+
*/
|
|
901
|
+
|
|
902
|
+
static void b_subscribe_with_cb_test(rd_bool_t close_consumer) {
|
|
903
|
+
SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer");
|
|
904
|
+
int expected_cb1_assign_call_cnt = 3;
|
|
905
|
+
int expected_cb2_assign_call_cnt = 2;
|
|
906
|
+
|
|
907
|
+
std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
908
|
+
std::string group_name =
|
|
909
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
910
|
+
test_create_topic(NULL, topic_name.c_str(), 2, 1);
|
|
911
|
+
|
|
912
|
+
DefaultRebalanceCb rebalance_cb1;
|
|
913
|
+
RdKafka::KafkaConsumer *c1 = make_consumer(
|
|
914
|
+
"C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 25);
|
|
915
|
+
DefaultRebalanceCb rebalance_cb2;
|
|
916
|
+
RdKafka::KafkaConsumer *c2 = make_consumer(
|
|
917
|
+
"C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 25);
|
|
918
|
+
test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000);
|
|
919
|
+
|
|
920
|
+
Test::subscribe(c1, topic_name);
|
|
921
|
+
|
|
922
|
+
bool c2_subscribed = false;
|
|
923
|
+
while (true) {
|
|
924
|
+
Test::poll_once(c1, 500);
|
|
925
|
+
Test::poll_once(c2, 500);
|
|
926
|
+
|
|
927
|
+
/* Start c2 after c1 has received initial assignment */
|
|
928
|
+
if (!c2_subscribed && rebalance_cb1.nonempty_assign_call_cnt > 0) {
|
|
929
|
+
Test::subscribe(c2, topic_name);
|
|
930
|
+
c2_subscribed = true;
|
|
931
|
+
}
|
|
932
|
+
|
|
933
|
+
/* Failure case: test will time out. */
|
|
934
|
+
if (Test::assignment_partition_count(c1, NULL) == 1 &&
|
|
935
|
+
Test::assignment_partition_count(c2, NULL) == 1) {
|
|
936
|
+
/* Callback count can vary in KIP-848 */
|
|
937
|
+
if (test_consumer_group_protocol_classic() &&
|
|
938
|
+
!(rebalance_cb1.assign_call_cnt == expected_cb1_assign_call_cnt &&
|
|
939
|
+
rebalance_cb2.assign_call_cnt == expected_cb2_assign_call_cnt))
|
|
940
|
+
continue;
|
|
941
|
+
break;
|
|
942
|
+
}
|
|
943
|
+
}
|
|
944
|
+
|
|
945
|
+
/* Sequence of events:
|
|
946
|
+
*
|
|
947
|
+
* 1. c1 joins group.
|
|
948
|
+
* 2. c1 gets assigned 2 partitions (+1 assign call).
|
|
949
|
+
* - there isn't a follow-on rebalance because there aren't any revoked
|
|
950
|
+
* partitions.
|
|
951
|
+
* 3. c2 joins group.
|
|
952
|
+
* 4. This results in a rebalance with one partition being revoked from c1 (+1
|
|
953
|
+
* revoke call), and no partitions assigned to either c1 (+1 assign call) or
|
|
954
|
+
* c2 (+1 assign call) (however the rebalance callback will be called in each
|
|
955
|
+
* case with an empty set).
|
|
956
|
+
* 5. c1 then re-joins the group since it had a partition revoked.
|
|
957
|
+
* 6. c2 is now assigned a single partition (+1 assign call), and c1's
|
|
958
|
+
* incremental assignment is empty (+1 assign call).
|
|
959
|
+
* 7. Since there were no revoked partitions, no further rebalance is
|
|
960
|
+
* triggered.
|
|
961
|
+
*/
|
|
962
|
+
|
|
963
|
+
/* Callback count can vary in KIP-848 */
|
|
964
|
+
if (test_consumer_group_protocol_classic()) {
|
|
965
|
+
/* The rebalance cb is always called on assign, even if empty. */
|
|
966
|
+
if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt)
|
|
967
|
+
Test::Fail(tostr() << "Expecting " << expected_cb1_assign_call_cnt
|
|
968
|
+
<< " assign calls on consumer 1, not "
|
|
969
|
+
<< rebalance_cb1.assign_call_cnt);
|
|
970
|
+
if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt)
|
|
971
|
+
Test::Fail(tostr() << "Expecting " << expected_cb2_assign_call_cnt
|
|
972
|
+
<< " assign calls on consumer 2, not: "
|
|
973
|
+
<< rebalance_cb2.assign_call_cnt);
|
|
974
|
+
|
|
975
|
+
/* The rebalance cb is not called on and empty revoke (unless partitions
|
|
976
|
+
* lost, which is not the case here) */
|
|
977
|
+
if (rebalance_cb1.revoke_call_cnt != 1)
|
|
978
|
+
Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: "
|
|
979
|
+
<< rebalance_cb1.revoke_call_cnt);
|
|
980
|
+
if (rebalance_cb2.revoke_call_cnt != 0)
|
|
981
|
+
Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: "
|
|
982
|
+
<< rebalance_cb2.revoke_call_cnt);
|
|
983
|
+
}
|
|
984
|
+
|
|
985
|
+
/* Final state */
|
|
986
|
+
|
|
987
|
+
/* Expect both consumers to have 1 assigned partition (via net calculation in
|
|
988
|
+
* rebalance_cb) */
|
|
989
|
+
if (rebalance_cb1.partitions_assigned_net != 1)
|
|
990
|
+
Test::Fail(tostr()
|
|
991
|
+
<< "Expecting consumer 1 to have net 1 assigned partition, not: "
|
|
992
|
+
<< rebalance_cb1.partitions_assigned_net);
|
|
993
|
+
if (rebalance_cb2.partitions_assigned_net != 1)
|
|
994
|
+
Test::Fail(tostr()
|
|
995
|
+
<< "Expecting consumer 2 to have net 1 assigned partition, not: "
|
|
996
|
+
<< rebalance_cb2.partitions_assigned_net);
|
|
997
|
+
|
|
998
|
+
/* Expect both consumers to have 1 assigned partition (via ->assignment()
|
|
999
|
+
* query) */
|
|
1000
|
+
expect_assignment(c1, 1);
|
|
1001
|
+
expect_assignment(c2, 1);
|
|
1002
|
+
|
|
1003
|
+
/* Make sure the fetchers are running */
|
|
1004
|
+
int msgcnt = 100;
|
|
1005
|
+
const int msgsize1 = 100;
|
|
1006
|
+
test_produce_msgs_easy_size(topic_name.c_str(), 0, 0, msgcnt, msgsize1);
|
|
1007
|
+
test_produce_msgs_easy_size(topic_name.c_str(), 0, 1, msgcnt, msgsize1);
|
|
1008
|
+
|
|
1009
|
+
bool consumed_from_c1 = false;
|
|
1010
|
+
bool consumed_from_c2 = false;
|
|
1011
|
+
while (true) {
|
|
1012
|
+
RdKafka::Message *msg1 = c1->consume(100);
|
|
1013
|
+
RdKafka::Message *msg2 = c2->consume(100);
|
|
1014
|
+
|
|
1015
|
+
if (msg1->err() == RdKafka::ERR_NO_ERROR)
|
|
1016
|
+
consumed_from_c1 = true;
|
|
1017
|
+
if (msg1->err() == RdKafka::ERR_NO_ERROR)
|
|
1018
|
+
consumed_from_c2 = true;
|
|
1019
|
+
|
|
1020
|
+
delete msg1;
|
|
1021
|
+
delete msg2;
|
|
1022
|
+
|
|
1023
|
+
/* Failure case: test will timeout. */
|
|
1024
|
+
if (consumed_from_c1 && consumed_from_c2)
|
|
1025
|
+
break;
|
|
1026
|
+
}
|
|
1027
|
+
|
|
1028
|
+
if (!close_consumer) {
|
|
1029
|
+
delete c1;
|
|
1030
|
+
delete c2;
|
|
1031
|
+
return;
|
|
1032
|
+
}
|
|
1033
|
+
|
|
1034
|
+
c1->close();
|
|
1035
|
+
c2->close();
|
|
1036
|
+
|
|
1037
|
+
/* Callback count can vary in KIP-848 */
|
|
1038
|
+
if (test_consumer_group_protocol_classic()) {
|
|
1039
|
+
/* Closing the consumer should trigger rebalance_cb (revoke): */
|
|
1040
|
+
if (rebalance_cb1.revoke_call_cnt != 2)
|
|
1041
|
+
Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: "
|
|
1042
|
+
<< rebalance_cb1.revoke_call_cnt);
|
|
1043
|
+
if (rebalance_cb2.revoke_call_cnt != 1)
|
|
1044
|
+
Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: "
|
|
1045
|
+
<< rebalance_cb2.revoke_call_cnt);
|
|
1046
|
+
}
|
|
1047
|
+
|
|
1048
|
+
/* ..and net assigned partitions should drop to 0 in both cases: */
|
|
1049
|
+
if (rebalance_cb1.partitions_assigned_net != 0)
|
|
1050
|
+
Test::Fail(
|
|
1051
|
+
tostr()
|
|
1052
|
+
<< "Expecting consumer 1 to have net 0 assigned partitions, not: "
|
|
1053
|
+
<< rebalance_cb1.partitions_assigned_net);
|
|
1054
|
+
if (rebalance_cb2.partitions_assigned_net != 0)
|
|
1055
|
+
Test::Fail(
|
|
1056
|
+
tostr()
|
|
1057
|
+
<< "Expecting consumer 2 to have net 0 assigned partitions, not: "
|
|
1058
|
+
<< rebalance_cb2.partitions_assigned_net);
|
|
1059
|
+
|
|
1060
|
+
/* Nothing in this test should result in lost partitions */
|
|
1061
|
+
if (rebalance_cb1.lost_call_cnt > 0)
|
|
1062
|
+
Test::Fail(
|
|
1063
|
+
tostr() << "Expecting consumer 1 to have 0 lost partition events, not: "
|
|
1064
|
+
<< rebalance_cb1.lost_call_cnt);
|
|
1065
|
+
if (rebalance_cb2.lost_call_cnt > 0)
|
|
1066
|
+
Test::Fail(
|
|
1067
|
+
tostr() << "Expecting consumer 2 to have 0 lost partition events, not: "
|
|
1068
|
+
<< rebalance_cb2.lost_call_cnt);
|
|
1069
|
+
|
|
1070
|
+
delete c1;
|
|
1071
|
+
delete c2;
|
|
1072
|
+
|
|
1073
|
+
SUB_TEST_PASS();
|
|
1074
|
+
}
|
|
1075
|
+
|
|
1076
|
+
|
|
1077
|
+
|
|
1078
|
+
/* Check behavior when:
|
|
1079
|
+
* 1. Single topic with 2 partitions.
|
|
1080
|
+
* 2. Consumer 1 (no rebalance_cb) subscribes to it.
|
|
1081
|
+
* 3. Consumer 2 (no rebalance_cb) subscribes to it.
|
|
1082
|
+
* 4. Close.
|
|
1083
|
+
*/
|
|
1084
|
+
|
|
1085
|
+
static void c_subscribe_no_cb_test(rd_bool_t close_consumer) {
|
|
1086
|
+
SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer");
|
|
1087
|
+
|
|
1088
|
+
std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1089
|
+
std::string group_name =
|
|
1090
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1091
|
+
test_create_topic(NULL, topic_name.c_str(), 2, 1);
|
|
1092
|
+
|
|
1093
|
+
RdKafka::KafkaConsumer *c1 =
|
|
1094
|
+
make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 20);
|
|
1095
|
+
RdKafka::KafkaConsumer *c2 =
|
|
1096
|
+
make_consumer("C_2", group_name, "cooperative-sticky", NULL, NULL, 20);
|
|
1097
|
+
test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000);
|
|
1098
|
+
|
|
1099
|
+
Test::subscribe(c1, topic_name);
|
|
1100
|
+
|
|
1101
|
+
bool c2_subscribed = false;
|
|
1102
|
+
bool done = false;
|
|
1103
|
+
while (!done) {
|
|
1104
|
+
Test::poll_once(c1, 500);
|
|
1105
|
+
Test::poll_once(c2, 500);
|
|
1106
|
+
|
|
1107
|
+
if (Test::assignment_partition_count(c1, NULL) == 2 && !c2_subscribed) {
|
|
1108
|
+
Test::subscribe(c2, topic_name);
|
|
1109
|
+
c2_subscribed = true;
|
|
1110
|
+
}
|
|
1111
|
+
|
|
1112
|
+
if (Test::assignment_partition_count(c1, NULL) == 1 &&
|
|
1113
|
+
Test::assignment_partition_count(c2, NULL) == 1) {
|
|
1114
|
+
Test::Say("Consumer 1 and 2 are both assigned to single partition.\n");
|
|
1115
|
+
done = true;
|
|
1116
|
+
}
|
|
1117
|
+
}
|
|
1118
|
+
|
|
1119
|
+
if (close_consumer) {
|
|
1120
|
+
Test::Say("Closing consumer 1\n");
|
|
1121
|
+
c1->close();
|
|
1122
|
+
Test::Say("Closing consumer 2\n");
|
|
1123
|
+
c2->close();
|
|
1124
|
+
} else {
|
|
1125
|
+
Test::Say("Skipping close() of consumer 1 and 2.\n");
|
|
1126
|
+
}
|
|
1127
|
+
|
|
1128
|
+
delete c1;
|
|
1129
|
+
delete c2;
|
|
1130
|
+
|
|
1131
|
+
SUB_TEST_PASS();
|
|
1132
|
+
}
|
|
1133
|
+
|
|
1134
|
+
|
|
1135
|
+
|
|
1136
|
+
/* Check behavior when:
|
|
1137
|
+
* 1. Single consumer (no rebalance_cb) subscribes to topic.
|
|
1138
|
+
* 2. Subscription is changed (topic added).
|
|
1139
|
+
* 3. Consumer is closed.
|
|
1140
|
+
*/
|
|
1141
|
+
|
|
1142
|
+
static void d_change_subscription_add_topic(rd_bool_t close_consumer) {
|
|
1143
|
+
SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer");
|
|
1144
|
+
|
|
1145
|
+
std::string topic_name_1 =
|
|
1146
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1147
|
+
test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
|
|
1148
|
+
std::string topic_name_2 =
|
|
1149
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1150
|
+
test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
|
|
1151
|
+
|
|
1152
|
+
std::string group_name =
|
|
1153
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1154
|
+
|
|
1155
|
+
RdKafka::KafkaConsumer *c =
|
|
1156
|
+
make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
|
|
1157
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
|
|
1158
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
|
|
1159
|
+
|
|
1160
|
+
Test::subscribe(c, topic_name_1);
|
|
1161
|
+
|
|
1162
|
+
bool subscribed_to_one_topic = false;
|
|
1163
|
+
bool done = false;
|
|
1164
|
+
while (!done) {
|
|
1165
|
+
Test::poll_once(c, 500);
|
|
1166
|
+
|
|
1167
|
+
if (Test::assignment_partition_count(c, NULL) == 2 &&
|
|
1168
|
+
!subscribed_to_one_topic) {
|
|
1169
|
+
subscribed_to_one_topic = true;
|
|
1170
|
+
Test::subscribe(c, topic_name_1, topic_name_2);
|
|
1171
|
+
}
|
|
1172
|
+
|
|
1173
|
+
if (Test::assignment_partition_count(c, NULL) == 4) {
|
|
1174
|
+
Test::Say("Consumer is assigned to two topics.\n");
|
|
1175
|
+
done = true;
|
|
1176
|
+
}
|
|
1177
|
+
}
|
|
1178
|
+
|
|
1179
|
+
if (close_consumer) {
|
|
1180
|
+
Test::Say("Closing consumer\n");
|
|
1181
|
+
c->close();
|
|
1182
|
+
} else
|
|
1183
|
+
Test::Say("Skipping close() of consumer\n");
|
|
1184
|
+
|
|
1185
|
+
delete c;
|
|
1186
|
+
|
|
1187
|
+
SUB_TEST_PASS();
|
|
1188
|
+
}
|
|
1189
|
+
|
|
1190
|
+
|
|
1191
|
+
|
|
1192
|
+
/* Check behavior when:
|
|
1193
|
+
* 1. Single consumer (no rebalance_cb) subscribes to topic.
|
|
1194
|
+
* 2. Subscription is changed (topic added).
|
|
1195
|
+
* 3. Consumer is closed.
|
|
1196
|
+
*/
|
|
1197
|
+
|
|
1198
|
+
static void e_change_subscription_remove_topic(rd_bool_t close_consumer) {
|
|
1199
|
+
SUB_TEST("%s", close_consumer ? "close consumer" : "don't close consumer");
|
|
1200
|
+
|
|
1201
|
+
std::string topic_name_1 =
|
|
1202
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1203
|
+
test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
|
|
1204
|
+
std::string topic_name_2 =
|
|
1205
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1206
|
+
test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
|
|
1207
|
+
|
|
1208
|
+
std::string group_name =
|
|
1209
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1210
|
+
|
|
1211
|
+
RdKafka::KafkaConsumer *c =
|
|
1212
|
+
make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
|
|
1213
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
|
|
1214
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
|
|
1215
|
+
|
|
1216
|
+
Test::subscribe(c, topic_name_1, topic_name_2);
|
|
1217
|
+
|
|
1218
|
+
bool subscribed_to_two_topics = false;
|
|
1219
|
+
bool done = false;
|
|
1220
|
+
while (!done) {
|
|
1221
|
+
Test::poll_once(c, 500);
|
|
1222
|
+
|
|
1223
|
+
if (Test::assignment_partition_count(c, NULL) == 4 &&
|
|
1224
|
+
!subscribed_to_two_topics) {
|
|
1225
|
+
subscribed_to_two_topics = true;
|
|
1226
|
+
Test::subscribe(c, topic_name_1);
|
|
1227
|
+
}
|
|
1228
|
+
|
|
1229
|
+
if (Test::assignment_partition_count(c, NULL) == 2) {
|
|
1230
|
+
Test::Say("Consumer is assigned to one topic\n");
|
|
1231
|
+
done = true;
|
|
1232
|
+
}
|
|
1233
|
+
}
|
|
1234
|
+
|
|
1235
|
+
if (!close_consumer) {
|
|
1236
|
+
Test::Say("Closing consumer\n");
|
|
1237
|
+
c->close();
|
|
1238
|
+
} else
|
|
1239
|
+
Test::Say("Skipping close() of consumer\n");
|
|
1240
|
+
|
|
1241
|
+
delete c;
|
|
1242
|
+
|
|
1243
|
+
SUB_TEST_PASS();
|
|
1244
|
+
}
|
|
1245
|
+
|
|
1246
|
+
|
|
1247
|
+
|
|
1248
|
+
/* Check that use of consumer->assign() and consumer->unassign() is disallowed
|
|
1249
|
+
* when a COOPERATIVE assignor is in use.
|
|
1250
|
+
*
|
|
1251
|
+
* Except when the consumer is closing, where all forms of unassign are
|
|
1252
|
+
* allowed and treated as a full unassign.
|
|
1253
|
+
*/
|
|
1254
|
+
|
|
1255
|
+
class FTestRebalanceCb : public RdKafka::RebalanceCb {
|
|
1256
|
+
public:
|
|
1257
|
+
bool assigned;
|
|
1258
|
+
bool closing;
|
|
1259
|
+
|
|
1260
|
+
FTestRebalanceCb() : assigned(false), closing(false) {
|
|
1261
|
+
}
|
|
1262
|
+
|
|
1263
|
+
void rebalance_cb(RdKafka::KafkaConsumer *consumer,
|
|
1264
|
+
RdKafka::ErrorCode err,
|
|
1265
|
+
std::vector<RdKafka::TopicPartition *> &partitions) {
|
|
1266
|
+
Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " "
|
|
1267
|
+
<< RdKafka::err2str(err) << (closing ? " (closing)" : "")
|
|
1268
|
+
<< "\n");
|
|
1269
|
+
|
|
1270
|
+
if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
|
|
1271
|
+
RdKafka::ErrorCode err_resp = consumer->assign(partitions);
|
|
1272
|
+
Test::Say(tostr() << "consumer->assign() response code: " << err_resp
|
|
1273
|
+
<< "\n");
|
|
1274
|
+
if (err_resp != RdKafka::ERR__STATE)
|
|
1275
|
+
Test::Fail(tostr() << "Expected assign to fail with error code: "
|
|
1276
|
+
<< RdKafka::ERR__STATE << "(ERR__STATE)");
|
|
1277
|
+
|
|
1278
|
+
RdKafka::Error *error = consumer->incremental_assign(partitions);
|
|
1279
|
+
if (error)
|
|
1280
|
+
Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
|
|
1281
|
+
<< error->str());
|
|
1282
|
+
|
|
1283
|
+
assigned = true;
|
|
1284
|
+
|
|
1285
|
+
} else {
|
|
1286
|
+
RdKafka::ErrorCode err_resp = consumer->unassign();
|
|
1287
|
+
Test::Say(tostr() << "consumer->unassign() response code: " << err_resp
|
|
1288
|
+
<< "\n");
|
|
1289
|
+
|
|
1290
|
+
if (!closing) {
|
|
1291
|
+
if (err_resp != RdKafka::ERR__STATE)
|
|
1292
|
+
Test::Fail(tostr() << "Expected assign to fail with error code: "
|
|
1293
|
+
<< RdKafka::ERR__STATE << "(ERR__STATE)");
|
|
1294
|
+
|
|
1295
|
+
RdKafka::Error *error = consumer->incremental_unassign(partitions);
|
|
1296
|
+
if (error)
|
|
1297
|
+
Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
|
|
1298
|
+
<< error->str());
|
|
1299
|
+
|
|
1300
|
+
} else {
|
|
1301
|
+
/* During termination (close()) any type of unassign*() is allowed. */
|
|
1302
|
+
if (err_resp)
|
|
1303
|
+
Test::Fail(tostr() << "Expected unassign to succeed during close, "
|
|
1304
|
+
"but got: "
|
|
1305
|
+
<< RdKafka::ERR__STATE << "(ERR__STATE)");
|
|
1306
|
+
}
|
|
1307
|
+
}
|
|
1308
|
+
}
|
|
1309
|
+
};
|
|
1310
|
+
|
|
1311
|
+
|
|
1312
|
+
static void f_assign_call_cooperative() {
|
|
1313
|
+
SUB_TEST();
|
|
1314
|
+
|
|
1315
|
+
std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1316
|
+
test_create_topic(NULL, topic_name.c_str(), 1, 1);
|
|
1317
|
+
|
|
1318
|
+
std::string group_name =
|
|
1319
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1320
|
+
|
|
1321
|
+
std::vector<std::pair<std::string, std::string> > additional_conf;
|
|
1322
|
+
additional_conf.push_back(std::pair<std::string, std::string>(
|
|
1323
|
+
std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
|
|
1324
|
+
FTestRebalanceCb rebalance_cb;
|
|
1325
|
+
RdKafka::KafkaConsumer *c =
|
|
1326
|
+
make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
|
|
1327
|
+
&rebalance_cb, 15);
|
|
1328
|
+
test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
|
|
1329
|
+
|
|
1330
|
+
Test::subscribe(c, topic_name);
|
|
1331
|
+
|
|
1332
|
+
while (!rebalance_cb.assigned)
|
|
1333
|
+
Test::poll_once(c, 500);
|
|
1334
|
+
|
|
1335
|
+
rebalance_cb.closing = true;
|
|
1336
|
+
c->close();
|
|
1337
|
+
delete c;
|
|
1338
|
+
|
|
1339
|
+
SUB_TEST_PASS();
|
|
1340
|
+
}
|
|
1341
|
+
|
|
1342
|
+
|
|
1343
|
+
|
|
1344
|
+
/* Check that use of consumer->incremental_assign() and
|
|
1345
|
+
* consumer->incremental_unassign() is disallowed when an EAGER assignor is in
|
|
1346
|
+
* use.
|
|
1347
|
+
*/
|
|
1348
|
+
class GTestRebalanceCb : public RdKafka::RebalanceCb {
|
|
1349
|
+
public:
|
|
1350
|
+
bool assigned;
|
|
1351
|
+
bool closing;
|
|
1352
|
+
|
|
1353
|
+
GTestRebalanceCb() : assigned(false), closing(false) {
|
|
1354
|
+
}
|
|
1355
|
+
|
|
1356
|
+
void rebalance_cb(RdKafka::KafkaConsumer *consumer,
|
|
1357
|
+
RdKafka::ErrorCode err,
|
|
1358
|
+
std::vector<RdKafka::TopicPartition *> &partitions) {
|
|
1359
|
+
Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " "
|
|
1360
|
+
<< RdKafka::err2str(err) << "\n");
|
|
1361
|
+
|
|
1362
|
+
if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
|
|
1363
|
+
RdKafka::Error *error = consumer->incremental_assign(partitions);
|
|
1364
|
+
Test::Say(tostr() << "consumer->incremental_assign() response: "
|
|
1365
|
+
<< (!error ? "NULL" : error->str()) << "\n");
|
|
1366
|
+
if (!error)
|
|
1367
|
+
Test::Fail("Expected consumer->incremental_assign() to fail");
|
|
1368
|
+
if (error->code() != RdKafka::ERR__STATE)
|
|
1369
|
+
Test::Fail(tostr() << "Expected consumer->incremental_assign() to fail "
|
|
1370
|
+
"with error code "
|
|
1371
|
+
<< RdKafka::ERR__STATE);
|
|
1372
|
+
delete error;
|
|
1373
|
+
|
|
1374
|
+
RdKafka::ErrorCode err_resp = consumer->assign(partitions);
|
|
1375
|
+
if (err_resp)
|
|
1376
|
+
Test::Fail(tostr() << "consumer->assign() failed: " << err_resp);
|
|
1377
|
+
|
|
1378
|
+
assigned = true;
|
|
1379
|
+
|
|
1380
|
+
} else {
|
|
1381
|
+
RdKafka::Error *error = consumer->incremental_unassign(partitions);
|
|
1382
|
+
Test::Say(tostr() << "consumer->incremental_unassign() response: "
|
|
1383
|
+
<< (!error ? "NULL" : error->str()) << "\n");
|
|
1384
|
+
|
|
1385
|
+
if (!closing) {
|
|
1386
|
+
if (!error)
|
|
1387
|
+
Test::Fail("Expected consumer->incremental_unassign() to fail");
|
|
1388
|
+
if (error->code() != RdKafka::ERR__STATE)
|
|
1389
|
+
Test::Fail(tostr() << "Expected consumer->incremental_unassign() to "
|
|
1390
|
+
"fail with error code "
|
|
1391
|
+
<< RdKafka::ERR__STATE);
|
|
1392
|
+
delete error;
|
|
1393
|
+
|
|
1394
|
+
RdKafka::ErrorCode err_resp = consumer->unassign();
|
|
1395
|
+
if (err_resp)
|
|
1396
|
+
Test::Fail(tostr() << "consumer->unassign() failed: " << err_resp);
|
|
1397
|
+
|
|
1398
|
+
} else {
|
|
1399
|
+
/* During termination (close()) any type of unassign*() is allowed. */
|
|
1400
|
+
if (error)
|
|
1401
|
+
Test::Fail(
|
|
1402
|
+
tostr()
|
|
1403
|
+
<< "Expected incremental_unassign to succeed during close, "
|
|
1404
|
+
"but got: "
|
|
1405
|
+
<< RdKafka::ERR__STATE << "(ERR__STATE)");
|
|
1406
|
+
}
|
|
1407
|
+
}
|
|
1408
|
+
}
|
|
1409
|
+
};
|
|
1410
|
+
|
|
1411
|
+
static void g_incremental_assign_call_eager() {
|
|
1412
|
+
SUB_TEST();
|
|
1413
|
+
|
|
1414
|
+
/* Only classic consumer group protocol supports EAGER protocol*/
|
|
1415
|
+
if (!test_consumer_group_protocol_classic()) {
|
|
1416
|
+
SUB_TEST_SKIP(
|
|
1417
|
+
"Skipping incremental assign call eager test as EAGER protocol is only "
|
|
1418
|
+
"supported in `classic` consumer group protocol");
|
|
1419
|
+
}
|
|
1420
|
+
|
|
1421
|
+
std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1422
|
+
test_create_topic(NULL, topic_name.c_str(), 1, 1);
|
|
1423
|
+
|
|
1424
|
+
std::string group_name =
|
|
1425
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1426
|
+
|
|
1427
|
+
std::vector<std::pair<std::string, std::string> > additional_conf;
|
|
1428
|
+
additional_conf.push_back(std::pair<std::string, std::string>(
|
|
1429
|
+
std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
|
|
1430
|
+
GTestRebalanceCb rebalance_cb;
|
|
1431
|
+
RdKafka::KafkaConsumer *c = make_consumer(
|
|
1432
|
+
"C_1", group_name, "roundrobin", &additional_conf, &rebalance_cb, 15);
|
|
1433
|
+
test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
|
|
1434
|
+
|
|
1435
|
+
Test::subscribe(c, topic_name);
|
|
1436
|
+
|
|
1437
|
+
while (!rebalance_cb.assigned)
|
|
1438
|
+
Test::poll_once(c, 500);
|
|
1439
|
+
|
|
1440
|
+
rebalance_cb.closing = true;
|
|
1441
|
+
c->close();
|
|
1442
|
+
delete c;
|
|
1443
|
+
|
|
1444
|
+
SUB_TEST_PASS();
|
|
1445
|
+
}
|
|
1446
|
+
|
|
1447
|
+
|
|
1448
|
+
|
|
1449
|
+
/* Check behavior when:
|
|
1450
|
+
* 1. Single consumer (rebalance_cb) subscribes to two topics.
|
|
1451
|
+
* 2. One of the topics is deleted.
|
|
1452
|
+
* 3. Consumer is closed.
|
|
1453
|
+
*/
|
|
1454
|
+
|
|
1455
|
+
static void h_delete_topic() {
|
|
1456
|
+
SUB_TEST();
|
|
1457
|
+
|
|
1458
|
+
std::string topic_name_1 =
|
|
1459
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1460
|
+
test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
|
|
1461
|
+
std::string topic_name_2 =
|
|
1462
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1463
|
+
test_create_topic(NULL, topic_name_2.c_str(), 1, 1);
|
|
1464
|
+
|
|
1465
|
+
std::string group_name =
|
|
1466
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1467
|
+
|
|
1468
|
+
std::vector<std::pair<std::string, std::string> > additional_conf;
|
|
1469
|
+
additional_conf.push_back(std::pair<std::string, std::string>(
|
|
1470
|
+
std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
|
|
1471
|
+
DefaultRebalanceCb rebalance_cb;
|
|
1472
|
+
RdKafka::KafkaConsumer *c =
|
|
1473
|
+
make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
|
|
1474
|
+
&rebalance_cb, 15);
|
|
1475
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
|
|
1476
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
|
|
1477
|
+
|
|
1478
|
+
Test::subscribe(c, topic_name_1, topic_name_2);
|
|
1479
|
+
|
|
1480
|
+
bool deleted = false;
|
|
1481
|
+
bool done = false;
|
|
1482
|
+
while (!done) {
|
|
1483
|
+
Test::poll_once(c, 500);
|
|
1484
|
+
|
|
1485
|
+
std::vector<RdKafka::TopicPartition *> partitions;
|
|
1486
|
+
c->assignment(partitions);
|
|
1487
|
+
|
|
1488
|
+
if (partitions.size() == 2 && !deleted) {
|
|
1489
|
+
/* Callback count can vary in KIP-848 */
|
|
1490
|
+
if (test_consumer_group_protocol_classic() &&
|
|
1491
|
+
rebalance_cb.assign_call_cnt != 1)
|
|
1492
|
+
Test::Fail(tostr() << "Expected 1 assign call, saw "
|
|
1493
|
+
<< rebalance_cb.assign_call_cnt << "\n");
|
|
1494
|
+
|
|
1495
|
+
Test::delete_topic(c, topic_name_2.c_str());
|
|
1496
|
+
deleted = true;
|
|
1497
|
+
}
|
|
1498
|
+
|
|
1499
|
+
if (partitions.size() == 1 && deleted) {
|
|
1500
|
+
if (partitions[0]->topic() != topic_name_1)
|
|
1501
|
+
Test::Fail(tostr() << "Expecting subscribed topic to be '"
|
|
1502
|
+
<< topic_name_1 << "' not '"
|
|
1503
|
+
<< partitions[0]->topic() << "'");
|
|
1504
|
+
Test::Say(tostr() << "Assignment no longer includes deleted topic '"
|
|
1505
|
+
<< topic_name_2 << "'\n");
|
|
1506
|
+
done = true;
|
|
1507
|
+
}
|
|
1508
|
+
|
|
1509
|
+
RdKafka::TopicPartition::destroy(partitions);
|
|
1510
|
+
}
|
|
1511
|
+
|
|
1512
|
+
Test::Say("Closing consumer\n");
|
|
1513
|
+
c->close();
|
|
1514
|
+
|
|
1515
|
+
delete c;
|
|
1516
|
+
|
|
1517
|
+
SUB_TEST_PASS();
|
|
1518
|
+
}
|
|
1519
|
+
|
|
1520
|
+
|
|
1521
|
+
|
|
1522
|
+
/* Check behavior when:
|
|
1523
|
+
* 1. Single consumer (rebalance_cb) subscribes to a single topic.
|
|
1524
|
+
* 2. That topic is deleted leaving no topics.
|
|
1525
|
+
* 3. Consumer is closed.
|
|
1526
|
+
*/
|
|
1527
|
+
|
|
1528
|
+
static void i_delete_topic_2() {
|
|
1529
|
+
SUB_TEST();
|
|
1530
|
+
|
|
1531
|
+
std::string topic_name_1 =
|
|
1532
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1533
|
+
test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
|
|
1534
|
+
std::string group_name =
|
|
1535
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1536
|
+
|
|
1537
|
+
std::vector<std::pair<std::string, std::string> > additional_conf;
|
|
1538
|
+
additional_conf.push_back(std::pair<std::string, std::string>(
|
|
1539
|
+
std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
|
|
1540
|
+
DefaultRebalanceCb rebalance_cb;
|
|
1541
|
+
RdKafka::KafkaConsumer *c =
|
|
1542
|
+
make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
|
|
1543
|
+
&rebalance_cb, 15);
|
|
1544
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
|
|
1545
|
+
|
|
1546
|
+
Test::subscribe(c, topic_name_1);
|
|
1547
|
+
|
|
1548
|
+
bool deleted = false;
|
|
1549
|
+
bool done = false;
|
|
1550
|
+
while (!done) {
|
|
1551
|
+
Test::poll_once(c, 500);
|
|
1552
|
+
|
|
1553
|
+
if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) {
|
|
1554
|
+
/* Callback count can vary in KIP-848 */
|
|
1555
|
+
if (test_consumer_group_protocol_classic() &&
|
|
1556
|
+
rebalance_cb.assign_call_cnt != 1)
|
|
1557
|
+
Test::Fail(tostr() << "Expected one assign call, saw "
|
|
1558
|
+
<< rebalance_cb.assign_call_cnt << "\n");
|
|
1559
|
+
Test::delete_topic(c, topic_name_1.c_str());
|
|
1560
|
+
deleted = true;
|
|
1561
|
+
}
|
|
1562
|
+
|
|
1563
|
+
if (Test::assignment_partition_count(c, NULL) == 0 && deleted) {
|
|
1564
|
+
Test::Say(tostr() << "Assignment is empty following deletion of topic\n");
|
|
1565
|
+
done = true;
|
|
1566
|
+
}
|
|
1567
|
+
}
|
|
1568
|
+
|
|
1569
|
+
Test::Say("Closing consumer\n");
|
|
1570
|
+
c->close();
|
|
1571
|
+
|
|
1572
|
+
delete c;
|
|
1573
|
+
|
|
1574
|
+
SUB_TEST_PASS();
|
|
1575
|
+
}
|
|
1576
|
+
|
|
1577
|
+
|
|
1578
|
+
|
|
1579
|
+
/* Check behavior when:
|
|
1580
|
+
* 1. single consumer (without rebalance_cb) subscribes to a single topic.
|
|
1581
|
+
* 2. that topic is deleted leaving no topics.
|
|
1582
|
+
* 3. consumer is closed.
|
|
1583
|
+
*/
|
|
1584
|
+
|
|
1585
|
+
static void j_delete_topic_no_rb_callback() {
|
|
1586
|
+
SUB_TEST();
|
|
1587
|
+
|
|
1588
|
+
std::string topic_name_1 =
|
|
1589
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1590
|
+
test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
|
|
1591
|
+
|
|
1592
|
+
std::string group_name =
|
|
1593
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1594
|
+
|
|
1595
|
+
std::vector<std::pair<std::string, std::string> > additional_conf;
|
|
1596
|
+
additional_conf.push_back(std::pair<std::string, std::string>(
|
|
1597
|
+
std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
|
|
1598
|
+
RdKafka::KafkaConsumer *c = make_consumer(
|
|
1599
|
+
"C_1", group_name, "cooperative-sticky", &additional_conf, NULL, 15);
|
|
1600
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
|
|
1601
|
+
|
|
1602
|
+
Test::subscribe(c, topic_name_1);
|
|
1603
|
+
|
|
1604
|
+
bool deleted = false;
|
|
1605
|
+
bool done = false;
|
|
1606
|
+
while (!done) {
|
|
1607
|
+
Test::poll_once(c, 500);
|
|
1608
|
+
|
|
1609
|
+
if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) {
|
|
1610
|
+
Test::delete_topic(c, topic_name_1.c_str());
|
|
1611
|
+
deleted = true;
|
|
1612
|
+
}
|
|
1613
|
+
|
|
1614
|
+
if (Test::assignment_partition_count(c, NULL) == 0 && deleted) {
|
|
1615
|
+
Test::Say(tostr() << "Assignment is empty following deletion of topic\n");
|
|
1616
|
+
done = true;
|
|
1617
|
+
}
|
|
1618
|
+
}
|
|
1619
|
+
|
|
1620
|
+
Test::Say("Closing consumer\n");
|
|
1621
|
+
c->close();
|
|
1622
|
+
|
|
1623
|
+
delete c;
|
|
1624
|
+
|
|
1625
|
+
SUB_TEST_PASS();
|
|
1626
|
+
}
|
|
1627
|
+
|
|
1628
|
+
|
|
1629
|
+
|
|
1630
|
+
/* Check behavior when:
|
|
1631
|
+
* 1. Single consumer (rebalance_cb) subscribes to a 1 partition topic.
|
|
1632
|
+
* 2. Number of partitions is increased to 2.
|
|
1633
|
+
* 3. Consumer is closed.
|
|
1634
|
+
*/
|
|
1635
|
+
|
|
1636
|
+
static void k_add_partition() {
|
|
1637
|
+
SUB_TEST();
|
|
1638
|
+
|
|
1639
|
+
std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1640
|
+
test_create_topic(NULL, topic_name.c_str(), 1, 1);
|
|
1641
|
+
|
|
1642
|
+
std::string group_name =
|
|
1643
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1644
|
+
|
|
1645
|
+
std::vector<std::pair<std::string, std::string> > additional_conf;
|
|
1646
|
+
additional_conf.push_back(std::pair<std::string, std::string>(
|
|
1647
|
+
std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
|
|
1648
|
+
DefaultRebalanceCb rebalance_cb;
|
|
1649
|
+
RdKafka::KafkaConsumer *c =
|
|
1650
|
+
make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
|
|
1651
|
+
&rebalance_cb, 15);
|
|
1652
|
+
test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
|
|
1653
|
+
|
|
1654
|
+
Test::subscribe(c, topic_name);
|
|
1655
|
+
|
|
1656
|
+
bool subscribed = false;
|
|
1657
|
+
bool done = false;
|
|
1658
|
+
while (!done) {
|
|
1659
|
+
Test::poll_once(c, 500);
|
|
1660
|
+
|
|
1661
|
+
if (Test::assignment_partition_count(c, NULL) == 1 && !subscribed) {
|
|
1662
|
+
/* Callback count can vary in KIP-848 */
|
|
1663
|
+
if (test_consumer_group_protocol_classic()) {
|
|
1664
|
+
if (rebalance_cb.assign_call_cnt != 1)
|
|
1665
|
+
Test::Fail(tostr() << "Expected 1 assign call, saw "
|
|
1666
|
+
<< rebalance_cb.assign_call_cnt);
|
|
1667
|
+
if (rebalance_cb.revoke_call_cnt != 0)
|
|
1668
|
+
Test::Fail(tostr() << "Expected 0 revoke calls, saw "
|
|
1669
|
+
<< rebalance_cb.revoke_call_cnt);
|
|
1670
|
+
}
|
|
1671
|
+
Test::create_partitions(c, topic_name.c_str(), 2);
|
|
1672
|
+
subscribed = true;
|
|
1673
|
+
}
|
|
1674
|
+
|
|
1675
|
+
if (Test::assignment_partition_count(c, NULL) == 2 && subscribed) {
|
|
1676
|
+
/* Callback count can vary in KIP-848 */
|
|
1677
|
+
if (test_consumer_group_protocol_classic()) {
|
|
1678
|
+
if (rebalance_cb.assign_call_cnt != 2)
|
|
1679
|
+
Test::Fail(tostr() << "Expected 2 assign calls, saw "
|
|
1680
|
+
<< rebalance_cb.assign_call_cnt);
|
|
1681
|
+
if (rebalance_cb.revoke_call_cnt != 0)
|
|
1682
|
+
Test::Fail(tostr() << "Expected 0 revoke calls, saw "
|
|
1683
|
+
<< rebalance_cb.revoke_call_cnt);
|
|
1684
|
+
}
|
|
1685
|
+
done = true;
|
|
1686
|
+
}
|
|
1687
|
+
}
|
|
1688
|
+
|
|
1689
|
+
Test::Say("Closing consumer\n");
|
|
1690
|
+
c->close();
|
|
1691
|
+
delete c;
|
|
1692
|
+
|
|
1693
|
+
/* Callback count can vary in KIP-848 */
|
|
1694
|
+
if (test_consumer_group_protocol_classic()) {
|
|
1695
|
+
if (rebalance_cb.assign_call_cnt != 2)
|
|
1696
|
+
Test::Fail(tostr() << "Expected 2 assign calls, saw "
|
|
1697
|
+
<< rebalance_cb.assign_call_cnt);
|
|
1698
|
+
if (rebalance_cb.revoke_call_cnt != 1)
|
|
1699
|
+
Test::Fail(tostr() << "Expected 1 revoke call, saw "
|
|
1700
|
+
<< rebalance_cb.revoke_call_cnt);
|
|
1701
|
+
}
|
|
1702
|
+
|
|
1703
|
+
SUB_TEST_PASS();
|
|
1704
|
+
}
|
|
1705
|
+
|
|
1706
|
+
|
|
1707
|
+
|
|
1708
|
+
/* Check behavior when:
|
|
1709
|
+
* 1. two consumers (with rebalance_cb's) subscribe to two topics.
|
|
1710
|
+
* 2. one of the consumers calls unsubscribe.
|
|
1711
|
+
* 3. consumers closed.
|
|
1712
|
+
*/
|
|
1713
|
+
|
|
1714
|
+
/* Two consumers in the same group subscribe to two 2-partition topics;
 * consumer 1 then unsubscribes, so consumer 2 must end up owning all
 * 4 partitions. The expected rebalance-callback counts are tracked
 * against DefaultRebalanceCb counters at each stage (classic protocol
 * only; counts can vary under KIP-848). */
static void l_unsubscribe() {
  SUB_TEST();

  std::string topic_name_1 =
      Test::mk_topic_name("0113-cooperative_rebalance", 1);
  std::string topic_name_2 =
      Test::mk_topic_name("0113-cooperative_rebalance", 1);
  std::string group_name =
      Test::mk_unique_group_name("0113-cooperative_rebalance");
  test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
  test_create_topic(NULL, topic_name_2.c_str(), 2, 1);

  DefaultRebalanceCb rebalance_cb1;
  RdKafka::KafkaConsumer *c1 = make_consumer(
      "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 30);
  test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000);
  test_wait_topic_exists(c1->c_ptr(), topic_name_2.c_str(), 10 * 1000);

  Test::subscribe(c1, topic_name_1, topic_name_2);

  DefaultRebalanceCb rebalance_cb2;
  RdKafka::KafkaConsumer *c2 = make_consumer(
      "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 30);
  Test::subscribe(c2, topic_name_1, topic_name_2);

  bool done         = false;
  bool unsubscribed = false;
  /* Expected callback counts; cb2's assign count is bumped again after
   * c1 unsubscribes (c2 is assigned c1's partitions incrementally). */
  int expected_cb1_assign_call_cnt = 1;
  int expected_cb1_revoke_call_cnt = 1;
  int expected_cb2_assign_call_cnt = 1;

  while (!done) {
    Test::poll_once(c1, 500);
    Test::poll_once(c2, 500);

    /* Stage 1: both consumers hold 2 partitions each (balanced). */
    if (Test::assignment_partition_count(c1, NULL) == 2 &&
        Test::assignment_partition_count(c2, NULL) == 2) {
      /* Callback count can vary in KIP-848 */
      if (test_consumer_group_protocol_classic()) {
        if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt)
          Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be "
                             << expected_cb1_assign_call_cnt
                             << " not: " << rebalance_cb1.assign_call_cnt);
        if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt)
          Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be "
                             << expected_cb2_assign_call_cnt
                             << " not: " << rebalance_cb2.assign_call_cnt);
      }
      Test::Say("Unsubscribing consumer 1 from both topics\n");
      c1->unsubscribe();
      unsubscribed = true;
      expected_cb2_assign_call_cnt++;
    }

    /* Stage 2: c1 has released everything; c2 owns all 4 partitions. */
    if (unsubscribed && Test::assignment_partition_count(c1, NULL) == 0 &&
        Test::assignment_partition_count(c2, NULL) == 4) {
      /* Callback count can vary in KIP-848 */
      if (test_consumer_group_protocol_classic()) {
        if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt)
          /* is now unsubscribed, so rebalance_cb will no longer be called. */
          Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be "
                             << expected_cb1_assign_call_cnt
                             << " not: " << rebalance_cb1.assign_call_cnt);
        if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt)
          Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be "
                             << expected_cb2_assign_call_cnt
                             << " not: " << rebalance_cb2.assign_call_cnt);
        if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt)
          Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be "
                             << expected_cb1_revoke_call_cnt
                             << " not: " << rebalance_cb1.revoke_call_cnt);
        if (rebalance_cb2.revoke_call_cnt !=
            0) /* the rebalance_cb should not be called if the revoked partition
                  list is empty */
          Test::Fail(tostr()
                     << "Expecting consumer 2's revoke_call_cnt to be 0 not: "
                     << rebalance_cb2.revoke_call_cnt);
      }
      Test::Say("Unsubscribe completed");
      done = true;
    }
  }

  Test::Say("Closing consumer 1\n");
  c1->close();
  Test::Say("Closing consumer 2\n");
  c2->close();

  /* Callback count can vary in KIP-848 */
  if (test_consumer_group_protocol_classic()) {
    /* there should be no assign rebalance_cb calls on close */
    if (rebalance_cb1.assign_call_cnt != expected_cb1_assign_call_cnt)
      Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be "
                         << expected_cb1_assign_call_cnt
                         << " not: " << rebalance_cb1.assign_call_cnt);
    if (rebalance_cb2.assign_call_cnt != expected_cb2_assign_call_cnt)
      Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be "
                         << expected_cb2_assign_call_cnt
                         << " not: " << rebalance_cb2.assign_call_cnt);

    if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt)
      Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be "
                         << expected_cb1_revoke_call_cnt
                         << " not: " << rebalance_cb1.revoke_call_cnt);
    /* c2's only revocation happens on close(), when its full
     * assignment is released. */
    if (rebalance_cb2.revoke_call_cnt != 1)
      Test::Fail(
          tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: "
                  << rebalance_cb2.revoke_call_cnt);
  }

  /* No partitions were ever lost (only revoked/released cleanly). */
  if (rebalance_cb1.lost_call_cnt != 0)
    Test::Fail(tostr() << "Expecting consumer 1's lost_call_cnt to be 0, not: "
                       << rebalance_cb1.lost_call_cnt);
  if (rebalance_cb2.lost_call_cnt != 0)
    Test::Fail(tostr() << "Expecting consumer 2's lost_call_cnt to be 0, not: "
                       << rebalance_cb2.lost_call_cnt);

  delete c1;
  delete c2;

  SUB_TEST_PASS();
}
|
|
1836
|
+
|
|
1837
|
+
|
|
1838
|
+
|
|
1839
|
+
/* Check behavior when:
|
|
1840
|
+
* 1. A consumers (with no rebalance_cb) subscribes to a topic.
|
|
1841
|
+
* 2. The consumer calls unsubscribe.
|
|
1842
|
+
* 3. Consumers closed.
|
|
1843
|
+
*/
|
|
1844
|
+
|
|
1845
|
+
static void m_unsubscribe_2() {
|
|
1846
|
+
SUB_TEST();
|
|
1847
|
+
|
|
1848
|
+
std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
1849
|
+
std::string group_name =
|
|
1850
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
1851
|
+
test_create_topic(NULL, topic_name.c_str(), 2, 1);
|
|
1852
|
+
|
|
1853
|
+
RdKafka::KafkaConsumer *c =
|
|
1854
|
+
make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
|
|
1855
|
+
test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
|
|
1856
|
+
|
|
1857
|
+
Test::subscribe(c, topic_name);
|
|
1858
|
+
|
|
1859
|
+
bool done = false;
|
|
1860
|
+
bool unsubscribed = false;
|
|
1861
|
+
while (!done) {
|
|
1862
|
+
Test::poll_once(c, 500);
|
|
1863
|
+
|
|
1864
|
+
if (Test::assignment_partition_count(c, NULL) == 2) {
|
|
1865
|
+
Test::unsubscribe(c);
|
|
1866
|
+
unsubscribed = true;
|
|
1867
|
+
}
|
|
1868
|
+
|
|
1869
|
+
if (unsubscribed && Test::assignment_partition_count(c, NULL) == 0) {
|
|
1870
|
+
Test::Say("Unsubscribe completed");
|
|
1871
|
+
done = true;
|
|
1872
|
+
}
|
|
1873
|
+
}
|
|
1874
|
+
|
|
1875
|
+
Test::Say("Closing consumer\n");
|
|
1876
|
+
c->close();
|
|
1877
|
+
|
|
1878
|
+
delete c;
|
|
1879
|
+
|
|
1880
|
+
SUB_TEST_PASS();
|
|
1881
|
+
}
|
|
1882
|
+
|
|
1883
|
+
|
|
1884
|
+
|
|
1885
|
+
/* Check behavior when:
|
|
1886
|
+
* 1. Two consumers (with rebalance_cb) subscribe to a regex (no matching
|
|
1887
|
+
* topics exist)
|
|
1888
|
+
* 2. Create two topics.
|
|
1889
|
+
* 3. Remove one of the topics.
|
|
1890
|
+
* 3. Consumers closed.
|
|
1891
|
+
*/
|
|
1892
|
+
|
|
1893
|
+
static void n_wildcard() {
|
|
1894
|
+
SUB_TEST();
|
|
1895
|
+
|
|
1896
|
+
const string topic_base_name = Test::mk_topic_name("0113-n_wildcard", 1);
|
|
1897
|
+
const string topic_name_1 = topic_base_name + "_1";
|
|
1898
|
+
const string topic_name_2 = topic_base_name + "_2";
|
|
1899
|
+
const string topic_regex = "^" + topic_base_name + "_.";
|
|
1900
|
+
const string group_name = Test::mk_unique_group_name("0113-n_wildcard");
|
|
1901
|
+
|
|
1902
|
+
std::vector<std::pair<std::string, std::string> > additional_conf;
|
|
1903
|
+
additional_conf.push_back(std::pair<std::string, std::string>(
|
|
1904
|
+
std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
|
|
1905
|
+
|
|
1906
|
+
DefaultRebalanceCb rebalance_cb1;
|
|
1907
|
+
RdKafka::KafkaConsumer *c1 =
|
|
1908
|
+
make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
|
|
1909
|
+
&rebalance_cb1, 30);
|
|
1910
|
+
Test::subscribe(c1, topic_regex);
|
|
1911
|
+
|
|
1912
|
+
DefaultRebalanceCb rebalance_cb2;
|
|
1913
|
+
RdKafka::KafkaConsumer *c2 =
|
|
1914
|
+
make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf,
|
|
1915
|
+
&rebalance_cb2, 30);
|
|
1916
|
+
Test::subscribe(c2, topic_regex);
|
|
1917
|
+
|
|
1918
|
+
/* There are no matching topics, so the consumers should not join the group
|
|
1919
|
+
* initially */
|
|
1920
|
+
Test::poll_once(c1, 500);
|
|
1921
|
+
Test::poll_once(c2, 500);
|
|
1922
|
+
|
|
1923
|
+
if (rebalance_cb1.assign_call_cnt != 0)
|
|
1924
|
+
Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 0 not: "
|
|
1925
|
+
<< rebalance_cb1.assign_call_cnt);
|
|
1926
|
+
if (rebalance_cb2.assign_call_cnt != 0)
|
|
1927
|
+
Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 0 not: "
|
|
1928
|
+
<< rebalance_cb2.assign_call_cnt);
|
|
1929
|
+
|
|
1930
|
+
bool done = false;
|
|
1931
|
+
bool created_topics = false;
|
|
1932
|
+
bool deleted_topic = false;
|
|
1933
|
+
int last_cb1_assign_call_cnt = 0;
|
|
1934
|
+
int last_cb2_assign_call_cnt = 0;
|
|
1935
|
+
int expected_lost_cnt = 0;
|
|
1936
|
+
while (!done) {
|
|
1937
|
+
Test::poll_once(c1, 500);
|
|
1938
|
+
Test::poll_once(c2, 500);
|
|
1939
|
+
|
|
1940
|
+
if (Test::assignment_partition_count(c1, NULL) == 0 &&
|
|
1941
|
+
Test::assignment_partition_count(c2, NULL) == 0 && !created_topics) {
|
|
1942
|
+
Test::Say(
|
|
1943
|
+
"Creating two topics with 2 partitions each that match regex\n");
|
|
1944
|
+
test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
|
|
1945
|
+
test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
|
|
1946
|
+
test_wait_topic_exists(NULL, topic_name_1.c_str(), 5000);
|
|
1947
|
+
test_wait_topic_exists(NULL, topic_name_2.c_str(), 5000);
|
|
1948
|
+
/* The consumers should autonomously discover these topics and start
|
|
1949
|
+
* consuming from them. This happens in the background - is not
|
|
1950
|
+
* influenced by whether we wait for the topics to be created before
|
|
1951
|
+
* continuing the main loop. It is possible that both topics are
|
|
1952
|
+
* discovered simultaneously, requiring a single rebalance OR that
|
|
1953
|
+
* topic 1 is discovered first (it was created first), a rebalance
|
|
1954
|
+
* initiated, then topic 2 discovered, then another rebalance
|
|
1955
|
+
* initiated to include it.
|
|
1956
|
+
*/
|
|
1957
|
+
created_topics = true;
|
|
1958
|
+
}
|
|
1959
|
+
|
|
1960
|
+
if (Test::assignment_partition_count(c1, NULL) == 2 &&
|
|
1961
|
+
Test::assignment_partition_count(c2, NULL) == 2 && !deleted_topic) {
|
|
1962
|
+
if (rebalance_cb1.nonempty_assign_call_cnt == 1) {
|
|
1963
|
+
/* just one rebalance was required */
|
|
1964
|
+
TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 1,
|
|
1965
|
+
"Expecting C_1's nonempty_assign_call_cnt to be 1 not %d ",
|
|
1966
|
+
rebalance_cb1.nonempty_assign_call_cnt);
|
|
1967
|
+
TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 1,
|
|
1968
|
+
"Expecting C_2's nonempty_assign_call_cnt to be 1 not %d ",
|
|
1969
|
+
rebalance_cb2.nonempty_assign_call_cnt);
|
|
1970
|
+
} else {
|
|
1971
|
+
/* two rebalances were required (occurs infrequently) */
|
|
1972
|
+
TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 2,
|
|
1973
|
+
"Expecting C_1's nonempty_assign_call_cnt to be 2 not %d ",
|
|
1974
|
+
rebalance_cb1.nonempty_assign_call_cnt);
|
|
1975
|
+
TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 2,
|
|
1976
|
+
"Expecting C_2's nonempty_assign_call_cnt to be 2 not %d ",
|
|
1977
|
+
rebalance_cb2.nonempty_assign_call_cnt);
|
|
1978
|
+
}
|
|
1979
|
+
|
|
1980
|
+
TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 0,
|
|
1981
|
+
"Expecting C_1's revoke_call_cnt to be 0 not %d ",
|
|
1982
|
+
rebalance_cb1.revoke_call_cnt);
|
|
1983
|
+
TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 0,
|
|
1984
|
+
"Expecting C_2's revoke_call_cnt to be 0 not %d ",
|
|
1985
|
+
rebalance_cb2.revoke_call_cnt);
|
|
1986
|
+
|
|
1987
|
+
last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt;
|
|
1988
|
+
last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt;
|
|
1989
|
+
|
|
1990
|
+
Test::Say("Deleting topic 1\n");
|
|
1991
|
+
Test::delete_topic(c1, topic_name_1.c_str());
|
|
1992
|
+
deleted_topic = true;
|
|
1993
|
+
}
|
|
1994
|
+
|
|
1995
|
+
if (Test::assignment_partition_count(c1, NULL) == 1 &&
|
|
1996
|
+
Test::assignment_partition_count(c2, NULL) == 1 && deleted_topic) {
|
|
1997
|
+
/* accumulated in lost case as well for the classic protocol*/
|
|
1998
|
+
TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 1,
|
|
1999
|
+
"Expecting C_1's revoke_call_cnt to be 1 not %d",
|
|
2000
|
+
rebalance_cb1.revoke_call_cnt);
|
|
2001
|
+
TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 1,
|
|
2002
|
+
"Expecting C_2's revoke_call_cnt to be 1 not %d",
|
|
2003
|
+
rebalance_cb2.revoke_call_cnt);
|
|
2004
|
+
|
|
2005
|
+
/* Deleted topics are not counted as lost in KIP-848.
|
|
2006
|
+
* Assignment changes are propogated through ConsumerGroupHeartbeat. */
|
|
2007
|
+
if (test_consumer_group_protocol_classic()) {
|
|
2008
|
+
expected_lost_cnt++;
|
|
2009
|
+
}
|
|
2010
|
+
|
|
2011
|
+
TEST_ASSERT(rebalance_cb1.lost_call_cnt == expected_lost_cnt,
|
|
2012
|
+
"Expecting C_1's lost_call_cnt to be %d not %d",
|
|
2013
|
+
expected_lost_cnt, rebalance_cb1.lost_call_cnt);
|
|
2014
|
+
TEST_ASSERT(rebalance_cb2.lost_call_cnt == expected_lost_cnt,
|
|
2015
|
+
"Expecting C_2's lost_call_cnt to be %d not %d",
|
|
2016
|
+
expected_lost_cnt, rebalance_cb2.lost_call_cnt);
|
|
2017
|
+
|
|
2018
|
+
/* Consumers will rejoin group after revoking the lost partitions.
|
|
2019
|
+
* this will result in an rebalance_cb assign (empty partitions).
|
|
2020
|
+
* it follows the revoke, which has already been confirmed to have
|
|
2021
|
+
* happened. */
|
|
2022
|
+
Test::Say("Waiting for rebalance_cb assigns\n");
|
|
2023
|
+
while (rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt ||
|
|
2024
|
+
rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt) {
|
|
2025
|
+
Test::poll_once(c1, 500);
|
|
2026
|
+
Test::poll_once(c2, 500);
|
|
2027
|
+
}
|
|
2028
|
+
|
|
2029
|
+
Test::Say("Consumers are subscribed to one partition each\n");
|
|
2030
|
+
done = true;
|
|
2031
|
+
}
|
|
2032
|
+
}
|
|
2033
|
+
|
|
2034
|
+
Test::Say("Closing consumer 1\n");
|
|
2035
|
+
last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt;
|
|
2036
|
+
c1->close();
|
|
2037
|
+
|
|
2038
|
+
/* There should be no assign rebalance_cb calls on close */
|
|
2039
|
+
TEST_ASSERT(rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt,
|
|
2040
|
+
"Expecting C_1's assign_call_cnt to be %d not %d",
|
|
2041
|
+
last_cb1_assign_call_cnt, rebalance_cb1.assign_call_cnt);
|
|
2042
|
+
|
|
2043
|
+
/* Let C_2 catch up on the rebalance and get assigned C_1's partitions. */
|
|
2044
|
+
last_cb2_assign_call_cnt = rebalance_cb2.nonempty_assign_call_cnt;
|
|
2045
|
+
while (rebalance_cb2.nonempty_assign_call_cnt == last_cb2_assign_call_cnt)
|
|
2046
|
+
Test::poll_once(c2, 500);
|
|
2047
|
+
|
|
2048
|
+
Test::Say("Closing consumer 2\n");
|
|
2049
|
+
last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt;
|
|
2050
|
+
c2->close();
|
|
2051
|
+
|
|
2052
|
+
/* There should be no assign rebalance_cb calls on close */
|
|
2053
|
+
TEST_ASSERT(rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt,
|
|
2054
|
+
"Expecting C_2's assign_call_cnt to be %d not %d",
|
|
2055
|
+
last_cb2_assign_call_cnt, rebalance_cb2.assign_call_cnt);
|
|
2056
|
+
|
|
2057
|
+
TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 2,
|
|
2058
|
+
"Expecting C_1's revoke_call_cnt to be 2 not %d",
|
|
2059
|
+
rebalance_cb1.revoke_call_cnt);
|
|
2060
|
+
TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 2,
|
|
2061
|
+
"Expecting C_2's revoke_call_cnt to be 2 not %d",
|
|
2062
|
+
rebalance_cb2.revoke_call_cnt);
|
|
2063
|
+
|
|
2064
|
+
TEST_ASSERT(rebalance_cb1.lost_call_cnt == expected_lost_cnt,
|
|
2065
|
+
"Expecting C_1's lost_call_cnt to be %d, not %d",
|
|
2066
|
+
expected_lost_cnt, rebalance_cb1.lost_call_cnt);
|
|
2067
|
+
TEST_ASSERT(rebalance_cb2.lost_call_cnt == expected_lost_cnt,
|
|
2068
|
+
"Expecting C_2's lost_call_cnt to be %d, not %d",
|
|
2069
|
+
expected_lost_cnt, rebalance_cb2.lost_call_cnt);
|
|
2070
|
+
|
|
2071
|
+
delete c1;
|
|
2072
|
+
delete c2;
|
|
2073
|
+
|
|
2074
|
+
SUB_TEST_PASS();
|
|
2075
|
+
}
|
|
2076
|
+
|
|
2077
|
+
|
|
2078
|
+
|
|
2079
|
+
/* Check behavior when:
 * 1. Consumer (librdkafka) subscribes to two topics (2 and 6 partitions).
 * 2. Consumer (java) subscribes to the same two topics.
 * 3. Consumer (librdkafka) unsubscribes from the two partition topic.
 * 4. Consumer (java) process closes upon detecting the above unsubscribe.
 * 5. Consumer (librdkafka) will now be subscribed to 6 partitions.
 * 6. Close librdkafka consumer.
 */

static void o_java_interop() {
  SUB_TEST();

  /* The helper Java client is not configured for SASL, so skip the whole
   * subtest when the cluster requires it. */
  if (*test_conf_get(NULL, "sasl.mechanism") != '\0')
    SUB_TEST_SKIP(
        "Cluster is set up for SASL: we won't bother with that "
        "for the Java client\n");

  /* Topic 1 gets 2 partitions, topic 2 gets 6 (8 total). */
  std::string topic_name_1 = Test::mk_topic_name("0113_o_2", 1);
  std::string topic_name_2 = Test::mk_topic_name("0113_o_6", 1);
  std::string group_name = Test::mk_unique_group_name("0113_o");
  test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
  test_create_topic(NULL, topic_name_2.c_str(), 6, 1);

  DefaultRebalanceCb rebalance_cb;
  RdKafka::KafkaConsumer *c = make_consumer(
      "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25);
  test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
  test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);

  Test::subscribe(c, topic_name_1, topic_name_2);

  /* State machine driven by the observed assignment size:
   * 8 -> start Java consumer, 4 -> shrink our subscription,
   * 2 (twice) -> detect Java consumer leaving, then done. */
  bool done = false;
  bool changed_subscription = false;
  bool changed_subscription_done = false;
  int java_pid = 0;
  while (!done) {
    Test::poll_once(c, 500);

    if (1) // FIXME: Remove after debugging
      Test::Say(tostr() << "Assignment partition count: "
                        << Test::assignment_partition_count(c, NULL)
                        << ", changed_sub " << changed_subscription
                        << ", changed_sub_done " << changed_subscription_done
                        << ", assign_call_cnt " << rebalance_cb.assign_call_cnt
                        << "\n");

    /* Once we own all 8 partitions, launch the Java consumer so the
     * group has to rebalance between the two clients. */
    if (Test::assignment_partition_count(c, NULL) == 8 && !java_pid) {
      Test::Say(_C_GRN "librdkafka consumer assigned to 8 partitions\n");
      string bootstrapServers = get_bootstrap_servers();
      /* argv: name, brokers, topic1, topic2, group, NULL terminator. */
      const char *argv[1 + 1 + 1 + 1 + 1 + 1];
      size_t i = 0;
      argv[i++] = "test1";
      argv[i++] = bootstrapServers.c_str();
      argv[i++] = topic_name_1.c_str();
      argv[i++] = topic_name_2.c_str();
      argv[i++] = group_name.c_str();
      argv[i] = NULL;
      java_pid = test_run_java("IncrementalRebalanceCli", argv);
      if (java_pid <= 0)
        Test::Fail(tostr() << "Unexpected pid: " << java_pid);
    }

    /* Assignment dropped to 4: the Java consumer joined and took half.
     * Now unsubscribe from the 6-partition topic. */
    if (Test::assignment_partition_count(c, NULL) == 4 && java_pid != 0 &&
        !changed_subscription) {
      /* Callback count can vary in KIP-848 */
      if (test_consumer_group_protocol_classic() &&
          rebalance_cb.assign_call_cnt != 2)
        Test::Fail(tostr() << "Expecting consumer's assign_call_cnt to be 2, "
                              "not "
                           << rebalance_cb.assign_call_cnt);
      Test::Say(_C_GRN "Java consumer is now part of the group\n");
      Test::subscribe(c, topic_name_1);
      changed_subscription = true;
    }

    /* Depending on the timing of resubscribe rebalancing and the
     * Java consumer terminating we might have one or two rebalances,
     * hence the fuzzy <=5 and >=5 checks. */
    if (Test::assignment_partition_count(c, NULL) == 2 &&
        changed_subscription && rebalance_cb.assign_call_cnt <= 5 &&
        !changed_subscription_done) {
      /* All topic 1 partitions will be allocated to this consumer whether or
       * not the Java consumer has unsubscribed yet because the sticky algorithm
       * attempts to ensure partition counts are even. */
      Test::Say(_C_GRN "Consumer 1 has unsubscribed from topic 2\n");
      changed_subscription_done = true;
    }

    if (Test::assignment_partition_count(c, NULL) == 2 &&
        changed_subscription && rebalance_cb.assign_call_cnt >= 5 &&
        changed_subscription_done) {
      /* When the java consumer closes, this will cause an empty assign
       * rebalance_cb event, allowing detection of when this has happened. */
      Test::Say(_C_GRN "Java consumer has left the group\n");
      done = true;
    }
  }

  Test::Say("Closing consumer\n");
  c->close();

  /* Expected behavior is IncrementalRebalanceCli will exit cleanly, timeout
   * otherwise. */
  test_waitpid(java_pid);

  delete c;

  SUB_TEST_PASS();
}
|
|
2187
|
+
|
|
2188
|
+
|
|
2189
|
+
|
|
2190
|
+
/* Check behavior when:
|
|
2191
|
+
* - Single consumer subscribes to topic.
|
|
2192
|
+
* - Soon after (timing such that rebalance is probably in progress) it
|
|
2193
|
+
* subscribes to a different topic.
|
|
2194
|
+
*/
|
|
2195
|
+
|
|
2196
|
+
static void s_subscribe_when_rebalancing(int variation) {
|
|
2197
|
+
SUB_TEST("variation %d", variation);
|
|
2198
|
+
|
|
2199
|
+
std::string topic_name_1 =
|
|
2200
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
2201
|
+
std::string topic_name_2 =
|
|
2202
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
2203
|
+
std::string topic_name_3 =
|
|
2204
|
+
Test::mk_topic_name("0113-cooperative_rebalance", 1);
|
|
2205
|
+
std::string group_name =
|
|
2206
|
+
Test::mk_unique_group_name("0113-cooperative_rebalance");
|
|
2207
|
+
test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
|
|
2208
|
+
test_create_topic(NULL, topic_name_2.c_str(), 1, 1);
|
|
2209
|
+
test_create_topic(NULL, topic_name_3.c_str(), 1, 1);
|
|
2210
|
+
|
|
2211
|
+
DefaultRebalanceCb rebalance_cb;
|
|
2212
|
+
RdKafka::KafkaConsumer *c = make_consumer(
|
|
2213
|
+
"C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25);
|
|
2214
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
|
|
2215
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
|
|
2216
|
+
test_wait_topic_exists(c->c_ptr(), topic_name_3.c_str(), 10 * 1000);
|
|
2217
|
+
|
|
2218
|
+
if (variation == 2 || variation == 4 || variation == 6) {
|
|
2219
|
+
/* Pre-cache metadata for all topics. */
|
|
2220
|
+
class RdKafka::Metadata *metadata;
|
|
2221
|
+
c->metadata(true, NULL, &metadata, 5000);
|
|
2222
|
+
delete metadata;
|
|
2223
|
+
}
|
|
2224
|
+
|
|
2225
|
+
Test::subscribe(c, topic_name_1);
|
|
2226
|
+
Test::wait_for_assignment(c, 1, &topic_name_1);
|
|
2227
|
+
|
|
2228
|
+
Test::subscribe(c, topic_name_2);
|
|
2229
|
+
|
|
2230
|
+
if (variation == 3 || variation == 5)
|
|
2231
|
+
Test::poll_once(c, 500);
|
|
2232
|
+
|
|
2233
|
+
if (variation < 5) {
|
|
2234
|
+
// Very quickly after subscribing to topic 2, subscribe to topic 3.
|
|
2235
|
+
Test::subscribe(c, topic_name_3);
|
|
2236
|
+
Test::wait_for_assignment(c, 1, &topic_name_3);
|
|
2237
|
+
} else {
|
|
2238
|
+
// ..or unsubscribe.
|
|
2239
|
+
Test::unsubscribe(c);
|
|
2240
|
+
Test::wait_for_assignment(c, 0, NULL);
|
|
2241
|
+
}
|
|
2242
|
+
|
|
2243
|
+
delete c;
|
|
2244
|
+
|
|
2245
|
+
SUB_TEST_PASS();
|
|
2246
|
+
}
|
|
2247
|
+
|
|
2248
|
+
|
|
2249
|
+
|
|
2250
|
+
/* Check behavior when:
 * - Two consumer subscribe to a topic.
 * - Max poll interval is exceeded on the first consumer.
 */

static void t_max_poll_interval_exceeded(int variation) {
  SUB_TEST("variation %d", variation);

  std::string topic_name_1 =
      Test::mk_topic_name("0113-cooperative_rebalance", 1);
  std::string group_name =
      Test::mk_unique_group_name("0113-cooperative_rebalance");
  test_create_topic(NULL, topic_name_1.c_str(), 2, 1);

  /* Short session/poll-interval timeouts so the max.poll.interval.ms
   * violation is triggered quickly once C_1 stops being polled. */
  std::vector<std::pair<std::string, std::string> > additional_conf;
  additional_conf.push_back(std::pair<std::string, std::string>(
      std::string("session.timeout.ms"), std::string("6000")));
  additional_conf.push_back(std::pair<std::string, std::string>(
      std::string("max.poll.interval.ms"), std::string("7000")));

  DefaultRebalanceCb rebalance_cb1;
  RdKafka::KafkaConsumer *c1 =
      make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
                    &rebalance_cb1, 30);
  DefaultRebalanceCb rebalance_cb2;
  RdKafka::KafkaConsumer *c2 =
      make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf,
                    &rebalance_cb2, 30);

  test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000);
  test_wait_topic_exists(c2->c_ptr(), topic_name_1.c_str(), 10 * 1000);

  Test::subscribe(c1, topic_name_1);
  Test::subscribe(c2, topic_name_1);

  bool done = false;
  bool both_have_been_assigned = false;
  /* Expected callback counts for the classic group protocol, checked after
   * the consumers close (see below). */
  int expected_cb1_assign_call_cnt = 1;
  int expected_cb2_assign_call_cnt = 2;
  int expected_cb1_revoke_call_cnt = 1;
  int expected_cb2_revoke_call_cnt = 1;
  int expected_cb1_lost_call_cnt = 1;

  while (!done) {
    /* Deliberately stop polling C_1 once both consumers hold a partition:
     * that is what makes C_1 exceed max.poll.interval.ms. C_2 keeps being
     * polled so it can pick up C_1's partition. */
    if (!both_have_been_assigned)
      Test::poll_once(c1, 500);
    Test::poll_once(c2, 500);

    if (Test::assignment_partition_count(c1, NULL) == 1 &&
        Test::assignment_partition_count(c2, NULL) == 1 &&
        !both_have_been_assigned) {
      Test::Say(
          tostr()
          << "Both consumers are assigned to topic " << topic_name_1
          << ". WAITING 7 seconds for max.poll.interval.ms to be exceeded\n");
      both_have_been_assigned = true;
    }

    /* C_2 owning both partitions means C_1 was evicted from the group. */
    if (Test::assignment_partition_count(c2, NULL) == 2 &&
        both_have_been_assigned) {
      Test::Say("Consumer 1 is no longer assigned any partitions, done\n");
      done = true;
    }
  }

  if (variation == 1 || variation == 3) {
    /* The lost callback only fires once C_1 is polled again, so it must
     * still be zero at this point. */
    if (rebalance_cb1.lost_call_cnt != 0)
      Test::Fail(
          tostr() << "Expected consumer 1 lost revoke count to be 0, not: "
                  << rebalance_cb1.lost_call_cnt);
    Test::poll_once(c1,
                    500); /* Eat the max poll interval exceeded error message */
    Test::poll_once(c1,
                    500); /* Trigger the rebalance_cb with lost partitions */
    if (rebalance_cb1.lost_call_cnt != expected_cb1_lost_call_cnt)
      Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be "
                         << expected_cb1_lost_call_cnt
                         << ", not: " << rebalance_cb1.lost_call_cnt);
  }

  if (variation == 3) {
    /* Last poll will cause a rejoin, wait that the rejoin happens. */
    rd_sleep(5);
    expected_cb2_revoke_call_cnt++;
  }

  c1->close();
  c2->close();

  if (rebalance_cb1.lost_call_cnt != expected_cb1_lost_call_cnt)
    Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be "
                       << expected_cb1_lost_call_cnt
                       << ", not: " << rebalance_cb1.lost_call_cnt);

  /* Callback count can vary in KIP-848 */
  if (test_consumer_group_protocol_classic()) {
    if (rebalance_cb1.nonempty_assign_call_cnt != expected_cb1_assign_call_cnt)
      Test::Fail(tostr() << "Expected consumer 1 non-empty assign count to be "
                         << expected_cb1_assign_call_cnt << ", not: "
                         << rebalance_cb1.nonempty_assign_call_cnt);
    if (rebalance_cb2.nonempty_assign_call_cnt != expected_cb2_assign_call_cnt)
      Test::Fail(tostr() << "Expected consumer 2 non-empty assign count to be "
                         << expected_cb2_assign_call_cnt << ", not: "
                         << rebalance_cb2.nonempty_assign_call_cnt);

    if (rebalance_cb1.revoke_call_cnt != expected_cb1_revoke_call_cnt)
      Test::Fail(tostr() << "Expected consumer 1 revoke count to be "
                         << expected_cb1_revoke_call_cnt
                         << ", not: " << rebalance_cb1.revoke_call_cnt);
    if (rebalance_cb2.revoke_call_cnt != expected_cb2_revoke_call_cnt)
      Test::Fail(tostr() << "Expected consumer 2 revoke count to be "
                         << expected_cb2_revoke_call_cnt
                         << ", not: " << rebalance_cb2.revoke_call_cnt);
  }

  delete c1;
  delete c2;

  SUB_TEST_PASS();
}
|
|
2370
|
+
|
|
2371
|
+
|
|
2372
|
+
/**
|
|
2373
|
+
* @brief Poll all consumers until there are no more events or messages
|
|
2374
|
+
* and the timeout has expired.
|
|
2375
|
+
*/
|
|
2376
|
+
static void poll_all_consumers(RdKafka::KafkaConsumer **consumers,
|
|
2377
|
+
DefaultRebalanceCb *rebalance_cbs,
|
|
2378
|
+
size_t num,
|
|
2379
|
+
int timeout_ms) {
|
|
2380
|
+
int64_t ts_end = test_clock() + (timeout_ms * 1000);
|
|
2381
|
+
|
|
2382
|
+
/* Poll all consumers until no more events are seen,
|
|
2383
|
+
* this makes sure we exhaust the current state events before returning. */
|
|
2384
|
+
bool evented;
|
|
2385
|
+
do {
|
|
2386
|
+
evented = false;
|
|
2387
|
+
for (size_t i = 0; i < num; i++) {
|
|
2388
|
+
int block_ms = min(10, (int)((ts_end - test_clock()) / 1000));
|
|
2389
|
+
while (rebalance_cbs[i].poll_once(consumers[i], max(block_ms, 0)))
|
|
2390
|
+
evented = true;
|
|
2391
|
+
}
|
|
2392
|
+
} while (evented || test_clock() < ts_end);
|
|
2393
|
+
}
|
|
2394
|
+
|
|
2395
|
+
|
|
2396
|
+
/**
|
|
2397
|
+
* @brief Stress test with 8 consumers subscribing, fetching and committing.
|
|
2398
|
+
*
|
|
2399
|
+
* @param subscription_variation 0..2
|
|
2400
|
+
*
|
|
2401
|
+
* TODO: incorporate committing offsets.
|
|
2402
|
+
*/
|
|
2403
|
+
|
|
2404
|
+
static void u_multiple_subscription_changes(bool use_rebalance_cb,
|
|
2405
|
+
int subscription_variation) {
|
|
2406
|
+
const int N_CONSUMERS = 8;
|
|
2407
|
+
const int N_TOPICS = 2;
|
|
2408
|
+
const int N_PARTS_PER_TOPIC = N_CONSUMERS * N_TOPICS;
|
|
2409
|
+
const int N_PARTITIONS = N_PARTS_PER_TOPIC * N_TOPICS;
|
|
2410
|
+
const int N_MSGS_PER_PARTITION = 1000;
|
|
2411
|
+
|
|
2412
|
+
SUB_TEST("use_rebalance_cb: %d, subscription_variation: %d",
|
|
2413
|
+
(int)use_rebalance_cb, subscription_variation);
|
|
2414
|
+
|
|
2415
|
+
string topic_name_1 = Test::mk_topic_name("0113u_1", 1);
|
|
2416
|
+
string topic_name_2 = Test::mk_topic_name("0113u_2", 1);
|
|
2417
|
+
string group_name = Test::mk_unique_group_name("0113u");
|
|
2418
|
+
|
|
2419
|
+
test_create_topic(NULL, topic_name_1.c_str(), N_PARTS_PER_TOPIC, 1);
|
|
2420
|
+
test_create_topic(NULL, topic_name_2.c_str(), N_PARTS_PER_TOPIC, 1);
|
|
2421
|
+
|
|
2422
|
+
Test::Say("Creating consumers\n");
|
|
2423
|
+
DefaultRebalanceCb rebalance_cbs[N_CONSUMERS];
|
|
2424
|
+
RdKafka::KafkaConsumer *consumers[N_CONSUMERS];
|
|
2425
|
+
|
|
2426
|
+
for (int i = 0; i < N_CONSUMERS; i++) {
|
|
2427
|
+
std::string name = tostr() << "C_" << i;
|
|
2428
|
+
consumers[i] =
|
|
2429
|
+
make_consumer(name.c_str(), group_name, "cooperative-sticky", NULL,
|
|
2430
|
+
use_rebalance_cb ? &rebalance_cbs[i] : NULL, 120);
|
|
2431
|
+
}
|
|
2432
|
+
|
|
2433
|
+
test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_1.c_str(),
|
|
2434
|
+
10 * 1000);
|
|
2435
|
+
test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_2.c_str(),
|
|
2436
|
+
10 * 1000);
|
|
2437
|
+
|
|
2438
|
+
|
|
2439
|
+
/*
|
|
2440
|
+
* Seed all partitions with the same number of messages so we later can
|
|
2441
|
+
* verify that consumption is working.
|
|
2442
|
+
*/
|
|
2443
|
+
vector<pair<Toppar, int> > ptopics;
|
|
2444
|
+
ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_1, N_PARTS_PER_TOPIC),
|
|
2445
|
+
N_MSGS_PER_PARTITION));
|
|
2446
|
+
ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_2, N_PARTS_PER_TOPIC),
|
|
2447
|
+
N_MSGS_PER_PARTITION));
|
|
2448
|
+
produce_msgs(ptopics);
|
|
2449
|
+
|
|
2450
|
+
|
|
2451
|
+
/*
|
|
2452
|
+
* Track what topics a consumer should be subscribed to and use this to
|
|
2453
|
+
* verify both its subscription and assignment throughout the test.
|
|
2454
|
+
*/
|
|
2455
|
+
|
|
2456
|
+
/* consumer -> currently subscribed topics */
|
|
2457
|
+
map<int, vector<string> > consumer_topics;
|
|
2458
|
+
|
|
2459
|
+
/* topic -> consumers subscribed to topic */
|
|
2460
|
+
map<string, set<int> > topic_consumers;
|
|
2461
|
+
|
|
2462
|
+
/* The subscription alternatives that consumers
|
|
2463
|
+
* alter between in the playbook. */
|
|
2464
|
+
vector<string> SUBSCRIPTION_1;
|
|
2465
|
+
vector<string> SUBSCRIPTION_2;
|
|
2466
|
+
|
|
2467
|
+
SUBSCRIPTION_1.push_back(topic_name_1);
|
|
2468
|
+
|
|
2469
|
+
switch (subscription_variation) {
|
|
2470
|
+
case 0:
|
|
2471
|
+
SUBSCRIPTION_2.push_back(topic_name_1);
|
|
2472
|
+
SUBSCRIPTION_2.push_back(topic_name_2);
|
|
2473
|
+
break;
|
|
2474
|
+
|
|
2475
|
+
case 1:
|
|
2476
|
+
SUBSCRIPTION_2.push_back(topic_name_2);
|
|
2477
|
+
break;
|
|
2478
|
+
|
|
2479
|
+
case 2:
|
|
2480
|
+
/* No subscription */
|
|
2481
|
+
break;
|
|
2482
|
+
}
|
|
2483
|
+
|
|
2484
|
+
sort(SUBSCRIPTION_1.begin(), SUBSCRIPTION_1.end());
|
|
2485
|
+
sort(SUBSCRIPTION_2.begin(), SUBSCRIPTION_2.end());
|
|
2486
|
+
|
|
2487
|
+
|
|
2488
|
+
/*
|
|
2489
|
+
* Define playbook
|
|
2490
|
+
*/
|
|
2491
|
+
const struct {
|
|
2492
|
+
int timestamp_ms;
|
|
2493
|
+
int consumer;
|
|
2494
|
+
const vector<string> *topics;
|
|
2495
|
+
} playbook[] = {/* timestamp_ms, consumer_number, subscribe-to-topics */
|
|
2496
|
+
{0, 0, &SUBSCRIPTION_1}, /* Cmd 0 */
|
|
2497
|
+
{4000, 1, &SUBSCRIPTION_1}, {4000, 1, &SUBSCRIPTION_1},
|
|
2498
|
+
{4000, 1, &SUBSCRIPTION_1}, {4000, 2, &SUBSCRIPTION_1},
|
|
2499
|
+
{6000, 3, &SUBSCRIPTION_1}, /* Cmd 5 */
|
|
2500
|
+
{6000, 4, &SUBSCRIPTION_1}, {6000, 5, &SUBSCRIPTION_1},
|
|
2501
|
+
{6000, 6, &SUBSCRIPTION_1}, {6000, 7, &SUBSCRIPTION_2},
|
|
2502
|
+
{6000, 1, &SUBSCRIPTION_1}, /* Cmd 10 */
|
|
2503
|
+
{6000, 1, &SUBSCRIPTION_2}, {6000, 1, &SUBSCRIPTION_1},
|
|
2504
|
+
{6000, 2, &SUBSCRIPTION_2}, {7000, 2, &SUBSCRIPTION_1},
|
|
2505
|
+
{7000, 1, &SUBSCRIPTION_2}, /* Cmd 15 */
|
|
2506
|
+
{8000, 0, &SUBSCRIPTION_2}, {8000, 1, &SUBSCRIPTION_1},
|
|
2507
|
+
{8000, 0, &SUBSCRIPTION_1}, {13000, 2, &SUBSCRIPTION_1},
|
|
2508
|
+
{13000, 1, &SUBSCRIPTION_2}, /* Cmd 20 */
|
|
2509
|
+
{13000, 5, &SUBSCRIPTION_2}, {14000, 6, &SUBSCRIPTION_2},
|
|
2510
|
+
{15000, 7, &SUBSCRIPTION_1}, {15000, 1, &SUBSCRIPTION_1},
|
|
2511
|
+
{15000, 5, &SUBSCRIPTION_1}, /* Cmd 25 */
|
|
2512
|
+
{15000, 6, &SUBSCRIPTION_1}, {INT_MAX, 0, 0}};
|
|
2513
|
+
|
|
2514
|
+
/*
|
|
2515
|
+
* Run the playbook
|
|
2516
|
+
*/
|
|
2517
|
+
int cmd_number = 0;
|
|
2518
|
+
uint64_t ts_start = test_clock();
|
|
2519
|
+
|
|
2520
|
+
while (playbook[cmd_number].timestamp_ms != INT_MAX) {
|
|
2521
|
+
TEST_ASSERT(playbook[cmd_number].consumer < N_CONSUMERS);
|
|
2522
|
+
|
|
2523
|
+
Test::Say(tostr() << "Cmd #" << cmd_number << ": wait "
|
|
2524
|
+
<< playbook[cmd_number].timestamp_ms << "ms\n");
|
|
2525
|
+
|
|
2526
|
+
poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS,
|
|
2527
|
+
playbook[cmd_number].timestamp_ms -
|
|
2528
|
+
(int)((test_clock() - ts_start) / 1000));
|
|
2529
|
+
|
|
2530
|
+
/* Verify consumer assignments match subscribed topics */
|
|
2531
|
+
map<Toppar, RdKafka::KafkaConsumer *> all_assignments;
|
|
2532
|
+
for (int i = 0; i < N_CONSUMERS; i++)
|
|
2533
|
+
verify_consumer_assignment(
|
|
2534
|
+
consumers[i], rebalance_cbs[i], consumer_topics[i],
|
|
2535
|
+
/* Allow empty assignment */
|
|
2536
|
+
true,
|
|
2537
|
+
/* Allow mismatch between subscribed topics
|
|
2538
|
+
* and actual assignment since we can't
|
|
2539
|
+
* synchronize the last subscription
|
|
2540
|
+
* to the current assignment due to
|
|
2541
|
+
* an unknown number of rebalances required
|
|
2542
|
+
* for the final assignment to settle.
|
|
2543
|
+
* This is instead checked at the end of
|
|
2544
|
+
* this test case. */
|
|
2545
|
+
true, &all_assignments, -1 /* no msgcnt check*/);
|
|
2546
|
+
|
|
2547
|
+
int cid = playbook[cmd_number].consumer;
|
|
2548
|
+
RdKafka::KafkaConsumer *consumer = consumers[playbook[cmd_number].consumer];
|
|
2549
|
+
const vector<string> *topics = playbook[cmd_number].topics;
|
|
2550
|
+
|
|
2551
|
+
/*
|
|
2552
|
+
* Update our view of the consumer's subscribed topics and vice versa.
|
|
2553
|
+
*/
|
|
2554
|
+
for (vector<string>::const_iterator it = consumer_topics[cid].begin();
|
|
2555
|
+
it != consumer_topics[cid].end(); it++) {
|
|
2556
|
+
topic_consumers[*it].erase(cid);
|
|
2557
|
+
}
|
|
2558
|
+
|
|
2559
|
+
consumer_topics[cid].clear();
|
|
2560
|
+
|
|
2561
|
+
for (vector<string>::const_iterator it = topics->begin();
|
|
2562
|
+
it != topics->end(); it++) {
|
|
2563
|
+
consumer_topics[cid].push_back(*it);
|
|
2564
|
+
topic_consumers[*it].insert(cid);
|
|
2565
|
+
}
|
|
2566
|
+
|
|
2567
|
+
RdKafka::ErrorCode err;
|
|
2568
|
+
|
|
2569
|
+
/*
|
|
2570
|
+
* Change subscription
|
|
2571
|
+
*/
|
|
2572
|
+
if (!topics->empty()) {
|
|
2573
|
+
Test::Say(tostr() << "Consumer: " << consumer->name()
|
|
2574
|
+
<< " is subscribing to topics "
|
|
2575
|
+
<< string_vec_to_str(*topics) << " after "
|
|
2576
|
+
<< ((test_clock() - ts_start) / 1000) << "ms\n");
|
|
2577
|
+
err = consumer->subscribe(*topics);
|
|
2578
|
+
TEST_ASSERT(!err, "Expected subscribe() to succeed, got %s",
|
|
2579
|
+
RdKafka::err2str(err).c_str());
|
|
2580
|
+
} else {
|
|
2581
|
+
Test::Say(tostr() << "Consumer: " << consumer->name()
|
|
2582
|
+
<< " is unsubscribing after "
|
|
2583
|
+
<< ((test_clock() - ts_start) / 1000) << "ms\n");
|
|
2584
|
+
Test::unsubscribe(consumer);
|
|
2585
|
+
}
|
|
2586
|
+
|
|
2587
|
+
/* Mark this consumer as waiting for rebalance so that
|
|
2588
|
+
* verify_consumer_assignment() allows assigned partitions that
|
|
2589
|
+
* (no longer) match the subscription. */
|
|
2590
|
+
rebalance_cbs[cid].wait_rebalance = true;
|
|
2591
|
+
|
|
2592
|
+
|
|
2593
|
+
/*
|
|
2594
|
+
* Verify subscription matches what we think it should be.
|
|
2595
|
+
*/
|
|
2596
|
+
vector<string> subscription;
|
|
2597
|
+
err = consumer->subscription(subscription);
|
|
2598
|
+
TEST_ASSERT(!err, "consumer %s subscription() failed: %s",
|
|
2599
|
+
consumer->name().c_str(), RdKafka::err2str(err).c_str());
|
|
2600
|
+
|
|
2601
|
+
sort(subscription.begin(), subscription.end());
|
|
2602
|
+
|
|
2603
|
+
Test::Say(tostr() << "Consumer " << consumer->name()
|
|
2604
|
+
<< " subscription is now "
|
|
2605
|
+
<< string_vec_to_str(subscription) << "\n");
|
|
2606
|
+
|
|
2607
|
+
if (subscription != *topics)
|
|
2608
|
+
Test::Fail(tostr() << "Expected consumer " << consumer->name()
|
|
2609
|
+
<< " subscription: " << string_vec_to_str(*topics)
|
|
2610
|
+
<< " but got: " << string_vec_to_str(subscription));
|
|
2611
|
+
|
|
2612
|
+
cmd_number++;
|
|
2613
|
+
}
|
|
2614
|
+
|
|
2615
|
+
|
|
2616
|
+
/*
|
|
2617
|
+
* Wait for final rebalances and all consumers to settle,
|
|
2618
|
+
* then verify assignments and received message counts.
|
|
2619
|
+
*/
|
|
2620
|
+
Test::Say(_C_YEL "Waiting for final assignment state\n");
|
|
2621
|
+
int done_count = 0;
|
|
2622
|
+
/* Allow at least 20 seconds for group to stabilize. */
|
|
2623
|
+
int64_t stabilize_until = test_clock() + (20 * 1000 * 1000); /* 20s */
|
|
2624
|
+
|
|
2625
|
+
while (done_count < 2) {
|
|
2626
|
+
bool stabilized = test_clock() > stabilize_until;
|
|
2627
|
+
|
|
2628
|
+
poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS, 5000);
|
|
2629
|
+
|
|
2630
|
+
/* Verify consumer assignments */
|
|
2631
|
+
int counts[N_CONSUMERS];
|
|
2632
|
+
map<Toppar, RdKafka::KafkaConsumer *> all_assignments;
|
|
2633
|
+
Test::Say(tostr() << "Consumer assignments " << "(subscription_variation "
|
|
2634
|
+
<< subscription_variation << ")"
|
|
2635
|
+
<< (stabilized ? " (stabilized)" : "")
|
|
2636
|
+
<< (use_rebalance_cb ? " (use_rebalance_cb)"
|
|
2637
|
+
: " (no rebalance cb)")
|
|
2638
|
+
<< ":\n");
|
|
2639
|
+
for (int i = 0; i < N_CONSUMERS; i++) {
|
|
2640
|
+
bool last_rebalance_stabilized =
|
|
2641
|
+
stabilized &&
|
|
2642
|
+
(!use_rebalance_cb ||
|
|
2643
|
+
/* session.timeout.ms * 2 + 1 */
|
|
2644
|
+
test_clock() > rebalance_cbs[i].ts_last_assign + (13 * 1000 * 1000));
|
|
2645
|
+
|
|
2646
|
+
counts[i] = verify_consumer_assignment(
|
|
2647
|
+
consumers[i], rebalance_cbs[i], consumer_topics[i],
|
|
2648
|
+
/* allow empty */
|
|
2649
|
+
true,
|
|
2650
|
+
/* if we're waiting for a
|
|
2651
|
+
* rebalance it is okay for the
|
|
2652
|
+
* current assignment to contain
|
|
2653
|
+
* topics that this consumer
|
|
2654
|
+
* (no longer) subscribes to. */
|
|
2655
|
+
!last_rebalance_stabilized || !use_rebalance_cb ||
|
|
2656
|
+
rebalance_cbs[i].wait_rebalance,
|
|
2657
|
+
/* do not allow assignments for
|
|
2658
|
+
* topics that are not subscribed*/
|
|
2659
|
+
&all_assignments,
|
|
2660
|
+
/* Verify received message counts
|
|
2661
|
+
* once the assignments have
|
|
2662
|
+
* stabilized.
|
|
2663
|
+
* Requires the rebalance cb.*/
|
|
2664
|
+
done_count > 0 && use_rebalance_cb ? N_MSGS_PER_PARTITION : -1);
|
|
2665
|
+
}
|
|
2666
|
+
|
|
2667
|
+
Test::Say(tostr() << all_assignments.size() << "/" << N_PARTITIONS
|
|
2668
|
+
<< " partitions assigned\n");
|
|
2669
|
+
|
|
2670
|
+
bool done = true;
|
|
2671
|
+
for (int i = 0; i < N_CONSUMERS; i++) {
|
|
2672
|
+
/* For each topic the consumer subscribes to it should
|
|
2673
|
+
* be assigned its share of partitions. */
|
|
2674
|
+
int exp_parts = 0;
|
|
2675
|
+
for (vector<string>::const_iterator it = consumer_topics[i].begin();
|
|
2676
|
+
it != consumer_topics[i].end(); it++)
|
|
2677
|
+
exp_parts += N_PARTS_PER_TOPIC / (int)topic_consumers[*it].size();
|
|
2678
|
+
|
|
2679
|
+
Test::Say(tostr() << (counts[i] == exp_parts ? "" : _C_YEL) << "Consumer "
|
|
2680
|
+
<< consumers[i]->name() << " has " << counts[i]
|
|
2681
|
+
<< " assigned partitions (" << consumer_topics[i].size()
|
|
2682
|
+
<< " subscribed topic(s))" << ", expecting "
|
|
2683
|
+
<< exp_parts << " assigned partitions\n");
|
|
2684
|
+
|
|
2685
|
+
if (counts[i] != exp_parts)
|
|
2686
|
+
done = false;
|
|
2687
|
+
}
|
|
2688
|
+
|
|
2689
|
+
if (done && stabilized) {
|
|
2690
|
+
done_count++;
|
|
2691
|
+
Test::Say(tostr() << "All assignments verified, done count is "
|
|
2692
|
+
<< done_count << "\n");
|
|
2693
|
+
}
|
|
2694
|
+
}
|
|
2695
|
+
|
|
2696
|
+
Test::Say("Disposing consumers\n");
|
|
2697
|
+
for (int i = 0; i < N_CONSUMERS; i++) {
|
|
2698
|
+
/* A consumer will not necessarily get a rebalance after a
|
|
2699
|
+
* subscription change with the consumer protocol */
|
|
2700
|
+
if (test_consumer_group_protocol_classic()) {
|
|
2701
|
+
TEST_ASSERT(!use_rebalance_cb || !rebalance_cbs[i].wait_rebalance,
|
|
2702
|
+
"Consumer %d still waiting for rebalance", i);
|
|
2703
|
+
}
|
|
2704
|
+
|
|
2705
|
+
if (i & 1)
|
|
2706
|
+
consumers[i]->close();
|
|
2707
|
+
delete consumers[i];
|
|
2708
|
+
}
|
|
2709
|
+
|
|
2710
|
+
SUB_TEST_PASS();
|
|
2711
|
+
}
|
|
2712
|
+
|
|
2713
|
+
|
|
2714
|
+
|
|
2715
|
+
extern "C" {

/* Shared state between rebalance_cb() and expect_rebalance0():
 * total number of rebalance callback invocations seen so far. */
static int rebalance_cnt;
/* The rebalance event expect_rebalance0() is currently waiting for;
 * RD_KAFKA_RESP_ERR_NO_ERROR means "any event is acceptable". */
static rd_kafka_resp_err_t rebalance_exp_event;
/* Whether the next rebalance is expected to report a lost assignment. */
static rd_bool_t rebalance_exp_lost;

/* Helper from the C test harness for pretty-printing partition lists. */
extern void test_print_partition_list(
    const rd_kafka_topic_partition_list_t *partitions);
|
|
2723
|
+
|
|
2724
|
+
|
|
2725
|
+
/**
 * @brief Incremental rebalance callback used by the mock-cluster sub-tests.
 *
 * Counts invocations, verifies the event matches the currently expected
 * one (set via expect_rebalance0()), optionally verifies the assignment
 * was lost, then performs the matching incremental (un)assign.
 */
static void rebalance_cb(rd_kafka_t *rk,
                         rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *parts,
                         void *opaque) {
  rebalance_cnt++;
  TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt,
           rd_kafka_err2name(err), parts->cnt);

  test_print_partition_list(parts);

  /* NO_ERROR acts as a wildcard: any event is accepted. */
  TEST_ASSERT(rebalance_exp_event == RD_KAFKA_RESP_ERR_NO_ERROR ||
                  err == rebalance_exp_event,
              "Expected rebalance event %s, not %s",
              rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err));

  if (rebalance_exp_lost) {
    TEST_ASSERT(rd_kafka_assignment_lost(rk), "Expected partitions lost");
    TEST_SAY("Partitions were lost\n");
  }

  if (err != RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
    test_consumer_incremental_unassign("unassign", rk, parts);
  else
    test_consumer_incremental_assign("assign", rk, parts);
}
|
|
2751
|
+
|
|
2752
|
+
/**
|
|
2753
|
+
* @brief Wait for an expected rebalance event, or fail.
|
|
2754
|
+
*/
|
|
2755
|
+
static void expect_rebalance0(const char *func,
|
|
2756
|
+
int line,
|
|
2757
|
+
const char *what,
|
|
2758
|
+
rd_kafka_t *c,
|
|
2759
|
+
rd_kafka_resp_err_t exp_event,
|
|
2760
|
+
rd_bool_t exp_lost,
|
|
2761
|
+
int timeout_s) {
|
|
2762
|
+
int64_t tmout = test_clock() + (timeout_s * 1000000);
|
|
2763
|
+
int start_cnt = rebalance_cnt;
|
|
2764
|
+
|
|
2765
|
+
TEST_SAY("%s:%d: Waiting for %s (%s) for %ds\n", func, line, what,
|
|
2766
|
+
rd_kafka_err2name(exp_event), timeout_s);
|
|
2767
|
+
|
|
2768
|
+
rebalance_exp_lost = exp_lost;
|
|
2769
|
+
rebalance_exp_event = exp_event;
|
|
2770
|
+
|
|
2771
|
+
while (tmout > test_clock() && rebalance_cnt == start_cnt) {
|
|
2772
|
+
test_consumer_poll_once(c, NULL, 1000);
|
|
2773
|
+
}
|
|
2774
|
+
|
|
2775
|
+
if (rebalance_cnt == start_cnt + 1) {
|
|
2776
|
+
rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR;
|
|
2777
|
+
rebalance_exp_lost = exp_lost = rd_false;
|
|
2778
|
+
return;
|
|
2779
|
+
}
|
|
2780
|
+
|
|
2781
|
+
TEST_FAIL("%s:%d: Timed out waiting for %s (%s)", func, line, what,
|
|
2782
|
+
rd_kafka_err2name(exp_event));
|
|
2783
|
+
}
|
|
2784
|
+
|
|
2785
|
+
/**
 * @brief Convenience wrapper around expect_rebalance0() that supplies the
 *        calling function name and line number for log/failure messages.
 */
#define expect_rebalance(WHAT, C, EXP_EVENT, EXP_LOST, TIMEOUT_S)              \
        expect_rebalance0(__FUNCTION__, __LINE__, WHAT, C, EXP_EVENT,          \
                          EXP_LOST, TIMEOUT_S)
|
|
2788
|
+
|
|
2789
|
+
|
|
2790
|
+
/* Check lost partitions revoke occurs on ILLEGAL_GENERATION heartbeat error.
 */

/**
 * @brief Verify that a fatal heartbeat error (ILLEGAL_GENERATION on the
 *        classic protocol, FENCED_MEMBER_EPOCH on the "consumer" protocol)
 *        triggers a revoke with lost partitions, followed by a successful
 *        rejoin once the mock errors are cleared.
 */
static void p_lost_partitions_heartbeat_illegal_generation_test() {
  const char *bootstraps;
  rd_kafka_mock_cluster_t *mcluster;
  const char *groupid = "mygroup";
  const char *topic = "test";
  rd_kafka_t *c;
  rd_kafka_conf_t *conf;

  SUB_TEST_QUICK();

  mcluster = test_mock_cluster_new(3, &bootstraps);

  /* Pin the group coordinator to broker 1 so request errors can be
   * injected deterministically. */
  rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);

  /* Seed the topic with messages */
  test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
                           bootstraps, "batch.num.messages", "10",
                           "security.protocol", "plaintext", NULL);

  test_conf_init(&conf, NULL, 30);
  test_conf_set(conf, "bootstrap.servers", bootstraps);
  test_conf_set(conf, "security.protocol", "PLAINTEXT");
  test_conf_set(conf, "group.id", groupid);
  test_conf_set(conf, "session.timeout.ms", "5000");
  test_conf_set(conf, "heartbeat.interval.ms", "1000");
  test_conf_set(conf, "auto.offset.reset", "earliest");
  test_conf_set(conf, "enable.auto.commit", "false");
  test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");

  c = test_create_consumer(groupid, rebalance_cb, conf, NULL);

  test_consumer_subscribe(c, topic);

  /* Timeouts are session.timeout.ms based, plus slack. */
  expect_rebalance("initial assignment", c,
                   RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
                   rd_false /*don't expect lost*/, 5 + 2);

  if (test_consumer_group_protocol_classic()) {
    /* Fail heartbeats */
    rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_Heartbeat, 5,
                                      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
                                      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
                                      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
                                      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
                                      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
  } else {
    /* Fail heartbeats */
    rd_kafka_mock_broker_push_request_error_rtts(
        mcluster, 1, RD_KAFKAP_ConsumerGroupHeartbeat, 2,
        RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH, 0, RD_KAFKA_RESP_ERR_NO_ERROR,
        1000);
  }

  expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
                   rd_true /*expect lost*/, 10 + 2);

  /* Clear both protocols' error queues; only one was populated. */
  rd_kafka_mock_clear_request_errors(mcluster, RD_KAFKAP_Heartbeat);
  rd_kafka_mock_clear_request_errors(mcluster,
                                     RD_KAFKAP_ConsumerGroupHeartbeat);

  expect_rebalance("rejoin after lost", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
                   rd_false /*don't expect lost*/, 10 + 2);

  TEST_SAY("Closing consumer\n");
  test_consumer_close(c);

  TEST_SAY("Destroying consumer\n");
  rd_kafka_destroy(c);

  TEST_SAY("Destroying mock cluster\n");
  test_mock_cluster_destroy(mcluster);

  SUB_TEST_PASS();
}
|
|
2867
|
+
|
|
2868
|
+
|
|
2869
|
+
|
|
2870
|
+
/* Check lost partitions revoke occurs on ILLEGAL_GENERATION JoinGroup
 * or SyncGroup error.
 */

/**
 * @brief Verify lost-partition handling when JoinGroup (if
 *        \p test_joingroup_fail) or SyncGroup fails with
 *        ILLEGAL_GENERATION during a subscription change,
 *        and that the consumer rejoins once errors are cleared.
 *
 * Classic consumer group protocol only (JoinGroup/SyncGroup do not
 * exist in the "consumer" protocol).
 */
static void q_lost_partitions_illegal_generation_test(
    rd_bool_t test_joingroup_fail) {
  const char *bootstraps;
  rd_kafka_mock_cluster_t *mcluster;
  const char *groupid = "mygroup";
  const char *topic1 = "test1";
  const char *topic2 = "test2";
  rd_kafka_t *c;
  rd_kafka_conf_t *conf;
  rd_kafka_resp_err_t err;
  rd_kafka_topic_partition_list_t *topics;

  SUB_TEST0(!test_joingroup_fail /*quick*/, "test_joingroup_fail=%d",
            test_joingroup_fail);

  mcluster = test_mock_cluster_new(3, &bootstraps);

  rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);

  /* Seed the topic1 with messages */
  test_produce_msgs_easy_v(topic1, 0, 0, 0, 100, 10, "bootstrap.servers",
                           bootstraps, "batch.num.messages", "10",
                           "security.protocol", "plaintext", NULL);

  /* Seed the topic2 with messages */
  test_produce_msgs_easy_v(topic2, 0, 0, 0, 100, 10, "bootstrap.servers",
                           bootstraps, "batch.num.messages", "10",
                           "security.protocol", "plaintext", NULL);

  test_conf_init(&conf, NULL, 30);
  test_conf_set(conf, "bootstrap.servers", bootstraps);
  test_conf_set(conf, "security.protocol", "PLAINTEXT");
  test_conf_set(conf, "group.id", groupid);
  test_conf_set(conf, "session.timeout.ms", "5000");
  test_conf_set(conf, "heartbeat.interval.ms", "1000");
  test_conf_set(conf, "auto.offset.reset", "earliest");
  test_conf_set(conf, "enable.auto.commit", "false");
  test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");

  c = test_create_consumer(groupid, rebalance_cb, conf, NULL);

  test_consumer_subscribe(c, topic1);

  expect_rebalance("initial assignment", c,
                   RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
                   rd_false /*don't expect lost*/, 5 + 2);

  /* Fail JoinGroups or SyncGroups */
  rd_kafka_mock_push_request_errors(
      mcluster, test_joingroup_fail ? RD_KAFKAP_JoinGroup : RD_KAFKAP_SyncGroup,
      5, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
      RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);

  /* Widen the subscription to trigger a rejoin, which will hit the
   * injected errors above. */
  topics = rd_kafka_topic_partition_list_new(2);
  rd_kafka_topic_partition_list_add(topics, topic1, RD_KAFKA_PARTITION_UA);
  rd_kafka_topic_partition_list_add(topics, topic2, RD_KAFKA_PARTITION_UA);
  err = rd_kafka_subscribe(c, topics);
  if (err)
    TEST_FAIL("%s: Failed to subscribe to topics: %s\n", rd_kafka_name(c),
              rd_kafka_err2str(err));
  rd_kafka_topic_partition_list_destroy(topics);

  expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
                   rd_true /*expect lost*/, 10 + 2);

  rd_kafka_mock_clear_request_errors(mcluster, test_joingroup_fail
                                                   ? RD_KAFKAP_JoinGroup
                                                   : RD_KAFKAP_SyncGroup);

  expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
                   rd_false /*expect lost*/, 10 + 2);

  TEST_SAY("Closing consumer\n");
  test_consumer_close(c);

  TEST_SAY("Destroying consumer\n");
  rd_kafka_destroy(c);

  TEST_SAY("Destroying mock cluster\n");
  test_mock_cluster_destroy(mcluster);

  SUB_TEST_PASS();
}
|
|
2960
|
+
|
|
2961
|
+
|
|
2962
|
+
|
|
2963
|
+
/* Check lost partitions revoke occurs on ILLEGAL_GENERATION Commit
|
|
2964
|
+
* error.
|
|
2965
|
+
*/
|
|
2966
|
+
|
|
2967
|
+
static void r_lost_partitions_commit_illegal_generation_test_local() {
|
|
2968
|
+
const char *bootstraps;
|
|
2969
|
+
rd_kafka_mock_cluster_t *mcluster;
|
|
2970
|
+
const char *groupid = "mygroup";
|
|
2971
|
+
const char *topic = "test";
|
|
2972
|
+
const int msgcnt = 100;
|
|
2973
|
+
rd_kafka_t *c;
|
|
2974
|
+
rd_kafka_conf_t *conf;
|
|
2975
|
+
|
|
2976
|
+
SUB_TEST();
|
|
2977
|
+
|
|
2978
|
+
mcluster = test_mock_cluster_new(3, &bootstraps);
|
|
2979
|
+
|
|
2980
|
+
rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
|
|
2981
|
+
|
|
2982
|
+
/* Seed the topic with messages */
|
|
2983
|
+
test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, "bootstrap.servers",
|
|
2984
|
+
bootstraps, "batch.num.messages", "10",
|
|
2985
|
+
"security.protocol", "plaintext", NULL);
|
|
2986
|
+
|
|
2987
|
+
test_conf_init(&conf, NULL, 30);
|
|
2988
|
+
test_conf_set(conf, "bootstrap.servers", bootstraps);
|
|
2989
|
+
test_conf_set(conf, "security.protocol", "PLAINTEXT");
|
|
2990
|
+
test_conf_set(conf, "group.id", groupid);
|
|
2991
|
+
test_conf_set(conf, "auto.offset.reset", "earliest");
|
|
2992
|
+
test_conf_set(conf, "enable.auto.commit", "false");
|
|
2993
|
+
test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
|
|
2994
|
+
|
|
2995
|
+
c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
|
|
2996
|
+
|
|
2997
|
+
test_consumer_subscribe(c, topic);
|
|
2998
|
+
|
|
2999
|
+
expect_rebalance("initial assignment", c,
|
|
3000
|
+
RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
|
|
3001
|
+
rd_false /*don't expect lost*/, 5 + 2);
|
|
3002
|
+
|
|
3003
|
+
|
|
3004
|
+
/* Consume some messages so that the commit has something to commit. */
|
|
3005
|
+
test_consumer_poll("consume", c, -1, -1, -1, msgcnt / 2, NULL);
|
|
3006
|
+
|
|
3007
|
+
/* Fail Commit */
|
|
3008
|
+
rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 5,
|
|
3009
|
+
RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
|
|
3010
|
+
RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
|
|
3011
|
+
RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
|
|
3012
|
+
RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
|
|
3013
|
+
RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
|
|
3014
|
+
|
|
3015
|
+
rd_kafka_commit(c, NULL, rd_false);
|
|
3016
|
+
|
|
3017
|
+
expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
|
|
3018
|
+
rd_true /*expect lost*/, 10 + 2);
|
|
3019
|
+
|
|
3020
|
+
expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
|
|
3021
|
+
rd_false /*expect lost*/, 20 + 2);
|
|
3022
|
+
|
|
3023
|
+
TEST_SAY("Closing consumer\n");
|
|
3024
|
+
test_consumer_close(c);
|
|
3025
|
+
|
|
3026
|
+
TEST_SAY("Destroying consumer\n");
|
|
3027
|
+
rd_kafka_destroy(c);
|
|
3028
|
+
|
|
3029
|
+
TEST_SAY("Destroying mock cluster\n");
|
|
3030
|
+
test_mock_cluster_destroy(mcluster);
|
|
3031
|
+
}
|
|
3032
|
+
|
|
3033
|
+
/* Check commit is retried on FENCED_MEMBER_EPOCH, using new epoch taken
|
|
3034
|
+
* from HB. */
|
|
3035
|
+
static void t_consumer_group_consumer_retry_commit_on_fenced_member_epoch() {
|
|
3036
|
+
const char *bootstraps;
|
|
3037
|
+
rd_kafka_mock_cluster_t *mcluster;
|
|
3038
|
+
const char *groupid = "mygroup";
|
|
3039
|
+
const char *topic = "test";
|
|
3040
|
+
const int msgcnt = 100;
|
|
3041
|
+
rd_kafka_t *c;
|
|
3042
|
+
rd_kafka_conf_t *conf;
|
|
3043
|
+
rd_kafka_topic_partition_list_t *rktpars =
|
|
3044
|
+
rd_kafka_topic_partition_list_new(1);
|
|
3045
|
+
|
|
3046
|
+
SUB_TEST();
|
|
3047
|
+
|
|
3048
|
+
mcluster = test_mock_cluster_new(3, &bootstraps);
|
|
3049
|
+
|
|
3050
|
+
rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
|
|
3051
|
+
|
|
3052
|
+
/* Seed the topic with messages */
|
|
3053
|
+
test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, "bootstrap.servers",
|
|
3054
|
+
bootstraps, "batch.num.messages", "10",
|
|
3055
|
+
"security.protocol", "plaintext", NULL);
|
|
3056
|
+
|
|
3057
|
+
test_conf_init(&conf, NULL, 30);
|
|
3058
|
+
test_conf_set(conf, "bootstrap.servers", bootstraps);
|
|
3059
|
+
test_conf_set(conf, "security.protocol", "PLAINTEXT");
|
|
3060
|
+
test_conf_set(conf, "group.id", groupid);
|
|
3061
|
+
test_conf_set(conf, "auto.offset.reset", "earliest");
|
|
3062
|
+
test_conf_set(conf, "enable.auto.commit", "false");
|
|
3063
|
+
test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
|
|
3064
|
+
|
|
3065
|
+
c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
|
|
3066
|
+
|
|
3067
|
+
test_consumer_subscribe(c, topic);
|
|
3068
|
+
|
|
3069
|
+
expect_rebalance("initial assignment", c,
|
|
3070
|
+
RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
|
|
3071
|
+
rd_false /*don't expect lost*/, 5 + 2);
|
|
3072
|
+
|
|
3073
|
+
|
|
3074
|
+
/* Consume some messages so that the commit has something to commit. */
|
|
3075
|
+
test_consumer_poll("consume", c, -1, -1, -1, msgcnt / 2, NULL);
|
|
3076
|
+
|
|
3077
|
+
/* Fail Commit */
|
|
3078
|
+
rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 5,
|
|
3079
|
+
RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH,
|
|
3080
|
+
RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH,
|
|
3081
|
+
RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH,
|
|
3082
|
+
RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH,
|
|
3083
|
+
RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH);
|
|
3084
|
+
|
|
3085
|
+
rd_kafka_commit(c, NULL, rd_false);
|
|
3086
|
+
|
|
3087
|
+
TEST_CALL_ERR__(rd_kafka_committed(c, rktpars, 2000));
|
|
3088
|
+
|
|
3089
|
+
/* Offsets should be committed with retries */
|
|
3090
|
+
TEST_ASSERT(rktpars->cnt == 1);
|
|
3091
|
+
TEST_ASSERT(rktpars->elems[0].offset == msgcnt / 2);
|
|
3092
|
+
|
|
3093
|
+
rd_kafka_topic_partition_list_destroy(rktpars);
|
|
3094
|
+
|
|
3095
|
+
TEST_SAY("Closing consumer\n");
|
|
3096
|
+
test_consumer_close(c);
|
|
3097
|
+
|
|
3098
|
+
TEST_SAY("Destroying consumer\n");
|
|
3099
|
+
rd_kafka_destroy(c);
|
|
3100
|
+
|
|
3101
|
+
TEST_SAY("Destroying mock cluster\n");
|
|
3102
|
+
test_mock_cluster_destroy(mcluster);
|
|
3103
|
+
}
|
|
3104
|
+
|
|
3105
|
+
/**
 * @brief Test that the consumer is destroyed without segfault if
 *        it happens before first rebalance and there is no assignor
 *        state. See #4312
 */
static void s_no_segfault_before_first_rebalance(void) {
  rd_kafka_t *c;
  rd_kafka_conf_t *conf;
  rd_kafka_mock_cluster_t *mcluster;
  const char *topic;
  const char *bootstraps;

  SUB_TEST_QUICK();

  TEST_SAY("Creating mock cluster\n");
  mcluster = test_mock_cluster_new(1, &bootstraps);

  topic = test_mk_topic_name("0113_s", 1);

  test_conf_init(&conf, NULL, 60);
  test_conf_set(conf, "bootstrap.servers", bootstraps);
  test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");

  TEST_SAY("Creating topic %s\n", topic);
  TEST_CALL_ERR__(rd_kafka_mock_topic_create(
      mcluster, topic, 2 /* partition_cnt */, 1 /* replication_factor */));

  c = test_create_consumer(topic, NULL, conf, NULL);

  /* Add a 1s delay to the SyncGroup response so next condition can happen. */
  rd_kafka_mock_broker_push_request_error_rtts(
      mcluster, 1 /*Broker 1*/, RD_KAFKAP_SyncGroup, 1,
      RD_KAFKA_RESP_ERR_NOT_COORDINATOR, 1000);

  test_consumer_subscribe(c, topic);

  /* Wait for initial rebalance 3000 ms (default) + 500 ms for processing
   * the JoinGroup response. Consumer close must come between the JoinGroup
   * response and the SyncGroup response, so that rkcg_assignor is set,
   * but rkcg_assignor_state isn't. */
  TEST_ASSERT(!test_consumer_poll_once(c, NULL, 3500), "poll should timeout");

  /* Closing here, mid-handshake, is exactly what used to segfault. */
  rd_kafka_consumer_close(c);

  rd_kafka_destroy(c);

  TEST_SAY("Destroying mock cluster\n");
  test_mock_cluster_destroy(mcluster);

  SUB_TEST_PASS();
}
|
|
3156
|
+
|
|
3157
|
+
/**
 * @brief Rebalance callback for the v_.. test below.
 *
 * On revoke it asserts the assignment was not lost (an ILLEGAL_GENERATION
 * from a mid-rebalance commit must not lose the assignment), optionally
 * performs a delayed manual commit, and only then incrementally unassigns.
 * The opaque points to the test's \c auto_commit flag.
 */
static void v_rebalance_cb(rd_kafka_t *rk,
                           rd_kafka_resp_err_t err,
                           rd_kafka_topic_partition_list_t *parts,
                           void *opaque) {
  bool *auto_commitp = (bool *)opaque;

  TEST_SAY("%s: %s: %d partition(s)%s\n", rd_kafka_name(rk),
           rd_kafka_err2name(err), parts->cnt,
           rd_kafka_assignment_lost(rk) ? " - assignment lost" : "");

  test_print_partition_list(parts);

  if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
    test_consumer_incremental_assign("assign", rk, parts);
  } else {
    TEST_ASSERT(!rd_kafka_assignment_lost(rk),
                "Assignment must not be lost, "
                " that is a sign that an ILLEGAL_GENERATION error, "
                " during a commit happening during a rebalance is "
                "causing the assignment to be lost.");
    if (!*auto_commitp) {
      rd_kafka_resp_err_t commit_err;

      TEST_SAY("Attempting manual commit after unassign, in 2 seconds..\n");
      /* Sleep enough to have the generation-id bumped by rejoin. */
      rd_sleep(2);
      commit_err = rd_kafka_commit(rk, NULL, 0 /*sync*/);
      TEST_ASSERT(!commit_err || commit_err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
                      commit_err == RD_KAFKA_RESP_ERR__DESTROY ||
                      commit_err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
                  "%s: manual commit failed: %s", rd_kafka_name(rk),
                  rd_kafka_err2str(commit_err));
    }

    /* Unassign must be done after manual commit. */
    test_consumer_incremental_unassign("unassign", rk, parts);
  }
}
|
|
3198
|
+
|
|
3199
|
+
/**
|
|
3200
|
+
* @brief Commit callback for the v_.. test.
|
|
3201
|
+
*/
|
|
3202
|
+
static void v_commit_cb(rd_kafka_t *rk,
|
|
3203
|
+
rd_kafka_resp_err_t err,
|
|
3204
|
+
rd_kafka_topic_partition_list_t *offsets,
|
|
3205
|
+
void *opaque) {
|
|
3206
|
+
TEST_SAY("%s offset commit for %d offsets: %s\n", rd_kafka_name(rk),
|
|
3207
|
+
offsets ? offsets->cnt : -1, rd_kafka_err2name(err));
|
|
3208
|
+
TEST_ASSERT(!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
|
|
3209
|
+
err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION ||
|
|
3210
|
+
err == RD_KAFKA_RESP_ERR__DESTROY /* consumer was closed */,
|
|
3211
|
+
"%s offset commit failed: %s", rd_kafka_name(rk),
|
|
3212
|
+
rd_kafka_err2str(err));
|
|
3213
|
+
}
|
|
3214
|
+
|
|
3215
|
+
/**
 * @brief Log callback for the v_.. test.
 *
 * All parameters are intentionally unused; the callback exists purely
 * to add latency per log line.
 */
static void v_log_cb(const rd_kafka_t *rk,
                     int level,
                     const char *fac,
                     const char *buf) {
  /* Slow down logging to make ILLEGAL_GENERATION errors caused by
   * manual commit more likely. */
  rd_usleep(1000, 0);
}
|
|
3226
|
+
|
|
3227
|
+
/**
 * @brief Verify that commits issued during a rebalance (from the rebalance
 *        callback and/or between polls) do not cause the assignment to be
 *        lost on ILLEGAL_GENERATION.
 *
 * @param with_rebalance_cb Install v_rebalance_cb() on both consumers.
 * @param auto_commit       Use auto-commit instead of manual commits.
 */
static void v_commit_during_rebalance(bool with_rebalance_cb,
                                      bool auto_commit) {
  rd_kafka_t *p, *c1, *c2;
  rd_kafka_conf_t *conf;
  const char *topic = test_mk_topic_name("0113_v", 1);
  const int partition_cnt = 6;
  const int msgcnt_per_partition = 100;
  const int msgcnt = partition_cnt * msgcnt_per_partition;
  uint64_t testid;
  int i;


  SUB_TEST("With%s rebalance callback and %s-commit",
           with_rebalance_cb ? "" : "out", auto_commit ? "auto" : "manual");

  test_conf_init(&conf, NULL, 30);
  testid = test_id_generate();

  /*
   * Produce messages to topic
   */
  p = test_create_producer();

  test_create_topic_wait_exists(p, topic, partition_cnt, 1, 5000);

  for (i = 0; i < partition_cnt; i++) {
    test_produce_msgs2(p, topic, testid, i, i * msgcnt_per_partition,
                       msgcnt_per_partition, NULL, 0);
  }

  test_flush(p, -1);

  rd_kafka_destroy(p);


  test_conf_set(conf, "auto.offset.reset", "earliest");
  test_conf_set(conf, "debug", "consumer,cgrp,topic,fetch");
  test_conf_set(conf, "enable.auto.commit", auto_commit ? "true" : "false");
  test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
  if (!auto_commit)
    /* Slowing down logging is necessary only to make assignment lost
     * errors more evident. */
    rd_kafka_conf_set_log_cb(conf, v_log_cb);
  rd_kafka_conf_set_offset_commit_cb(conf, v_commit_cb);
  /* Opaque lets the rebalance cb know whether manual commits are needed. */
  rd_kafka_conf_set_opaque(conf, (void *)&auto_commit);

  TEST_SAY("Create and subscribe first consumer\n");
  /* c1 gets a duplicate of conf; the original is consumed by c2 below. */
  c1 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL,
                            rd_kafka_conf_dup(conf), NULL);
  TEST_ASSERT(rd_kafka_opaque(c1) == (void *)&auto_commit,
              "c1 opaque mismatch");
  test_consumer_subscribe(c1, topic);

  /* Consume some messages so that we know we have an assignment
   * and something to commit. */
  test_consumer_poll("C1.PRECONSUME", c1, testid, -1, 0,
                     msgcnt / partition_cnt / 2, NULL);

  TEST_SAY("Create and subscribe second consumer\n");
  c2 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL,
                            conf, NULL);
  TEST_ASSERT(rd_kafka_opaque(c2) == (void *)&auto_commit,
              "c2 opaque mismatch");
  /* Subscribing c2 triggers the cooperative rebalance under test. */
  test_consumer_subscribe(c2, topic);

  /* Poll both consumers */
  for (i = 0; i < 10; i++) {
    int poll_result1, poll_result2;
    do {
      poll_result1 = test_consumer_poll_once(c1, NULL, 1000);
      poll_result2 = test_consumer_poll_once(c2, NULL, 1000);

      if (poll_result1 == 1 && !auto_commit) {
        rd_kafka_resp_err_t err;
        TEST_SAY("Attempting manual commit after poll\n");
        err = rd_kafka_commit(c1, NULL, 0);
        TEST_ASSERT(!err || err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
                    "Expected not error or ILLEGAL_GENERATION, got: %s",
                    rd_kafka_err2str(err));
      }
    } while (poll_result1 == 0 || poll_result2 == 0);
  }

  TEST_SAY("Closing consumers\n");
  test_consumer_close(c1);
  test_consumer_close(c2);

  rd_kafka_destroy(c1);
  rd_kafka_destroy(c2);

  SUB_TEST_PASS();
}
|
|
3319
|
+
|
|
3320
|
+
|
|
3321
|
+
/**
 * @brief Verify that incremental rebalances retain stickyness.
 *
 * Three consumers join one 6-partition topic one at a time; after each
 * join the expected (sticky) assignment is verified. Expected layouts
 * differ between the classic and "consumer" group protocols.
 */
static void x_incremental_rebalances(void) {
#define _NUM_CONS 3
  rd_kafka_t *c[_NUM_CONS];
  rd_kafka_conf_t *conf;
  const char *topic = test_mk_topic_name("0113_x", 1);
  int i;

  SUB_TEST();
  test_conf_init(&conf, NULL, 60);

  test_create_topic_wait_exists(NULL, topic, 6, 1, 5000);

  test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
  for (i = 0; i < _NUM_CONS; i++) {
    char clientid[32];
    rd_snprintf(clientid, sizeof(clientid), "consumer%d", i);
    test_conf_set(conf, "client.id", clientid);

    c[i] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
  }
  rd_kafka_conf_destroy(conf);

  /* First consumer joins group */
  TEST_SAY("%s: joining\n", rd_kafka_name(c[0]));
  test_consumer_subscribe(c[0], topic);
  test_consumer_wait_assignment(c[0], rd_true /*poll*/);
  /* Sole member gets all six partitions. */
  test_consumer_verify_assignment(c[0], rd_true /*fail immediately*/, topic, 0,
                                  topic, 1, topic, 2, topic, 3, topic, 4, topic,
                                  5, NULL);


  /* Second consumer joins group */
  TEST_SAY("%s: joining\n", rd_kafka_name(c[1]));
  test_consumer_subscribe(c[1], topic);
  test_consumer_wait_assignment(c[1], rd_true /*poll*/);
  /* Give the incremental rebalance time to settle before verifying. */
  rd_sleep(3);
  if (test_consumer_group_protocol_classic()) {
    test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 3,
                                    topic, 4, topic, 5, NULL);
    test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 0,
                                    topic, 1, topic, 2, NULL);
  } else {
    test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 0,
                                    topic, 1, topic, 2, NULL);
    test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 3,
                                    topic, 4, topic, 5, NULL);
  }

  /* Third consumer joins group */
  TEST_SAY("%s: joining\n", rd_kafka_name(c[2]));
  test_consumer_subscribe(c[2], topic);
  test_consumer_wait_assignment(c[2], rd_true /*poll*/);
  rd_sleep(3);
  if (test_consumer_group_protocol_classic()) {
    test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 4,
                                    topic, 5, NULL);
    test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 1,
                                    topic, 2, NULL);
    test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 3,
                                    topic, 0, NULL);
  } else {
    test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 0,
                                    topic, 1, NULL);
    test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 3,
                                    topic, 4, NULL);
    test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 2,
                                    topic, 5, NULL);
  }

  /* Raise any previously failed verify_assignment calls and fail the test */
  TEST_LATER_CHECK();

  for (i = 0; i < _NUM_CONS; i++)
    rd_kafka_destroy(c[i]);

  SUB_TEST_PASS();

#undef _NUM_CONS
}
|
|
3403
|
+
|
|
3404
|
+
/* Local tests not needing a cluster */
|
|
3405
|
+
int main_0113_cooperative_rebalance_local(int argc, char **argv) {
|
|
3406
|
+
TEST_SKIP_MOCK_CLUSTER(0);
|
|
3407
|
+
|
|
3408
|
+
a_assign_rapid();
|
|
3409
|
+
p_lost_partitions_heartbeat_illegal_generation_test();
|
|
3410
|
+
if (test_consumer_group_protocol_classic()) {
|
|
3411
|
+
/* These tests have no correspondence with
|
|
3412
|
+
* the consumer group protocol "consumer" */
|
|
3413
|
+
q_lost_partitions_illegal_generation_test(rd_false /*joingroup*/);
|
|
3414
|
+
q_lost_partitions_illegal_generation_test(rd_true /*syncgroup*/);
|
|
3415
|
+
}
|
|
3416
|
+
if (test_consumer_group_protocol_classic()) {
|
|
3417
|
+
r_lost_partitions_commit_illegal_generation_test_local();
|
|
3418
|
+
} else if (0) {
|
|
3419
|
+
/* FIXME: enable this once new errors are handled in OffsetCommit. */
|
|
3420
|
+
t_consumer_group_consumer_retry_commit_on_fenced_member_epoch();
|
|
3421
|
+
}
|
|
3422
|
+
s_no_segfault_before_first_rebalance();
|
|
3423
|
+
return 0;
|
|
3424
|
+
}
|
|
3425
|
+
|
|
3426
|
+
int main_0113_cooperative_rebalance(int argc, char **argv) {
|
|
3427
|
+
int i;
|
|
3428
|
+
|
|
3429
|
+
a_assign_tests();
|
|
3430
|
+
b_subscribe_with_cb_test(true /*close consumer*/);
|
|
3431
|
+
b_subscribe_with_cb_test(false /*don't close consumer*/);
|
|
3432
|
+
c_subscribe_no_cb_test(true /*close consumer*/);
|
|
3433
|
+
|
|
3434
|
+
if (test_quick) {
|
|
3435
|
+
Test::Say("Skipping tests >= c_ .. due to quick mode\n");
|
|
3436
|
+
return 0;
|
|
3437
|
+
}
|
|
3438
|
+
|
|
3439
|
+
c_subscribe_no_cb_test(false /*don't close consumer*/);
|
|
3440
|
+
d_change_subscription_add_topic(true /*close consumer*/);
|
|
3441
|
+
d_change_subscription_add_topic(false /*don't close consumer*/);
|
|
3442
|
+
e_change_subscription_remove_topic(true /*close consumer*/);
|
|
3443
|
+
e_change_subscription_remove_topic(false /*don't close consumer*/);
|
|
3444
|
+
f_assign_call_cooperative();
|
|
3445
|
+
g_incremental_assign_call_eager();
|
|
3446
|
+
h_delete_topic();
|
|
3447
|
+
i_delete_topic_2();
|
|
3448
|
+
j_delete_topic_no_rb_callback();
|
|
3449
|
+
k_add_partition();
|
|
3450
|
+
l_unsubscribe();
|
|
3451
|
+
m_unsubscribe_2();
|
|
3452
|
+
n_wildcard();
|
|
3453
|
+
o_java_interop();
|
|
3454
|
+
for (i = 1; i <= 6; i++) /* iterate over 6 different test variations */
|
|
3455
|
+
s_subscribe_when_rebalancing(i);
|
|
3456
|
+
for (i = 1; i <= 3; i++)
|
|
3457
|
+
t_max_poll_interval_exceeded(i);
|
|
3458
|
+
/* Run all 2*3 variations of the u_.. test */
|
|
3459
|
+
for (i = 0; i < 3; i++) {
|
|
3460
|
+
u_multiple_subscription_changes(true /*with rebalance_cb*/, i);
|
|
3461
|
+
u_multiple_subscription_changes(false /*without rebalance_cb*/, i);
|
|
3462
|
+
}
|
|
3463
|
+
v_commit_during_rebalance(true /*with rebalance callback*/,
|
|
3464
|
+
true /*auto commit*/);
|
|
3465
|
+
v_commit_during_rebalance(false /*without rebalance callback*/,
|
|
3466
|
+
true /*auto commit*/);
|
|
3467
|
+
v_commit_during_rebalance(true /*with rebalance callback*/,
|
|
3468
|
+
false /*manual commit*/);
|
|
3469
|
+
x_incremental_rebalances();
|
|
3470
|
+
|
|
3471
|
+
return 0;
|
|
3472
|
+
}
|
|
3473
|
+
}
|