@point3/node-rdkafka 3.6.0-1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +20 -0
- package/README.md +636 -0
- package/binding.gyp +154 -0
- package/deps/librdkafka/.clang-format +136 -0
- package/deps/librdkafka/.clang-format-cpp +103 -0
- package/deps/librdkafka/.dir-locals.el +10 -0
- package/deps/librdkafka/.formatignore +33 -0
- package/deps/librdkafka/.gdbmacros +19 -0
- package/deps/librdkafka/.github/CODEOWNERS +1 -0
- package/deps/librdkafka/.github/ISSUE_TEMPLATE +34 -0
- package/deps/librdkafka/.semaphore/run-all-tests.yml +77 -0
- package/deps/librdkafka/.semaphore/semaphore-integration.yml +250 -0
- package/deps/librdkafka/.semaphore/semaphore.yml +378 -0
- package/deps/librdkafka/.semaphore/verify-linux-packages.yml +41 -0
- package/deps/librdkafka/CHANGELOG.md +2208 -0
- package/deps/librdkafka/CMakeLists.txt +291 -0
- package/deps/librdkafka/CODE_OF_CONDUCT.md +46 -0
- package/deps/librdkafka/CONFIGURATION.md +209 -0
- package/deps/librdkafka/CONTRIBUTING.md +431 -0
- package/deps/librdkafka/Doxyfile +2375 -0
- package/deps/librdkafka/INTRODUCTION.md +2481 -0
- package/deps/librdkafka/LICENSE +26 -0
- package/deps/librdkafka/LICENSE.cjson +22 -0
- package/deps/librdkafka/LICENSE.crc32c +28 -0
- package/deps/librdkafka/LICENSE.fnv1a +18 -0
- package/deps/librdkafka/LICENSE.hdrhistogram +27 -0
- package/deps/librdkafka/LICENSE.lz4 +26 -0
- package/deps/librdkafka/LICENSE.murmur2 +25 -0
- package/deps/librdkafka/LICENSE.nanopb +22 -0
- package/deps/librdkafka/LICENSE.opentelemetry +203 -0
- package/deps/librdkafka/LICENSE.pycrc +23 -0
- package/deps/librdkafka/LICENSE.queue +31 -0
- package/deps/librdkafka/LICENSE.regexp +5 -0
- package/deps/librdkafka/LICENSE.snappy +36 -0
- package/deps/librdkafka/LICENSE.tinycthread +26 -0
- package/deps/librdkafka/LICENSE.wingetopt +49 -0
- package/deps/librdkafka/LICENSES.txt +625 -0
- package/deps/librdkafka/Makefile +125 -0
- package/deps/librdkafka/README.md +199 -0
- package/deps/librdkafka/README.win32 +26 -0
- package/deps/librdkafka/STATISTICS.md +624 -0
- package/deps/librdkafka/configure +214 -0
- package/deps/librdkafka/configure.self +331 -0
- package/deps/librdkafka/debian/changelog +111 -0
- package/deps/librdkafka/debian/compat +1 -0
- package/deps/librdkafka/debian/control +71 -0
- package/deps/librdkafka/debian/copyright +99 -0
- package/deps/librdkafka/debian/gbp.conf +9 -0
- package/deps/librdkafka/debian/librdkafka++1.install +1 -0
- package/deps/librdkafka/debian/librdkafka-dev.examples +2 -0
- package/deps/librdkafka/debian/librdkafka-dev.install +9 -0
- package/deps/librdkafka/debian/librdkafka1.docs +5 -0
- package/deps/librdkafka/debian/librdkafka1.install +1 -0
- package/deps/librdkafka/debian/librdkafka1.symbols +135 -0
- package/deps/librdkafka/debian/rules +19 -0
- package/deps/librdkafka/debian/source/format +1 -0
- package/deps/librdkafka/debian/watch +2 -0
- package/deps/librdkafka/dev-conf.sh +123 -0
- package/deps/librdkafka/examples/CMakeLists.txt +79 -0
- package/deps/librdkafka/examples/Makefile +167 -0
- package/deps/librdkafka/examples/README.md +42 -0
- package/deps/librdkafka/examples/alter_consumer_group_offsets.c +338 -0
- package/deps/librdkafka/examples/consumer.c +271 -0
- package/deps/librdkafka/examples/delete_records.c +233 -0
- package/deps/librdkafka/examples/describe_cluster.c +322 -0
- package/deps/librdkafka/examples/describe_consumer_groups.c +455 -0
- package/deps/librdkafka/examples/describe_topics.c +427 -0
- package/deps/librdkafka/examples/elect_leaders.c +317 -0
- package/deps/librdkafka/examples/globals.json +11 -0
- package/deps/librdkafka/examples/idempotent_producer.c +344 -0
- package/deps/librdkafka/examples/incremental_alter_configs.c +347 -0
- package/deps/librdkafka/examples/kafkatest_verifiable_client.cpp +945 -0
- package/deps/librdkafka/examples/list_consumer_group_offsets.c +359 -0
- package/deps/librdkafka/examples/list_consumer_groups.c +365 -0
- package/deps/librdkafka/examples/list_offsets.c +327 -0
- package/deps/librdkafka/examples/misc.c +287 -0
- package/deps/librdkafka/examples/openssl_engine_example.cpp +248 -0
- package/deps/librdkafka/examples/producer.c +251 -0
- package/deps/librdkafka/examples/producer.cpp +228 -0
- package/deps/librdkafka/examples/rdkafka_complex_consumer_example.c +617 -0
- package/deps/librdkafka/examples/rdkafka_complex_consumer_example.cpp +467 -0
- package/deps/librdkafka/examples/rdkafka_consume_batch.cpp +264 -0
- package/deps/librdkafka/examples/rdkafka_example.c +853 -0
- package/deps/librdkafka/examples/rdkafka_example.cpp +679 -0
- package/deps/librdkafka/examples/rdkafka_performance.c +1781 -0
- package/deps/librdkafka/examples/transactions-older-broker.c +668 -0
- package/deps/librdkafka/examples/transactions.c +665 -0
- package/deps/librdkafka/examples/user_scram.c +491 -0
- package/deps/librdkafka/examples/win_ssl_cert_store.cpp +396 -0
- package/deps/librdkafka/lds-gen.py +73 -0
- package/deps/librdkafka/mainpage.doxy +40 -0
- package/deps/librdkafka/mklove/Makefile.base +329 -0
- package/deps/librdkafka/mklove/modules/configure.atomics +144 -0
- package/deps/librdkafka/mklove/modules/configure.base +2484 -0
- package/deps/librdkafka/mklove/modules/configure.builtin +70 -0
- package/deps/librdkafka/mklove/modules/configure.cc +186 -0
- package/deps/librdkafka/mklove/modules/configure.cxx +8 -0
- package/deps/librdkafka/mklove/modules/configure.fileversion +65 -0
- package/deps/librdkafka/mklove/modules/configure.gitversion +29 -0
- package/deps/librdkafka/mklove/modules/configure.good_cflags +18 -0
- package/deps/librdkafka/mklove/modules/configure.host +132 -0
- package/deps/librdkafka/mklove/modules/configure.lib +49 -0
- package/deps/librdkafka/mklove/modules/configure.libcurl +99 -0
- package/deps/librdkafka/mklove/modules/configure.libsasl2 +36 -0
- package/deps/librdkafka/mklove/modules/configure.libssl +147 -0
- package/deps/librdkafka/mklove/modules/configure.libzstd +58 -0
- package/deps/librdkafka/mklove/modules/configure.parseversion +95 -0
- package/deps/librdkafka/mklove/modules/configure.pic +16 -0
- package/deps/librdkafka/mklove/modules/configure.socket +20 -0
- package/deps/librdkafka/mklove/modules/configure.zlib +61 -0
- package/deps/librdkafka/mklove/modules/patches/README.md +8 -0
- package/deps/librdkafka/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch +11 -0
- package/deps/librdkafka/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch +56 -0
- package/deps/librdkafka/packaging/RELEASE.md +319 -0
- package/deps/librdkafka/packaging/alpine/build-alpine.sh +38 -0
- package/deps/librdkafka/packaging/archlinux/PKGBUILD +30 -0
- package/deps/librdkafka/packaging/cmake/Config.cmake.in +37 -0
- package/deps/librdkafka/packaging/cmake/Modules/FindLZ4.cmake +38 -0
- package/deps/librdkafka/packaging/cmake/Modules/FindZSTD.cmake +27 -0
- package/deps/librdkafka/packaging/cmake/Modules/LICENSE.FindZstd +178 -0
- package/deps/librdkafka/packaging/cmake/README.md +38 -0
- package/deps/librdkafka/packaging/cmake/config.h.in +52 -0
- package/deps/librdkafka/packaging/cmake/parseversion.cmake +60 -0
- package/deps/librdkafka/packaging/cmake/rdkafka.pc.in +12 -0
- package/deps/librdkafka/packaging/cmake/try_compile/atomic_32_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/atomic_64_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/c11threads_test.c +14 -0
- package/deps/librdkafka/packaging/cmake/try_compile/crc32c_hw_test.c +27 -0
- package/deps/librdkafka/packaging/cmake/try_compile/dlopen_test.c +11 -0
- package/deps/librdkafka/packaging/cmake/try_compile/libsasl2_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_darwin_test.c +6 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_freebsd_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_gnu_test.c +5 -0
- package/deps/librdkafka/packaging/cmake/try_compile/rand_r_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/rdkafka_setup.cmake +122 -0
- package/deps/librdkafka/packaging/cmake/try_compile/regex_test.c +10 -0
- package/deps/librdkafka/packaging/cmake/try_compile/strndup_test.c +5 -0
- package/deps/librdkafka/packaging/cmake/try_compile/sync_32_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/sync_64_test.c +8 -0
- package/deps/librdkafka/packaging/cp/README.md +16 -0
- package/deps/librdkafka/packaging/cp/check_features.c +72 -0
- package/deps/librdkafka/packaging/cp/verify-deb.sh +33 -0
- package/deps/librdkafka/packaging/cp/verify-packages.sh +69 -0
- package/deps/librdkafka/packaging/cp/verify-rpm.sh +32 -0
- package/deps/librdkafka/packaging/debian/changelog +66 -0
- package/deps/librdkafka/packaging/debian/compat +1 -0
- package/deps/librdkafka/packaging/debian/control +49 -0
- package/deps/librdkafka/packaging/debian/copyright +84 -0
- package/deps/librdkafka/packaging/debian/docs +5 -0
- package/deps/librdkafka/packaging/debian/gbp.conf +9 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.dirs +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.examples +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.install +6 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.substvars +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka.dsc +16 -0
- package/deps/librdkafka/packaging/debian/librdkafka1-dbg.substvars +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.dirs +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.install +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.postinst.debhelper +5 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.postrm.debhelper +5 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.symbols +64 -0
- package/deps/librdkafka/packaging/debian/rules +19 -0
- package/deps/librdkafka/packaging/debian/source/format +1 -0
- package/deps/librdkafka/packaging/debian/watch +2 -0
- package/deps/librdkafka/packaging/get_version.py +21 -0
- package/deps/librdkafka/packaging/homebrew/README.md +15 -0
- package/deps/librdkafka/packaging/homebrew/brew-update-pr.sh +31 -0
- package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw-static.sh +52 -0
- package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw.sh +21 -0
- package/deps/librdkafka/packaging/mingw-w64/export-variables.sh +13 -0
- package/deps/librdkafka/packaging/mingw-w64/run-tests.sh +6 -0
- package/deps/librdkafka/packaging/mingw-w64/semaphoreci-build.sh +38 -0
- package/deps/librdkafka/packaging/nuget/README.md +84 -0
- package/deps/librdkafka/packaging/nuget/artifact.py +177 -0
- package/deps/librdkafka/packaging/nuget/cleanup-s3.py +143 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip +0 -0
- package/deps/librdkafka/packaging/nuget/nuget.sh +21 -0
- package/deps/librdkafka/packaging/nuget/nugetpackage.py +278 -0
- package/deps/librdkafka/packaging/nuget/packaging.py +448 -0
- package/deps/librdkafka/packaging/nuget/push-to-nuget.sh +21 -0
- package/deps/librdkafka/packaging/nuget/release.py +167 -0
- package/deps/librdkafka/packaging/nuget/requirements.txt +3 -0
- package/deps/librdkafka/packaging/nuget/staticpackage.py +178 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.nuspec +21 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.props +18 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.targets +19 -0
- package/deps/librdkafka/packaging/nuget/zfile/__init__.py +0 -0
- package/deps/librdkafka/packaging/nuget/zfile/zfile.py +98 -0
- package/deps/librdkafka/packaging/rpm/Makefile +92 -0
- package/deps/librdkafka/packaging/rpm/README.md +23 -0
- package/deps/librdkafka/packaging/rpm/el7-x86_64.cfg +40 -0
- package/deps/librdkafka/packaging/rpm/librdkafka.spec +118 -0
- package/deps/librdkafka/packaging/rpm/mock-on-docker.sh +96 -0
- package/deps/librdkafka/packaging/rpm/tests/Makefile +25 -0
- package/deps/librdkafka/packaging/rpm/tests/README.md +8 -0
- package/deps/librdkafka/packaging/rpm/tests/run-test.sh +42 -0
- package/deps/librdkafka/packaging/rpm/tests/test-on-docker.sh +56 -0
- package/deps/librdkafka/packaging/rpm/tests/test.c +77 -0
- package/deps/librdkafka/packaging/rpm/tests/test.cpp +34 -0
- package/deps/librdkafka/packaging/tools/Dockerfile +31 -0
- package/deps/librdkafka/packaging/tools/build-configurations-checks.sh +12 -0
- package/deps/librdkafka/packaging/tools/build-deb-package.sh +64 -0
- package/deps/librdkafka/packaging/tools/build-debian.sh +65 -0
- package/deps/librdkafka/packaging/tools/build-manylinux.sh +68 -0
- package/deps/librdkafka/packaging/tools/build-release-artifacts.sh +139 -0
- package/deps/librdkafka/packaging/tools/distro-build.sh +38 -0
- package/deps/librdkafka/packaging/tools/gh-release-checksums.py +39 -0
- package/deps/librdkafka/packaging/tools/rdutcoverage.sh +25 -0
- package/deps/librdkafka/packaging/tools/requirements.txt +2 -0
- package/deps/librdkafka/packaging/tools/run-in-docker.sh +28 -0
- package/deps/librdkafka/packaging/tools/run-integration-tests.sh +31 -0
- package/deps/librdkafka/packaging/tools/run-style-check.sh +4 -0
- package/deps/librdkafka/packaging/tools/style-format.sh +149 -0
- package/deps/librdkafka/packaging/tools/update_rpcs_max_versions.py +100 -0
- package/deps/librdkafka/service.yml +172 -0
- package/deps/librdkafka/src/CMakeLists.txt +374 -0
- package/deps/librdkafka/src/Makefile +103 -0
- package/deps/librdkafka/src/README.lz4.md +30 -0
- package/deps/librdkafka/src/cJSON.c +2834 -0
- package/deps/librdkafka/src/cJSON.h +398 -0
- package/deps/librdkafka/src/crc32c.c +430 -0
- package/deps/librdkafka/src/crc32c.h +38 -0
- package/deps/librdkafka/src/generate_proto.sh +66 -0
- package/deps/librdkafka/src/librdkafka_cgrp_synch.png +0 -0
- package/deps/librdkafka/src/lz4.c +2727 -0
- package/deps/librdkafka/src/lz4.h +842 -0
- package/deps/librdkafka/src/lz4frame.c +2078 -0
- package/deps/librdkafka/src/lz4frame.h +692 -0
- package/deps/librdkafka/src/lz4frame_static.h +47 -0
- package/deps/librdkafka/src/lz4hc.c +1631 -0
- package/deps/librdkafka/src/lz4hc.h +413 -0
- package/deps/librdkafka/src/nanopb/pb.h +917 -0
- package/deps/librdkafka/src/nanopb/pb_common.c +388 -0
- package/deps/librdkafka/src/nanopb/pb_common.h +49 -0
- package/deps/librdkafka/src/nanopb/pb_decode.c +1727 -0
- package/deps/librdkafka/src/nanopb/pb_decode.h +193 -0
- package/deps/librdkafka/src/nanopb/pb_encode.c +1000 -0
- package/deps/librdkafka/src/nanopb/pb_encode.h +185 -0
- package/deps/librdkafka/src/opentelemetry/common.pb.c +32 -0
- package/deps/librdkafka/src/opentelemetry/common.pb.h +170 -0
- package/deps/librdkafka/src/opentelemetry/metrics.options +2 -0
- package/deps/librdkafka/src/opentelemetry/metrics.pb.c +67 -0
- package/deps/librdkafka/src/opentelemetry/metrics.pb.h +966 -0
- package/deps/librdkafka/src/opentelemetry/resource.pb.c +12 -0
- package/deps/librdkafka/src/opentelemetry/resource.pb.h +58 -0
- package/deps/librdkafka/src/queue.h +850 -0
- package/deps/librdkafka/src/rd.h +584 -0
- package/deps/librdkafka/src/rdaddr.c +255 -0
- package/deps/librdkafka/src/rdaddr.h +202 -0
- package/deps/librdkafka/src/rdatomic.h +230 -0
- package/deps/librdkafka/src/rdavg.h +260 -0
- package/deps/librdkafka/src/rdavl.c +210 -0
- package/deps/librdkafka/src/rdavl.h +250 -0
- package/deps/librdkafka/src/rdbase64.c +200 -0
- package/deps/librdkafka/src/rdbase64.h +43 -0
- package/deps/librdkafka/src/rdbuf.c +1884 -0
- package/deps/librdkafka/src/rdbuf.h +375 -0
- package/deps/librdkafka/src/rdcrc32.c +114 -0
- package/deps/librdkafka/src/rdcrc32.h +170 -0
- package/deps/librdkafka/src/rddl.c +179 -0
- package/deps/librdkafka/src/rddl.h +43 -0
- package/deps/librdkafka/src/rdendian.h +175 -0
- package/deps/librdkafka/src/rdfloat.h +67 -0
- package/deps/librdkafka/src/rdfnv1a.c +113 -0
- package/deps/librdkafka/src/rdfnv1a.h +35 -0
- package/deps/librdkafka/src/rdgz.c +120 -0
- package/deps/librdkafka/src/rdgz.h +46 -0
- package/deps/librdkafka/src/rdhdrhistogram.c +721 -0
- package/deps/librdkafka/src/rdhdrhistogram.h +87 -0
- package/deps/librdkafka/src/rdhttp.c +830 -0
- package/deps/librdkafka/src/rdhttp.h +101 -0
- package/deps/librdkafka/src/rdinterval.h +177 -0
- package/deps/librdkafka/src/rdkafka.c +5505 -0
- package/deps/librdkafka/src/rdkafka.h +10686 -0
- package/deps/librdkafka/src/rdkafka_admin.c +9794 -0
- package/deps/librdkafka/src/rdkafka_admin.h +661 -0
- package/deps/librdkafka/src/rdkafka_assignment.c +1010 -0
- package/deps/librdkafka/src/rdkafka_assignment.h +73 -0
- package/deps/librdkafka/src/rdkafka_assignor.c +1786 -0
- package/deps/librdkafka/src/rdkafka_assignor.h +402 -0
- package/deps/librdkafka/src/rdkafka_aux.c +409 -0
- package/deps/librdkafka/src/rdkafka_aux.h +174 -0
- package/deps/librdkafka/src/rdkafka_background.c +221 -0
- package/deps/librdkafka/src/rdkafka_broker.c +6337 -0
- package/deps/librdkafka/src/rdkafka_broker.h +744 -0
- package/deps/librdkafka/src/rdkafka_buf.c +543 -0
- package/deps/librdkafka/src/rdkafka_buf.h +1525 -0
- package/deps/librdkafka/src/rdkafka_cert.c +576 -0
- package/deps/librdkafka/src/rdkafka_cert.h +62 -0
- package/deps/librdkafka/src/rdkafka_cgrp.c +7587 -0
- package/deps/librdkafka/src/rdkafka_cgrp.h +477 -0
- package/deps/librdkafka/src/rdkafka_conf.c +4880 -0
- package/deps/librdkafka/src/rdkafka_conf.h +732 -0
- package/deps/librdkafka/src/rdkafka_confval.h +97 -0
- package/deps/librdkafka/src/rdkafka_coord.c +623 -0
- package/deps/librdkafka/src/rdkafka_coord.h +132 -0
- package/deps/librdkafka/src/rdkafka_error.c +228 -0
- package/deps/librdkafka/src/rdkafka_error.h +80 -0
- package/deps/librdkafka/src/rdkafka_event.c +502 -0
- package/deps/librdkafka/src/rdkafka_event.h +126 -0
- package/deps/librdkafka/src/rdkafka_feature.c +898 -0
- package/deps/librdkafka/src/rdkafka_feature.h +104 -0
- package/deps/librdkafka/src/rdkafka_fetcher.c +1422 -0
- package/deps/librdkafka/src/rdkafka_fetcher.h +44 -0
- package/deps/librdkafka/src/rdkafka_header.c +220 -0
- package/deps/librdkafka/src/rdkafka_header.h +76 -0
- package/deps/librdkafka/src/rdkafka_idempotence.c +807 -0
- package/deps/librdkafka/src/rdkafka_idempotence.h +144 -0
- package/deps/librdkafka/src/rdkafka_int.h +1260 -0
- package/deps/librdkafka/src/rdkafka_interceptor.c +819 -0
- package/deps/librdkafka/src/rdkafka_interceptor.h +104 -0
- package/deps/librdkafka/src/rdkafka_lz4.c +450 -0
- package/deps/librdkafka/src/rdkafka_lz4.h +49 -0
- package/deps/librdkafka/src/rdkafka_metadata.c +2209 -0
- package/deps/librdkafka/src/rdkafka_metadata.h +345 -0
- package/deps/librdkafka/src/rdkafka_metadata_cache.c +1183 -0
- package/deps/librdkafka/src/rdkafka_mock.c +3661 -0
- package/deps/librdkafka/src/rdkafka_mock.h +610 -0
- package/deps/librdkafka/src/rdkafka_mock_cgrp.c +1876 -0
- package/deps/librdkafka/src/rdkafka_mock_handlers.c +3113 -0
- package/deps/librdkafka/src/rdkafka_mock_int.h +710 -0
- package/deps/librdkafka/src/rdkafka_msg.c +2589 -0
- package/deps/librdkafka/src/rdkafka_msg.h +614 -0
- package/deps/librdkafka/src/rdkafka_msgbatch.h +62 -0
- package/deps/librdkafka/src/rdkafka_msgset.h +98 -0
- package/deps/librdkafka/src/rdkafka_msgset_reader.c +1806 -0
- package/deps/librdkafka/src/rdkafka_msgset_writer.c +1474 -0
- package/deps/librdkafka/src/rdkafka_offset.c +1565 -0
- package/deps/librdkafka/src/rdkafka_offset.h +150 -0
- package/deps/librdkafka/src/rdkafka_op.c +997 -0
- package/deps/librdkafka/src/rdkafka_op.h +858 -0
- package/deps/librdkafka/src/rdkafka_partition.c +4896 -0
- package/deps/librdkafka/src/rdkafka_partition.h +1182 -0
- package/deps/librdkafka/src/rdkafka_pattern.c +228 -0
- package/deps/librdkafka/src/rdkafka_pattern.h +70 -0
- package/deps/librdkafka/src/rdkafka_plugin.c +213 -0
- package/deps/librdkafka/src/rdkafka_plugin.h +41 -0
- package/deps/librdkafka/src/rdkafka_proto.h +736 -0
- package/deps/librdkafka/src/rdkafka_protocol.h +128 -0
- package/deps/librdkafka/src/rdkafka_queue.c +1230 -0
- package/deps/librdkafka/src/rdkafka_queue.h +1220 -0
- package/deps/librdkafka/src/rdkafka_range_assignor.c +1748 -0
- package/deps/librdkafka/src/rdkafka_request.c +7089 -0
- package/deps/librdkafka/src/rdkafka_request.h +732 -0
- package/deps/librdkafka/src/rdkafka_roundrobin_assignor.c +123 -0
- package/deps/librdkafka/src/rdkafka_sasl.c +530 -0
- package/deps/librdkafka/src/rdkafka_sasl.h +63 -0
- package/deps/librdkafka/src/rdkafka_sasl_cyrus.c +722 -0
- package/deps/librdkafka/src/rdkafka_sasl_int.h +89 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.c +1833 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.h +52 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.c +1666 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.h +47 -0
- package/deps/librdkafka/src/rdkafka_sasl_plain.c +142 -0
- package/deps/librdkafka/src/rdkafka_sasl_scram.c +858 -0
- package/deps/librdkafka/src/rdkafka_sasl_win32.c +550 -0
- package/deps/librdkafka/src/rdkafka_ssl.c +2129 -0
- package/deps/librdkafka/src/rdkafka_ssl.h +86 -0
- package/deps/librdkafka/src/rdkafka_sticky_assignor.c +4785 -0
- package/deps/librdkafka/src/rdkafka_subscription.c +278 -0
- package/deps/librdkafka/src/rdkafka_telemetry.c +760 -0
- package/deps/librdkafka/src/rdkafka_telemetry.h +52 -0
- package/deps/librdkafka/src/rdkafka_telemetry_decode.c +1053 -0
- package/deps/librdkafka/src/rdkafka_telemetry_decode.h +59 -0
- package/deps/librdkafka/src/rdkafka_telemetry_encode.c +997 -0
- package/deps/librdkafka/src/rdkafka_telemetry_encode.h +301 -0
- package/deps/librdkafka/src/rdkafka_timer.c +402 -0
- package/deps/librdkafka/src/rdkafka_timer.h +117 -0
- package/deps/librdkafka/src/rdkafka_topic.c +2161 -0
- package/deps/librdkafka/src/rdkafka_topic.h +334 -0
- package/deps/librdkafka/src/rdkafka_transport.c +1309 -0
- package/deps/librdkafka/src/rdkafka_transport.h +99 -0
- package/deps/librdkafka/src/rdkafka_transport_int.h +100 -0
- package/deps/librdkafka/src/rdkafka_txnmgr.c +3256 -0
- package/deps/librdkafka/src/rdkafka_txnmgr.h +171 -0
- package/deps/librdkafka/src/rdkafka_zstd.c +226 -0
- package/deps/librdkafka/src/rdkafka_zstd.h +57 -0
- package/deps/librdkafka/src/rdlist.c +576 -0
- package/deps/librdkafka/src/rdlist.h +434 -0
- package/deps/librdkafka/src/rdlog.c +89 -0
- package/deps/librdkafka/src/rdlog.h +41 -0
- package/deps/librdkafka/src/rdmap.c +508 -0
- package/deps/librdkafka/src/rdmap.h +492 -0
- package/deps/librdkafka/src/rdmurmur2.c +167 -0
- package/deps/librdkafka/src/rdmurmur2.h +35 -0
- package/deps/librdkafka/src/rdports.c +61 -0
- package/deps/librdkafka/src/rdports.h +38 -0
- package/deps/librdkafka/src/rdposix.h +250 -0
- package/deps/librdkafka/src/rdrand.c +80 -0
- package/deps/librdkafka/src/rdrand.h +43 -0
- package/deps/librdkafka/src/rdregex.c +156 -0
- package/deps/librdkafka/src/rdregex.h +43 -0
- package/deps/librdkafka/src/rdsignal.h +57 -0
- package/deps/librdkafka/src/rdstring.c +645 -0
- package/deps/librdkafka/src/rdstring.h +98 -0
- package/deps/librdkafka/src/rdsysqueue.h +404 -0
- package/deps/librdkafka/src/rdtime.h +356 -0
- package/deps/librdkafka/src/rdtypes.h +86 -0
- package/deps/librdkafka/src/rdunittest.c +549 -0
- package/deps/librdkafka/src/rdunittest.h +232 -0
- package/deps/librdkafka/src/rdvarint.c +134 -0
- package/deps/librdkafka/src/rdvarint.h +165 -0
- package/deps/librdkafka/src/rdwin32.h +382 -0
- package/deps/librdkafka/src/rdxxhash.c +1030 -0
- package/deps/librdkafka/src/rdxxhash.h +328 -0
- package/deps/librdkafka/src/regexp.c +1352 -0
- package/deps/librdkafka/src/regexp.h +41 -0
- package/deps/librdkafka/src/snappy.c +1866 -0
- package/deps/librdkafka/src/snappy.h +62 -0
- package/deps/librdkafka/src/snappy_compat.h +138 -0
- package/deps/librdkafka/src/statistics_schema.json +444 -0
- package/deps/librdkafka/src/tinycthread.c +932 -0
- package/deps/librdkafka/src/tinycthread.h +503 -0
- package/deps/librdkafka/src/tinycthread_extra.c +199 -0
- package/deps/librdkafka/src/tinycthread_extra.h +212 -0
- package/deps/librdkafka/src/win32_config.h +58 -0
- package/deps/librdkafka/src-cpp/CMakeLists.txt +90 -0
- package/deps/librdkafka/src-cpp/ConfImpl.cpp +84 -0
- package/deps/librdkafka/src-cpp/ConsumerImpl.cpp +244 -0
- package/deps/librdkafka/src-cpp/HandleImpl.cpp +436 -0
- package/deps/librdkafka/src-cpp/HeadersImpl.cpp +48 -0
- package/deps/librdkafka/src-cpp/KafkaConsumerImpl.cpp +296 -0
- package/deps/librdkafka/src-cpp/Makefile +55 -0
- package/deps/librdkafka/src-cpp/MessageImpl.cpp +38 -0
- package/deps/librdkafka/src-cpp/MetadataImpl.cpp +170 -0
- package/deps/librdkafka/src-cpp/ProducerImpl.cpp +197 -0
- package/deps/librdkafka/src-cpp/QueueImpl.cpp +70 -0
- package/deps/librdkafka/src-cpp/README.md +16 -0
- package/deps/librdkafka/src-cpp/RdKafka.cpp +59 -0
- package/deps/librdkafka/src-cpp/TopicImpl.cpp +124 -0
- package/deps/librdkafka/src-cpp/TopicPartitionImpl.cpp +57 -0
- package/deps/librdkafka/src-cpp/rdkafkacpp.h +3797 -0
- package/deps/librdkafka/src-cpp/rdkafkacpp_int.h +1641 -0
- package/deps/librdkafka/tests/0000-unittests.c +72 -0
- package/deps/librdkafka/tests/0001-multiobj.c +102 -0
- package/deps/librdkafka/tests/0002-unkpart.c +244 -0
- package/deps/librdkafka/tests/0003-msgmaxsize.c +173 -0
- package/deps/librdkafka/tests/0004-conf.c +934 -0
- package/deps/librdkafka/tests/0005-order.c +133 -0
- package/deps/librdkafka/tests/0006-symbols.c +163 -0
- package/deps/librdkafka/tests/0007-autotopic.c +136 -0
- package/deps/librdkafka/tests/0008-reqacks.c +179 -0
- package/deps/librdkafka/tests/0009-mock_cluster.c +97 -0
- package/deps/librdkafka/tests/0011-produce_batch.c +753 -0
- package/deps/librdkafka/tests/0012-produce_consume.c +537 -0
- package/deps/librdkafka/tests/0013-null-msgs.c +473 -0
- package/deps/librdkafka/tests/0014-reconsume-191.c +512 -0
- package/deps/librdkafka/tests/0015-offset_seeks.c +172 -0
- package/deps/librdkafka/tests/0016-client_swname.c +181 -0
- package/deps/librdkafka/tests/0017-compression.c +140 -0
- package/deps/librdkafka/tests/0018-cgrp_term.c +338 -0
- package/deps/librdkafka/tests/0019-list_groups.c +289 -0
- package/deps/librdkafka/tests/0020-destroy_hang.c +162 -0
- package/deps/librdkafka/tests/0021-rkt_destroy.c +72 -0
- package/deps/librdkafka/tests/0022-consume_batch.c +279 -0
- package/deps/librdkafka/tests/0025-timers.c +147 -0
- package/deps/librdkafka/tests/0026-consume_pause.c +547 -0
- package/deps/librdkafka/tests/0028-long_topicnames.c +79 -0
- package/deps/librdkafka/tests/0029-assign_offset.c +202 -0
- package/deps/librdkafka/tests/0030-offset_commit.c +589 -0
- package/deps/librdkafka/tests/0031-get_offsets.c +235 -0
- package/deps/librdkafka/tests/0033-regex_subscribe.c +536 -0
- package/deps/librdkafka/tests/0034-offset_reset.c +398 -0
- package/deps/librdkafka/tests/0035-api_version.c +73 -0
- package/deps/librdkafka/tests/0036-partial_fetch.c +87 -0
- package/deps/librdkafka/tests/0037-destroy_hang_local.c +85 -0
- package/deps/librdkafka/tests/0038-performance.c +121 -0
- package/deps/librdkafka/tests/0039-event.c +284 -0
- package/deps/librdkafka/tests/0040-io_event.c +257 -0
- package/deps/librdkafka/tests/0041-fetch_max_bytes.c +97 -0
- package/deps/librdkafka/tests/0042-many_topics.c +252 -0
- package/deps/librdkafka/tests/0043-no_connection.c +77 -0
- package/deps/librdkafka/tests/0044-partition_cnt.c +94 -0
- package/deps/librdkafka/tests/0045-subscribe_update.c +1010 -0
- package/deps/librdkafka/tests/0046-rkt_cache.c +65 -0
- package/deps/librdkafka/tests/0047-partial_buf_tmout.c +98 -0
- package/deps/librdkafka/tests/0048-partitioner.c +283 -0
- package/deps/librdkafka/tests/0049-consume_conn_close.c +162 -0
- package/deps/librdkafka/tests/0050-subscribe_adds.c +145 -0
- package/deps/librdkafka/tests/0051-assign_adds.c +126 -0
- package/deps/librdkafka/tests/0052-msg_timestamps.c +238 -0
- package/deps/librdkafka/tests/0053-stats_cb.cpp +527 -0
- package/deps/librdkafka/tests/0054-offset_time.cpp +236 -0
- package/deps/librdkafka/tests/0055-producer_latency.c +539 -0
- package/deps/librdkafka/tests/0056-balanced_group_mt.c +315 -0
- package/deps/librdkafka/tests/0057-invalid_topic.cpp +112 -0
- package/deps/librdkafka/tests/0058-log.cpp +123 -0
- package/deps/librdkafka/tests/0059-bsearch.cpp +241 -0
- package/deps/librdkafka/tests/0060-op_prio.cpp +163 -0
- package/deps/librdkafka/tests/0061-consumer_lag.cpp +295 -0
- package/deps/librdkafka/tests/0062-stats_event.c +126 -0
- package/deps/librdkafka/tests/0063-clusterid.cpp +180 -0
- package/deps/librdkafka/tests/0064-interceptors.c +481 -0
- package/deps/librdkafka/tests/0065-yield.cpp +140 -0
- package/deps/librdkafka/tests/0066-plugins.cpp +129 -0
- package/deps/librdkafka/tests/0067-empty_topic.cpp +151 -0
- package/deps/librdkafka/tests/0068-produce_timeout.c +136 -0
- package/deps/librdkafka/tests/0069-consumer_add_parts.c +119 -0
- package/deps/librdkafka/tests/0070-null_empty.cpp +197 -0
- package/deps/librdkafka/tests/0072-headers_ut.c +448 -0
- package/deps/librdkafka/tests/0073-headers.c +381 -0
- package/deps/librdkafka/tests/0074-producev.c +87 -0
- package/deps/librdkafka/tests/0075-retry.c +290 -0
- package/deps/librdkafka/tests/0076-produce_retry.c +452 -0
- package/deps/librdkafka/tests/0077-compaction.c +363 -0
- package/deps/librdkafka/tests/0078-c_from_cpp.cpp +96 -0
- package/deps/librdkafka/tests/0079-fork.c +93 -0
- package/deps/librdkafka/tests/0080-admin_ut.c +3095 -0
- package/deps/librdkafka/tests/0081-admin.c +5633 -0
- package/deps/librdkafka/tests/0082-fetch_max_bytes.cpp +137 -0
- package/deps/librdkafka/tests/0083-cb_event.c +233 -0
- package/deps/librdkafka/tests/0084-destroy_flags.c +208 -0
- package/deps/librdkafka/tests/0085-headers.cpp +392 -0
- package/deps/librdkafka/tests/0086-purge.c +368 -0
- package/deps/librdkafka/tests/0088-produce_metadata_timeout.c +162 -0
- package/deps/librdkafka/tests/0089-max_poll_interval.c +511 -0
- package/deps/librdkafka/tests/0090-idempotence.c +171 -0
- package/deps/librdkafka/tests/0091-max_poll_interval_timeout.c +295 -0
- package/deps/librdkafka/tests/0092-mixed_msgver.c +103 -0
- package/deps/librdkafka/tests/0093-holb.c +200 -0
- package/deps/librdkafka/tests/0094-idempotence_msg_timeout.c +231 -0
- package/deps/librdkafka/tests/0095-all_brokers_down.cpp +122 -0
- package/deps/librdkafka/tests/0097-ssl_verify.cpp +658 -0
- package/deps/librdkafka/tests/0098-consumer-txn.cpp +1218 -0
- package/deps/librdkafka/tests/0099-commit_metadata.c +194 -0
- package/deps/librdkafka/tests/0100-thread_interceptors.cpp +195 -0
- package/deps/librdkafka/tests/0101-fetch-from-follower.cpp +446 -0
- package/deps/librdkafka/tests/0102-static_group_rebalance.c +836 -0
- package/deps/librdkafka/tests/0103-transactions.c +1383 -0
- package/deps/librdkafka/tests/0104-fetch_from_follower_mock.c +625 -0
- package/deps/librdkafka/tests/0105-transactions_mock.c +3930 -0
- package/deps/librdkafka/tests/0106-cgrp_sess_timeout.c +318 -0
- package/deps/librdkafka/tests/0107-topic_recreate.c +259 -0
- package/deps/librdkafka/tests/0109-auto_create_topics.cpp +278 -0
- package/deps/librdkafka/tests/0110-batch_size.cpp +182 -0
- package/deps/librdkafka/tests/0111-delay_create_topics.cpp +127 -0
- package/deps/librdkafka/tests/0112-assign_unknown_part.c +87 -0
- package/deps/librdkafka/tests/0113-cooperative_rebalance.cpp +3473 -0
- package/deps/librdkafka/tests/0114-sticky_partitioning.cpp +176 -0
- package/deps/librdkafka/tests/0115-producer_auth.cpp +182 -0
- package/deps/librdkafka/tests/0116-kafkaconsumer_close.cpp +216 -0
- package/deps/librdkafka/tests/0117-mock_errors.c +331 -0
- package/deps/librdkafka/tests/0118-commit_rebalance.c +154 -0
- package/deps/librdkafka/tests/0119-consumer_auth.cpp +167 -0
- package/deps/librdkafka/tests/0120-asymmetric_subscription.c +185 -0
- package/deps/librdkafka/tests/0121-clusterid.c +115 -0
- package/deps/librdkafka/tests/0122-buffer_cleaning_after_rebalance.c +227 -0
- package/deps/librdkafka/tests/0123-connections_max_idle.c +98 -0
- package/deps/librdkafka/tests/0124-openssl_invalid_engine.c +69 -0
- package/deps/librdkafka/tests/0125-immediate_flush.c +144 -0
- package/deps/librdkafka/tests/0126-oauthbearer_oidc.c +528 -0
- package/deps/librdkafka/tests/0127-fetch_queue_backoff.cpp +165 -0
- package/deps/librdkafka/tests/0128-sasl_callback_queue.cpp +125 -0
- package/deps/librdkafka/tests/0129-fetch_aborted_msgs.c +79 -0
- package/deps/librdkafka/tests/0130-store_offsets.c +178 -0
- package/deps/librdkafka/tests/0131-connect_timeout.c +81 -0
- package/deps/librdkafka/tests/0132-strategy_ordering.c +179 -0
- package/deps/librdkafka/tests/0133-ssl_keys.c +150 -0
- package/deps/librdkafka/tests/0134-ssl_provider.c +92 -0
- package/deps/librdkafka/tests/0135-sasl_credentials.cpp +143 -0
- package/deps/librdkafka/tests/0136-resolve_cb.c +181 -0
- package/deps/librdkafka/tests/0137-barrier_batch_consume.c +619 -0
- package/deps/librdkafka/tests/0138-admin_mock.c +281 -0
- package/deps/librdkafka/tests/0139-offset_validation_mock.c +950 -0
- package/deps/librdkafka/tests/0140-commit_metadata.cpp +108 -0
- package/deps/librdkafka/tests/0142-reauthentication.c +515 -0
- package/deps/librdkafka/tests/0143-exponential_backoff_mock.c +552 -0
- package/deps/librdkafka/tests/0144-idempotence_mock.c +373 -0
- package/deps/librdkafka/tests/0145-pause_resume_mock.c +119 -0
- package/deps/librdkafka/tests/0146-metadata_mock.c +505 -0
- package/deps/librdkafka/tests/0147-consumer_group_consumer_mock.c +952 -0
- package/deps/librdkafka/tests/0148-offset_fetch_commit_error_mock.c +563 -0
- package/deps/librdkafka/tests/0149-broker-same-host-port.c +140 -0
- package/deps/librdkafka/tests/0150-telemetry_mock.c +651 -0
- package/deps/librdkafka/tests/0151-purge-brokers.c +566 -0
- package/deps/librdkafka/tests/0152-rebootstrap.c +59 -0
- package/deps/librdkafka/tests/0153-memberid.c +128 -0
- package/deps/librdkafka/tests/1000-unktopic.c +164 -0
- package/deps/librdkafka/tests/8000-idle.cpp +60 -0
- package/deps/librdkafka/tests/8001-fetch_from_follower_mock_manual.c +113 -0
- package/deps/librdkafka/tests/CMakeLists.txt +170 -0
- package/deps/librdkafka/tests/LibrdkafkaTestApp.py +291 -0
- package/deps/librdkafka/tests/Makefile +182 -0
- package/deps/librdkafka/tests/README.md +509 -0
- package/deps/librdkafka/tests/autotest.sh +33 -0
- package/deps/librdkafka/tests/backtrace.gdb +30 -0
- package/deps/librdkafka/tests/broker_version_tests.py +315 -0
- package/deps/librdkafka/tests/buildbox.sh +17 -0
- package/deps/librdkafka/tests/cleanup-checker-tests.sh +20 -0
- package/deps/librdkafka/tests/cluster_testing.py +191 -0
- package/deps/librdkafka/tests/delete-test-topics.sh +56 -0
- package/deps/librdkafka/tests/fixtures/oauthbearer/jwt_assertion_template.json +10 -0
- package/deps/librdkafka/tests/fixtures/ssl/Makefile +8 -0
- package/deps/librdkafka/tests/fixtures/ssl/README.md +13 -0
- package/deps/librdkafka/tests/fixtures/ssl/client.keystore.intermediate.p12 +0 -0
- package/deps/librdkafka/tests/fixtures/ssl/client.keystore.p12 +0 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.intermediate.pem +72 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.pem +50 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.intermediate.key +46 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.key +46 -0
- package/deps/librdkafka/tests/fixtures/ssl/create_keys.sh +168 -0
- package/deps/librdkafka/tests/fuzzers/Makefile +12 -0
- package/deps/librdkafka/tests/fuzzers/README.md +31 -0
- package/deps/librdkafka/tests/fuzzers/fuzz_regex.c +74 -0
- package/deps/librdkafka/tests/fuzzers/helpers.h +90 -0
- package/deps/librdkafka/tests/gen-ssl-certs.sh +165 -0
- package/deps/librdkafka/tests/interactive_broker_version.py +170 -0
- package/deps/librdkafka/tests/interceptor_test/CMakeLists.txt +16 -0
- package/deps/librdkafka/tests/interceptor_test/Makefile +22 -0
- package/deps/librdkafka/tests/interceptor_test/interceptor_test.c +314 -0
- package/deps/librdkafka/tests/interceptor_test/interceptor_test.h +54 -0
- package/deps/librdkafka/tests/java/IncrementalRebalanceCli.java +97 -0
- package/deps/librdkafka/tests/java/Makefile +13 -0
- package/deps/librdkafka/tests/java/Murmur2Cli.java +46 -0
- package/deps/librdkafka/tests/java/README.md +14 -0
- package/deps/librdkafka/tests/java/TransactionProducerCli.java +162 -0
- package/deps/librdkafka/tests/java/run-class.sh +11 -0
- package/deps/librdkafka/tests/librdkafka.suppressions +483 -0
- package/deps/librdkafka/tests/lz4_manual_test.sh +59 -0
- package/deps/librdkafka/tests/multi-broker-version-test.sh +50 -0
- package/deps/librdkafka/tests/parse-refcnt.sh +43 -0
- package/deps/librdkafka/tests/performance_plot.py +115 -0
- package/deps/librdkafka/tests/plugin_test/Makefile +19 -0
- package/deps/librdkafka/tests/plugin_test/plugin_test.c +58 -0
- package/deps/librdkafka/tests/requirements.txt +2 -0
- package/deps/librdkafka/tests/run-all-tests.sh +79 -0
- package/deps/librdkafka/tests/run-consumer-tests.sh +16 -0
- package/deps/librdkafka/tests/run-producer-tests.sh +16 -0
- package/deps/librdkafka/tests/run-test-batches.py +157 -0
- package/deps/librdkafka/tests/run-test.sh +140 -0
- package/deps/librdkafka/tests/rusage.c +249 -0
- package/deps/librdkafka/tests/sasl_test.py +289 -0
- package/deps/librdkafka/tests/scenarios/README.md +6 -0
- package/deps/librdkafka/tests/scenarios/ak23.json +6 -0
- package/deps/librdkafka/tests/scenarios/default.json +5 -0
- package/deps/librdkafka/tests/scenarios/noautocreate.json +5 -0
- package/deps/librdkafka/tests/sockem.c +801 -0
- package/deps/librdkafka/tests/sockem.h +85 -0
- package/deps/librdkafka/tests/sockem_ctrl.c +145 -0
- package/deps/librdkafka/tests/sockem_ctrl.h +61 -0
- package/deps/librdkafka/tests/test.c +7778 -0
- package/deps/librdkafka/tests/test.conf.example +27 -0
- package/deps/librdkafka/tests/test.h +1028 -0
- package/deps/librdkafka/tests/testcpp.cpp +131 -0
- package/deps/librdkafka/tests/testcpp.h +388 -0
- package/deps/librdkafka/tests/testshared.h +416 -0
- package/deps/librdkafka/tests/tools/README.md +4 -0
- package/deps/librdkafka/tests/tools/stats/README.md +21 -0
- package/deps/librdkafka/tests/tools/stats/filter.jq +42 -0
- package/deps/librdkafka/tests/tools/stats/graph.py +150 -0
- package/deps/librdkafka/tests/tools/stats/requirements.txt +3 -0
- package/deps/librdkafka/tests/tools/stats/to_csv.py +124 -0
- package/deps/librdkafka/tests/trivup/trivup-0.14.0.tar.gz +0 -0
- package/deps/librdkafka/tests/until-fail.sh +87 -0
- package/deps/librdkafka/tests/xxxx-assign_partition.c +122 -0
- package/deps/librdkafka/tests/xxxx-metadata.cpp +159 -0
- package/deps/librdkafka/vcpkg.json +23 -0
- package/deps/librdkafka/win32/README.md +5 -0
- package/deps/librdkafka/win32/build-package.bat +3 -0
- package/deps/librdkafka/win32/build.bat +19 -0
- package/deps/librdkafka/win32/common.vcxproj +84 -0
- package/deps/librdkafka/win32/interceptor_test/interceptor_test.vcxproj +87 -0
- package/deps/librdkafka/win32/librdkafka.autopkg.template +54 -0
- package/deps/librdkafka/win32/librdkafka.master.testing.targets +13 -0
- package/deps/librdkafka/win32/librdkafka.sln +226 -0
- package/deps/librdkafka/win32/librdkafka.vcxproj +276 -0
- package/deps/librdkafka/win32/librdkafkacpp/librdkafkacpp.vcxproj +104 -0
- package/deps/librdkafka/win32/msbuild.ps1 +15 -0
- package/deps/librdkafka/win32/openssl_engine_example/openssl_engine_example.vcxproj +132 -0
- package/deps/librdkafka/win32/package-zip.ps1 +46 -0
- package/deps/librdkafka/win32/packages/repositories.config +4 -0
- package/deps/librdkafka/win32/push-package.bat +4 -0
- package/deps/librdkafka/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj +67 -0
- package/deps/librdkafka/win32/rdkafka_example/rdkafka_example.vcxproj +97 -0
- package/deps/librdkafka/win32/rdkafka_performance/rdkafka_performance.vcxproj +97 -0
- package/deps/librdkafka/win32/setup-msys2.ps1 +47 -0
- package/deps/librdkafka/win32/setup-vcpkg.ps1 +34 -0
- package/deps/librdkafka/win32/tests/test.conf.example +25 -0
- package/deps/librdkafka/win32/tests/tests.vcxproj +253 -0
- package/deps/librdkafka/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj +132 -0
- package/deps/librdkafka/win32/wingetopt.c +564 -0
- package/deps/librdkafka/win32/wingetopt.h +101 -0
- package/deps/librdkafka/win32/wintime.h +33 -0
- package/deps/librdkafka.gyp +62 -0
- package/lib/admin.js +233 -0
- package/lib/client.js +573 -0
- package/lib/error.js +500 -0
- package/lib/index.js +34 -0
- package/lib/kafka-consumer-stream.js +397 -0
- package/lib/kafka-consumer.js +698 -0
- package/lib/producer/high-level-producer.js +323 -0
- package/lib/producer-stream.js +307 -0
- package/lib/producer.js +375 -0
- package/lib/tools/ref-counter.js +52 -0
- package/lib/topic-partition.js +88 -0
- package/lib/topic.js +42 -0
- package/lib/util.js +29 -0
- package/package.json +61 -0
- package/prebuilds/darwin-arm64/@point3+node-rdkafka.node +0 -0
- package/prebuilds/linux-x64/@point3+node-rdkafka.node +0 -0
- package/util/configure.js +30 -0
- package/util/get-env.js +6 -0
- package/util/test-compile.js +11 -0
- package/util/test-producer-delivery.js +100 -0
|
@@ -0,0 +1,4896 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* librdkafka - The Apache Kafka C/C++ library
|
|
3
|
+
*
|
|
4
|
+
* Copyright (c) 2015-2022, Magnus Edenhill,
|
|
5
|
+
* 2023, Confluent Inc.
|
|
6
|
+
* All rights reserved.
|
|
7
|
+
*
|
|
8
|
+
* Redistribution and use in source and binary forms, with or without
|
|
9
|
+
* modification, are permitted provided that the following conditions are met:
|
|
10
|
+
*
|
|
11
|
+
* 1. Redistributions of source code must retain the above copyright notice,
|
|
12
|
+
* this list of conditions and the following disclaimer.
|
|
13
|
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
14
|
+
* this list of conditions and the following disclaimer in the documentation
|
|
15
|
+
* and/or other materials provided with the distribution.
|
|
16
|
+
*
|
|
17
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
18
|
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
19
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
20
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
21
|
+
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
22
|
+
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
23
|
+
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
24
|
+
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
25
|
+
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
26
|
+
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
27
|
+
* POSSIBILITY OF SUCH DAMAGE.
|
|
28
|
+
*/
|
|
29
|
+
#include "rdkafka_int.h"
|
|
30
|
+
#include "rdkafka_topic.h"
|
|
31
|
+
#include "rdkafka_broker.h"
|
|
32
|
+
#include "rdkafka_request.h"
|
|
33
|
+
#include "rdkafka_offset.h"
|
|
34
|
+
#include "rdkafka_partition.h"
|
|
35
|
+
#include "rdkafka_fetcher.h"
|
|
36
|
+
#include "rdregex.h"
|
|
37
|
+
#include "rdports.h" /* rd_qsort_r() */
|
|
38
|
+
|
|
39
|
+
#include "rdunittest.h"
|
|
40
|
+
|
|
41
|
+
/* Human-readable names for the toppar fetch states, used by debug logging
 * (e.g. rd_kafka_toppar_set_fetch_state()) which indexes this array by the
 * numeric fetch state.
 * NOTE(review): order must stay in sync with the RD_KAFKA_TOPPAR_FETCH_..
 * enum (declared elsewhere) — confirm when adding states. */
const char *rd_kafka_fetch_states[] = {"none", "stopping",
                                       "stopped", "offset-query",
                                       "offset-wait", "validate-epoch-wait",
                                       "active"};
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk,
|
|
48
|
+
rd_kafka_q_t *rkq,
|
|
49
|
+
rd_kafka_op_t *rko,
|
|
50
|
+
rd_kafka_q_cb_type_t cb_type,
|
|
51
|
+
void *opaque);
|
|
52
|
+
|
|
53
|
+
static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp,
|
|
54
|
+
int backoff_ms,
|
|
55
|
+
const char *reason);
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
/**
 * @brief Atomically bump the toppar's version counter and return the new
 *        version, logging the call site (\p func, \p line) for debugging.
 *
 * The returned version acts as a barrier: ops created before the bump carry
 * an older version and can be recognized as outdated.
 *
 * @returns the new (post-increment) version.
 */
static RD_INLINE int32_t
rd_kafka_toppar_version_new_barrier0(rd_kafka_toppar_t *rktp,
                                     const char *func,
                                     int line) {
        int32_t version = rd_atomic32_add(&rktp->rktp_version, 1);
        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER",
                     "%s [%" PRId32 "]: %s:%d: new version barrier v%" PRId32,
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, func,
                     line, version);
        return version;
}

/* Convenience wrapper that records the caller's function name and line. */
#define rd_kafka_toppar_version_new_barrier(rktp)                              \
        rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
/**
 * @brief Toppar-based ListOffsets (OffsetRequest) response handler used for
 *        updating the partition's low watermark (rktp_lo_offset) for
 *        consumer lag calculation.
 *
 * @param opaque rktp with a refcount held by the request (released here,
 *               except on the retry path where the request stays alive).
 */
static void rd_kafka_toppar_lag_handle_Offset(rd_kafka_t *rk,
                                              rd_kafka_broker_t *rkb,
                                              rd_kafka_resp_err_t err,
                                              rd_kafka_buf_t *rkbuf,
                                              rd_kafka_buf_t *request,
                                              void *opaque) {
        rd_kafka_toppar_t *rktp = opaque;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_topic_partition_t *rktpar;

        offsets = rd_kafka_topic_partition_list_new(1);

        /* Parse and return Offset */
        err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets,
                                          NULL);

        if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
                /* Request is being retried internally: keep the
                 * rktp refcount and the wait flag until the final reply. */
                rd_kafka_topic_partition_list_destroy(offsets);
                return; /* Retrying */
        }

        /* The response must contain our partition, else treat it as an
         * unknown-partition error. */
        if (!err && !(rktpar = rd_kafka_topic_partition_list_find(
                          offsets, rktp->rktp_rkt->rkt_topic->str,
                          rktp->rktp_partition)))
                err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;

        if (!err && !rktpar->err) {
                /* Update the low watermark under the toppar lock. */
                rd_kafka_toppar_lock(rktp);
                rktp->rktp_lo_offset = rktpar->offset;
                rd_kafka_toppar_unlock(rktp);
        }

        rd_kafka_topic_partition_list_destroy(offsets);

        /* Allow the next consumer-lag request to be sent
         * (set in rd_kafka_toppar_consumer_lag_req()). */
        rktp->rktp_wait_consumer_lag_resp = 0;

        rd_kafka_toppar_destroy(rktp); /* from request.opaque */
}
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
/**
 * @brief Send a ListOffsets request for the partition's oldest (beginning)
 *        offset to keep track of consumer lag.
 *
 * Only one request may be outstanding at a time
 * (rktp_wait_consumer_lag_resp, cleared by
 * rd_kafka_toppar_lag_handle_Offset()).
 *
 * @locality toppar handle thread
 * @locks none
 */
static void rd_kafka_toppar_consumer_lag_req(rd_kafka_toppar_t *rktp) {
        rd_kafka_topic_partition_list_t *partitions;
        rd_kafka_topic_partition_t *rktpar;

        if (rktp->rktp_wait_consumer_lag_resp)
                return; /* Previous request not finished yet */

        rd_kafka_toppar_lock(rktp);

        /* Offset requests can only be sent to the leader replica.
         *
         * Note: If rktp is delegated to a preferred replica, it is
         * certain that FETCH >= v5 and so rktp_lo_offset will be
         * updated via LogStartOffset in the FETCH response.
         */
        if (!rktp->rktp_leader || (rktp->rktp_leader != rktp->rktp_broker)) {
                rd_kafka_toppar_unlock(rktp);
                return;
        }

        /* Also don't send a timed log start offset request if leader
         * broker supports FETCH >= v5, since this will be set when
         * doing fetch requests.
         */
        if (rd_kafka_broker_ApiVersion_at_least(rktp->rktp_broker,
                                                RD_KAFKAP_Fetch, 5)) {
                rd_kafka_toppar_unlock(rktp);
                return;
        }

        /* Mark request in-flight before issuing it. */
        rktp->rktp_wait_consumer_lag_resp = 1;

        partitions = rd_kafka_topic_partition_list_new(1);
        rktpar = rd_kafka_topic_partition_list_add(
            partitions, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
        rktpar->offset = RD_KAFKA_OFFSET_BEGINNING;
        rd_kafka_topic_partition_set_current_leader_epoch(
            rktpar, rktp->rktp_leader_epoch);

        /* Ask for oldest offset. The newest offset is automatically
         * propagated in FetchResponse.HighwaterMark. */
        rd_kafka_ListOffsetsRequest(rktp->rktp_broker, partitions,
                                    RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
                                    rd_kafka_toppar_lag_handle_Offset,
                                    -1, /* don't set an absolute timeout */
                                    rd_kafka_toppar_keep(rktp));

        rd_kafka_toppar_unlock(rktp);

        rd_kafka_topic_partition_list_destroy(partitions);
}
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
/**
|
|
180
|
+
* Request earliest offset for a partition
|
|
181
|
+
*
|
|
182
|
+
* Locality: toppar handler thread
|
|
183
|
+
*/
|
|
184
|
+
static void rd_kafka_toppar_consumer_lag_tmr_cb(rd_kafka_timers_t *rkts,
|
|
185
|
+
void *arg) {
|
|
186
|
+
rd_kafka_toppar_t *rktp = arg;
|
|
187
|
+
rd_kafka_toppar_consumer_lag_req(rktp);
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
/**
 * @brief Update rktp_op_version.
 *        Enqueue an RD_KAFKA_OP_BARRIER type of operation
 *        when the op_version is updated.
 *
 * The barrier op carries a toppar refcount which is owned by the op and
 * released when the op is handled/destroyed. RD_KAFKA_PRIO_FLASH makes the
 * barrier jump ahead of normal-priority ops on the fetch queue.
 *
 * @locks_required rd_kafka_toppar_lock() must be held.
 * @locality Toppar handler thread
 */
void rd_kafka_toppar_op_version_bump(rd_kafka_toppar_t *rktp, int32_t version) {
        rd_kafka_op_t *rko;

        rktp->rktp_op_version = version;
        rko = rd_kafka_op_new(RD_KAFKA_OP_BARRIER);
        rko->rko_version = version;
        rko->rko_prio = RD_KAFKA_PRIO_FLASH;
        rko->rko_rktp = rd_kafka_toppar_keep(rktp);
        rd_kafka_q_enq(rktp->rktp_fetchq, rko);
}
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
/**
 * @brief Add new partition to topic.
 *
 * Allocates and initializes a toppar (topic+partition) object: offsets,
 * fetch positions, message queues, op queue, version counter and (for
 * consumers with statistics enabled) a periodic consumer-lag timer.
 *
 * @param func,line caller's location, for the TOPPARNEW debug log.
 * @returns a new toppar with one refcount held for the caller
 *          (release with rd_kafka_toppar_destroy()).
 *
 * Locks: rd_kafka_topic_wrlock() must be held.
 * Locks: rd_kafka_wrlock() must be held.
 */
rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt,
                                        int32_t partition,
                                        const char *func,
                                        int line) {
        rd_kafka_toppar_t *rktp;

        rktp = rd_calloc(1, sizeof(*rktp));

        rktp->rktp_partition = partition;
        rktp->rktp_rkt = rkt;
        /* No leader/broker known yet. */
        rktp->rktp_leader_id = -1;
        rktp->rktp_broker_id = -1;
        rktp->rktp_leader_epoch = -1;
        rd_interval_init(&rktp->rktp_lease_intvl);
        rd_interval_init(&rktp->rktp_new_lease_intvl);
        rd_interval_init(&rktp->rktp_new_lease_log_intvl);
        rd_interval_init(&rktp->rktp_metadata_intvl);
        /* Mark partition as unknown (does not exist) until we see the
         * partition in topic metadata. */
        if (partition != RD_KAFKA_PARTITION_UA)
                rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
        rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE;
        rktp->rktp_fetch_msg_max_bytes =
            rkt->rkt_rk->rk_conf.fetch_msg_max_bytes;
        rktp->rktp_offset_fp = NULL;
        rd_kafka_offset_stats_reset(&rktp->rktp_offsets);
        rd_kafka_offset_stats_reset(&rktp->rktp_offsets_fin);
        /* All watermark offsets start out invalid. */
        rktp->rktp_ls_offset = RD_KAFKA_OFFSET_INVALID;
        rktp->rktp_hi_offset = RD_KAFKA_OFFSET_INVALID;
        rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID;
        rd_kafka_fetch_pos_init(&rktp->rktp_query_pos);
        rd_kafka_fetch_pos_init(&rktp->rktp_next_fetch_start);
        rd_kafka_fetch_pos_init(&rktp->rktp_last_next_fetch_start);
        rd_kafka_fetch_pos_init(&rktp->rktp_offset_validation_pos);
        rd_kafka_fetch_pos_init(&rktp->rktp_app_pos);
        rd_kafka_fetch_pos_init(&rktp->rktp_stored_pos);
        rd_kafka_fetch_pos_init(&rktp->rktp_committing_pos);
        rd_kafka_fetch_pos_init(&rktp->rktp_committed_pos);
        rd_kafka_msgq_init(&rktp->rktp_msgq);
        rd_kafka_msgq_init(&rktp->rktp_xmit_msgq);
        mtx_init(&rktp->rktp_lock, mtx_plain);

        rd_refcnt_init(&rktp->rktp_refcnt, 0);
        rktp->rktp_fetchq = rd_kafka_consume_q_new(rkt->rkt_rk);
        /* Ops enqueued on rktp_ops are served by
         * rd_kafka_toppar_op_serve() with rktp as opaque. */
        rktp->rktp_ops = rd_kafka_q_new(rkt->rkt_rk);
        rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve;
        rktp->rktp_ops->rkq_opaque = rktp;
        rd_atomic32_init(&rktp->rktp_version, 1);
        rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version);

        rd_atomic32_init(&rktp->rktp_msgs_inflight, 0);
        rd_kafka_pid_reset(&rktp->rktp_eos.pid);

        /* Consumer: If statistics is available we query the log start offset
         * of each partition.
         * Since the oldest offset only moves on log retention, we cap this
         * value on the low end to a reasonable value to avoid flooding
         * the brokers with OffsetRequests when our statistics interval is low.
         * FIXME: Use a global timer to collect offsets for all partitions
         * FIXME: This timer is superfulous for FETCH >= v5 because the log
         * start offset is included in fetch responses.
         * */
        if (rktp->rktp_rkt->rkt_rk->rk_conf.stats_interval_ms > 0 &&
            rkt->rkt_rk->rk_type == RD_KAFKA_CONSUMER &&
            rktp->rktp_partition != RD_KAFKA_PARTITION_UA) {
                int intvl = rkt->rkt_rk->rk_conf.stats_interval_ms;
                if (intvl < 10 * 1000 /* 10s */)
                        intvl = 10 * 1000;
                rd_kafka_timer_start(
                    &rkt->rkt_rk->rk_timers, &rktp->rktp_consumer_lag_tmr,
                    intvl * 1000ll, rd_kafka_toppar_consumer_lag_tmr_cb, rktp);
        }

        /* Hold a topic refcount for the toppar's rktp_rkt pointer
         * (released in rd_kafka_toppar_destroy_final()). */
        rktp->rktp_rkt = rd_kafka_topic_keep(rkt);

        rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops);
        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARNEW",
                     "NEW %s [%" PRId32 "] %p refcnt %p (at %s:%d)",
                     rkt->rkt_topic->str, rktp->rktp_partition, rktp,
                     &rktp->rktp_refcnt, func, line);

        return rd_kafka_toppar_keep(rktp);
}
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
/**
 * @brief Removes a toppar from its duties: stops its timers (validation,
 *        offset query, consumer lag) and detaches the op queue forwarding.
 *
 * Called from rd_kafka_toppar_destroy_final().
 *
 * Locks: rd_kafka_toppar_lock() MUST be held
 */
static void rd_kafka_toppar_remove(rd_kafka_toppar_t *rktp) {
        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARREMOVE",
                     "Removing toppar %s [%" PRId32 "] %p",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rktp);

        rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
                            &rktp->rktp_validate_tmr, 1 /*lock*/);
        rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
                            &rktp->rktp_offset_query_tmr, 1 /*lock*/);
        rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
                            &rktp->rktp_consumer_lag_tmr, 1 /*lock*/);

        /* Stop forwarding the toppar's op queue. */
        rd_kafka_q_fwd_set(rktp->rktp_ops, NULL);
}
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
/**
 * @brief Final destructor for partition, called when the last refcount
 *        is dropped.
 *
 * Tears down in order: duties/timers, queued messages (failed with
 * ERR__DESTROY), queues, replyq, topic refcount, lock, leader broker
 * refcount, refcount object, and finally the toppar memory itself.
 */
void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp) {

        rd_kafka_toppar_remove(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY",
                     "%s [%" PRId32 "]: %p DESTROY_FINAL",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rktp);

        /* Clear queues */
        rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
                        rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0);
        /* Fail any remaining queued messages with ERR__DESTROY so
         * delivery reports are triggered. */
        rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq,
                         RD_KAFKA_RESP_ERR__DESTROY);
        rd_kafka_q_destroy_owner(rktp->rktp_fetchq);
        rd_kafka_q_destroy_owner(rktp->rktp_ops);

        rd_kafka_replyq_destroy(&rktp->rktp_replyq);

        /* Release topic refcount held since rd_kafka_toppar_new0(). */
        rd_kafka_topic_destroy0(rktp->rktp_rkt);

        mtx_destroy(&rktp->rktp_lock);

        if (rktp->rktp_leader)
                rd_kafka_broker_destroy(rktp->rktp_leader);

        rd_refcnt_destroy(&rktp->rktp_refcnt);

        rd_free(rktp->rktp_stored_metadata);
        rd_free(rktp);
}
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
/**
 * @brief Set toppar fetching state.
 *
 * No-op if the state is unchanged. On transition to FETCH_ACTIVE the
 * fetch backoff is cleared and the broker thread is woken up so fetching
 * starts immediately.
 *
 * @locality any
 * @locks_required rd_kafka_toppar_lock() MUST be held.
 */
void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state) {
        if ((int)rktp->rktp_fetch_state == fetch_state)
                return;

        rd_kafka_dbg(
            rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE",
            "Partition %.*s [%" PRId32 "] changed fetch state %s -> %s",
            RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
            rd_kafka_fetch_states[rktp->rktp_fetch_state],
            rd_kafka_fetch_states[fetch_state]);

        rktp->rktp_fetch_state = fetch_state;

        if (fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE) {
                rktp->rktp_ts_fetch_backoff = 0;

                /* Wake-up broker thread which might be idling on IO */
                if (rktp->rktp_broker)
                        rd_kafka_broker_wakeup(rktp->rktp_broker,
                                               "fetch start");

                rd_kafka_dbg(
                    rktp->rktp_rkt->rkt_rk, CONSUMER | RD_KAFKA_DBG_TOPIC,
                    "FETCH",
                    "Partition %.*s [%" PRId32 "] start fetching at %s",
                    RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                    rktp->rktp_partition,
                    rd_kafka_fetch_pos2str(
                        rd_kafka_toppar_fetch_decide_next_fetch_start_pos(
                            rktp)));
        }
}
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
/**
|
|
401
|
+
* Returns the appropriate toppar for a given rkt and partition.
|
|
402
|
+
* The returned toppar has increased refcnt and must be unreffed by calling
|
|
403
|
+
* rd_kafka_toppar_destroy().
|
|
404
|
+
* May return NULL.
|
|
405
|
+
*
|
|
406
|
+
* If 'ua_on_miss' is true the UA (unassigned) toppar is returned if
|
|
407
|
+
* 'partition' was not known locally, else NULL is returned.
|
|
408
|
+
*
|
|
409
|
+
* Locks: Caller must hold rd_kafka_topic_*lock()
|
|
410
|
+
*/
|
|
411
|
+
rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func,
|
|
412
|
+
int line,
|
|
413
|
+
const rd_kafka_topic_t *rkt,
|
|
414
|
+
int32_t partition,
|
|
415
|
+
int ua_on_miss) {
|
|
416
|
+
rd_kafka_toppar_t *rktp;
|
|
417
|
+
|
|
418
|
+
if (partition >= 0 && partition < rkt->rkt_partition_cnt)
|
|
419
|
+
rktp = rkt->rkt_p[partition];
|
|
420
|
+
else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss)
|
|
421
|
+
rktp = rkt->rkt_ua;
|
|
422
|
+
else
|
|
423
|
+
return NULL;
|
|
424
|
+
|
|
425
|
+
if (rktp)
|
|
426
|
+
return rd_kafka_toppar_keep_fl(func, line, rktp);
|
|
427
|
+
|
|
428
|
+
return NULL;
|
|
429
|
+
}
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
/**
 * @brief Same as rd_kafka_toppar_get() but no need for locking and
 *        looks up the topic first.
 *
 * The topic is created locally if \p create_on_miss is set and it does not
 * already exist. The partition is always added as "desired"
 * (rd_kafka_toppar_desired_add()).
 * NOTE(review): \p ua_on_miss is not referenced in this function body —
 * confirm against the header whether it is intentionally unused here.
 *
 * @returns the toppar with a refcount for the caller, or NULL.
 *
 * Locality: any
 * Locks: none
 */
rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk,
                                        const char *topic,
                                        int32_t partition,
                                        int ua_on_miss,
                                        int create_on_miss) {
        rd_kafka_topic_t *rkt;
        rd_kafka_toppar_t *rktp;

        rd_kafka_wrlock(rk);

        /* Find or create topic */
        if (unlikely(!(rkt = rd_kafka_topic_find(rk, topic, 0 /*no-lock*/)))) {
                if (!create_on_miss) {
                        rd_kafka_wrunlock(rk);
                        return NULL;
                }
                rkt = rd_kafka_topic_new0(rk, topic, NULL, NULL, 0 /*no-lock*/);
                if (!rkt) {
                        rd_kafka_wrunlock(rk);
                        rd_kafka_log(rk, LOG_ERR, "TOPIC",
                                     "Failed to create local topic \"%s\": %s",
                                     topic, rd_strerror(errno));
                        return NULL;
                }
        }

        /* Drop the instance lock before taking the topic lock. */
        rd_kafka_wrunlock(rk);

        rd_kafka_topic_wrlock(rkt);
        rktp = rd_kafka_toppar_desired_add(rkt, partition);
        rd_kafka_topic_wrunlock(rkt);

        /* Release the topic refcount from find/new; the toppar holds
         * its own topic reference. */
        rd_kafka_topic_destroy0(rkt);

        return rktp;
}
|
|
475
|
+
|
|
476
|
+
|
|
477
|
+
/**
 * @brief Returns a toppar if it is available in the cluster.
 *        '*errp' is set to the error-code if lookup fails.
 *
 * Unknown-topic and zero-partition-count states fall back to the UA
 * (unassigned) partition so messages can be re-partitioned when metadata
 * arrives; NOTEXISTS and permanent-error states fail immediately.
 *
 * Locks: topic_*lock() MUST be held
 */
rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt,
                                             int32_t partition,
                                             int ua_on_miss,
                                             rd_kafka_resp_err_t *errp) {
        rd_kafka_toppar_t *rktp;

        switch (rkt->rkt_state) {
        case RD_KAFKA_TOPIC_S_UNKNOWN:
                /* No metadata received from cluster yet.
                 * Put message in UA partition and re-run partitioner when
                 * cluster comes up. */
                partition = RD_KAFKA_PARTITION_UA;
                break;

        case RD_KAFKA_TOPIC_S_NOTEXISTS:
                /* Topic not found in cluster.
                 * Fail message immediately. */
                *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
                return NULL;

        case RD_KAFKA_TOPIC_S_ERROR:
                /* Permanent topic error. */
                *errp = rkt->rkt_err;
                return NULL;

        case RD_KAFKA_TOPIC_S_EXISTS:
                /* Topic exists in cluster. */

                /* Topic exists but has no partitions.
                 * This is usually an transient state following the
                 * auto-creation of a topic. */
                if (unlikely(rkt->rkt_partition_cnt == 0)) {
                        partition = RD_KAFKA_PARTITION_UA;
                        break;
                }

                /* Check that partition exists. */
                if (partition >= rkt->rkt_partition_cnt) {
                        *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
                        return NULL;
                }
                break;

        default:
                rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
                break;
        }

        /* Get new partition */
        rktp = rd_kafka_toppar_get(rkt, partition, 0);

        if (unlikely(!rktp)) {
                /* Unknown topic or partition */
                if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
                        *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
                else
                        *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;

                return NULL;
        }

        return rktp;
}
|
|
546
|
+
|
|
547
|
+
|
|
548
|
+
/**
|
|
549
|
+
* Looks for partition 'i' in topic 'rkt's desired list.
|
|
550
|
+
*
|
|
551
|
+
* The desired partition list is the list of partitions that are desired
|
|
552
|
+
* (e.g., by the consumer) but not yet seen on a broker.
|
|
553
|
+
* As soon as the partition is seen on a broker the toppar is moved from
|
|
554
|
+
* the desired list and onto the normal rkt_p array.
|
|
555
|
+
* When the partition on the broker goes away a desired partition is put
|
|
556
|
+
* back on the desired list.
|
|
557
|
+
*
|
|
558
|
+
* Locks: rd_kafka_topic_*lock() must be held.
|
|
559
|
+
* Note: 'rktp' refcount is increased.
|
|
560
|
+
*/
|
|
561
|
+
|
|
562
|
+
rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt,
|
|
563
|
+
int32_t partition) {
|
|
564
|
+
rd_kafka_toppar_t *rktp;
|
|
565
|
+
int i;
|
|
566
|
+
|
|
567
|
+
RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) {
|
|
568
|
+
if (rktp->rktp_partition == partition)
|
|
569
|
+
return rd_kafka_toppar_keep(rktp);
|
|
570
|
+
}
|
|
571
|
+
|
|
572
|
+
return NULL;
|
|
573
|
+
}
|
|
574
|
+
|
|
575
|
+
|
|
576
|
+
/**
 * Link toppar on its topic's desired list.
 *
 * No-op if the toppar is already linked. The list holds its own
 * reference to the toppar (taken here, released in _desired_unlink()).
 *
 * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
 */
void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp) {

        if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP)
                return; /* Already linked */

        /* List reference */
        rd_kafka_toppar_keep(rktp);
        rd_list_add(&rktp->rktp_rkt->rkt_desp, rktp);
        /* Reset the refresh interval — presumably so the desired list is
         * re-evaluated promptly after this change; TODO confirm semantics. */
        rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl);
        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_DESP;
}
|
|
591
|
+
|
|
592
|
+
/**
 * Unlink toppar from its topic's desired list.
 *
 * No-op if the toppar is not linked. Releases the list's reference
 * to the toppar (the counterpart of the keep() in _desired_link()).
 *
 * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
 */
void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp) {
        if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP))
                return; /* Not linked */

        rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_DESP;
        rd_list_remove(&rktp->rktp_rkt->rkt_desp, rktp);
        /* Reset the refresh interval — mirrors _desired_link(); presumably
         * forces a prompt re-evaluation of the desired list. */
        rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl);
        /* Drop the list's reference. */
        rd_kafka_toppar_destroy(rktp);
}
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
/**
 * @brief If rktp is not already desired:
 *        - mark as DESIRED|~REMOVE
 *        - add to desired list if unknown
 *
 * No-op if the DESIRED flag is already set.
 *
 * @remark toppar_lock() MUST be held
 */
void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp) {
        if ((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
                return;

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED",
                     "%s [%" PRId32 "]: marking as DESIRED",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);

        /* If toppar was marked for removal this is no longer
         * the case since the partition is now desired. */
        rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_REMOVE;

        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED;

        /* Only partitions not (yet) known from broker metadata go on the
         * desired list; known partitions already live on the rkt_p array. */
        if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) {
                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED",
                             "%s [%" PRId32 "]: adding to DESIRED list",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition);
                rd_kafka_toppar_desired_link(rktp);
        }
}
|
|
637
|
+
|
|
638
|
+
|
|
639
|
+
/**
 * Adds 'partition' as a desired partition to topic 'rkt', or updates
 * an existing partition to be desired.
 *
 * Lookup order: known partitions first, then the desired list, and
 * finally a brand-new toppar is created if the partition is unknown.
 *
 * @returns the toppar with a refcount owned by the caller.
 *
 * Locks: rd_kafka_topic_wrlock() must be held.
 */
rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt,
                                               int32_t partition) {
        rd_kafka_toppar_t *rktp;

        /* Try known partitions (without falling back to the UA partition). */
        rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no_ua_on_miss*/);

        if (!rktp)
                rktp = rd_kafka_toppar_desired_get(rkt, partition);

        if (!rktp)
                rktp = rd_kafka_toppar_new(rkt, partition);

        rd_kafka_toppar_lock(rktp);
        rd_kafka_toppar_desired_add0(rktp);
        rd_kafka_toppar_unlock(rktp);

        return rktp; /* Callers refcount */
}
|
|
663
|
+
|
|
664
|
+
|
|
665
|
+
|
|
666
|
+
/**
 * Unmarks an 'rktp' as desired.
 *
 * No-op if the toppar is not currently desired. If the partition is
 * also unknown in the cluster it is scheduled for removal.
 *
 * Locks: rd_kafka_topic_wrlock() and rd_kafka_toppar_lock() MUST be held.
 */
void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp) {

        if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
                return;

        rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED;
        rd_kafka_toppar_desired_unlink(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP",
                     "Removing (un)desired topic %s [%" PRId32 "]",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);

        if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) {
                /* If this partition does not exist in the cluster
                 * and is no longer desired, remove it. */
                rd_kafka_toppar_broker_leave_for_remove(rktp);
        }
}
|
|
689
|
+
|
|
690
|
+
|
|
691
|
+
|
|
692
|
+
/**
 * Append message at tail of 'rktp' message queue.
 *
 * Assigns a per-partition msgid to the message (unless it already has one
 * or the partition is still unassigned/UA), enqueues it FIFO or sorted
 * depending on the topic's queuing strategy, and wakes the broker thread
 * if the queue signals it is ready for a wakeup.
 *
 * @param now Current timestamp, used for the wakeup check.
 */
void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp,
                             rd_kafka_msg_t *rkm,
                             rd_ts_t now) {
        rd_kafka_q_t *wakeup_q = NULL;

        rd_kafka_toppar_lock(rktp);

        /* Assign the next sequential msgid once the real partition is
         * known; UA-partition messages get their id after partitioning. */
        if (!rkm->rkm_u.producer.msgid &&
            rktp->rktp_partition != RD_KAFKA_PARTITION_UA)
                rkm->rkm_u.producer.msgid = ++rktp->rktp_msgid;

        if (rktp->rktp_partition == RD_KAFKA_PARTITION_UA ||
            rktp->rktp_rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO) {
                /* No need for enq_sorted(), this is the oldest message. */
                rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm);
        } else {
                rd_kafka_msgq_enq_sorted(rktp->rktp_rkt, &rktp->rktp_msgq, rkm);
        }

        if (unlikely(rktp->rktp_partition != RD_KAFKA_PARTITION_UA &&
                     rd_kafka_msgq_may_wakeup(&rktp->rktp_msgq, now) &&
                     (wakeup_q = rktp->rktp_msgq_wakeup_q))) {
                /* Wake-up broker thread */
                rktp->rktp_msgq.rkmq_wakeup.signalled = rd_true;
                /* Keep a reference so the queue stays valid after unlock. */
                rd_kafka_q_keep(wakeup_q);
        }

        rd_kafka_toppar_unlock(rktp);

        /* Perform the actual wakeup outside the toppar lock. */
        if (unlikely(wakeup_q != NULL)) {
                rd_kafka_q_yield(wakeup_q);
                rd_kafka_q_destroy(wakeup_q);
        }
}
|
|
729
|
+
|
|
730
|
+
|
|
731
|
+
/**
 * @brief Insert \p srcq before \p insert_before in \p destq.
 *
 * If \p srcq and \p destq overlaps only part of the \p srcq will be inserted.
 *
 * Upon return \p srcq will contain any remaining messages that require
 * another insert position in \p destq.
 *
 * @param insert_before Message in \p destq to insert before, or NULL to
 *                      append all of \p srcq to \p destq.
 * @param cmp Message ordering comparator.
 */
static void rd_kafka_msgq_insert_msgq_before(rd_kafka_msgq_t *destq,
                                             rd_kafka_msg_t *insert_before,
                                             rd_kafka_msgq_t *srcq,
                                             int (*cmp)(const void *a,
                                                        const void *b)) {
        rd_kafka_msg_t *slast;
        rd_kafka_msgq_t tmpq;

        if (!insert_before) {
                /* Append all of srcq to destq */
                rd_kafka_msgq_concat(destq, srcq);
                rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
                return;
        }

        slast = rd_kafka_msgq_last(srcq);
        rd_dassert(slast);

        if (cmp(slast, insert_before) > 0) {
                rd_kafka_msg_t *new_sfirst;
                int cnt;
                int64_t bytes;

                /* destq insert_before resides somewhere between
                 * srcq.first and srcq.last, find the first message in
                 * srcq that is > insert_before and split srcq into
                 * a left part that contains the messages to insert before
                 * insert_before, and a right part that will need another
                 * insert position. */

                new_sfirst = rd_kafka_msgq_find_pos(srcq, NULL, insert_before,
                                                    cmp, &cnt, &bytes);
                rd_assert(new_sfirst);

                /* split srcq into two parts using the divider message */
                rd_kafka_msgq_split(srcq, &tmpq, new_sfirst, cnt, bytes);

                rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
                rd_kafka_msgq_verify_order(NULL, &tmpq, 0, rd_false);
        } else {
                /* All of srcq fits before insert_before; nothing remains. */
                rd_kafka_msgq_init(&tmpq);
        }

        /* srcq now contains messages up to the first message in destq,
         * insert srcq at insert_before in destq. */
        rd_dassert(!TAILQ_EMPTY(&destq->rkmq_msgs));
        rd_dassert(!TAILQ_EMPTY(&srcq->rkmq_msgs));
        TAILQ_INSERT_LIST_BEFORE(&destq->rkmq_msgs, insert_before,
                                 &srcq->rkmq_msgs, rd_kafka_msgs_head_s,
                                 rd_kafka_msg_t *, rkm_link);
        /* Transfer the counters along with the spliced messages. */
        destq->rkmq_msg_cnt += srcq->rkmq_msg_cnt;
        destq->rkmq_msg_bytes += srcq->rkmq_msg_bytes;
        srcq->rkmq_msg_cnt   = 0;
        srcq->rkmq_msg_bytes = 0;

        rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
        rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);

        /* tmpq contains the remaining messages in srcq, move it over. */
        rd_kafka_msgq_move(srcq, &tmpq);

        rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
}
|
|
802
|
+
|
|
803
|
+
|
|
804
|
+
/**
 * @brief Insert all messages from \p srcq into \p destq in their sorted
 *        position (using \p cmp)
 *
 * On return \p srcq is empty.
 */
void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq,
                               rd_kafka_msgq_t *srcq,
                               int (*cmp)(const void *a, const void *b)) {
        rd_kafka_msg_t *sfirst, *dlast, *start_pos = NULL;

        if (unlikely(RD_KAFKA_MSGQ_EMPTY(srcq))) {
                /* srcq is empty */
                return;
        }

        if (unlikely(RD_KAFKA_MSGQ_EMPTY(destq))) {
                /* destq is empty, simply move the srcq. */
                rd_kafka_msgq_move(destq, srcq);
                rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
                return;
        }

        /* Optimize insertion by bulk-moving messages in place.
         * We know that:
         *  - destq is sorted but might not be continuous (1,2,3,7)
         *  - srcq is sorted but might not be continuous (4,5,6,8)
         *  - there might be (multiple) overlaps between the two, e.g:
         *     destq = (1,2,3,7), srcq = (4,5,6,8)
         *  - there may be millions of messages.
         */

        rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
        rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);

        dlast  = rd_kafka_msgq_last(destq);
        sfirst = rd_kafka_msgq_first(srcq);

        /* Most common case, all of srcq goes after destq */
        if (likely(cmp(dlast, sfirst) < 0)) {
                rd_kafka_msgq_concat(destq, srcq);

                rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);

                rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq));
                return;
        }

        /* Insert messages from srcq into destq in non-overlapping
         * chunks until srcq is exhausted. */
        while (likely(sfirst != NULL)) {
                rd_kafka_msg_t *insert_before;

                /* Get insert position in destq of first element in srcq */
                insert_before = rd_kafka_msgq_find_pos(destq, start_pos, sfirst,
                                                       cmp, NULL, NULL);

                /* Insert as much of srcq as possible at insert_before */
                rd_kafka_msgq_insert_msgq_before(destq, insert_before, srcq,
                                                 cmp);

                /* Remember the current destq position so the next find_pos()
                 * does not have to re-scan destq and what was
                 * added from srcq. */
                start_pos = insert_before;

                /* For next iteration */
                sfirst = rd_kafka_msgq_first(srcq);

                rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
                rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
        }

        rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);

        rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq));
}
|
|
879
|
+
|
|
880
|
+
|
|
881
|
+
/**
 * @brief Inserts messages from \p srcq according to their sorted position
 *        into \p destq, filtering out messages that can not be retried.
 *
 * @param incr_retry Increment retry count for messages.
 * @param max_retries Maximum retries allowed per message.
 * @param backoff Absolute retry backoff for retried messages.
 * @param exponential_backoff If true the backoff should be exponential with
 *                            2**(retry_count - 1)*retry_ms with jitter. The
 *                            \p backoff is ignored.
 * @param retry_ms The retry ms used for exponential backoff calculation
 * @param retry_max_ms The max backoff limit for exponential backoff calculation
 *
 * @returns 0 if all messages were retried, or 1 if some messages
 *          could not be retried.
 */
int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq,
                        rd_kafka_msgq_t *srcq,
                        int incr_retry,
                        int max_retries,
                        rd_ts_t backoff,
                        rd_kafka_msg_status_t status,
                        int (*cmp)(const void *a, const void *b),
                        rd_bool_t exponential_backoff,
                        int retry_ms,
                        int retry_max_ms) {
        rd_kafka_msgq_t retryable = RD_KAFKA_MSGQ_INITIALIZER(retryable);
        rd_kafka_msg_t *rkm, *tmp;
        rd_ts_t now;
        /* jitter is a percentage factor in
         * [100-JITTER_PERCENT, 100+JITTER_PERCENT]. */
        int64_t jitter = rd_jitter(100 - RD_KAFKA_RETRY_JITTER_PERCENT,
                                   100 + RD_KAFKA_RETRY_JITTER_PERCENT);
        /* Scan through messages to see which ones are eligible for retry,
         * move the retryable ones to temporary queue and
         * set backoff time for first message and optionally
         * increase retry count for each message.
         * Sorted insert is not necessary since the original order
         * srcq order is maintained.
         *
         * Start timestamp for calculating backoff is common,
         * to avoid that messages from the same batch
         * have different backoff, as they need to be retried
         * by reconstructing the same batch, when idempotency is
         * enabled. */
        now = rd_clock();
        TAILQ_FOREACH_SAFE(rkm, &srcq->rkmq_msgs, rkm_link, tmp) {
                /* Messages past the retry budget stay on srcq. */
                if (rkm->rkm_u.producer.retries + incr_retry > max_retries)
                        continue;

                rd_kafka_msgq_deq(srcq, rkm, 1);
                rd_kafka_msgq_enq(&retryable, rkm);

                rkm->rkm_u.producer.retries += incr_retry;
                if (exponential_backoff) {
                        /* In some cases, like failed Produce requests do not
                         * increment the retry count, see
                         * rd_kafka_handle_Produce_error. */
                        if (rkm->rkm_u.producer.retries > 0)
                                backoff =
                                    (1 << (rkm->rkm_u.producer.retries - 1)) *
                                    retry_ms;
                        else
                                backoff = retry_ms;
                        /* jitter is a percentage (~100), so multiplying by 10
                         * scales ms*percent to the timestamp unit, matching
                         * the retry_max_ms * 1000 cap below. */
                        backoff = jitter * backoff * 10;
                        if (backoff > retry_max_ms * 1000)
                                backoff = retry_max_ms * 1000;
                        /* Convert to an absolute deadline. */
                        backoff = now + backoff;
                }
                rkm->rkm_u.producer.ts_backoff = backoff;

                /* Don't downgrade a message from any form of PERSISTED
                 * to NOT_PERSISTED, since the original cause of indicating
                 * PERSISTED can't be changed.
                 * E.g., a previous ack or in-flight timeout. */
                if (likely(!(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
                             rkm->rkm_status !=
                                 RD_KAFKA_MSG_STATUS_NOT_PERSISTED)))
                        rkm->rkm_status = status;
        }

        /* No messages are retryable */
        if (RD_KAFKA_MSGQ_EMPTY(&retryable))
                return 0;

        /* Insert retryable list at sorted position */
        rd_kafka_msgq_insert_msgq(destq, &retryable, cmp);

        return 1;
}
|
|
971
|
+
|
|
972
|
+
/**
 * @brief Inserts messages from \p rkmq according to their sorted position
 *        into the partition's message queue.
 *
 * @param incr_retry Increment retry count for messages.
 * @param status Set status on each message.
 *
 * @returns 0 if all messages were retried, or 1 if some messages
 *          could not be retried. Also returns 1 when the client is
 *          terminating (no retry attempted).
 *
 * @locality Broker thread (but not necessarily the leader broker thread)
 */

int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp,
                               rd_kafka_msgq_t *rkmq,
                               int incr_retry,
                               rd_kafka_msg_status_t status) {
        rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
        /* Backoff parameters come from the client configuration. */
        int retry_ms     = rk->rk_conf.retry_backoff_ms;
        int retry_max_ms = rk->rk_conf.retry_backoff_max_ms;
        int r;

        if (rd_kafka_terminating(rk))
                return 1;

        rd_kafka_toppar_lock(rktp);
        /* Exponential backoff applied. */
        r = rd_kafka_retry_msgq(&rktp->rktp_msgq, rkmq, incr_retry,
                                rk->rk_conf.max_retries,
                                0 /* backoff will be calculated */, status,
                                rktp->rktp_rkt->rkt_conf.msg_order_cmp, rd_true,
                                retry_ms, retry_max_ms);
        rd_kafka_toppar_unlock(rktp);

        return r;
}
|
|
1008
|
+
|
|
1009
|
+
/**
|
|
1010
|
+
* @brief Insert sorted message list \p rkmq at sorted position in \p rktp 's
|
|
1011
|
+
* message queue. The queues must not overlap.
|
|
1012
|
+
* @remark \p rkmq will be cleared.
|
|
1013
|
+
*/
|
|
1014
|
+
void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp,
|
|
1015
|
+
rd_kafka_msgq_t *rkmq) {
|
|
1016
|
+
rd_kafka_toppar_lock(rktp);
|
|
1017
|
+
rd_kafka_msgq_insert_msgq(&rktp->rktp_msgq, rkmq,
|
|
1018
|
+
rktp->rktp_rkt->rkt_conf.msg_order_cmp);
|
|
1019
|
+
rd_kafka_toppar_unlock(rktp);
|
|
1020
|
+
}
|
|
1021
|
+
|
|
1022
|
+
/**
|
|
1023
|
+
* @brief Purge internal fetch queue if toppar is stopped
|
|
1024
|
+
* (RD_KAFKA_TOPPAR_FETCH_STOPPED) and removed from the cluster
|
|
1025
|
+
* (RD_KAFKA_TOPPAR_F_REMOVE). Will be called from different places as it's
|
|
1026
|
+
* removed starting from a metadata response and stopped from a rebalance or a
|
|
1027
|
+
* consumer close.
|
|
1028
|
+
*
|
|
1029
|
+
* @remark Avoids circular dependencies in from `rktp_fetchq` ops to the same
|
|
1030
|
+
* toppar that stop destroying a consumer.
|
|
1031
|
+
*
|
|
1032
|
+
* @locks rd_kafka_toppar_lock() MUST be held
|
|
1033
|
+
*/
|
|
1034
|
+
void rd_kafka_toppar_purge_internal_fetch_queue_maybe(rd_kafka_toppar_t *rktp) {
|
|
1035
|
+
rd_kafka_q_t *rkq;
|
|
1036
|
+
rkq = rktp->rktp_fetchq;
|
|
1037
|
+
mtx_lock(&rkq->rkq_lock);
|
|
1038
|
+
if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE &&
|
|
1039
|
+
!rktp->rktp_fetchq->rkq_fwdq) {
|
|
1040
|
+
rd_kafka_op_t *rko;
|
|
1041
|
+
int cnt = 0, barrier_cnt = 0, message_cnt = 0, other_cnt = 0;
|
|
1042
|
+
|
|
1043
|
+
/* Partition is being removed from the cluster and it's stopped,
|
|
1044
|
+
* so rktp->rktp_fetchq->rkq_fwdq is NULL.
|
|
1045
|
+
* Purge remaining operations in rktp->rktp_fetchq->rkq_q,
|
|
1046
|
+
* while holding lock, to avoid circular references */
|
|
1047
|
+
rko = TAILQ_FIRST(&rkq->rkq_q);
|
|
1048
|
+
while (rko) {
|
|
1049
|
+
if (rko->rko_type != RD_KAFKA_OP_BARRIER &&
|
|
1050
|
+
rko->rko_type != RD_KAFKA_OP_FETCH) {
|
|
1051
|
+
rd_kafka_log(
|
|
1052
|
+
rktp->rktp_rkt->rkt_rk, LOG_WARNING,
|
|
1053
|
+
"PARTDEL",
|
|
1054
|
+
"Purging toppar fetch queue buffer op"
|
|
1055
|
+
"with unexpected type: %s",
|
|
1056
|
+
rd_kafka_op2str(rko->rko_type));
|
|
1057
|
+
}
|
|
1058
|
+
|
|
1059
|
+
if (rko->rko_type == RD_KAFKA_OP_BARRIER)
|
|
1060
|
+
barrier_cnt++;
|
|
1061
|
+
else if (rko->rko_type == RD_KAFKA_OP_FETCH)
|
|
1062
|
+
message_cnt++;
|
|
1063
|
+
else
|
|
1064
|
+
other_cnt++;
|
|
1065
|
+
|
|
1066
|
+
rko = TAILQ_NEXT(rko, rko_link);
|
|
1067
|
+
cnt++;
|
|
1068
|
+
}
|
|
1069
|
+
|
|
1070
|
+
if (cnt) {
|
|
1071
|
+
rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, CGRP, "PARTDEL",
|
|
1072
|
+
"Purge toppar fetch queue buffer "
|
|
1073
|
+
"containing %d op(s) "
|
|
1074
|
+
"(%d barrier(s), %d message(s), %d other)"
|
|
1075
|
+
" to avoid "
|
|
1076
|
+
"circular references",
|
|
1077
|
+
cnt, barrier_cnt, message_cnt, other_cnt);
|
|
1078
|
+
rd_kafka_q_purge0(rktp->rktp_fetchq, rd_false);
|
|
1079
|
+
} else {
|
|
1080
|
+
rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, CGRP, "PARTDEL",
|
|
1081
|
+
"Not purging toppar fetch queue buffer."
|
|
1082
|
+
" No ops present in the buffer.");
|
|
1083
|
+
}
|
|
1084
|
+
}
|
|
1085
|
+
mtx_unlock(&rkq->rkq_lock);
|
|
1086
|
+
}
|
|
1087
|
+
|
|
1088
|
+
/**
 * Helper method for purging queues when removing a toppar.
 *
 * Each queue is disabled before it is purged so no new ops can be
 * enqueued between the two calls.
 *
 * Locks: rd_kafka_toppar_lock() MUST be held
 */
void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp) {
        rd_kafka_q_disable(rktp->rktp_fetchq);
        rd_kafka_q_purge(rktp->rktp_fetchq);
        rd_kafka_q_disable(rktp->rktp_ops);
        rd_kafka_q_purge(rktp->rktp_ops);
}
|
|
1098
|
+
|
|
1099
|
+
|
|
1100
|
+
/**
 * @brief Migrate rktp from (optional) \p old_rkb to (optional) \p new_rkb,
 *        but at least one is required to be non-NULL.
 *
 * This is an async operation: a PARTITION_LEAVE op is sent to the old
 * broker (which will later trigger the join on rktp_next_broker), or a
 * PARTITION_JOIN directly to the new broker if there is no old one.
 *
 * @locks rd_kafka_toppar_lock() MUST be held
 */
static void rd_kafka_toppar_broker_migrate(rd_kafka_toppar_t *rktp,
                                           rd_kafka_broker_t *old_rkb,
                                           rd_kafka_broker_t *new_rkb) {
        rd_kafka_op_t *rko;
        rd_kafka_broker_t *dest_rkb;
        int had_next_broker = rktp->rktp_next_broker ? 1 : 0;

        rd_assert(old_rkb || new_rkb);

        /* Update next broker */
        if (new_rkb)
                rd_kafka_broker_keep(new_rkb);
        if (rktp->rktp_next_broker)
                rd_kafka_broker_destroy(rktp->rktp_next_broker);
        rktp->rktp_next_broker = new_rkb;

        /* If next_broker is set it means there is already an async
         * migration op going on and we should not send a new one
         * but simply change the next_broker (which we did above). */
        if (had_next_broker)
                return;

        /* Revert from offset-wait state back to offset-query
         * prior to leaving the broker to avoid stalling
         * on the new broker waiting for a offset reply from
         * this old broker (that might not come and thus need
         * to time out..slowly) */
        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT)
                rd_kafka_toppar_offset_retry(rktp, 500,
                                             "migrating to new broker");

        if (old_rkb) {
                /* If there is an existing broker for this toppar we let it
                 * first handle its own leave and then trigger the join for
                 * the next broker, if any. */
                rko      = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
                dest_rkb = old_rkb;
        } else {
                /* No existing broker, send join op directly to new broker. */
                rko      = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN);
                dest_rkb = new_rkb;
        }

        /* The op carries its own toppar reference. */
        rko->rko_rktp = rd_kafka_toppar_keep(rktp);

        rd_kafka_dbg(
            rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
            "Migrating topic %.*s [%" PRId32
            "] %p from %s to %s "
            "(sending %s to %s)",
            RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
            rktp, old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)",
            new_rkb ? rd_kafka_broker_name(new_rkb) : "(none)",
            rd_kafka_op2str(rko->rko_type), rd_kafka_broker_name(dest_rkb));

        rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
}
|
|
1165
|
+
|
|
1166
|
+
|
|
1167
|
+
/**
 * Async toppar leave from broker.
 * Only use this when partitions are to be removed.
 *
 * Marks the toppar with RD_KAFKA_TOPPAR_F_REMOVE and sends a final
 * PARTITION_LEAVE op to the broker currently (or next) handling it.
 *
 * Locks: rd_kafka_toppar_lock() MUST be held
 */
void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp) {
        rd_kafka_op_t *rko;
        rd_kafka_broker_t *dest_rkb;

        rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_REMOVE;

        /* Prefer the pending (next) broker, else the current one. */
        if (rktp->rktp_next_broker)
                dest_rkb = rktp->rktp_next_broker;
        else if (rktp->rktp_broker)
                dest_rkb = rktp->rktp_broker;
        else {
                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL",
                             "%.*s [%" PRId32
                             "] %p not handled by any broker: "
                             "not sending LEAVE for remove",
                             RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                             rktp->rktp_partition, rktp);
                return;
        }


        /* Revert from offset-wait state back to offset-query
         * prior to leaving the broker to avoid stalling
         * on the new broker waiting for a offset reply from
         * this old broker (that might not come and thus need
         * to time out..slowly) */
        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT)
                rd_kafka_toppar_set_fetch_state(
                    rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);

        rko           = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
        rko->rko_rktp = rd_kafka_toppar_keep(rktp);

        rd_kafka_dbg(
            rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
            "%.*s [%" PRId32 "] %p sending final LEAVE for removal by %s",
            RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
            rktp, rd_kafka_broker_name(dest_rkb));

        rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
}
|
|
1214
|
+
|
|
1215
|
+
|
|
1216
|
+
/**
 * @brief Delegates toppar 'rktp' to broker 'rkb'. 'rkb' may be NULL to
 *        undelegate broker.
 *
 * If the client is not terminating and \p rkb is NULL (or shutting down),
 * the toppar is delegated to the internal broker for bookkeeping instead.
 * The actual hand-over is performed asynchronously by
 * rd_kafka_toppar_broker_migrate().
 *
 * @locks Caller must have rd_kafka_toppar_lock(rktp) held.
 */
void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp,
                                     rd_kafka_broker_t *rkb) {
        rd_kafka_t *rk        = rktp->rktp_rkt->rkt_rk;
        int internal_fallback = 0;

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
                     "%s [%" PRId32
                     "]: delegate to broker %s "
                     "(rktp %p, term %d, ref %d)",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     rkb ? rkb->rkb_name : "(none)", rktp,
                     rd_kafka_terminating(rk),
                     rd_refcnt_get(&rktp->rktp_refcnt));

        /* Undelegated toppars are delegated to the internal
         * broker for bookkeeping. */
        if (!rd_kafka_terminating(rk) &&
            (!rkb || rd_kafka_broker_termination_in_progress(rkb))) {
                rkb               = rd_kafka_broker_internal(rk);
                internal_fallback = 1;
        }

        if (rktp->rktp_broker == rkb && !rktp->rktp_next_broker) {
                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
                             "%.*s [%" PRId32
                             "]: not updating broker: "
                             "already on correct broker %s",
                             RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                             rktp->rktp_partition,
                             rkb ? rd_kafka_broker_name(rkb) : "(none)");

                /* Release the extra ref from rd_kafka_broker_internal(). */
                if (internal_fallback)
                        rd_kafka_broker_destroy(rkb);
                return;
        }

        if (rktp->rktp_broker)
                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
                             "%.*s [%" PRId32
                             "]: no longer delegated to "
                             "broker %s",
                             RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                             rktp->rktp_partition,
                             rd_kafka_broker_name(rktp->rktp_broker));


        if (rkb) {
                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
                             "%.*s [%" PRId32
                             "]: delegating to broker %s "
                             "for partition with %i messages "
                             "(%" PRIu64 " bytes) queued",
                             RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                             rktp->rktp_partition, rd_kafka_broker_name(rkb),
                             rktp->rktp_msgq.rkmq_msg_cnt,
                             rktp->rktp_msgq.rkmq_msg_bytes);


        } else {
                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
                             "%.*s [%" PRId32 "]: no broker delegated",
                             RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                             rktp->rktp_partition);
        }

        /* Kick off the async migration (leave old and/or join new). */
        if (rktp->rktp_broker || rkb)
                rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_broker, rkb);

        if (internal_fallback)
                rd_kafka_broker_destroy(rkb);
}
|
|
1293
|
+
|
|
1294
|
+
|
|
1295
|
+
|
|
1296
|
+
/**
 * @brief Handle the result of an offset commit for \p rktp.
 *
 * On error an error op is propagated on the partition's fetch queue;
 * on success the committed position is recorded from \p offsets.
 * If the toppar is in STOPPING state the final-commit completion is
 * propagated via rd_kafka_toppar_fetch_stopped().
 */
void rd_kafka_toppar_offset_commit_result(
    rd_kafka_toppar_t *rktp,
    rd_kafka_resp_err_t err,
    rd_kafka_topic_partition_list_t *offsets) {
        if (err)
                rd_kafka_consumer_err(
                    rktp->rktp_fetchq,
                    /* FIXME: propagate broker_id */
                    RD_KAFKA_NODEID_UA, err, 0 /* FIXME:VERSION*/, NULL, rktp,
                    RD_KAFKA_OFFSET_INVALID, "Offset commit failed: %s",
                    rd_kafka_err2str(err));

        rd_kafka_toppar_lock(rktp);
        if (!err)
                /* Only the first element is consulted — assumes \p offsets
                 * contains (at least) this partition's committed position. */
                rktp->rktp_committed_pos =
                    rd_kafka_topic_partition_get_fetch_pos(&offsets->elems[0]);

        /* When stopping toppars:
         * Final commit is now done (or failed), propagate. */
        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING)
                rd_kafka_toppar_fetch_stopped(rktp, err);

        rd_kafka_toppar_unlock(rktp);
}
|
|
1320
|
+
|
|
1321
|
+
|
|
1322
|
+
|
|
1323
|
+
/**
 * Handle the next offset to consume for a toppar.
 * This is used during initial setup when trying to figure out what
 * offset to start consuming from.
 *
 * Logical offsets (e.g. "end") trigger an offset reset/lookup; concrete
 * offsets (optionally adjusted for OFFSET_TAIL) are stored as the next
 * fetch position and the toppar is set to FETCH_ACTIVE.
 *
 * Locality: toppar handler thread.
 * Locks: toppar_lock(rktp) must be held
 */
void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp,
                                        rd_kafka_fetch_pos_t next_pos) {

        if (RD_KAFKA_OFFSET_IS_LOGICAL(next_pos.offset)) {
                /* Offset storage returned logical offset (e.g. "end"),
                 * look it up. */

                /* Save next offset, even if logical, so that e.g.,
                 * assign(BEGINNING) survives a pause+resume, etc.
                 * See issue #2105. */
                rd_kafka_toppar_set_next_fetch_position(rktp, next_pos);

                rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, next_pos,
                                      RD_KAFKA_RESP_ERR_NO_ERROR, "update");
                return;
        }

        /* Adjust by TAIL count, if wanted */
        if (rktp->rktp_query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
                int64_t orig_offset = next_pos.offset;
                /* Number of messages from the tail encoded in the
                 * logical TAIL offset. */
                int64_t tail_cnt    = llabs(rktp->rktp_query_pos.offset -
                                            RD_KAFKA_OFFSET_TAIL_BASE);

                /* Clamp at offset 0 rather than going negative. */
                if (tail_cnt > next_pos.offset)
                        next_pos.offset = 0;
                else
                        next_pos.offset -= tail_cnt;

                rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                             "OffsetReply for topic %s [%" PRId32
                             "]: "
                             "offset %" PRId64
                             ": adjusting for "
                             "OFFSET_TAIL(%" PRId64 "): effective %s",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition, orig_offset, tail_cnt,
                             rd_kafka_fetch_pos2str(next_pos));
        }

        rd_kafka_toppar_set_next_fetch_position(rktp, next_pos);

        rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE);
}
|
|
1374
|
+
|
|
1375
|
+
|
|
1376
|
+
|
|
1377
|
+
/**
 * Fetch committed offset for a single partition. (simple consumer)
 *
 * Enqueues an RD_KAFKA_OP_OFFSET_FETCH op on the cgrp's ops queue;
 * the reply is delivered asynchronously on \p replyq.
 *
 * Locality: toppar thread
 */
void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp,
                                  rd_kafka_replyq_t replyq) {
        rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
        rd_kafka_topic_partition_list_t *part;
        rd_kafka_op_t *rko;

        rd_kafka_dbg(rk, TOPIC, "OFFSETREQ",
                     "Partition %.*s [%" PRId32
                     "]: querying cgrp for "
                     "committed offset (opv %d)",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, replyq.version);

        part = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add0(__FUNCTION__, __LINE__, part,
                                           rktp->rktp_rkt->rkt_topic->str,
                                           rktp->rktp_partition, rktp, NULL);

        rko             = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
        /* Hold a toppar reference for the duration of the op. */
        rko->rko_rktp   = rd_kafka_toppar_keep(rktp);
        rko->rko_replyq = replyq;

        rko->rko_u.offset_fetch.partitions = part;
        /* Require stable (transaction-safe) offsets when the consumer is
         * configured for read_committed isolation. */
        rko->rko_u.offset_fetch.require_stable_offsets =
            rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED;
        rko->rko_u.offset_fetch.do_free = 1;

        rd_kafka_q_enq(rktp->rktp_cgrp->rkcg_ops, rko);
}
|
|
1411
|
+
|
|
1412
|
+
|
|
1413
|
+
|
|
1414
|
+
/**
 * Toppar based OffsetResponse handling.
 * This is used for finding the next offset to Fetch.
 *
 * The request's opaque holds a toppar refcount which is released on all
 * return paths via rd_kafka_toppar_destroy() (except IN_PROGRESS, where
 * the retried request keeps the reference).
 *
 * Locality: toppar handler thread
 */
static void rd_kafka_toppar_handle_Offset(rd_kafka_t *rk,
                                          rd_kafka_broker_t *rkb,
                                          rd_kafka_resp_err_t err,
                                          rd_kafka_buf_t *rkbuf,
                                          rd_kafka_buf_t *request,
                                          void *opaque) {
        rd_kafka_toppar_t *rktp = opaque;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_topic_partition_t *rktpar;
        int actions = 0;

        rd_kafka_toppar_lock(rktp);
        /* Drop reply from previous partition leader */
        if (err != RD_KAFKA_RESP_ERR__DESTROY && rktp->rktp_leader != rkb)
                err = RD_KAFKA_RESP_ERR__OUTDATED;
        rd_kafka_toppar_unlock(rktp);

        offsets = rd_kafka_topic_partition_list_new(1);

        rd_rkb_dbg(rkb, TOPIC, "OFFSET",
                   "Offset reply for "
                   "topic %.*s [%" PRId32 "] (v%d vs v%d)",
                   RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                   rktp->rktp_partition, request->rkbuf_replyq.version,
                   rktp->rktp_op_version);

        rd_dassert(request->rkbuf_replyq.version > 0);
        if (err != RD_KAFKA_RESP_ERR__DESTROY &&
            rd_kafka_buf_version_outdated(request, rktp->rktp_op_version)) {
                /* Outdated request response, ignore. */
                err = RD_KAFKA_RESP_ERR__OUTDATED;
        }

        /* Parse and return Offset */
        if (err != RD_KAFKA_RESP_ERR__OUTDATED)
                err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request,
                                                  offsets, &actions);

        if (!err && !(rktpar = rd_kafka_topic_partition_list_find(
                          offsets, rktp->rktp_rkt->rkt_topic->str,
                          rktp->rktp_partition))) {
                /* Requested partition not found in response */
                err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
                actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
        }

        if (err) {
                rd_rkb_dbg(rkb, TOPIC, "OFFSET",
                           "Offset reply error for "
                           "topic %.*s [%" PRId32 "] (v%d, %s): %s",
                           RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                           rktp->rktp_partition, request->rkbuf_replyq.version,
                           rd_kafka_err2str(err),
                           rd_kafka_actions2str(actions));

                rd_kafka_topic_partition_list_destroy(offsets);

                if (err == RD_KAFKA_RESP_ERR__DESTROY ||
                    err == RD_KAFKA_RESP_ERR__OUTDATED) {
                        /* Termination or outdated, quick cleanup. */

                        if (err == RD_KAFKA_RESP_ERR__OUTDATED) {
                                rd_kafka_toppar_lock(rktp);
                                rd_kafka_toppar_offset_retry(
                                    rktp, 500, "outdated offset response");
                                rd_kafka_toppar_unlock(rktp);
                        }

                        /* from request.opaque */
                        rd_kafka_toppar_destroy(rktp);
                        return;

                } else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
                        return; /* Retry in progress */


                rd_kafka_toppar_lock(rktp);

                if (!(actions & (RD_KAFKA_ERR_ACTION_RETRY |
                                 RD_KAFKA_ERR_ACTION_REFRESH))) {
                        /* Permanent error. Trigger auto.offset.reset policy
                         * and signal error back to application. */

                        rd_kafka_offset_reset(rktp, rkb->rkb_nodeid,
                                              rktp->rktp_query_pos, err,
                                              "failed to query logical offset");

                        rd_kafka_consumer_err(
                            rktp->rktp_fetchq, rkb->rkb_nodeid, err, 0, NULL,
                            rktp,
                            /* Report the TAIL count for TAIL queries,
                             * otherwise the queried offset as-is. */
                            (rktp->rktp_query_pos.offset <=
                                     RD_KAFKA_OFFSET_TAIL_BASE
                                 ? rktp->rktp_query_pos.offset -
                                       RD_KAFKA_OFFSET_TAIL_BASE
                                 : rktp->rktp_query_pos.offset),
                            "Failed to query logical offset %s: %s",
                            rd_kafka_offset2str(rktp->rktp_query_pos.offset),
                            rd_kafka_err2str(err));

                } else {
                        /* Temporary error. Schedule retry. */
                        char tmp[256];

                        rd_snprintf(
                            tmp, sizeof(tmp),
                            "failed to query logical offset %s: %s",
                            rd_kafka_offset2str(rktp->rktp_query_pos.offset),
                            rd_kafka_err2str(err));

                        rd_kafka_toppar_offset_retry(rktp, 500, tmp);
                }

                rd_kafka_toppar_unlock(rktp);

                rd_kafka_toppar_destroy(rktp); /* from request.opaque */
                return;
        }


        rd_kafka_toppar_lock(rktp);
        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "Offset %s request for %.*s [%" PRId32
                     "] "
                     "returned offset %s (%" PRId64 ") leader epoch %" PRId32,
                     rd_kafka_offset2str(rktp->rktp_query_pos.offset),
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, rd_kafka_offset2str(rktpar->offset),
                     rktpar->offset,
                     rd_kafka_topic_partition_get_leader_epoch(rktpar));


        rd_kafka_toppar_next_offset_handle(
            rktp, RD_KAFKA_FETCH_POS(
                      rktpar->offset,
                      rd_kafka_topic_partition_get_leader_epoch(rktpar)));
        rd_kafka_toppar_unlock(rktp);

        rd_kafka_topic_partition_list_destroy(offsets);

        rd_kafka_toppar_destroy(rktp); /* from request.opaque */
}
|
|
1561
|
+
|
|
1562
|
+
|
|
1563
|
+
/**
 * @brief An Offset fetch failed (for whatever reason) in
 *        the RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT state:
 *        set the state back to FETCH_OFFSET_QUERY and start the
 *        offset_query_tmr to trigger a new request eventually.
 *
 * @param backoff_ms delay before the next query attempt.
 * @param reason human-readable reason included in the debug log.
 *
 * @locality toppar handler thread
 * @locks toppar_lock() MUST be held
 */
static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp,
                                         int backoff_ms,
                                         const char *reason) {
        rd_ts_t tmr_next;
        int restart_tmr;

        /* (Re)start timer if not started or the current timeout
         * is larger than \p backoff_ms. */
        tmr_next = rd_kafka_timer_next(&rktp->rktp_rkt->rkt_rk->rk_timers,
                                       &rktp->rktp_offset_query_tmr, 1);

        restart_tmr =
            (tmr_next == -1 || tmr_next > rd_clock() + (backoff_ms * 1000ll));

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
                     "%s [%" PRId32 "]: %s: %s for %s",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     reason,
                     restart_tmr ? "(re)starting offset query timer"
                                 : "offset query timer already scheduled",
                     rd_kafka_fetch_pos2str(rktp->rktp_query_pos));

        rd_kafka_toppar_set_fetch_state(rktp,
                                        RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);

        if (restart_tmr)
                rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
                                     &rktp->rktp_offset_query_tmr,
                                     backoff_ms * 1000ll,
                                     rd_kafka_offset_query_tmr_cb, rktp);
}
|
|
1603
|
+
|
|
1604
|
+
|
|
1605
|
+
|
|
1606
|
+
/**
 * Send OffsetRequest for toppar.
 *
 * If \p backoff_ms is non-zero only the query timer is started,
 * otherwise a query is triggered directly.
 *
 * STORED offsets with broker-based storage are resolved through the
 * cgrp (OffsetFetch); all other (logical) offsets are resolved through
 * a ListOffsets request to the current leader.
 *
 * Locality: toppar handler thread
 * Locks: toppar_lock() must be held
 */
void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp,
                                    rd_kafka_fetch_pos_t query_pos,
                                    int backoff_ms) {
        rd_kafka_broker_t *rkb;

        rd_kafka_assert(NULL,
                        thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread));

        rkb = rktp->rktp_leader;

        /* No usable leader (none, or only the internal broker):
         * force a backoff so the query is retried later. */
        if (!backoff_ms && (!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL))
                backoff_ms = 500;

        if (backoff_ms) {
                rd_kafka_toppar_offset_retry(
                    rktp, backoff_ms,
                    !rkb ? "no current leader for partition" : "backoff");
                return;
        }


        rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
                            &rktp->rktp_offset_query_tmr, 1 /*lock*/);


        if (query_pos.offset == RD_KAFKA_OFFSET_STORED &&
            rktp->rktp_rkt->rkt_conf.offset_store_method ==
                RD_KAFKA_OFFSET_METHOD_BROKER) {
                /*
                 * Get stored offset from broker based storage:
                 * ask cgrp manager for offsets
                 */
                rd_kafka_toppar_offset_fetch(
                    rktp,
                    RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version));

        } else {
                rd_kafka_topic_partition_list_t *offsets;
                rd_kafka_topic_partition_t *rktpar;

                /*
                 * Look up logical offset (end,beginning,tail,..)
                 */

                rd_rkb_dbg(rkb, TOPIC, "OFFREQ",
                           "Partition %.*s [%" PRId32
                           "]: querying for logical "
                           "offset %s (opv %d)",
                           RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                           rktp->rktp_partition,
                           rd_kafka_offset2str(query_pos.offset),
                           rktp->rktp_op_version);

                rd_kafka_toppar_keep(rktp); /* refcnt for OffsetRequest opaque*/

                /* TAIL offsets are queried as END; the tail adjustment is
                 * applied when the response arrives
                 * (see rd_kafka_toppar_next_offset_handle()). */
                if (query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE)
                        query_pos.offset = RD_KAFKA_OFFSET_END;

                offsets = rd_kafka_topic_partition_list_new(1);
                rktpar  = rd_kafka_topic_partition_list_add(
                    offsets, rktp->rktp_rkt->rkt_topic->str,
                    rktp->rktp_partition);
                rd_kafka_topic_partition_set_from_fetch_pos(rktpar, query_pos);
                rd_kafka_topic_partition_set_current_leader_epoch(
                    rktpar, rktp->rktp_leader_epoch);

                rd_kafka_ListOffsetsRequest(
                    rkb, offsets,
                    RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version),
                    rd_kafka_toppar_handle_Offset,
                    -1, /* don't set an absolute timeout */
                    rktp);

                rd_kafka_topic_partition_list_destroy(offsets);
        }

        rd_kafka_toppar_set_fetch_state(rktp,
                                        RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT);
}
|
|
1694
|
+
|
|
1695
|
+
|
|
1696
|
+
/**
 * Start fetching toppar.
 *
 * Chooses how to resolve the starting position from \p pos:
 *  - logical BEGINNING/END/TAIL: resolved via next_offset_handle()
 *  - STORED: committed-offset lookup is initiated
 *  - INVALID: auto.offset.reset policy is triggered
 *  - absolute offset: fetching starts directly at \p pos.
 *
 * If \p rko_orig carries a replyq, an RD_KAFKA_OP_FETCH_START reply op
 * (with any error) is enqueued on it.
 *
 * Locality: toppar handler thread
 * Locks: none
 */
static void rd_kafka_toppar_fetch_start(rd_kafka_toppar_t *rktp,
                                        rd_kafka_fetch_pos_t pos,
                                        rd_kafka_op_t *rko_orig) {
        rd_kafka_cgrp_t *rkcg   = rko_orig->rko_u.fetch_start.rkcg;
        rd_kafka_resp_err_t err = 0;
        int32_t version         = rko_orig->rko_version;

        rd_kafka_toppar_lock(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
                     "Start fetch for %.*s [%" PRId32
                     "] in "
                     "state %s at %s (v%" PRId32 ")",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition,
                     rd_kafka_fetch_states[rktp->rktp_fetch_state],
                     rd_kafka_fetch_pos2str(pos), version);

        /* A previous stop must complete before a new start. */
        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
                err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
                rd_kafka_toppar_unlock(rktp);
                goto err_reply;
        }

        rd_kafka_toppar_op_version_bump(rktp, version);

        if (rkcg) {
                rd_kafka_assert(rktp->rktp_rkt->rkt_rk, !rktp->rktp_cgrp);
                /* Attach toppar to cgrp */
                rktp->rktp_cgrp = rkcg;
                rd_kafka_cgrp_op(rkcg, rktp, RD_KAFKA_NO_REPLYQ,
                                 RD_KAFKA_OP_PARTITION_JOIN, 0);
        }


        if (pos.offset == RD_KAFKA_OFFSET_BEGINNING ||
            pos.offset == RD_KAFKA_OFFSET_END ||
            pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
                rd_kafka_toppar_next_offset_handle(rktp, pos);

        } else if (pos.offset == RD_KAFKA_OFFSET_STORED) {
                rd_kafka_offset_store_init(rktp);

        } else if (pos.offset == RD_KAFKA_OFFSET_INVALID) {
                rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos,
                                      RD_KAFKA_RESP_ERR__NO_OFFSET,
                                      "no previously committed offset "
                                      "available");

        } else {
                /* Absolute offset: start fetching right away. */
                rd_kafka_toppar_set_next_fetch_position(rktp, pos);

                rd_kafka_toppar_set_fetch_state(rktp,
                                                RD_KAFKA_TOPPAR_FETCH_ACTIVE);
        }

        rktp->rktp_offsets_fin.eof_offset = RD_KAFKA_OFFSET_INVALID;

        rd_kafka_toppar_unlock(rktp);

        /* Signal back to caller thread that start has commenced, or err */
err_reply:
        if (rko_orig->rko_replyq.q) {
                rd_kafka_op_t *rko;

                rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_START);

                rko->rko_err  = err;
                rko->rko_rktp = rd_kafka_toppar_keep(rktp);

                rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
        }
}
|
|
1775
|
+
|
|
1776
|
+
|
|
1777
|
+
|
|
1778
|
+
/**
 * Mark toppar's fetch state as stopped (all decommissioning is done,
 * offsets are stored, etc).
 *
 * Resets the application position, detaches the toppar from its cgrp
 * (if any), and replies on rktp_replyq (set by fetch_stop()) to signal
 * completion to the application thread.
 *
 * Locality: toppar handler thread
 * Locks: toppar_lock(rktp) MUST be held
 */
void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp,
                                   rd_kafka_resp_err_t err) {


        rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPED);

        rktp->rktp_app_pos.offset       = RD_KAFKA_OFFSET_INVALID;
        rktp->rktp_app_pos.leader_epoch = -1;

        if (rktp->rktp_cgrp) {
                /* Detach toppar from cgrp */
                rd_kafka_cgrp_op(rktp->rktp_cgrp, rktp, RD_KAFKA_NO_REPLYQ,
                                 RD_KAFKA_OP_PARTITION_LEAVE, 0);
                rktp->rktp_cgrp = NULL;
        }

        /* Signal back to application thread that stop is done. */
        if (rktp->rktp_replyq.q) {
                rd_kafka_op_t *rko;
                rko =
                    rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY);
                rko->rko_err  = err;
                rko->rko_rktp = rd_kafka_toppar_keep(rktp);

                rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0);
        }
}
|
|
1812
|
+
|
|
1813
|
+
|
|
1814
|
+
/**
 * Stop toppar fetcher.
 * This is usually an async operation: the stop completes in
 * rd_kafka_toppar_fetch_stopped() once the offset store has stopped.
 *
 * Takes over \p rko_orig's replyq for the eventual stop reply.
 *
 * Locality: toppar handler thread
 */
void rd_kafka_toppar_fetch_stop(rd_kafka_toppar_t *rktp,
                                rd_kafka_op_t *rko_orig) {
        int32_t version = rko_orig->rko_version;

        rd_kafka_toppar_lock(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
                     "Stopping fetch for %.*s [%" PRId32 "] in state %s (v%d)",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition,
                     rd_kafka_fetch_states[rktp->rktp_fetch_state], version);

        rd_kafka_toppar_op_version_bump(rktp, version);

        /* Abort pending offset lookups. */
        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
                rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
                                    &rktp->rktp_offset_query_tmr, 1 /*lock*/);

        /* Clear out the forwarding queue. */
        rd_kafka_q_fwd_set(rktp->rktp_fetchq, NULL);

        /* Assign the future replyq to propagate stop results. */
        rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_replyq.q == NULL);
        rktp->rktp_replyq = rko_orig->rko_replyq;
        rd_kafka_replyq_clear(&rko_orig->rko_replyq);

        rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPING);

        /* Stop offset store (possibly async).
         * NOTE: will call .._stopped() if store finishes immediately,
         * so no more operations after this call! */
        rd_kafka_offset_store_stop(rktp);

        rd_kafka_toppar_unlock(rktp);
}
|
|
1856
|
+
|
|
1857
|
+
|
|
1858
|
+
/**
 * Update a toppars offset.
 * The toppar must have been previously FETCH_START:ed
 *
 * Positive, unvalidated, offsets go through leader-epoch validation
 * first; other positions are handled directly by next_offset_handle().
 * If \p rko_orig carries a replyq an RD_KAFKA_OP_SEEK reply op is
 * enqueued with the outcome.
 *
 * Locality: toppar handler thread
 */
void rd_kafka_toppar_seek(rd_kafka_toppar_t *rktp,
                          rd_kafka_fetch_pos_t pos,
                          rd_kafka_op_t *rko_orig) {
        rd_kafka_resp_err_t err = 0;
        int32_t version         = rko_orig->rko_version;

        rd_kafka_toppar_lock(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
                     "Seek %.*s [%" PRId32 "] to %s in state %s (v%" PRId32 ")",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
                     rd_kafka_fetch_states[rktp->rktp_fetch_state], version);


        /* Seeking is only valid on a started, non-stopping toppar,
         * and STORED is not a seekable target. */
        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
                err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
                goto err_reply;
        } else if (!RD_KAFKA_TOPPAR_FETCH_IS_STARTED(rktp->rktp_fetch_state)) {
                err = RD_KAFKA_RESP_ERR__STATE;
                goto err_reply;
        } else if (pos.offset == RD_KAFKA_OFFSET_STORED) {
                err = RD_KAFKA_RESP_ERR__INVALID_ARG;
                goto err_reply;
        }

        rd_kafka_toppar_op_version_bump(rktp, version);

        /* Reset app offsets since seek()ing is analogue to a (re)assign(),
         * and we want to avoid using the current app offset on resume()
         * following a seek (#3567). */
        rktp->rktp_app_pos.offset       = RD_KAFKA_OFFSET_INVALID;
        rktp->rktp_app_pos.leader_epoch = -1;

        /* Abort pending offset lookups. */
        if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
                rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
                                    &rktp->rktp_offset_query_tmr, 1 /*lock*/);

        if (pos.offset <= 0 || pos.validated) {
                rd_kafka_toppar_next_offset_handle(rktp, pos);
        } else {
                /* Absolute, unvalidated position: validate the leader
                 * epoch before fetching from it. */
                rd_kafka_toppar_set_fetch_state(
                    rktp, RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT);
                rd_kafka_toppar_set_next_fetch_position(rktp, pos);
                rd_kafka_toppar_set_offset_validation_position(rktp, pos);
                rd_kafka_offset_validate(rktp, "seek");
        }

        /* Signal back to caller thread that seek has commenced, or err */
err_reply:
        rd_kafka_toppar_unlock(rktp);

        if (rko_orig->rko_replyq.q) {
                rd_kafka_op_t *rko;

                rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK | RD_KAFKA_OP_REPLY);

                rko->rko_err = err;
                /* OP_SEEK shares the fetch_start op payload. */
                rko->rko_u.fetch_start.pos = rko_orig->rko_u.fetch_start.pos;
                rko->rko_rktp              = rd_kafka_toppar_keep(rktp);

                rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
        }
}
|
|
1929
|
+
|
|
1930
|
+
|
|
1931
|
+
/**
 * @brief Pause/resume toppar.
 *
 * This is the internal handler of the pause/resume op.
 *
 * \p rko_orig's pause.flag selects which pause source
 * (RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE) is being set/cleared;
 * the partition stays paused until all sources have resumed it.
 *
 * @locality toppar's handler thread
 */
static void rd_kafka_toppar_pause_resume(rd_kafka_toppar_t *rktp,
                                         rd_kafka_op_t *rko_orig) {
        rd_kafka_t *rk  = rktp->rktp_rkt->rkt_rk;
        int pause       = rko_orig->rko_u.pause.pause;
        int flag        = rko_orig->rko_u.pause.flag;
        int32_t version = rko_orig->rko_version;

        rd_kafka_toppar_lock(rktp);

        rd_kafka_toppar_op_version_bump(rktp, version);

        /* Ignore resume for a source that did not pause the partition. */
        if (!pause && (rktp->rktp_flags & flag) != flag) {
                rd_kafka_dbg(rk, TOPIC, "RESUME",
                             "Not resuming %s [%" PRId32
                             "]: "
                             "partition is not paused by %s",
                             rktp->rktp_rkt->rkt_topic->str,
                             rktp->rktp_partition,
                             (flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "application"
                                                                 : "library"));
                rd_kafka_toppar_unlock(rktp);
                return;
        }

        if (pause) {
                /* Pause partition by setting either
                 * RD_KAFKA_TOPPAR_F_APP_PAUSE or
                 * RD_KAFKA_TOPPAR_F_LIB_PAUSE */
                rktp->rktp_flags |= flag;

                if (rk->rk_type == RD_KAFKA_CONSUMER) {
                        /* Save offset of last consumed message+1 as the
                         * next message to fetch on resume. */
                        if (rktp->rktp_app_pos.offset !=
                            RD_KAFKA_OFFSET_INVALID)
                                rd_kafka_toppar_set_next_fetch_position(
                                    rktp, rktp->rktp_app_pos);

                        rd_kafka_dbg(
                            rk, TOPIC, pause ? "PAUSE" : "RESUME",
                            "%s %s [%" PRId32 "]: at %s (state %s, v%d)",
                            pause ? "Pause" : "Resume",
                            rktp->rktp_rkt->rkt_topic->str,
                            rktp->rktp_partition,
                            rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
                            rd_kafka_fetch_states[rktp->rktp_fetch_state],
                            version);
                } else {
                        rd_kafka_dbg(
                            rk, TOPIC, pause ? "PAUSE" : "RESUME",
                            "%s %s [%" PRId32 "] (state %s, v%d)",
                            pause ? "Pause" : "Resume",
                            rktp->rktp_rkt->rkt_topic->str,
                            rktp->rktp_partition,
                            rd_kafka_fetch_states[rktp->rktp_fetch_state],
                            version);
                }

        } else {
                /* Unset the RD_KAFKA_TOPPAR_F_APP_PAUSE or
                 * RD_KAFKA_TOPPAR_F_LIB_PAUSE flag */
                rktp->rktp_flags &= ~flag;

                if (rk->rk_type == RD_KAFKA_CONSUMER) {
                        rd_kafka_dbg(
                            rk, TOPIC, pause ? "PAUSE" : "RESUME",
                            "%s %s [%" PRId32 "]: at %s (state %s, v%d)",
                            rktp->rktp_fetch_state ==
                                    RD_KAFKA_TOPPAR_FETCH_ACTIVE
                                ? "Resuming"
                                : "Not resuming stopped",
                            rktp->rktp_rkt->rkt_topic->str,
                            rktp->rktp_partition,
                            rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
                            rd_kafka_fetch_states[rktp->rktp_fetch_state],
                            version);

                        /* If the resuming offset is logical we
                         * need to trigger a seek (that performs the
                         * logical->absolute lookup logic) to get
                         * things going.
                         * Typical case is when a partition is paused
                         * before anything has been consumed by app
                         * yet thus having rktp_app_offset=INVALID. */
                        if (!RD_KAFKA_TOPPAR_IS_PAUSED(rktp) &&
                            (rktp->rktp_fetch_state ==
                                 RD_KAFKA_TOPPAR_FETCH_ACTIVE ||
                             rktp->rktp_fetch_state ==
                                 RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) &&
                            rktp->rktp_next_fetch_start.offset ==
                                RD_KAFKA_OFFSET_INVALID)
                                rd_kafka_toppar_next_offset_handle(
                                    rktp, rktp->rktp_next_fetch_start);

                } else
                        rd_kafka_dbg(
                            rk, TOPIC, pause ? "PAUSE" : "RESUME",
                            "%s %s [%" PRId32 "] (state %s, v%d)",
                            pause ? "Pause" : "Resume",
                            rktp->rktp_rkt->rkt_topic->str,
                            rktp->rktp_partition,
                            rd_kafka_fetch_states[rktp->rktp_fetch_state],
                            version);
        }
        rd_kafka_toppar_unlock(rktp);

        if (pause && rk->rk_type == RD_KAFKA_CONSUMER) {
                /* Flush partition's fetch queue */
                rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp,
                                                rko_orig->rko_version);
        }
}
|
|
2050
|
+
|
|
2051
|
+
|
|
2052
|
+
|
|
2053
|
+
/**
 * @brief Serve a toppar in a consumer broker thread.
 *        This is considered the fast path and should be minimal,
 *        mostly focusing on fetch related mechanisms.
 *
 *        Thin wrapper around rd_kafka_toppar_fetch_decide().
 *
 * @returns the partition's Fetch backoff timestamp, or 0 if no backoff.
 *
 * @locality broker thread
 * @locks none
 */
rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb,
                                              rd_kafka_toppar_t *rktp) {
        return rd_kafka_toppar_fetch_decide(rktp, rkb, 0);
}
|
|
2067
|
+
|
|
2068
|
+
|
|
2069
|
+
|
|
2070
|
+
/**
|
|
2071
|
+
* @brief Serve a toppar op
|
|
2072
|
+
*
|
|
2073
|
+
* @param rktp may be NULL for certain ops (OP_RECV_BUF)
|
|
2074
|
+
*
|
|
2075
|
+
* Will send an empty reply op if the request rko has a replyq set,
|
|
2076
|
+
* providing synchronous operation.
|
|
2077
|
+
*
|
|
2078
|
+
* @locality toppar handler thread
|
|
2079
|
+
*/
|
|
2080
|
+
static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk,
|
|
2081
|
+
rd_kafka_q_t *rkq,
|
|
2082
|
+
rd_kafka_op_t *rko,
|
|
2083
|
+
rd_kafka_q_cb_type_t cb_type,
|
|
2084
|
+
void *opaque) {
|
|
2085
|
+
rd_kafka_toppar_t *rktp = NULL;
|
|
2086
|
+
int outdated = 0;
|
|
2087
|
+
|
|
2088
|
+
if (rko->rko_rktp)
|
|
2089
|
+
rktp = rko->rko_rktp;
|
|
2090
|
+
|
|
2091
|
+
if (rktp) {
|
|
2092
|
+
outdated =
|
|
2093
|
+
rd_kafka_op_version_outdated(rko, rktp->rktp_op_version);
|
|
2094
|
+
|
|
2095
|
+
rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP",
|
|
2096
|
+
"%.*s [%" PRId32
|
|
2097
|
+
"] received %sop %s "
|
|
2098
|
+
"(v%" PRId32 ") in fetch-state %s (opv%d)",
|
|
2099
|
+
RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
|
|
2100
|
+
rktp->rktp_partition, outdated ? "outdated " : "",
|
|
2101
|
+
rd_kafka_op2str(rko->rko_type), rko->rko_version,
|
|
2102
|
+
rd_kafka_fetch_states[rktp->rktp_fetch_state],
|
|
2103
|
+
rktp->rktp_op_version);
|
|
2104
|
+
|
|
2105
|
+
if (outdated) {
|
|
2106
|
+
#if ENABLE_DEVEL
|
|
2107
|
+
rd_kafka_op_print(stdout, "PART_OUTDATED", rko);
|
|
2108
|
+
#endif
|
|
2109
|
+
rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__OUTDATED);
|
|
2110
|
+
return RD_KAFKA_OP_RES_HANDLED;
|
|
2111
|
+
}
|
|
2112
|
+
}
|
|
2113
|
+
|
|
2114
|
+
switch ((int)rko->rko_type) {
|
|
2115
|
+
case RD_KAFKA_OP_FETCH_START:
|
|
2116
|
+
rd_kafka_toppar_fetch_start(rktp, rko->rko_u.fetch_start.pos,
|
|
2117
|
+
rko);
|
|
2118
|
+
break;
|
|
2119
|
+
|
|
2120
|
+
case RD_KAFKA_OP_FETCH_STOP:
|
|
2121
|
+
rd_kafka_toppar_fetch_stop(rktp, rko);
|
|
2122
|
+
break;
|
|
2123
|
+
|
|
2124
|
+
case RD_KAFKA_OP_SEEK:
|
|
2125
|
+
rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.pos, rko);
|
|
2126
|
+
break;
|
|
2127
|
+
|
|
2128
|
+
case RD_KAFKA_OP_PAUSE:
|
|
2129
|
+
rd_kafka_toppar_pause_resume(rktp, rko);
|
|
2130
|
+
break;
|
|
2131
|
+
|
|
2132
|
+
case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY:
|
|
2133
|
+
rd_kafka_assert(NULL, rko->rko_u.offset_commit.cb);
|
|
2134
|
+
rko->rko_u.offset_commit.cb(rk, rko->rko_err,
|
|
2135
|
+
rko->rko_u.offset_commit.partitions,
|
|
2136
|
+
rko->rko_u.offset_commit.opaque);
|
|
2137
|
+
break;
|
|
2138
|
+
|
|
2139
|
+
case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY: {
|
|
2140
|
+
/* OffsetFetch reply */
|
|
2141
|
+
rd_kafka_topic_partition_list_t *offsets =
|
|
2142
|
+
rko->rko_u.offset_fetch.partitions;
|
|
2143
|
+
rd_kafka_fetch_pos_t pos = {RD_KAFKA_OFFSET_INVALID, -1};
|
|
2144
|
+
|
|
2145
|
+
rktp = rd_kafka_topic_partition_get_toppar(
|
|
2146
|
+
rk, &offsets->elems[0], rd_true /*create-on-miss*/);
|
|
2147
|
+
|
|
2148
|
+
if (!rko->rko_err) {
|
|
2149
|
+
/* Request succeeded but per-partition might have failed
|
|
2150
|
+
*/
|
|
2151
|
+
rko->rko_err = offsets->elems[0].err;
|
|
2152
|
+
pos = rd_kafka_topic_partition_get_fetch_pos(
|
|
2153
|
+
&offsets->elems[0]);
|
|
2154
|
+
}
|
|
2155
|
+
|
|
2156
|
+
rd_kafka_topic_partition_list_destroy(offsets);
|
|
2157
|
+
rko->rko_u.offset_fetch.partitions = NULL;
|
|
2158
|
+
|
|
2159
|
+
rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
|
|
2160
|
+
&rktp->rktp_offset_query_tmr, 1 /*lock*/);
|
|
2161
|
+
|
|
2162
|
+
rd_kafka_toppar_lock(rktp);
|
|
2163
|
+
|
|
2164
|
+
if (rko->rko_err) {
|
|
2165
|
+
int actions;
|
|
2166
|
+
rd_kafka_dbg(
|
|
2167
|
+
rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
|
|
2168
|
+
"Failed to fetch offset for "
|
|
2169
|
+
"%.*s [%" PRId32 "]: %s",
|
|
2170
|
+
RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
|
|
2171
|
+
rktp->rktp_partition,
|
|
2172
|
+
rd_kafka_err2str(rko->rko_err));
|
|
2173
|
+
|
|
2174
|
+
/* Keep on querying until we succeed. */
|
|
2175
|
+
rd_kafka_toppar_offset_retry(rktp, 500,
|
|
2176
|
+
"failed to fetch offsets");
|
|
2177
|
+
rd_kafka_toppar_unlock(rktp);
|
|
2178
|
+
|
|
2179
|
+
|
|
2180
|
+
actions = rd_kafka_handle_OffsetFetch_err_action(
|
|
2181
|
+
NULL, rko->rko_err, NULL);
|
|
2182
|
+
/* Propagate error to application. Exclude
|
|
2183
|
+
* permanent errors that caused a coordinator
|
|
2184
|
+
* refresh like `NOT_COORDINATOR` */
|
|
2185
|
+
if (rko->rko_err != RD_KAFKA_RESP_ERR__WAIT_COORD &&
|
|
2186
|
+
rko->rko_err !=
|
|
2187
|
+
RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT &&
|
|
2188
|
+
!(actions & RD_KAFKA_ERR_ACTION_REFRESH))
|
|
2189
|
+
rd_kafka_consumer_err(
|
|
2190
|
+
rktp->rktp_fetchq, RD_KAFKA_NODEID_UA,
|
|
2191
|
+
rko->rko_err, 0, NULL, rktp,
|
|
2192
|
+
RD_KAFKA_OFFSET_INVALID,
|
|
2193
|
+
"Failed to fetch "
|
|
2194
|
+
"offsets from brokers: %s",
|
|
2195
|
+
rd_kafka_err2str(rko->rko_err));
|
|
2196
|
+
|
|
2197
|
+
/* Refcount from get_toppar() */
|
|
2198
|
+
rd_kafka_toppar_destroy(rktp);
|
|
2199
|
+
|
|
2200
|
+
break;
|
|
2201
|
+
}
|
|
2202
|
+
|
|
2203
|
+
rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
|
|
2204
|
+
"%.*s [%" PRId32 "]: OffsetFetch returned %s",
|
|
2205
|
+
RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
|
|
2206
|
+
rktp->rktp_partition, rd_kafka_fetch_pos2str(pos));
|
|
2207
|
+
|
|
2208
|
+
if (pos.offset > 0)
|
|
2209
|
+
rktp->rktp_committed_pos = pos;
|
|
2210
|
+
|
|
2211
|
+
if (pos.offset >= 0)
|
|
2212
|
+
rd_kafka_toppar_next_offset_handle(rktp, pos);
|
|
2213
|
+
else
|
|
2214
|
+
rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos,
|
|
2215
|
+
RD_KAFKA_RESP_ERR__NO_OFFSET,
|
|
2216
|
+
"no previously committed offset "
|
|
2217
|
+
"available");
|
|
2218
|
+
rd_kafka_toppar_unlock(rktp);
|
|
2219
|
+
|
|
2220
|
+
/* Refcount from get_toppar() */
|
|
2221
|
+
rd_kafka_toppar_destroy(rktp);
|
|
2222
|
+
} break;
|
|
2223
|
+
|
|
2224
|
+
default:
|
|
2225
|
+
rd_kafka_assert(NULL, !*"unknown type");
|
|
2226
|
+
break;
|
|
2227
|
+
}
|
|
2228
|
+
|
|
2229
|
+
rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
|
|
2230
|
+
|
|
2231
|
+
return RD_KAFKA_OP_RES_HANDLED;
|
|
2232
|
+
}
|
|
2233
|
+
|
|
2234
|
+
|
|
2235
|
+
|
|
2236
|
+
/**
 * @brief Send command op \p rko to toppar \p rktp's op queue, to be
 *        handled by the toppar's serving thread.
 *
 * The op is given a toppar reference (released when the op is destroyed)
 * and the caller's reply queue before being enqueued on rktp_ops.
 *
 * @locality any thread
 */
static void rd_kafka_toppar_op0(rd_kafka_toppar_t *rktp,
                                rd_kafka_op_t *rko,
                                rd_kafka_replyq_t replyq) {
        /* Op holds a toppar refcount for the duration of its handling. */
        rko->rko_rktp   = rd_kafka_toppar_keep(rktp);
        rko->rko_replyq = replyq;

        rd_kafka_q_enq(rktp->rktp_ops, rko);
}
|
|
2249
|
+
|
|
2250
|
+
|
|
2251
|
+
/**
 * @brief Construct and send a command op of \p type to toppar \p rktp
 *        (handled by the toppar's thread).
 *
 * @param version op version barrier, used by the serving thread to
 *        discard outdated ops.
 * @param pos fetch position; only used for FETCH_START and SEEK ops.
 * @param rkcg optional consumer group handle, stored on the op for
 *        FETCH_START/SEEK ops only.
 *
 * @locality any thread
 */
static void rd_kafka_toppar_op(rd_kafka_toppar_t *rktp,
                               rd_kafka_op_type_t type,
                               int32_t version,
                               rd_kafka_fetch_pos_t pos,
                               rd_kafka_cgrp_t *rkcg,
                               rd_kafka_replyq_t replyq) {
        rd_kafka_op_t *rko;

        rko              = rd_kafka_op_new(type);
        rko->rko_version = version;
        /* SEEK ops reuse the fetch_start union member for its position. */
        if (type == RD_KAFKA_OP_FETCH_START || type == RD_KAFKA_OP_SEEK) {
                if (rkcg)
                        rko->rko_u.fetch_start.rkcg = rkcg;
                rko->rko_u.fetch_start.pos = pos;
        }

        rd_kafka_toppar_op0(rktp, rko, replyq);
}
|
|
2274
|
+
|
|
2275
|
+
|
|
2276
|
+
|
|
2277
|
+
/**
 * @brief Start consuming partition (async operation).
 *
 * @param pos the initial fetch position.
 * @param fwdq optional queue to forward messages to; if NULL messages
 *        are enqueued on rktp_fetchq.
 * @param replyq optional queue for handling the consume_start ack.
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR (the actual start is asynchronous).
 *
 * This is the thread-safe interface that can be called from any thread.
 */
rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp,
                                                   rd_kafka_fetch_pos_t pos,
                                                   rd_kafka_q_t *fwdq,
                                                   rd_kafka_replyq_t replyq) {
        int32_t version;

        /* Set up queue forwarding unless the fetchq is already forwarded
         * to the application queue. */
        rd_kafka_q_lock(rktp->rktp_fetchq);
        if (fwdq && !(rktp->rktp_fetchq->rkq_flags & RD_KAFKA_Q_F_FWD_APP))
                rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq, 0, /* no do_lock */
                                    0 /* no fwd_app */);
        rd_kafka_q_unlock(rktp->rktp_fetchq);

        /* Bump version barrier so older in-flight ops are discarded. */
        version = rd_kafka_toppar_version_new_barrier(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
                     "Start consuming %.*s [%" PRId32 "] at %s (v%" PRId32 ")",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
                     version);

        rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version, pos,
                           rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
|
|
2312
|
+
|
|
2313
|
+
|
|
2314
|
+
/**
 * @brief Stop consuming partition (async operation).
 *
 * @param replyq optional queue for handling the stop ack.
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR (the actual stop is asynchronous).
 *
 * This is the thread-safe interface that can be called from any thread.
 *
 * @locality any thread
 */
rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp,
                                                  rd_kafka_replyq_t replyq) {
        int32_t version;

        /* Bump version barrier so older in-flight ops are discarded. */
        version = rd_kafka_toppar_version_new_barrier(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
                     "Stop consuming %.*s [%" PRId32 "] (v%" PRId32 ")",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, version);

        /* No position is needed for a stop op. */
        rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_STOP, version,
                           RD_KAFKA_FETCH_POS(-1, -1), NULL, replyq);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
|
|
2337
|
+
|
|
2338
|
+
|
|
2339
|
+
/**
 * @brief Set/Seek offset of a consumed partition (async operation).
 *
 * @param pos the target fetch position (offset + leader epoch, epoch
 *        may be -1).
 * @param replyq is an optional queue for handling the ack.
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR (the actual seek is asynchronous).
 *
 * This is the thread-safe interface that can be called from any thread.
 */
rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp,
                                            rd_kafka_fetch_pos_t pos,
                                            rd_kafka_replyq_t replyq) {
        int32_t version;

        /* Bump version barrier so older in-flight ops are discarded. */
        version = rd_kafka_toppar_version_new_barrier(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
                     "Seek %.*s [%" PRId32 "] to %s (v%" PRId32 ")",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
                     version);

        rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version, pos, NULL, replyq);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
|
|
2366
|
+
|
|
2367
|
+
|
|
2368
|
+
/**
 * @brief Pause/resume partition (async operation).
 *
 * @param pause is 1 for pausing or 0 for resuming.
 * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
 *        depending on if the app paused or librdkafka.
 * @param replyq optional queue for handling the ack.
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR (the actual pause/resume is
 *          performed by the toppar's serving thread).
 *
 * @locality any
 */
rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp,
                                                    int pause,
                                                    int flag,
                                                    rd_kafka_replyq_t replyq) {
        int32_t version;
        rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE);

        if (!pause) {
                /* If partitions isn't paused, avoid bumping its version,
                 * as it'll result in resuming fetches from a stale
                 * next_fetch_start */
                rd_bool_t is_paused = rd_false;
                rd_kafka_toppar_lock(rktp);
                is_paused = RD_KAFKA_TOPPAR_IS_PAUSED(rktp);
                rd_kafka_toppar_unlock(rktp);
                if (!is_paused) {
                        /* Nothing to resume: ack immediately.
                         * op_reply() takes ownership of (destroys) rko. */
                        rko->rko_replyq = replyq;
                        rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
                        return RD_KAFKA_RESP_ERR_NO_ERROR;
                }
        }

        /* Bump version barrier. */
        version = rd_kafka_toppar_version_new_barrier(rktp);

        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE" : "RESUME",
                     "%s %.*s [%" PRId32 "] (v%" PRId32 ")",
                     pause ? "Pause" : "Resume",
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, version);

        rko->rko_version       = version;
        rko->rko_u.pause.pause = pause;
        rko->rko_u.pause.flag  = flag;

        /* Hand off rko (and a toppar ref) to the toppar's op queue. */
        rd_kafka_toppar_op0(rktp, rko, replyq);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
|
|
2416
|
+
|
|
2417
|
+
|
|
2418
|
+
/**
 * @brief Pause a toppar (asynchronous).
 *
 * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
 *        depending on if the app paused or librdkafka.
 *
 * @locality any
 * @locks none needed
 */
void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag) {
        /* Fire-and-forget: no reply queue. */
        rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag,
                                        RD_KAFKA_NO_REPLYQ);
}
|
|
2431
|
+
|
|
2432
|
+
/**
|
|
2433
|
+
* @brief Resume a toppar (asynchronous).
|
|
2434
|
+
*
|
|
2435
|
+
* @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
|
|
2436
|
+
* depending on if the app paused or librdkafka.
|
|
2437
|
+
*
|
|
2438
|
+
* @locality any
|
|
2439
|
+
* @locks none needed
|
|
2440
|
+
*/
|
|
2441
|
+
void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag) {
|
|
2442
|
+
rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag,
|
|
2443
|
+
RD_KAFKA_NO_REPLYQ);
|
|
2444
|
+
}
|
|
2445
|
+
|
|
2446
|
+
|
|
2447
|
+
|
|
2448
|
+
/**
 * @brief Pause or resume a list of partitions.
 *
 * @param pause true for pausing, false for resuming.
 * @param async RD_SYNC to wait for background thread to handle op,
 *        RD_ASYNC for asynchronous operation.
 * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
 *        depending on if the app paused or librdkafka.
 * @param partitions list to act on; each element's .err is set to
 *        NO_ERROR or __UNKNOWN_PARTITION.
 *
 * @locality any
 *
 * @remark This is an asynchronous call, the actual pause/resume is performed
 *         by toppar_pause() in the toppar's handler thread.
 */
rd_kafka_resp_err_t
rd_kafka_toppars_pause_resume(rd_kafka_t *rk,
                              rd_bool_t pause,
                              rd_async_t async,
                              int flag,
                              rd_kafka_topic_partition_list_t *partitions) {
        int i;
        int waitcnt       = 0;
        rd_kafka_q_t *tmpq = NULL;

        /* Synchronous mode: collect acks on a temporary queue.
         * In async mode tmpq stays NULL and no replies are requested. */
        if (!async)
                tmpq = rd_kafka_q_new(rk);

        rd_kafka_dbg(
            rk, TOPIC, pause ? "PAUSE" : "RESUME", "%s %s %d partition(s)",
            flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library",
            pause ? "pausing" : "resuming", partitions->cnt);

        for (i = 0; i < partitions->cnt; i++) {
                rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
                rd_kafka_toppar_t *rktp;

                /* Do not create unknown partitions on miss. */
                rktp =
                    rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
                if (!rktp) {
                        rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE" : "RESUME",
                                     "%s %s [%" PRId32
                                     "]: skipped: "
                                     "unknown partition",
                                     pause ? "Pause" : "Resume", rktpar->topic,
                                     rktpar->partition);

                        rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
                        continue;
                }

                rd_kafka_toppar_op_pause_resume(rktp, pause, flag,
                                                RD_KAFKA_REPLYQ(tmpq, 0));

                if (!async)
                        waitcnt++;

                /* Drop ref from get_toppar(). */
                rd_kafka_toppar_destroy(rktp);

                rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
        }

        if (!async) {
                /* Wait for one ack per dispatched op. */
                while (waitcnt-- > 0)
                        rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);

                rd_kafka_q_destroy_owner(tmpq);
        }

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
|
|
2518
|
+
|
|
2519
|
+
|
|
2520
|
+
|
|
2521
|
+
/**
|
|
2522
|
+
* Propagate error for toppar
|
|
2523
|
+
*/
|
|
2524
|
+
void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp,
|
|
2525
|
+
rd_kafka_resp_err_t err,
|
|
2526
|
+
const char *reason) {
|
|
2527
|
+
rd_kafka_op_t *rko;
|
|
2528
|
+
char buf[512];
|
|
2529
|
+
|
|
2530
|
+
rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);
|
|
2531
|
+
rko->rko_err = err;
|
|
2532
|
+
rko->rko_rktp = rd_kafka_toppar_keep(rktp);
|
|
2533
|
+
|
|
2534
|
+
rd_snprintf(buf, sizeof(buf), "%.*s [%" PRId32 "]: %s (%s)",
|
|
2535
|
+
RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
|
|
2536
|
+
rktp->rktp_partition, reason, rd_kafka_err2str(err));
|
|
2537
|
+
|
|
2538
|
+
rko->rko_u.err.errstr = rd_strdup(buf);
|
|
2539
|
+
|
|
2540
|
+
rd_kafka_q_enq(rktp->rktp_fetchq, rko);
|
|
2541
|
+
}
|
|
2542
|
+
|
|
2543
|
+
|
|
2544
|
+
|
|
2545
|
+
/**
|
|
2546
|
+
* Returns the currently delegated broker for this toppar.
|
|
2547
|
+
* If \p proper_broker is set NULL will be returned if current handler
|
|
2548
|
+
* is not a proper broker (INTERNAL broker).
|
|
2549
|
+
*
|
|
2550
|
+
* The returned broker has an increased refcount.
|
|
2551
|
+
*
|
|
2552
|
+
* Locks: none
|
|
2553
|
+
*/
|
|
2554
|
+
rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp,
|
|
2555
|
+
int proper_broker) {
|
|
2556
|
+
rd_kafka_broker_t *rkb;
|
|
2557
|
+
rd_kafka_toppar_lock(rktp);
|
|
2558
|
+
rkb = rktp->rktp_broker;
|
|
2559
|
+
if (rkb) {
|
|
2560
|
+
if (proper_broker && rkb->rkb_source == RD_KAFKA_INTERNAL)
|
|
2561
|
+
rkb = NULL;
|
|
2562
|
+
else
|
|
2563
|
+
rd_kafka_broker_keep(rkb);
|
|
2564
|
+
}
|
|
2565
|
+
rd_kafka_toppar_unlock(rktp);
|
|
2566
|
+
|
|
2567
|
+
return rkb;
|
|
2568
|
+
}
|
|
2569
|
+
|
|
2570
|
+
|
|
2571
|
+
/**
 * @brief Take action when partition broker becomes unavailable.
 *        This should be called when requests fail with
 *        NOT_LEADER_FOR.. or similar error codes, e.g. ProduceRequest.
 *
 * Marks the topic as leader-less and triggers a fast metadata/leader
 * query to find a new leader.
 *
 * @locks none
 * @locality any
 */
void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp,
                                        const char *reason,
                                        rd_kafka_resp_err_t err) {
        rd_kafka_topic_t *rkt = rktp->rktp_rkt;

        rd_kafka_dbg(rkt->rkt_rk, TOPIC, "BROKERUA",
                     "%s [%" PRId32 "]: broker unavailable: %s: %s",
                     rkt->rkt_topic->str, rktp->rktp_partition, reason,
                     rd_kafka_err2str(err));

        rd_kafka_topic_wrlock(rkt);
        rkt->rkt_flags |= RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
        rd_kafka_topic_wrunlock(rkt);

        rd_kafka_topic_fast_leader_query(rkt->rkt_rk,
                                         rd_false /* don't force */);
}
|
|
2596
|
+
|
|
2597
|
+
|
|
2598
|
+
/**
 * @brief Returns the topic name of a message's partition.
 *
 * @remark \p rktpar here is actually a toppar disguised behind the
 *         public topic_partition type (legacy cast), not a plain
 *         rd_kafka_topic_partition_t.
 */
const char *
rd_kafka_topic_partition_topic(const rd_kafka_topic_partition_t *rktpar) {
        const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
        return rktp->rktp_rkt->rkt_topic->str;
}
|
|
2603
|
+
|
|
2604
|
+
/**
 * @brief Returns the partition number.
 *
 * @remark \p rktpar is actually a toppar cast to the public
 *         topic_partition type (legacy cast).
 */
int32_t
rd_kafka_topic_partition_partition(const rd_kafka_topic_partition_t *rktpar) {
        const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
        return rktp->rktp_partition;
}
|
|
2609
|
+
|
|
2610
|
+
/**
 * @brief Get both topic name and partition in one call.
 *
 * @param name output: pointer to the topic name (owned by the toppar,
 *        do not free).
 * @param partition output: partition number.
 *
 * @remark \p rktpar is actually a toppar cast to the public
 *         topic_partition type (legacy cast).
 */
void rd_kafka_topic_partition_get(const rd_kafka_topic_partition_t *rktpar,
                                  const char **name,
                                  int32_t *partition) {
        const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
        *name      = rktp->rktp_rkt->rkt_topic->str;
        *partition = rktp->rktp_partition;
}
|
|
2617
|
+
|
|
2618
|
+
|
|
2619
|
+
/**
|
|
2620
|
+
*
|
|
2621
|
+
* rd_kafka_topic_partition_t lists
|
|
2622
|
+
* Fixed-size non-growable list of partitions for propagation to application.
|
|
2623
|
+
*
|
|
2624
|
+
*/
|
|
2625
|
+
|
|
2626
|
+
|
|
2627
|
+
/**
 * @brief Grow the element array of \p rktparlist by at least
 *        \p add_size elements.
 *
 * Growth is geometric: if the requested increment is smaller than the
 * current capacity, the capacity is (at least) doubled, with a minimum
 * increment of 32 elements, to amortize repeated single-element adds.
 */
static void
rd_kafka_topic_partition_list_grow(rd_kafka_topic_partition_list_t *rktparlist,
                                   int add_size) {
        if (add_size < rktparlist->size)
                add_size = RD_MAX(rktparlist->size, 32);

        rktparlist->size += add_size;
        rktparlist->elems = rd_realloc(
            rktparlist->elems, sizeof(*rktparlist->elems) * rktparlist->size);
}
|
|
2637
|
+
|
|
2638
|
+
|
|
2639
|
+
/**
 * @brief Initialize an (embedded/stack) list for fitting \p size
 *        partitions.
 *
 * Zeroes the structure and pre-allocates room for \p size elements.
 */
void rd_kafka_topic_partition_list_init(
    rd_kafka_topic_partition_list_t *rktparlist,
    int size) {
        memset(rktparlist, 0, sizeof(*rktparlist));

        if (size > 0)
                rd_kafka_topic_partition_list_grow(rktparlist, size);
}
|
|
2650
|
+
|
|
2651
|
+
|
|
2652
|
+
/**
|
|
2653
|
+
* Create a list for fitting 'size' topic_partitions (rktp).
|
|
2654
|
+
*/
|
|
2655
|
+
rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size) {
|
|
2656
|
+
rd_kafka_topic_partition_list_t *rktparlist;
|
|
2657
|
+
|
|
2658
|
+
rktparlist = rd_calloc(1, sizeof(*rktparlist));
|
|
2659
|
+
|
|
2660
|
+
if (size > 0)
|
|
2661
|
+
rd_kafka_topic_partition_list_grow(rktparlist, size);
|
|
2662
|
+
|
|
2663
|
+
return rktparlist;
|
|
2664
|
+
}
|
|
2665
|
+
|
|
2666
|
+
/**
 * @brief Allocate a new topic+partition identified by topic id
 *        (no topic name is set).
 *
 * The topic id is stored on the element's private glue object.
 */
rd_kafka_topic_partition_t *
rd_kafka_topic_partition_new_with_topic_id(rd_kafka_Uuid_t topic_id,
                                           int32_t partition) {
        rd_kafka_topic_partition_private_t *parpriv;
        rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));

        rktpar->partition = partition;
        /* get_private() creates the private object on first use. */
        parpriv           = rd_kafka_topic_partition_get_private(rktpar);
        parpriv->topic_id = topic_id;
        return rktpar;
}
|
|
2677
|
+
|
|
2678
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic,
|
|
2679
|
+
int32_t partition) {
|
|
2680
|
+
rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));
|
|
2681
|
+
|
|
2682
|
+
rktpar->topic = rd_strdup(topic);
|
|
2683
|
+
rktpar->partition = partition;
|
|
2684
|
+
|
|
2685
|
+
return rktpar;
|
|
2686
|
+
}
|
|
2687
|
+
|
|
2688
|
+
/**
|
|
2689
|
+
* @brief Update \p dst with info from \p src.
|
|
2690
|
+
*/
|
|
2691
|
+
static void
|
|
2692
|
+
rd_kafka_topic_partition_update(rd_kafka_topic_partition_t *dst,
|
|
2693
|
+
const rd_kafka_topic_partition_t *src) {
|
|
2694
|
+
const rd_kafka_topic_partition_private_t *srcpriv;
|
|
2695
|
+
rd_kafka_topic_partition_private_t *dstpriv;
|
|
2696
|
+
|
|
2697
|
+
rd_dassert(!strcmp(dst->topic, src->topic));
|
|
2698
|
+
rd_dassert(dst->partition == src->partition);
|
|
2699
|
+
rd_dassert(dst != src);
|
|
2700
|
+
|
|
2701
|
+
dst->offset = src->offset;
|
|
2702
|
+
dst->opaque = src->opaque;
|
|
2703
|
+
dst->err = src->err;
|
|
2704
|
+
|
|
2705
|
+
if (src->metadata_size > 0) {
|
|
2706
|
+
dst->metadata = rd_malloc(src->metadata_size);
|
|
2707
|
+
dst->metadata_size = src->metadata_size;
|
|
2708
|
+
;
|
|
2709
|
+
memcpy(dst->metadata, src->metadata, dst->metadata_size);
|
|
2710
|
+
}
|
|
2711
|
+
|
|
2712
|
+
if ((srcpriv = src->_private)) {
|
|
2713
|
+
dstpriv = rd_kafka_topic_partition_get_private(dst);
|
|
2714
|
+
if (srcpriv->rktp && !dstpriv->rktp)
|
|
2715
|
+
dstpriv->rktp = rd_kafka_toppar_keep(srcpriv->rktp);
|
|
2716
|
+
|
|
2717
|
+
rd_assert(dstpriv->rktp == srcpriv->rktp);
|
|
2718
|
+
|
|
2719
|
+
dstpriv->leader_epoch = srcpriv->leader_epoch;
|
|
2720
|
+
|
|
2721
|
+
dstpriv->current_leader_epoch = srcpriv->current_leader_epoch;
|
|
2722
|
+
|
|
2723
|
+
dstpriv->topic_id = srcpriv->topic_id;
|
|
2724
|
+
|
|
2725
|
+
} else if ((dstpriv = dst->_private)) {
|
|
2726
|
+
/* No private object in source, reset the fields. */
|
|
2727
|
+
dstpriv->leader_epoch = -1;
|
|
2728
|
+
dstpriv->current_leader_epoch = -1;
|
|
2729
|
+
dstpriv->topic_id = RD_KAFKA_UUID_ZERO;
|
|
2730
|
+
}
|
|
2731
|
+
}
|
|
2732
|
+
|
|
2733
|
+
|
|
2734
|
+
rd_kafka_topic_partition_t *
|
|
2735
|
+
rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src) {
|
|
2736
|
+
rd_kafka_topic_partition_t *dst =
|
|
2737
|
+
rd_kafka_topic_partition_new(src->topic, src->partition);
|
|
2738
|
+
|
|
2739
|
+
rd_kafka_topic_partition_update(dst, src);
|
|
2740
|
+
|
|
2741
|
+
return dst;
|
|
2742
|
+
}
|
|
2743
|
+
|
|
2744
|
+
|
|
2745
|
+
/** Same as rd_kafka_topic_partition_copy() but with a generic void*
 *  signature, for use as a copy callback. */
void *rd_kafka_topic_partition_copy_void(const void *src) {
        return rd_kafka_topic_partition_copy(src);
}
|
|
2749
|
+
|
|
2750
|
+
|
|
2751
|
+
/**
 * @brief Allocate a new topic+partition element from a toppar's
 *        topic name and partition number.
 *
 * @remark Does not keep a reference to \p rktp nor set the private
 *         glue object.
 */
rd_kafka_topic_partition_t *
rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp) {
        rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));

        rktpar->topic     = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic);
        rktpar->partition = rktp->rktp_partition;

        return rktpar;
}
|
|
2760
|
+
|
|
2761
|
+
/**
 * @brief Destroy a partition private glue object, dropping its
 *        toppar reference (if any).
 */
static void rd_kafka_topic_partition_private_destroy(
    rd_kafka_topic_partition_private_t *parpriv) {
        if (parpriv->rktp)
                rd_kafka_toppar_destroy(parpriv->rktp);
        rd_free(parpriv);
}
|
|
2770
|
+
|
|
2771
|
+
/**
 * @brief Free the contents of a topic+partition element (topic name,
 *        metadata, private glue object).
 *
 * @param do_free if non-zero the element itself is freed too; pass 0
 *        for elements embedded in a list's array.
 */
static void
rd_kafka_topic_partition_destroy0(rd_kafka_topic_partition_t *rktpar,
                                  int do_free) {
        if (rktpar->topic)
                rd_free(rktpar->topic);
        if (rktpar->metadata)
                rd_free(rktpar->metadata);
        if (rktpar->_private)
                rd_kafka_topic_partition_private_destroy(
                    (rd_kafka_topic_partition_private_t *)rktpar->_private);

        if (do_free)
                rd_free(rktpar);
}
|
|
2785
|
+
|
|
2786
|
+
|
|
2787
|
+
/**
 * @brief Get the leader epoch stored on \p rktpar.
 *
 * @returns the leader epoch, or -1 if no private object is set.
 */
int32_t rd_kafka_topic_partition_get_leader_epoch(
    const rd_kafka_topic_partition_t *rktpar) {
        const rd_kafka_topic_partition_private_t *parpriv;

        if (!(parpriv = rktpar->_private))
                return -1;

        return parpriv->leader_epoch;
}
|
|
2796
|
+
|
|
2797
|
+
/**
 * @brief Set the leader epoch on \p rktpar.
 *
 * Setting -1 clears the epoch; the private object is only allocated
 * when a non-default value needs storing.
 */
void rd_kafka_topic_partition_set_leader_epoch(
    rd_kafka_topic_partition_t *rktpar,
    int32_t leader_epoch) {
        rd_kafka_topic_partition_private_t *parpriv;

        /* Avoid allocating private_t if clearing the epoch */
        if (leader_epoch == -1 && !rktpar->_private)
                return;

        parpriv = rd_kafka_topic_partition_get_private(rktpar);

        parpriv->leader_epoch = leader_epoch;
}
|
|
2810
|
+
|
|
2811
|
+
/**
 * @brief Get the current leader epoch stored on \p rktpar.
 *
 * @returns the current leader epoch, or -1 if no private object is set.
 */
int32_t rd_kafka_topic_partition_get_current_leader_epoch(
    const rd_kafka_topic_partition_t *rktpar) {
        const rd_kafka_topic_partition_private_t *parpriv;

        if (!(parpriv = rktpar->_private))
                return -1;

        return parpriv->current_leader_epoch;
}
|
|
2820
|
+
|
|
2821
|
+
/**
|
|
2822
|
+
* @brief Sets topic id for partition \p rktpar.
|
|
2823
|
+
*
|
|
2824
|
+
* @param rktpar Topic partition.
|
|
2825
|
+
* @param topic_id Topic id to set.
|
|
2826
|
+
*/
|
|
2827
|
+
void rd_kafka_topic_partition_set_topic_id(rd_kafka_topic_partition_t *rktpar,
|
|
2828
|
+
rd_kafka_Uuid_t topic_id) {
|
|
2829
|
+
rd_kafka_topic_partition_private_t *parpriv;
|
|
2830
|
+
parpriv = rd_kafka_topic_partition_get_private(rktpar);
|
|
2831
|
+
parpriv->topic_id = topic_id;
|
|
2832
|
+
}
|
|
2833
|
+
|
|
2834
|
+
/**
 * @brief Gets topic id from topic-partition \p rktpar.
 *
 * @param rktpar Topic partition.
 * @return Topic id, or RD_KAFKA_UUID_ZERO if no private object is set.
 */
rd_kafka_Uuid_t rd_kafka_topic_partition_get_topic_id(
    const rd_kafka_topic_partition_t *rktpar) {
        const rd_kafka_topic_partition_private_t *parpriv;

        if (!(parpriv = rktpar->_private))
                return RD_KAFKA_UUID_ZERO;

        return parpriv->topic_id;
}
|
|
2849
|
+
|
|
2850
|
+
/**
 * @brief Set the current leader epoch on \p rktpar.
 *
 * Setting -1 clears the epoch; the private object is only allocated
 * when a non-default value needs storing.
 */
void rd_kafka_topic_partition_set_current_leader_epoch(
    rd_kafka_topic_partition_t *rktpar,
    int32_t current_leader_epoch) {
        rd_kafka_topic_partition_private_t *parpriv;

        /* Avoid allocating private_t if clearing the epoch */
        if (current_leader_epoch == -1 && !rktpar->_private)
                return;

        parpriv = rd_kafka_topic_partition_get_private(rktpar);

        parpriv->current_leader_epoch = current_leader_epoch;
}
|
|
2863
|
+
|
|
2864
|
+
/**
 * @brief Set offset and leader epoch of \p rktpar from a fetch
 *        position \p fetchpos.
 */
void rd_kafka_topic_partition_set_from_fetch_pos(
    rd_kafka_topic_partition_t *rktpar,
    const rd_kafka_fetch_pos_t fetchpos) {
        rktpar->offset = fetchpos.offset;
        rd_kafka_topic_partition_set_leader_epoch(rktpar,
                                                  fetchpos.leader_epoch);
}
|
|
2874
|
+
|
|
2875
|
+
/**
 * @brief Set partition metadata on \p rktpar from the metadata stored
 *        on the toppar (rktp_stored_metadata), copying the bytes.
 *
 * @remark Assumes \p rktpar->metadata was previously unset —
 *         TODO confirm callers never pass an element with existing
 *         metadata (would leak).
 */
void rd_kafka_topic_partition_set_metadata_from_rktp_stored(
    rd_kafka_topic_partition_t *rktpar,
    const rd_kafka_toppar_t *rktp) {
        rktpar->metadata_size = rktp->rktp_stored_metadata_size;
        if (rktp->rktp_stored_metadata) {
                rktpar->metadata = rd_malloc(rktp->rktp_stored_metadata_size);
                memcpy(rktpar->metadata, rktp->rktp_stored_metadata,
                       rktpar->metadata_size);
        }
}
|
|
2888
|
+
|
|
2889
|
+
|
|
2890
|
+
/**
|
|
2891
|
+
* @brief Destroy all partitions in list.
|
|
2892
|
+
*
|
|
2893
|
+
* @remark The allocated size of the list will not shrink.
|
|
2894
|
+
*/
|
|
2895
|
+
void rd_kafka_topic_partition_list_clear(
|
|
2896
|
+
rd_kafka_topic_partition_list_t *rktparlist) {
|
|
2897
|
+
int i;
|
|
2898
|
+
|
|
2899
|
+
for (i = 0; i < rktparlist->cnt; i++)
|
|
2900
|
+
rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0);
|
|
2901
|
+
|
|
2902
|
+
rktparlist->cnt = 0;
|
|
2903
|
+
}
|
|
2904
|
+
|
|
2905
|
+
|
|
2906
|
+
/** Destroy callback with void* signature, for use with generic lists;
 *  frees both contents and the element itself. */
void rd_kafka_topic_partition_destroy_free(void *ptr) {
        rd_kafka_topic_partition_destroy0(ptr, rd_true /*do_free*/);
}
|
|
2909
|
+
|
|
2910
|
+
/** Public destructor: frees the element's contents and the element. */
void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar) {
        rd_kafka_topic_partition_destroy0(rktpar, 1);
}
|
|
2913
|
+
|
|
2914
|
+
|
|
2915
|
+
/**
|
|
2916
|
+
* Destroys a list previously created with .._list_new() and drops
|
|
2917
|
+
* any references to contained toppars.
|
|
2918
|
+
*/
|
|
2919
|
+
void rd_kafka_topic_partition_list_destroy(
|
|
2920
|
+
rd_kafka_topic_partition_list_t *rktparlist) {
|
|
2921
|
+
int i;
|
|
2922
|
+
|
|
2923
|
+
for (i = 0; i < rktparlist->cnt; i++)
|
|
2924
|
+
rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0);
|
|
2925
|
+
|
|
2926
|
+
if (rktparlist->elems)
|
|
2927
|
+
rd_free(rktparlist->elems);
|
|
2928
|
+
|
|
2929
|
+
rd_free(rktparlist);
|
|
2930
|
+
}
|
|
2931
|
+
|
|
2932
|
+
|
|
2933
|
+
/**
 * @brief Wrapper for rd_kafka_topic_partition_list_destroy() that
 *        matches the standard free(void *) signature, for callback use.
 */
void rd_kafka_topic_partition_list_destroy_free(void *ptr) {
        rd_kafka_topic_partition_list_destroy(
            (rd_kafka_topic_partition_list_t *)ptr);
}
|
|
2941
|
+
|
|
2942
|
+
/**
 * @brief Add a partition to an rktpar list, growing the list if needed.
 *
 * @param func,line caller location, forwarded to toppar_keep_fl() for
 *        refcount debugging.
 * @param topic optional topic name (copied); may be NULL when only a
 *        topic id is known.
 * @param rktp Optional partition object that will be stored on the
 *        ._private object (with refcount increased).
 * @param parpriv optional private glue object to copy fields from;
 *        takes precedence over \p rktp.
 *
 * @returns a pointer to the added element (owned by the list; valid
 *          until the list grows or is destroyed).
 */
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0(
    const char *func,
    int line,
    rd_kafka_topic_partition_list_t *rktparlist,
    const char *topic,
    int32_t partition,
    rd_kafka_toppar_t *rktp,
    const rd_kafka_topic_partition_private_t *parpriv) {
        rd_kafka_topic_partition_t *rktpar;
        if (rktparlist->cnt == rktparlist->size)
                rd_kafka_topic_partition_list_grow(rktparlist, 1);
        rd_kafka_assert(NULL, rktparlist->cnt < rktparlist->size);

        rktpar = &rktparlist->elems[rktparlist->cnt++];
        memset(rktpar, 0, sizeof(*rktpar));
        if (topic)
                rktpar->topic = rd_strdup(topic);
        rktpar->partition = partition;
        rktpar->offset    = RD_KAFKA_OFFSET_INVALID;

        if (parpriv) {
                /* Copy all private fields; keep a toppar ref if set. */
                rd_kafka_topic_partition_private_t *parpriv_copy =
                    rd_kafka_topic_partition_get_private(rktpar);
                if (parpriv->rktp) {
                        parpriv_copy->rktp =
                            rd_kafka_toppar_keep_fl(func, line, parpriv->rktp);
                }
                parpriv_copy->leader_epoch = parpriv->leader_epoch;
                parpriv_copy->current_leader_epoch =
                    parpriv->current_leader_epoch;
                parpriv_copy->topic_id = parpriv->topic_id;
        } else if (rktp) {
                /* No private template: just store a toppar ref. */
                rd_kafka_topic_partition_private_t *parpriv_copy =
                    rd_kafka_topic_partition_get_private(rktpar);
                parpriv_copy->rktp = rd_kafka_toppar_keep_fl(func, line, rktp);
        }

        return rktpar;
}
|
|
2990
|
+
|
|
2991
|
+
|
|
2992
|
+
/**
 * @brief Add a topic+partition to the list by topic name.
 *
 * @returns a pointer to the added element (owned by the list).
 */
rd_kafka_topic_partition_t *
rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist,
                                  const char *topic,
                                  int32_t partition) {
        return rd_kafka_topic_partition_list_add0(
            __FUNCTION__, __LINE__, rktparlist, topic, partition, NULL, NULL);
}
|
|
2999
|
+
|
|
3000
|
+
|
|
3001
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_with_topic_id(
|
|
3002
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3003
|
+
rd_kafka_Uuid_t topic_id,
|
|
3004
|
+
int32_t partition) {
|
|
3005
|
+
rd_kafka_topic_partition_t *rktpar;
|
|
3006
|
+
rktpar = rd_kafka_topic_partition_list_add0(
|
|
3007
|
+
__FUNCTION__, __LINE__, rktparlist, NULL, partition, NULL, NULL);
|
|
3008
|
+
rd_kafka_topic_partition_private_t *parpriv =
|
|
3009
|
+
rd_kafka_topic_partition_get_private(rktpar);
|
|
3010
|
+
parpriv->topic_id = topic_id;
|
|
3011
|
+
return rktpar;
|
|
3012
|
+
}
|
|
3013
|
+
|
|
3014
|
+
|
|
3015
|
+
rd_kafka_topic_partition_t *
|
|
3016
|
+
rd_kafka_topic_partition_list_add_with_topic_name_and_id(
|
|
3017
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3018
|
+
rd_kafka_Uuid_t topic_id,
|
|
3019
|
+
const char *topic,
|
|
3020
|
+
int32_t partition) {
|
|
3021
|
+
rd_kafka_topic_partition_t *rktpar;
|
|
3022
|
+
rktpar = rd_kafka_topic_partition_list_add0(
|
|
3023
|
+
__FUNCTION__, __LINE__, rktparlist, topic, partition, NULL, NULL);
|
|
3024
|
+
rd_kafka_topic_partition_private_t *parpriv =
|
|
3025
|
+
rd_kafka_topic_partition_get_private(rktpar);
|
|
3026
|
+
parpriv->topic_id = topic_id;
|
|
3027
|
+
return rktpar;
|
|
3028
|
+
}
|
|
3029
|
+
|
|
3030
|
+
|
|
3031
|
+
/**
|
|
3032
|
+
* Adds a consecutive list of partitions to a list
|
|
3033
|
+
*/
|
|
3034
|
+
void rd_kafka_topic_partition_list_add_range(
|
|
3035
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3036
|
+
const char *topic,
|
|
3037
|
+
int32_t start,
|
|
3038
|
+
int32_t stop) {
|
|
3039
|
+
|
|
3040
|
+
for (; start <= stop; start++)
|
|
3041
|
+
rd_kafka_topic_partition_list_add(rktparlist, topic, start);
|
|
3042
|
+
}
|
|
3043
|
+
|
|
3044
|
+
|
|
3045
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert(
|
|
3046
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3047
|
+
const char *topic,
|
|
3048
|
+
int32_t partition) {
|
|
3049
|
+
rd_kafka_topic_partition_t *rktpar;
|
|
3050
|
+
|
|
3051
|
+
if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic,
|
|
3052
|
+
partition)))
|
|
3053
|
+
return rktpar;
|
|
3054
|
+
|
|
3055
|
+
return rd_kafka_topic_partition_list_add(rktparlist, topic, partition);
|
|
3056
|
+
}
|
|
3057
|
+
|
|
3058
|
+
|
|
3059
|
+
|
|
3060
|
+
/**
|
|
3061
|
+
* @brief Creates a copy of \p rktpar and adds it to \p rktparlist
|
|
3062
|
+
*
|
|
3063
|
+
* @return Copy of passed partition that was added to the list
|
|
3064
|
+
*
|
|
3065
|
+
* @remark Ownership of returned partition remains of the list.
|
|
3066
|
+
*/
|
|
3067
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add_copy(
|
|
3068
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3069
|
+
const rd_kafka_topic_partition_t *rktpar) {
|
|
3070
|
+
rd_kafka_topic_partition_t *dst;
|
|
3071
|
+
|
|
3072
|
+
dst = rd_kafka_topic_partition_list_add0(
|
|
3073
|
+
__FUNCTION__, __LINE__, rktparlist, rktpar->topic,
|
|
3074
|
+
rktpar->partition, NULL, rktpar->_private);
|
|
3075
|
+
rd_kafka_topic_partition_update(dst, rktpar);
|
|
3076
|
+
return dst;
|
|
3077
|
+
}
|
|
3078
|
+
|
|
3079
|
+
|
|
3080
|
+
|
|
3081
|
+
/**
|
|
3082
|
+
* Create and return a copy of list 'src'
|
|
3083
|
+
*/
|
|
3084
|
+
rd_kafka_topic_partition_list_t *
|
|
3085
|
+
rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src) {
|
|
3086
|
+
rd_kafka_topic_partition_list_t *dst;
|
|
3087
|
+
int i;
|
|
3088
|
+
|
|
3089
|
+
dst = rd_kafka_topic_partition_list_new(src->size);
|
|
3090
|
+
|
|
3091
|
+
for (i = 0; i < src->cnt; i++)
|
|
3092
|
+
rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]);
|
|
3093
|
+
return dst;
|
|
3094
|
+
}
|
|
3095
|
+
|
|
3096
|
+
/**
|
|
3097
|
+
* @brief Same as rd_kafka_topic_partition_list_copy() but suitable for
|
|
3098
|
+
* rd_list_copy(). The \p opaque is ignored.
|
|
3099
|
+
*/
|
|
3100
|
+
void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque) {
|
|
3101
|
+
return rd_kafka_topic_partition_list_copy(src);
|
|
3102
|
+
}
|
|
3103
|
+
|
|
3104
|
+
/**
|
|
3105
|
+
* @brief Append copies of all elements in \p src to \p dst.
|
|
3106
|
+
* No duplicate-checks are performed.
|
|
3107
|
+
*/
|
|
3108
|
+
void rd_kafka_topic_partition_list_add_list(
|
|
3109
|
+
rd_kafka_topic_partition_list_t *dst,
|
|
3110
|
+
const rd_kafka_topic_partition_list_t *src) {
|
|
3111
|
+
int i;
|
|
3112
|
+
|
|
3113
|
+
if (src->cnt == 0)
|
|
3114
|
+
return;
|
|
3115
|
+
|
|
3116
|
+
if (dst->size < dst->cnt + src->cnt)
|
|
3117
|
+
rd_kafka_topic_partition_list_grow(dst, src->cnt);
|
|
3118
|
+
|
|
3119
|
+
for (i = 0; i < src->cnt; i++)
|
|
3120
|
+
rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]);
|
|
3121
|
+
}
|
|
3122
|
+
|
|
3123
|
+
|
|
3124
|
+
/**
|
|
3125
|
+
* @brief Compare two partition lists using partition comparator \p cmp.
|
|
3126
|
+
*
|
|
3127
|
+
* @warning This is an O(Na*Nb) operation.
|
|
3128
|
+
*/
|
|
3129
|
+
int rd_kafka_topic_partition_list_cmp(const void *_a,
|
|
3130
|
+
const void *_b,
|
|
3131
|
+
int (*cmp)(const void *, const void *)) {
|
|
3132
|
+
const rd_kafka_topic_partition_list_t *a = _a, *b = _b;
|
|
3133
|
+
int r;
|
|
3134
|
+
int i;
|
|
3135
|
+
|
|
3136
|
+
r = a->cnt - b->cnt;
|
|
3137
|
+
if (r || a->cnt == 0)
|
|
3138
|
+
return r;
|
|
3139
|
+
|
|
3140
|
+
/* Since the lists may not be sorted we need to scan all of B
|
|
3141
|
+
* for each element in A.
|
|
3142
|
+
* FIXME: If the list sizes are larger than X we could create a
|
|
3143
|
+
* temporary hash map instead. */
|
|
3144
|
+
for (i = 0; i < a->cnt; i++) {
|
|
3145
|
+
int j;
|
|
3146
|
+
|
|
3147
|
+
for (j = 0; j < b->cnt; j++) {
|
|
3148
|
+
r = cmp(&a->elems[i], &b->elems[j]);
|
|
3149
|
+
if (!r)
|
|
3150
|
+
break;
|
|
3151
|
+
}
|
|
3152
|
+
|
|
3153
|
+
if (j == b->cnt)
|
|
3154
|
+
return 1;
|
|
3155
|
+
}
|
|
3156
|
+
|
|
3157
|
+
return 0;
|
|
3158
|
+
}
|
|
3159
|
+
|
|
3160
|
+
|
|
3161
|
+
/**
 * @brief Ensures the \p rktpar has a toppar set in _private.
 *
 * @returns the toppar object WITHOUT refcnt increased.
 *          NOTE(review): the original comment said "possibly NULL if
 *          \p create_on_miss is true"; given the get2() call below it
 *          looks like NULL is returned when create_on_miss is false and
 *          the toppar does not exist — confirm against rd_kafka_toppar_get2.
 */
rd_kafka_toppar_t *
rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk,
                                       rd_kafka_topic_partition_t *rktpar,
                                       rd_bool_t create_on_miss) {
        rd_kafka_topic_partition_private_t *parpriv;

        parpriv = rd_kafka_topic_partition_get_private(rktpar);

        /* Only look up the toppar once; subsequent calls reuse the
         * cached pointer in the element's private struct. */
        if (!parpriv->rktp)
                parpriv->rktp = rd_kafka_toppar_get2(
                    rk, rktpar->topic, rktpar->partition,
                    0 /* not ua on miss */, create_on_miss);

        return parpriv->rktp;
}
|
|
3182
|
+
|
|
3183
|
+
|
|
3184
|
+
int rd_kafka_topic_partition_cmp(const void *_a, const void *_b) {
|
|
3185
|
+
const rd_kafka_topic_partition_t *a = _a;
|
|
3186
|
+
const rd_kafka_topic_partition_t *b = _b;
|
|
3187
|
+
int r = strcmp(a->topic, b->topic);
|
|
3188
|
+
if (r)
|
|
3189
|
+
return r;
|
|
3190
|
+
else
|
|
3191
|
+
return RD_CMP(a->partition, b->partition);
|
|
3192
|
+
}
|
|
3193
|
+
|
|
3194
|
+
/**
|
|
3195
|
+
* @brief Compare topic partitions \p a and \p b by topic id first
|
|
3196
|
+
* and then by partition.
|
|
3197
|
+
*/
|
|
3198
|
+
int rd_kafka_topic_partition_by_id_cmp(const void *_a, const void *_b) {
|
|
3199
|
+
const rd_kafka_topic_partition_t *a = _a;
|
|
3200
|
+
const rd_kafka_topic_partition_t *b = _b;
|
|
3201
|
+
rd_kafka_Uuid_t topic_id_a = rd_kafka_topic_partition_get_topic_id(a);
|
|
3202
|
+
rd_kafka_Uuid_t topic_id_b = rd_kafka_topic_partition_get_topic_id(b);
|
|
3203
|
+
int are_topic_ids_different = rd_kafka_Uuid_cmp(topic_id_a, topic_id_b);
|
|
3204
|
+
return are_topic_ids_different || RD_CMP(a->partition, b->partition);
|
|
3205
|
+
}
|
|
3206
|
+
|
|
3207
|
+
/** @brief rd_qsort_r()-compatible wrapper for
 *         rd_kafka_topic_partition_by_id_cmp(); \p opaque is ignored. */
static int rd_kafka_topic_partition_by_id_cmp_opaque(const void *_a,
                                                     const void *_b,
                                                     void *opaque) {
        return rd_kafka_topic_partition_by_id_cmp(_a, _b);
}
|
|
3212
|
+
|
|
3213
|
+
/** @brief Compare only the topic */
|
|
3214
|
+
int rd_kafka_topic_partition_cmp_topic(const void *_a, const void *_b) {
|
|
3215
|
+
const rd_kafka_topic_partition_t *a = _a;
|
|
3216
|
+
const rd_kafka_topic_partition_t *b = _b;
|
|
3217
|
+
return strcmp(a->topic, b->topic);
|
|
3218
|
+
}
|
|
3219
|
+
|
|
3220
|
+
/** @brief Compare only the topic id */
|
|
3221
|
+
int rd_kafka_topic_partition_cmp_topic_id(const void *_a, const void *_b) {
|
|
3222
|
+
const rd_kafka_topic_partition_t *a = _a;
|
|
3223
|
+
const rd_kafka_topic_partition_t *b = _b;
|
|
3224
|
+
return rd_kafka_Uuid_cmp(rd_kafka_topic_partition_get_topic_id(a),
|
|
3225
|
+
rd_kafka_topic_partition_get_topic_id(b));
|
|
3226
|
+
}
|
|
3227
|
+
|
|
3228
|
+
/** @brief rd_qsort_r()-compatible wrapper for
 *         rd_kafka_topic_partition_cmp(); \p opaque is ignored. */
static int rd_kafka_topic_partition_cmp_opaque(const void *_a,
                                               const void *_b,
                                               void *opaque) {
        return rd_kafka_topic_partition_cmp(_a, _b);
}
|
|
3233
|
+
|
|
3234
|
+
/** @returns a hash of the topic name and partition */
|
|
3235
|
+
unsigned int rd_kafka_topic_partition_hash(const void *_a) {
|
|
3236
|
+
const rd_kafka_topic_partition_t *a = _a;
|
|
3237
|
+
int r = 31 * 17 + a->partition;
|
|
3238
|
+
return 31 * r + rd_string_hash(a->topic, -1);
|
|
3239
|
+
}
|
|
3240
|
+
|
|
3241
|
+
/** @returns a hash of the topic id and partition */
|
|
3242
|
+
unsigned int rd_kafka_topic_partition_hash_by_id(const void *_a) {
|
|
3243
|
+
const rd_kafka_topic_partition_t *a = _a;
|
|
3244
|
+
const rd_kafka_Uuid_t topic_id =
|
|
3245
|
+
rd_kafka_topic_partition_get_topic_id(a);
|
|
3246
|
+
int r = 31 * 17 + a->partition;
|
|
3247
|
+
return 31 * r + rd_kafka_Uuid_hash(&topic_id);
|
|
3248
|
+
}
|
|
3249
|
+
|
|
3250
|
+
|
|
3251
|
+
|
|
3252
|
+
/**
|
|
3253
|
+
* @brief Search 'rktparlist' for 'topic' and 'partition'.
|
|
3254
|
+
* @returns the elems[] index or -1 on miss.
|
|
3255
|
+
*/
|
|
3256
|
+
static int rd_kafka_topic_partition_list_find0(
|
|
3257
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
3258
|
+
const char *topic,
|
|
3259
|
+
int32_t partition,
|
|
3260
|
+
int (*cmp)(const void *, const void *)) {
|
|
3261
|
+
rd_kafka_topic_partition_t skel;
|
|
3262
|
+
int i;
|
|
3263
|
+
|
|
3264
|
+
skel.topic = (char *)topic;
|
|
3265
|
+
skel.partition = partition;
|
|
3266
|
+
|
|
3267
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
3268
|
+
if (!cmp(&skel, &rktparlist->elems[i]))
|
|
3269
|
+
return i;
|
|
3270
|
+
}
|
|
3271
|
+
|
|
3272
|
+
return -1;
|
|
3273
|
+
}
|
|
3274
|
+
|
|
3275
|
+
/**
|
|
3276
|
+
* @brief Search 'rktparlist' for \p topic_id and \p partition with comparator
|
|
3277
|
+
* \p cmp.
|
|
3278
|
+
* @returns the elems[] index or -1 on miss.
|
|
3279
|
+
*/
|
|
3280
|
+
static int rd_kafka_topic_partition_list_find_by_id0(
|
|
3281
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
3282
|
+
rd_kafka_Uuid_t topic_id,
|
|
3283
|
+
int32_t partition,
|
|
3284
|
+
int (*cmp)(const void *, const void *)) {
|
|
3285
|
+
int i, ret = -1;
|
|
3286
|
+
rd_kafka_topic_partition_t *rktpar =
|
|
3287
|
+
rd_kafka_topic_partition_new_with_topic_id(topic_id, partition);
|
|
3288
|
+
|
|
3289
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
3290
|
+
if (!cmp(rktpar, &rktparlist->elems[i])) {
|
|
3291
|
+
ret = i;
|
|
3292
|
+
break;
|
|
3293
|
+
}
|
|
3294
|
+
}
|
|
3295
|
+
|
|
3296
|
+
rd_kafka_topic_partition_destroy(rktpar);
|
|
3297
|
+
return ret;
|
|
3298
|
+
}
|
|
3299
|
+
|
|
3300
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(
|
|
3301
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
3302
|
+
const char *topic,
|
|
3303
|
+
int32_t partition) {
|
|
3304
|
+
int i = rd_kafka_topic_partition_list_find0(
|
|
3305
|
+
rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
|
|
3306
|
+
if (i == -1)
|
|
3307
|
+
return NULL;
|
|
3308
|
+
else
|
|
3309
|
+
return &rktparlist->elems[i];
|
|
3310
|
+
}
|
|
3311
|
+
|
|
3312
|
+
/**
|
|
3313
|
+
* @brief Search 'rktparlist' for 'topic_id' and 'partition'.
|
|
3314
|
+
* @returns Found topic partition or NULL.
|
|
3315
|
+
*/
|
|
3316
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_by_id(
|
|
3317
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
3318
|
+
rd_kafka_Uuid_t topic_id,
|
|
3319
|
+
int32_t partition) {
|
|
3320
|
+
int i = rd_kafka_topic_partition_list_find_by_id0(
|
|
3321
|
+
rktparlist, topic_id, partition,
|
|
3322
|
+
rd_kafka_topic_partition_by_id_cmp);
|
|
3323
|
+
if (i == -1)
|
|
3324
|
+
return NULL;
|
|
3325
|
+
else
|
|
3326
|
+
return &rktparlist->elems[i];
|
|
3327
|
+
}
|
|
3328
|
+
|
|
3329
|
+
/**
 * @brief Search \p rktparlist for \p topic and \p partition.
 * @returns the elems[] index or -1 on miss.
 */
int rd_kafka_topic_partition_list_find_idx(
    const rd_kafka_topic_partition_list_t *rktparlist,
    const char *topic,
    int32_t partition) {
        return rd_kafka_topic_partition_list_find0(
            rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
}
|
|
3336
|
+
|
|
3337
|
+
/**
 * @brief Search \p rktparlist for \p topic_id and \p partition.
 * @returns the elems[] index or -1 on miss.
 */
int rd_kafka_topic_partition_list_find_idx_by_id(
    const rd_kafka_topic_partition_list_t *rktparlist,
    rd_kafka_Uuid_t topic_id,
    int32_t partition) {
        return rd_kafka_topic_partition_list_find_by_id0(
            rktparlist, topic_id, partition,
            rd_kafka_topic_partition_by_id_cmp);
}
|
|
3349
|
+
|
|
3350
|
+
|
|
3351
|
+
/**
|
|
3352
|
+
* @returns the first element that matches \p topic, regardless of partition.
|
|
3353
|
+
*/
|
|
3354
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_name(
|
|
3355
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
3356
|
+
const char *topic) {
|
|
3357
|
+
int i = rd_kafka_topic_partition_list_find0(
|
|
3358
|
+
rktparlist, topic, RD_KAFKA_PARTITION_UA,
|
|
3359
|
+
rd_kafka_topic_partition_cmp_topic);
|
|
3360
|
+
if (i == -1)
|
|
3361
|
+
return NULL;
|
|
3362
|
+
else
|
|
3363
|
+
return &rktparlist->elems[i];
|
|
3364
|
+
}
|
|
3365
|
+
|
|
3366
|
+
/**
|
|
3367
|
+
* @returns the first element that matches \p topic_id, regardless of partition.
|
|
3368
|
+
*/
|
|
3369
|
+
rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic_by_id(
|
|
3370
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
3371
|
+
const rd_kafka_Uuid_t topic_id) {
|
|
3372
|
+
int i = rd_kafka_topic_partition_list_find_by_id0(
|
|
3373
|
+
rktparlist, topic_id, RD_KAFKA_PARTITION_UA,
|
|
3374
|
+
rd_kafka_topic_partition_cmp_topic_id);
|
|
3375
|
+
if (i == -1)
|
|
3376
|
+
return NULL;
|
|
3377
|
+
else
|
|
3378
|
+
return &rktparlist->elems[i];
|
|
3379
|
+
}
|
|
3380
|
+
|
|
3381
|
+
|
|
3382
|
+
/**
 * @brief Delete the element at \p idx, compacting the array.
 * @returns 1 on deletion, 0 if \p idx was out of range.
 */
int rd_kafka_topic_partition_list_del_by_idx(
    rd_kafka_topic_partition_list_t *rktparlist,
    int idx) {
        if (unlikely(idx < 0 || idx >= rktparlist->cnt))
                return 0;

        /* Free the element's owned fields but not the element slot itself
         * (do_free=0): the slot is part of the elems[] array. */
        rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0);
        /* Shift the tail down one slot; memmove since the regions overlap. */
        memmove(&rktparlist->elems[idx], &rktparlist->elems[idx + 1],
                (rktparlist->cnt - idx - 1) * sizeof(rktparlist->elems[idx]));
        rktparlist->cnt--;

        return 1;
}
|
|
3395
|
+
|
|
3396
|
+
|
|
3397
|
+
int rd_kafka_topic_partition_list_del(
|
|
3398
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3399
|
+
const char *topic,
|
|
3400
|
+
int32_t partition) {
|
|
3401
|
+
int i = rd_kafka_topic_partition_list_find0(
|
|
3402
|
+
rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
|
|
3403
|
+
if (i == -1)
|
|
3404
|
+
return 0;
|
|
3405
|
+
|
|
3406
|
+
return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i);
|
|
3407
|
+
}
|
|
3408
|
+
|
|
3409
|
+
|
|
3410
|
+
|
|
3411
|
+
/**
 * Returns true if 'topic' matches the 'rktpar', else false.
 * On match, if rktpar is a regex pattern then 'matched_by_regex' is set to 1.
 *
 * A subscription topic beginning with '^' is treated as a regex pattern;
 * otherwise a plain string comparison is performed.
 */
int rd_kafka_topic_partition_match(rd_kafka_t *rk,
                                   const rd_kafka_group_member_t *rkgm,
                                   const rd_kafka_topic_partition_t *rktpar,
                                   const char *topic,
                                   int *matched_by_regex) {
        int ret = 0;

        if (*rktpar->topic == '^') {
                char errstr[128];

                /* rd_regex_match: 1 on match, 0 on no match, -1 on
                 * invalid pattern. */
                ret = rd_regex_match(rktpar->topic, topic, errstr,
                                     sizeof(errstr));
                if (ret == -1) {
                        /* Invalid pattern: log and treat as no match. */
                        rd_kafka_dbg(rk, CGRP, "SUBMATCH",
                                     "Invalid regex for member "
                                     "\"%.*s\" subscription \"%s\": %s",
                                     RD_KAFKAP_STR_PR(rkgm->rkgm_member_id),
                                     rktpar->topic, errstr);
                        return 0;
                }

                if (ret && matched_by_regex)
                        *matched_by_regex = 1;

        } else if (!strcmp(rktpar->topic, topic)) {

                if (matched_by_regex)
                        *matched_by_regex = 0;

                ret = 1;
        }

        return ret;
}
|
|
3449
|
+
|
|
3450
|
+
|
|
3451
|
+
|
|
3452
|
+
void rd_kafka_topic_partition_list_sort(
|
|
3453
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3454
|
+
int (*cmp)(const void *, const void *, void *),
|
|
3455
|
+
void *opaque) {
|
|
3456
|
+
|
|
3457
|
+
if (!cmp)
|
|
3458
|
+
cmp = rd_kafka_topic_partition_cmp_opaque;
|
|
3459
|
+
|
|
3460
|
+
rd_qsort_r(rktparlist->elems, rktparlist->cnt,
|
|
3461
|
+
sizeof(*rktparlist->elems), cmp, opaque);
|
|
3462
|
+
}
|
|
3463
|
+
|
|
3464
|
+
|
|
3465
|
+
/** @brief Sort list by topic name, then partition. */
void rd_kafka_topic_partition_list_sort_by_topic(
    rd_kafka_topic_partition_list_t *rktparlist) {
        rd_kafka_topic_partition_list_sort(
            rktparlist, rd_kafka_topic_partition_cmp_opaque, NULL);
}
|
|
3470
|
+
|
|
3471
|
+
/** @brief Sort list by topic id, then partition. */
void rd_kafka_topic_partition_list_sort_by_topic_id(
    rd_kafka_topic_partition_list_t *rktparlist) {
        rd_kafka_topic_partition_list_sort(
            rktparlist, rd_kafka_topic_partition_by_id_cmp_opaque, NULL);
}
|
|
3476
|
+
|
|
3477
|
+
rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(
|
|
3478
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3479
|
+
const char *topic,
|
|
3480
|
+
int32_t partition,
|
|
3481
|
+
int64_t offset) {
|
|
3482
|
+
rd_kafka_topic_partition_t *rktpar;
|
|
3483
|
+
|
|
3484
|
+
if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic,
|
|
3485
|
+
partition)))
|
|
3486
|
+
return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
|
|
3487
|
+
|
|
3488
|
+
rktpar->offset = offset;
|
|
3489
|
+
|
|
3490
|
+
return RD_KAFKA_RESP_ERR_NO_ERROR;
|
|
3491
|
+
}
|
|
3492
|
+
|
|
3493
|
+
|
|
3494
|
+
/**
 * @brief Reset all offsets to the provided value.
 */
void rd_kafka_topic_partition_list_reset_offsets(
    rd_kafka_topic_partition_list_t *rktparlist,
    int64_t offset) {

        int i;
        for (i = 0; i < rktparlist->cnt; i++)
                rktparlist->elems[i].offset = offset;
}
|
|
3505
|
+
|
|
3506
|
+
|
|
3507
|
+
/**
 * Set offset values in partition list based on toppar's last stored offset.
 *
 *  from_rktp - true: set rktp's last stored offset, false: set def_value
 *              unless a concrete offset is set.
 *  is_commit: indicates that set offset is to be committed (for debug log)
 *
 *  Returns the number of valid non-logical offsets (>=0).
 */
int rd_kafka_topic_partition_list_set_offsets(
    rd_kafka_t *rk,
    rd_kafka_topic_partition_list_t *rktparlist,
    int from_rktp,
    int64_t def_value,
    int is_commit) {
        int i;
        int valid_cnt = 0;

        for (i = 0; i < rktparlist->cnt; i++) {
                rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
                const char *verb                   = "setting";
                char preamble[128];

                *preamble = '\0'; /* Avoid warning */

                if (from_rktp) {
                        /* create_on_miss=true: the toppar (and topic_t)
                         * is created if it does not yet exist. */
                        rd_kafka_toppar_t *rktp =
                            rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
                                                                   rd_true);
                        /* Lock held while reading stored/committed pos. */
                        rd_kafka_toppar_lock(rktp);

                        /* Only build the (expensive) debug preamble when
                         * the relevant debug contexts are enabled. */
                        if (rk->rk_conf.debug &
                            (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_TOPIC))
                                rd_snprintf(preamble, sizeof(preamble),
                                            "stored %s, committed %s: ",
                                            rd_kafka_fetch_pos2str(
                                                rktp->rktp_stored_pos),
                                            rd_kafka_fetch_pos2str(
                                                rktp->rktp_committed_pos));

                        /* Only emit an offset if the stored position is
                         * ahead of what has already been committed,
                         * otherwise mark the partition as not-to-commit. */
                        if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
                                                   &rktp->rktp_committed_pos) >
                            0) {
                                verb = "setting stored";
                                rd_kafka_topic_partition_set_from_fetch_pos(
                                    rktpar, rktp->rktp_stored_pos);
                                rd_kafka_topic_partition_set_metadata_from_rktp_stored(
                                    rktpar, rktp);
                        } else {
                                rktpar->offset = RD_KAFKA_OFFSET_INVALID;
                        }
                        rd_kafka_toppar_unlock(rktp);
                } else {
                        /* Keep concrete offsets as-is; replace logical
                         * (sentinel) offsets with the default value. */
                        if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) {
                                verb           = "setting default";
                                rktpar->offset = def_value;
                                rd_kafka_topic_partition_set_leader_epoch(
                                    rktpar, -1);
                        } else
                                verb = "keeping";
                }

                if (is_commit && rktpar->offset == RD_KAFKA_OFFSET_INVALID)
                        rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET",
                                     "Topic %s [%" PRId32
                                     "]: "
                                     "%snot including in commit",
                                     rktpar->topic, rktpar->partition,
                                     preamble);
                else
                        rd_kafka_dbg(
                            rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET",
                            "Topic %s [%" PRId32
                            "]: "
                            "%s%s offset %s (leader epoch %" PRId32 ") %s",
                            rktpar->topic, rktpar->partition, preamble, verb,
                            rd_kafka_offset2str(rktpar->offset),
                            rd_kafka_topic_partition_get_leader_epoch(rktpar),
                            is_commit ? " for commit" : "");

                if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset))
                        valid_cnt++;
        }

        return valid_cnt;
}
|
|
3593
|
+
|
|
3594
|
+
|
|
3595
|
+
/**
|
|
3596
|
+
* @returns the number of partitions with absolute (non-logical) offsets set.
|
|
3597
|
+
*/
|
|
3598
|
+
int rd_kafka_topic_partition_list_count_abs_offsets(
|
|
3599
|
+
const rd_kafka_topic_partition_list_t *rktparlist) {
|
|
3600
|
+
int i;
|
|
3601
|
+
int valid_cnt = 0;
|
|
3602
|
+
|
|
3603
|
+
for (i = 0; i < rktparlist->cnt; i++)
|
|
3604
|
+
if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktparlist->elems[i].offset))
|
|
3605
|
+
valid_cnt++;
|
|
3606
|
+
|
|
3607
|
+
return valid_cnt;
|
|
3608
|
+
}
|
|
3609
|
+
|
|
3610
|
+
|
|
3611
|
+
/**
|
|
3612
|
+
* @brief Update _private (toppar) field to point to valid rktp
|
|
3613
|
+
* for each parition.
|
|
3614
|
+
*
|
|
3615
|
+
* @param create_on_miss Create partition (and topic_t object) if necessary.
|
|
3616
|
+
*/
|
|
3617
|
+
void rd_kafka_topic_partition_list_update_toppars(
|
|
3618
|
+
rd_kafka_t *rk,
|
|
3619
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
3620
|
+
rd_bool_t create_on_miss) {
|
|
3621
|
+
int i;
|
|
3622
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
3623
|
+
rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
|
|
3624
|
+
|
|
3625
|
+
rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
|
|
3626
|
+
create_on_miss);
|
|
3627
|
+
}
|
|
3628
|
+
}
|
|
3629
|
+
|
|
3630
|
+
|
|
3631
|
+
/**
 * @brief Populate \p leaders with the leaders+partitions for the partitions in
 *        \p rktparlist. Duplicates are suppressed.
 *
 *        If no leader is found for a partition that element's \c .err will
 *        be set to RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE.
 *
 *        If the partition does not exist \c .err will be set to
 *        RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION.
 *
 * @param rktparlist The partitions to look up leaders for, the .err field
 *                   will be set according to outcome, e.g., ERR_NO_ERROR,
 *                   ERR_UNKNOWN_TOPIC_OR_PART, etc.
 * @param leaders rd_list_t of allocated (struct rd_kafka_partition_leader *)
 * @param query_topics (optional) rd_list of strdupped (char *)
 * @param query_unknown Add unknown topics to \p query_topics.
 *                      NOTE(review): this parameter is not referenced in the
 *                      body below — confirm whether it is intentionally
 *                      unused or should gate the query_topics add.
 * @param eonce (optional) For triggering asynchronously on cache change
 *              in case not all leaders are known now.
 *
 * @remark This is based on the current topic_t and partition state
 *         which may lag behind the last metadata update due to internal
 *         threading and also the fact that no topic_t may have been created.
 *
 * @returns true if all partitions have leaders, else false.
 *
 * @sa rd_kafka_topic_partition_list_get_leaders_by_metadata
 *
 * @locks rd_kafka_*lock() MUST NOT be held
 */
static rd_bool_t rd_kafka_topic_partition_list_get_leaders(
    rd_kafka_t *rk,
    rd_kafka_topic_partition_list_t *rktparlist,
    rd_list_t *leaders,
    rd_list_t *query_topics,
    rd_bool_t query_unknown,
    rd_kafka_enq_once_t *eonce) {
        rd_bool_t complete;
        int cnt = 0;
        int i;

        /* Write lock needed when an eonce may be registered as a cache
         * observer below; read lock suffices otherwise. */
        if (eonce)
                rd_kafka_wrlock(rk);
        else
                rd_kafka_rdlock(rk);

        for (i = 0; i < rktparlist->cnt; i++) {
                rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
                rd_kafka_topic_partition_t *rktpar2;
                rd_kafka_broker_t *rkb = NULL;
                struct rd_kafka_partition_leader leader_skel;
                struct rd_kafka_partition_leader *leader;
                const rd_kafka_metadata_topic_t *mtopic;
                const rd_kafka_metadata_partition_t *mpart;
                const rd_kafka_metadata_partition_internal_t *mdpi;
                rd_bool_t topic_wait_cache;

                rd_kafka_metadata_cache_topic_partition_get(
                    rk, &mtopic, &mpart, &mdpi, rktpar->topic,
                    rktpar->partition, 0 /*negative entries too*/);

                /* Wait for the cache if the topic is unknown or its
                 * cached error is only temporary. */
                topic_wait_cache =
                    !mtopic ||
                    RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(mtopic->err);

                if (!topic_wait_cache && mtopic &&
                    mtopic->err != RD_KAFKA_RESP_ERR_NO_ERROR &&
                    mtopic->err != RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) {
                        /* Topic permanently errored */
                        rktpar->err = mtopic->err;
                        continue;
                }

                if (mtopic && !mpart && mtopic->partition_cnt > 0) {
                        /* Topic exists but partition doesnt.
                         * This is a permanent error. */
                        rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
                        continue;
                }

                if (mpart &&
                    (mpart->leader == -1 ||
                     !(rkb = rd_kafka_broker_find_by_nodeid0(
                           rk, mpart->leader, -1 /*any state*/, rd_false)))) {
                        /* Partition has no (valid) leader.
                         * This is a permanent error.
                         * NOTE(review): mtopic is dereferenced here;
                         * presumably the cache never yields mpart without
                         * mtopic — confirm against
                         * rd_kafka_metadata_cache_topic_partition_get(). */
                        rktpar->err =
                            mtopic->err
                                ? mtopic->err
                                : RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE;
                        continue;
                }

                if (topic_wait_cache || !rkb) {
                        /* Topic unknown or no current leader for partition,
                         * add topic to query list. */
                        rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
                        if (query_topics &&
                            !rd_list_find(query_topics, rktpar->topic,
                                          (void *)strcmp))
                                rd_list_add(query_topics,
                                            rd_strdup(rktpar->topic));
                        continue;
                }

                /* Leader exists, add to leader list. */

                rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;

                /* Find (or create) the per-broker leader entry. */
                memset(&leader_skel, 0, sizeof(leader_skel));
                leader_skel.rkb = rkb;

                leader = rd_list_find(leaders, &leader_skel,
                                      rd_kafka_partition_leader_cmp);

                if (!leader) {
                        leader = rd_kafka_partition_leader_new(rkb);
                        rd_list_add(leaders, leader);
                }

                rktpar2 = rd_kafka_topic_partition_list_find(
                    leader->partitions, rktpar->topic, rktpar->partition);
                if (rktpar2) {
                        /* Already exists in partitions list, just update. */
                        rd_kafka_topic_partition_update(rktpar2, rktpar);
                } else {
                        /* Make a copy of rktpar and add to partitions list */
                        rktpar2 = rd_kafka_topic_partition_list_add_copy(
                            leader->partitions, rktpar);
                }
                rd_kafka_topic_partition_set_current_leader_epoch(
                    rktpar2, mdpi->leader_epoch);

                rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;

                rd_kafka_broker_destroy(rkb); /* lose refcount from
                                               * broker_find_by_nodeid0() */
                cnt++;
        }

        complete = cnt == rktparlist->cnt;

        if (!complete && eonce)
                /* Add eonce to cache observers */
                rd_kafka_metadata_cache_wait_state_change_async(rk, eonce);

        if (eonce)
                rd_kafka_wrunlock(rk);
        else
                rd_kafka_rdunlock(rk);

        return complete;
}
|
|
3784
|
+
|
|
3785
|
+
|
|
3786
|
+
/**
 * @brief Timer timeout callback for query_leaders_async rko's eonce object.
 *
 * Triggers the eonce with ERR__TIMED_OUT, aborting the leader query.
 */
static void
rd_kafka_partition_leader_query_eonce_timeout_cb(rd_kafka_timers_t *rkts,
                                                 void *arg) {
        rd_kafka_enq_once_t *eonce = arg;
        rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT,
                                  "timeout timer");
}
|
|
3796
|
+
|
|
3797
|
+
|
|
3798
|
+
/**
 * @brief Query timer callback for query_leaders_async rko's eonce object.
 *
 * Triggers the eonce with NO_ERROR, causing another query attempt.
 */
static void
rd_kafka_partition_leader_query_eonce_timer_cb(rd_kafka_timers_t *rkts,
                                               void *arg) {
        rd_kafka_enq_once_t *eonce = arg;
        rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR,
                                  "query timer");
}
|
|
3808
|
+
|
|
3809
|
+
|
|
3810
|
+
/**
 * @brief Query metadata cache for partition leaders, or trigger metadata
 *        refresh if leaders not known.
 *
 * Called repeatedly (via the rko's eonce) until all leaders are known,
 * the query list is exhausted, a timeout occurs, or the op is destroyed.
 *
 * @returns RD_KAFKA_OP_RES_KEEP while still waiting for leaders (the rko
 *          remains owned by this worker), or RD_KAFKA_OP_RES_HANDLED when
 *          the final reply has been enqueued (or dropped).
 *
 * @locks_required none
 * @locality any
 */
static rd_kafka_op_res_t
rd_kafka_topic_partition_list_query_leaders_async_worker(rd_kafka_op_t *rko) {
        rd_kafka_t *rk = rko->rko_rk;
        rd_list_t query_topics, *leaders = NULL;
        rd_kafka_op_t *reply;

        RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_LEADERS);

        if (rko->rko_err)
                goto reply; /* Timeout or ERR__DESTROY */

        /* Since we're iterating over get_leaders() until all partition leaders
         * are known we need to re-enable the eonce to be triggered again (which
         * is not necessary the first time we get here, but there
         * is no harm doing it then either). */
        rd_kafka_enq_once_reenable(rko->rko_u.leaders.eonce, rko,
                                   RD_KAFKA_REPLYQ(rk->rk_ops, 0));

        /* Look up the leaders in the metadata cache, if not all leaders
         * are known the eonce is registered for metadata cache changes
         * which will cause our function to be called
         * again on (any) metadata cache change.
         *
         * When we are called again we perform the cache lookup again and
         * hopefully get all leaders, otherwise defer a new async wait.
         * Repeat until success or timeout. */

        rd_list_init(&query_topics, 4 + rko->rko_u.leaders.partitions->cnt / 2,
                     rd_free);

        leaders = rd_list_new(1 + rko->rko_u.leaders.partitions->cnt / 2,
                              rd_kafka_partition_leader_destroy_free);

        if (rd_kafka_topic_partition_list_get_leaders(
                rk, rko->rko_u.leaders.partitions, leaders, &query_topics,
                /* Add unknown topics to query_topics only on the
                 * first query, after that we consider them permanently
                 * non-existent */
                rko->rko_u.leaders.query_cnt == 0, rko->rko_u.leaders.eonce)) {
                /* All leaders now known (or failed), reply to caller */
                rd_list_destroy(&query_topics);
                goto reply;
        }

        if (rd_list_empty(&query_topics)) {
                /* Not all leaders known but no topics left to query,
                 * reply to caller. */
                rd_list_destroy(&query_topics);
                goto reply;
        }

        /* Need to refresh topic metadata, but at most every interval. */
        if (!rd_kafka_timer_is_started(&rk->rk_timers,
                                       &rko->rko_u.leaders.query_tmr)) {

                rko->rko_u.leaders.query_cnt++;

                /* Add query interval timer. */
                rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce,
                                             "query timer");
                rd_kafka_timer_start_oneshot(
                    &rk->rk_timers, &rko->rko_u.leaders.query_tmr, rd_true,
                    3 * 1000 * 1000 /* 3s */,
                    rd_kafka_partition_leader_query_eonce_timer_cb,
                    rko->rko_u.leaders.eonce);

                /* Request metadata refresh */
                rd_kafka_metadata_refresh_topics(
                    rk, NULL, &query_topics, rd_true /*force*/,
                    rd_false /*!allow_auto_create*/, rd_false /*!cgrp_update*/,
                    -1, "query partition leaders");
        }

        /* Partial 'leaders' result is discarded here; a full list is
         * rebuilt on the next invocation. */
        rd_list_destroy(leaders);
        rd_list_destroy(&query_topics);

        /* Wait for next eonce trigger */
        return RD_KAFKA_OP_RES_KEEP; /* rko is still used */

reply:
        /* Decommission worker state and reply to caller */

        /* Stop both timers; only drop the corresponding eonce source if
         * the timer was actually still running (stop() returned true). */
        if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.query_tmr,
                                RD_DO_LOCK))
                rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce,
                                             "query timer");
        if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.timeout_tmr,
                                RD_DO_LOCK))
                rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce,
                                             "timeout timer");

        if (rko->rko_u.leaders.eonce) {
                rd_kafka_enq_once_disable(rko->rko_u.leaders.eonce);
                rko->rko_u.leaders.eonce = NULL;
        }

        /* No leaders found, set a request-level error */
        if (leaders && rd_list_cnt(leaders) == 0) {
                if (!rko->rko_err)
                        rko->rko_err = RD_KAFKA_RESP_ERR__NOENT;
                rd_list_destroy(leaders);
                leaders = NULL;
        }

        /* Create and enqueue reply rko */
        if (rko->rko_u.leaders.replyq.q) {
                reply = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_LEADERS,
                                           rko->rko_u.leaders.cb);
                rd_kafka_op_get_reply_version(reply, rko);
                reply->rko_err = rko->rko_err;
                reply->rko_u.leaders.partitions =
                    rko->rko_u.leaders.partitions; /* Transfer ownership for
                                                    * partition list that
                                                    * now contains
                                                    * per-partition errors*/
                rko->rko_u.leaders.partitions = NULL;
                reply->rko_u.leaders.leaders = leaders; /* Possibly NULL */
                reply->rko_u.leaders.opaque  = rko->rko_u.leaders.opaque;

                rd_kafka_replyq_enq(&rko->rko_u.leaders.replyq, reply, 0);
        }

        return RD_KAFKA_OP_RES_HANDLED;
}
|
|
3941
|
+
|
|
3942
|
+
|
|
3943
|
+
static rd_kafka_op_res_t
|
|
3944
|
+
rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb(
|
|
3945
|
+
rd_kafka_t *rk,
|
|
3946
|
+
rd_kafka_q_t *rkq,
|
|
3947
|
+
rd_kafka_op_t *rko) {
|
|
3948
|
+
return rd_kafka_topic_partition_list_query_leaders_async_worker(rko);
|
|
3949
|
+
}
|
|
3950
|
+
|
|
3951
|
+
/**
 * @brief Async variant of rd_kafka_topic_partition_list_query_leaders().
 *
 * The reply rko op will contain:
 * - .leaders which is a list of leaders and their partitions, this may be
 *   NULL for overall errors (such as no leaders are found), or a
 *   partial or complete list of leaders.
 * - .partitions which is a copy of the input list of partitions with the
 *   .err field set to the outcome of the leader query, typically ERR_NO_ERROR
 *   or ERR_UNKNOWN_TOPIC_OR_PART.
 *
 * @locks_acquired rd_kafka_*lock()
 *
 * @remark rd_kafka_*lock() MUST NOT be held
 */
void rd_kafka_topic_partition_list_query_leaders_async(
    rd_kafka_t *rk,
    const rd_kafka_topic_partition_list_t *rktparlist,
    int timeout_ms,
    rd_kafka_replyq_t replyq,
    rd_kafka_op_cb_t *cb,
    void *opaque) {
        rd_kafka_op_t *rko;

        rd_assert(rktparlist && rktparlist->cnt > 0);
        rd_assert(replyq.q);

        rko = rd_kafka_op_new_cb(
            rk, RD_KAFKA_OP_LEADERS,
            rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb);
        rko->rko_u.leaders.replyq = replyq;
        /* Work on a private copy: per-partition .err fields are written
         * and ownership is later transferred to the reply op. */
        rko->rko_u.leaders.partitions =
            rd_kafka_topic_partition_list_copy(rktparlist);
        rko->rko_u.leaders.ts_timeout = rd_timeout_init(timeout_ms);
        rko->rko_u.leaders.cb         = cb;
        rko->rko_u.leaders.opaque     = opaque;

        /* Create an eonce to be triggered either by metadata cache update
         * (from refresh_topics()), query interval, or timeout. */
        rko->rko_u.leaders.eonce =
            rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0));

        rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce, "timeout timer");
        rd_kafka_timer_start_oneshot(
            &rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, rd_true,
            rd_timeout_remains_us(rko->rko_u.leaders.ts_timeout),
            rd_kafka_partition_leader_query_eonce_timeout_cb,
            rko->rko_u.leaders.eonce);

        /* Run the worker once inline: RES_HANDLED means it completed
         * synchronously (reply already enqueued) and the rko may be
         * destroyed here; RES_KEEP means the worker still owns it. */
        if (rd_kafka_topic_partition_list_query_leaders_async_worker(rko) ==
            RD_KAFKA_OP_RES_HANDLED)
                rd_kafka_op_destroy(rko); /* Reply queue already disabled */
}
|
|
4004
|
+
|
|
4005
|
+
|
|
4006
|
+
/**
 * @brief Get leaders for all partitions in \p rktparlist, querying metadata
 *        if needed.
 *
 * Blocking variant: loops over the metadata cache, firing metadata refreshes
 * at increasing intervals, until all leaders are known or \p timeout_ms
 * expires.
 *
 * @param leaders is a pre-initialized (empty) list which will be populated
 *        with the leader brokers and their partitions
 *        (struct rd_kafka_partition_leader *)
 *
 * @remark Will not trigger topic auto creation (unless configured).
 *
 * @returns an error code on error.
 *
 * @locks rd_kafka_*lock() MUST NOT be held
 */
rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders(
    rd_kafka_t *rk,
    rd_kafka_topic_partition_list_t *rktparlist,
    rd_list_t *leaders,
    int timeout_ms) {
        rd_ts_t ts_end   = rd_timeout_init(timeout_ms);
        rd_ts_t ts_query = 0; /* time of last metadata refresh request */
        rd_ts_t now;
        int query_cnt = 0; /* number of refreshes issued so far */
        int i         = 0; /* iteration counter, drives backoff interval */

        /* Get all the partition leaders, try multiple times:
         * if there are no leaders after the first run fire off a leader
         * query and wait for broker state update before trying again,
         * keep trying and re-querying at increasing intervals until
         * success or timeout. */
        do {
                rd_list_t query_topics;
                int query_intvl;

                rd_list_init(&query_topics, rktparlist->cnt, rd_free);

                rd_kafka_topic_partition_list_get_leaders(
                    rk, rktparlist, leaders, &query_topics,
                    /* Add unknown topics to query_topics only on the
                     * first query, after that we consider them
                     * permanently non-existent */
                    query_cnt == 0, NULL);

                if (rd_list_empty(&query_topics)) {
                        /* No remaining topics to query: leader-list complete.*/
                        rd_list_destroy(&query_topics);

                        /* No leader(s) for partitions means all partitions
                         * are unknown. */
                        if (rd_list_empty(leaders))
                                return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;

                        return RD_KAFKA_RESP_ERR_NO_ERROR;
                }

                now = rd_clock();

                /*
                 * Missing leader for some partitions
                 */
                query_intvl = (i + 1) * 100; /* add 100ms per iteration */
                if (query_intvl > 2 * 1000)
                        query_intvl = 2 * 1000; /* Cap to 2s */

                if (now >= ts_query + (query_intvl * 1000)) {
                        /* Query metadata for missing leaders,
                         * possibly creating the topic. */
                        rd_kafka_metadata_refresh_topics(
                            rk, NULL, &query_topics, rd_true /*force*/,
                            rd_false /*!allow_auto_create*/,
                            rd_false /*!cgrp_update*/, -1,
                            "query partition leaders");
                        ts_query = now;
                        query_cnt++;

                } else {
                        /* Wait for broker ids to be updated from
                         * metadata refresh above. */
                        int wait_ms =
                            rd_timeout_remains_limit(ts_end, query_intvl);
                        rd_kafka_metadata_cache_wait_change(rk, wait_ms);
                }

                rd_list_destroy(&query_topics);

                i++;
        } while (ts_end == RD_POLL_INFINITE ||
                 now < ts_end); /* now is deliberately outdated here
                                 * since wait_change() will block.
                                 * This gives us one more chance to spin thru*/

        /* Timed out: distinguish "no brokers at all" from a plain timeout. */
        if (rd_atomic32_get(&rk->rk_broker_up_cnt) == 0)
                return RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN;

        return RD_KAFKA_RESP_ERR__TIMED_OUT;
}
|
|
4102
|
+
|
|
4103
|
+
|
|
4104
|
+
/**
|
|
4105
|
+
* @brief Populate \p rkts with the rd_kafka_topic_t objects for the
|
|
4106
|
+
* partitions in. Duplicates are suppressed.
|
|
4107
|
+
*
|
|
4108
|
+
* @returns the number of topics added.
|
|
4109
|
+
*/
|
|
4110
|
+
int rd_kafka_topic_partition_list_get_topics(
|
|
4111
|
+
rd_kafka_t *rk,
|
|
4112
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
4113
|
+
rd_list_t *rkts) {
|
|
4114
|
+
int cnt = 0;
|
|
4115
|
+
|
|
4116
|
+
int i;
|
|
4117
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4118
|
+
rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
|
|
4119
|
+
rd_kafka_toppar_t *rktp;
|
|
4120
|
+
|
|
4121
|
+
rktp =
|
|
4122
|
+
rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
|
|
4123
|
+
if (!rktp) {
|
|
4124
|
+
rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
|
|
4125
|
+
continue;
|
|
4126
|
+
}
|
|
4127
|
+
|
|
4128
|
+
if (!rd_list_find(rkts, rktp->rktp_rkt,
|
|
4129
|
+
rd_kafka_topic_cmp_rkt)) {
|
|
4130
|
+
rd_list_add(rkts, rd_kafka_topic_keep(rktp->rktp_rkt));
|
|
4131
|
+
cnt++;
|
|
4132
|
+
}
|
|
4133
|
+
|
|
4134
|
+
rd_kafka_toppar_destroy(rktp);
|
|
4135
|
+
}
|
|
4136
|
+
|
|
4137
|
+
return cnt;
|
|
4138
|
+
}
|
|
4139
|
+
|
|
4140
|
+
|
|
4141
|
+
/**
|
|
4142
|
+
* @brief Populate \p topics with the strdupped topic names in \p rktparlist.
|
|
4143
|
+
* Duplicates are suppressed.
|
|
4144
|
+
*
|
|
4145
|
+
* @param include_regex: include regex topics
|
|
4146
|
+
*
|
|
4147
|
+
* @returns the number of topics added.
|
|
4148
|
+
*/
|
|
4149
|
+
int rd_kafka_topic_partition_list_get_topic_names(
|
|
4150
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
4151
|
+
rd_list_t *topics,
|
|
4152
|
+
int include_regex) {
|
|
4153
|
+
int cnt = 0;
|
|
4154
|
+
int i;
|
|
4155
|
+
|
|
4156
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4157
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4158
|
+
&rktparlist->elems[i];
|
|
4159
|
+
|
|
4160
|
+
if (!include_regex && *rktpar->topic == '^')
|
|
4161
|
+
continue;
|
|
4162
|
+
|
|
4163
|
+
if (!rd_list_find(topics, rktpar->topic, (void *)strcmp)) {
|
|
4164
|
+
rd_list_add(topics, rd_strdup(rktpar->topic));
|
|
4165
|
+
cnt++;
|
|
4166
|
+
}
|
|
4167
|
+
}
|
|
4168
|
+
|
|
4169
|
+
return cnt;
|
|
4170
|
+
}
|
|
4171
|
+
|
|
4172
|
+
|
|
4173
|
+
/**
|
|
4174
|
+
* @brief Create a copy of \p rktparlist only containing the partitions
|
|
4175
|
+
* matched by \p match function.
|
|
4176
|
+
*
|
|
4177
|
+
* \p match shall return 1 for match, else 0.
|
|
4178
|
+
*
|
|
4179
|
+
* @returns a new list
|
|
4180
|
+
*/
|
|
4181
|
+
rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match(
|
|
4182
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
4183
|
+
int (*match)(const void *elem, const void *opaque),
|
|
4184
|
+
void *opaque) {
|
|
4185
|
+
rd_kafka_topic_partition_list_t *newlist;
|
|
4186
|
+
int i;
|
|
4187
|
+
|
|
4188
|
+
newlist = rd_kafka_topic_partition_list_new(0);
|
|
4189
|
+
|
|
4190
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4191
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4192
|
+
&rktparlist->elems[i];
|
|
4193
|
+
|
|
4194
|
+
if (!match(rktpar, opaque))
|
|
4195
|
+
continue;
|
|
4196
|
+
|
|
4197
|
+
rd_kafka_topic_partition_list_add_copy(newlist, rktpar);
|
|
4198
|
+
}
|
|
4199
|
+
|
|
4200
|
+
return newlist;
|
|
4201
|
+
}
|
|
4202
|
+
|
|
4203
|
+
void rd_kafka_topic_partition_list_log(
|
|
4204
|
+
rd_kafka_t *rk,
|
|
4205
|
+
const char *fac,
|
|
4206
|
+
int dbg,
|
|
4207
|
+
const rd_kafka_topic_partition_list_t *rktparlist) {
|
|
4208
|
+
int i;
|
|
4209
|
+
|
|
4210
|
+
rd_kafka_dbg(rk, NONE | dbg, fac,
|
|
4211
|
+
"List with %d partition(s):", rktparlist->cnt);
|
|
4212
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4213
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4214
|
+
&rktparlist->elems[i];
|
|
4215
|
+
rd_kafka_dbg(rk, NONE | dbg, fac,
|
|
4216
|
+
" %s [%" PRId32 "] offset %s%s%s", rktpar->topic,
|
|
4217
|
+
rktpar->partition,
|
|
4218
|
+
rd_kafka_offset2str(rktpar->offset),
|
|
4219
|
+
rktpar->err ? ": error: " : "",
|
|
4220
|
+
rktpar->err ? rd_kafka_err2str(rktpar->err) : "");
|
|
4221
|
+
}
|
|
4222
|
+
}
|
|
4223
|
+
|
|
4224
|
+
/**
|
|
4225
|
+
* @returns a comma-separated list of partitions.
|
|
4226
|
+
*/
|
|
4227
|
+
const char *rd_kafka_topic_partition_list_str(
|
|
4228
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
4229
|
+
char *dest,
|
|
4230
|
+
size_t dest_size,
|
|
4231
|
+
int fmt_flags) {
|
|
4232
|
+
int i;
|
|
4233
|
+
size_t of = 0;
|
|
4234
|
+
|
|
4235
|
+
if (!rktparlist->cnt)
|
|
4236
|
+
dest[0] = '\0';
|
|
4237
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4238
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4239
|
+
&rktparlist->elems[i];
|
|
4240
|
+
char errstr[128];
|
|
4241
|
+
char offsetstr[32];
|
|
4242
|
+
const char *topic_id_str = NULL;
|
|
4243
|
+
const rd_kafka_Uuid_t topic_id =
|
|
4244
|
+
rd_kafka_topic_partition_get_topic_id(rktpar);
|
|
4245
|
+
int r;
|
|
4246
|
+
|
|
4247
|
+
if (!rktpar->err && (fmt_flags & RD_KAFKA_FMT_F_ONLY_ERR))
|
|
4248
|
+
continue;
|
|
4249
|
+
|
|
4250
|
+
if (rktpar->err && !(fmt_flags & RD_KAFKA_FMT_F_NO_ERR))
|
|
4251
|
+
rd_snprintf(errstr, sizeof(errstr), "(%s)",
|
|
4252
|
+
rd_kafka_err2str(rktpar->err));
|
|
4253
|
+
else
|
|
4254
|
+
errstr[0] = '\0';
|
|
4255
|
+
|
|
4256
|
+
if (rktpar->offset != RD_KAFKA_OFFSET_INVALID)
|
|
4257
|
+
rd_snprintf(offsetstr, sizeof(offsetstr), "@%" PRId64,
|
|
4258
|
+
rktpar->offset);
|
|
4259
|
+
else
|
|
4260
|
+
offsetstr[0] = '\0';
|
|
4261
|
+
|
|
4262
|
+
|
|
4263
|
+
if (!RD_KAFKA_UUID_IS_ZERO(topic_id))
|
|
4264
|
+
topic_id_str = rd_kafka_Uuid_base64str(&topic_id);
|
|
4265
|
+
|
|
4266
|
+
r = rd_snprintf(&dest[of], dest_size - of,
|
|
4267
|
+
"%s"
|
|
4268
|
+
"%s(%s)[%" PRId32
|
|
4269
|
+
"]"
|
|
4270
|
+
"%s"
|
|
4271
|
+
"%s",
|
|
4272
|
+
of == 0 ? "" : ", ", rktpar->topic,
|
|
4273
|
+
topic_id_str, rktpar->partition, offsetstr,
|
|
4274
|
+
errstr);
|
|
4275
|
+
|
|
4276
|
+
if ((size_t)r >= dest_size - of) {
|
|
4277
|
+
rd_snprintf(&dest[dest_size - 4], 4, "...");
|
|
4278
|
+
break;
|
|
4279
|
+
}
|
|
4280
|
+
|
|
4281
|
+
of += r;
|
|
4282
|
+
}
|
|
4283
|
+
|
|
4284
|
+
return dest;
|
|
4285
|
+
}
|
|
4286
|
+
|
|
4287
|
+
|
|
4288
|
+
|
|
4289
|
+
/**
|
|
4290
|
+
* @brief Update \p dst with info from \p src.
|
|
4291
|
+
*
|
|
4292
|
+
* Fields updated:
|
|
4293
|
+
* - metadata
|
|
4294
|
+
* - metadata_size
|
|
4295
|
+
* - offset
|
|
4296
|
+
* - offset leader epoch
|
|
4297
|
+
* - err
|
|
4298
|
+
*
|
|
4299
|
+
* Will only update partitions that are in both dst and src, other partitions
|
|
4300
|
+
* will remain unchanged.
|
|
4301
|
+
*/
|
|
4302
|
+
void rd_kafka_topic_partition_list_update(
|
|
4303
|
+
rd_kafka_topic_partition_list_t *dst,
|
|
4304
|
+
const rd_kafka_topic_partition_list_t *src) {
|
|
4305
|
+
int i;
|
|
4306
|
+
|
|
4307
|
+
for (i = 0; i < dst->cnt; i++) {
|
|
4308
|
+
rd_kafka_topic_partition_t *d = &dst->elems[i];
|
|
4309
|
+
rd_kafka_topic_partition_t *s;
|
|
4310
|
+
rd_kafka_topic_partition_private_t *s_priv, *d_priv;
|
|
4311
|
+
|
|
4312
|
+
if (!(s = rd_kafka_topic_partition_list_find(
|
|
4313
|
+
(rd_kafka_topic_partition_list_t *)src, d->topic,
|
|
4314
|
+
d->partition)))
|
|
4315
|
+
continue;
|
|
4316
|
+
|
|
4317
|
+
d->offset = s->offset;
|
|
4318
|
+
d->err = s->err;
|
|
4319
|
+
if (d->metadata) {
|
|
4320
|
+
rd_free(d->metadata);
|
|
4321
|
+
d->metadata = NULL;
|
|
4322
|
+
d->metadata_size = 0;
|
|
4323
|
+
}
|
|
4324
|
+
if (s->metadata_size > 0) {
|
|
4325
|
+
d->metadata = rd_malloc(s->metadata_size);
|
|
4326
|
+
d->metadata_size = s->metadata_size;
|
|
4327
|
+
memcpy((void *)d->metadata, s->metadata,
|
|
4328
|
+
s->metadata_size);
|
|
4329
|
+
}
|
|
4330
|
+
|
|
4331
|
+
s_priv = rd_kafka_topic_partition_get_private(s);
|
|
4332
|
+
d_priv = rd_kafka_topic_partition_get_private(d);
|
|
4333
|
+
d_priv->leader_epoch = s_priv->leader_epoch;
|
|
4334
|
+
d_priv->current_leader_epoch = s_priv->current_leader_epoch;
|
|
4335
|
+
d_priv->topic_id = s_priv->topic_id;
|
|
4336
|
+
}
|
|
4337
|
+
}
|
|
4338
|
+
|
|
4339
|
+
|
|
4340
|
+
/**
|
|
4341
|
+
* @returns the sum of \p cb called for each element.
|
|
4342
|
+
*/
|
|
4343
|
+
size_t rd_kafka_topic_partition_list_sum(
|
|
4344
|
+
const rd_kafka_topic_partition_list_t *rktparlist,
|
|
4345
|
+
size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque),
|
|
4346
|
+
void *opaque) {
|
|
4347
|
+
int i;
|
|
4348
|
+
size_t sum = 0;
|
|
4349
|
+
|
|
4350
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4351
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4352
|
+
&rktparlist->elems[i];
|
|
4353
|
+
sum += cb(rktpar, opaque);
|
|
4354
|
+
}
|
|
4355
|
+
|
|
4356
|
+
return sum;
|
|
4357
|
+
}
|
|
4358
|
+
|
|
4359
|
+
|
|
4360
|
+
/**
|
|
4361
|
+
* @returns rd_true if there are duplicate topic/partitions in the list,
|
|
4362
|
+
* rd_false if not.
|
|
4363
|
+
*
|
|
4364
|
+
* @remarks sorts the elements of the list.
|
|
4365
|
+
*/
|
|
4366
|
+
rd_bool_t rd_kafka_topic_partition_list_has_duplicates(
|
|
4367
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
4368
|
+
rd_bool_t ignore_partition) {
|
|
4369
|
+
|
|
4370
|
+
int i;
|
|
4371
|
+
|
|
4372
|
+
if (rktparlist->cnt <= 1)
|
|
4373
|
+
return rd_false;
|
|
4374
|
+
|
|
4375
|
+
rd_kafka_topic_partition_list_sort_by_topic(rktparlist);
|
|
4376
|
+
|
|
4377
|
+
for (i = 1; i < rktparlist->cnt; i++) {
|
|
4378
|
+
const rd_kafka_topic_partition_t *p1 =
|
|
4379
|
+
&rktparlist->elems[i - 1];
|
|
4380
|
+
const rd_kafka_topic_partition_t *p2 = &rktparlist->elems[i];
|
|
4381
|
+
|
|
4382
|
+
if (((p1->partition == p2->partition) || ignore_partition) &&
|
|
4383
|
+
!strcmp(p1->topic, p2->topic)) {
|
|
4384
|
+
return rd_true;
|
|
4385
|
+
}
|
|
4386
|
+
}
|
|
4387
|
+
|
|
4388
|
+
return rd_false;
|
|
4389
|
+
}
|
|
4390
|
+
|
|
4391
|
+
|
|
4392
|
+
/**
|
|
4393
|
+
* @brief Set \c .err field \p err on all partitions in list.
|
|
4394
|
+
*/
|
|
4395
|
+
void rd_kafka_topic_partition_list_set_err(
|
|
4396
|
+
rd_kafka_topic_partition_list_t *rktparlist,
|
|
4397
|
+
rd_kafka_resp_err_t err) {
|
|
4398
|
+
int i;
|
|
4399
|
+
|
|
4400
|
+
for (i = 0; i < rktparlist->cnt; i++)
|
|
4401
|
+
rktparlist->elems[i].err = err;
|
|
4402
|
+
}
|
|
4403
|
+
|
|
4404
|
+
/**
|
|
4405
|
+
* @brief Get the first set error in the partition list.
|
|
4406
|
+
*/
|
|
4407
|
+
rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err(
|
|
4408
|
+
const rd_kafka_topic_partition_list_t *rktparlist) {
|
|
4409
|
+
int i;
|
|
4410
|
+
|
|
4411
|
+
for (i = 0; i < rktparlist->cnt; i++)
|
|
4412
|
+
if (rktparlist->elems[i].err)
|
|
4413
|
+
return rktparlist->elems[i].err;
|
|
4414
|
+
|
|
4415
|
+
return RD_KAFKA_RESP_ERR_NO_ERROR;
|
|
4416
|
+
}
|
|
4417
|
+
|
|
4418
|
+
|
|
4419
|
+
/**
|
|
4420
|
+
* @returns the number of wildcard/regex topics
|
|
4421
|
+
*/
|
|
4422
|
+
int rd_kafka_topic_partition_list_regex_cnt(
|
|
4423
|
+
const rd_kafka_topic_partition_list_t *rktparlist) {
|
|
4424
|
+
int i;
|
|
4425
|
+
int cnt = 0;
|
|
4426
|
+
|
|
4427
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4428
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4429
|
+
&rktparlist->elems[i];
|
|
4430
|
+
cnt += *rktpar->topic == '^';
|
|
4431
|
+
}
|
|
4432
|
+
return cnt;
|
|
4433
|
+
}
|
|
4434
|
+
|
|
4435
|
+
|
|
4436
|
+
/**
|
|
4437
|
+
* @brief Match function that returns true if topic is not a regex.
|
|
4438
|
+
*/
|
|
4439
|
+
static int rd_kafka_topic_partition_not_regex(const void *elem,
|
|
4440
|
+
const void *opaque) {
|
|
4441
|
+
const rd_kafka_topic_partition_t *rktpar = elem;
|
|
4442
|
+
return *rktpar->topic != '^';
|
|
4443
|
+
}
|
|
4444
|
+
|
|
4445
|
+
/**
|
|
4446
|
+
* @brief Return a new list with all regex topics removed.
|
|
4447
|
+
*
|
|
4448
|
+
* @remark The caller is responsible for freeing the returned list.
|
|
4449
|
+
*/
|
|
4450
|
+
rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_remove_regexes(
|
|
4451
|
+
const rd_kafka_topic_partition_list_t *rktparlist) {
|
|
4452
|
+
return rd_kafka_topic_partition_list_match(
|
|
4453
|
+
rktparlist, rd_kafka_topic_partition_not_regex, NULL);
|
|
4454
|
+
}
|
|
4455
|
+
|
|
4456
|
+
|
|
4457
|
+
/**
|
|
4458
|
+
* @brief Combine regexes present in the list into a single regex.
|
|
4459
|
+
*/
|
|
4460
|
+
rd_kafkap_str_t *rd_kafka_topic_partition_list_combine_regexes(
|
|
4461
|
+
const rd_kafka_topic_partition_list_t *rktparlist) {
|
|
4462
|
+
int i;
|
|
4463
|
+
int combined_regex_len = 1; /* 1 for null-terminator */
|
|
4464
|
+
int regex_cnt = 0;
|
|
4465
|
+
int j = 1;
|
|
4466
|
+
rd_bool_t is_first_regex = rd_true;
|
|
4467
|
+
char *combined_regex_str;
|
|
4468
|
+
rd_kafkap_str_t *combined_regex_kstr;
|
|
4469
|
+
|
|
4470
|
+
// Count the number of characters needed for the combined regex string
|
|
4471
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4472
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4473
|
+
&(rktparlist->elems[i]);
|
|
4474
|
+
if (*rktpar->topic == '^') {
|
|
4475
|
+
combined_regex_len += strlen(rktpar->topic);
|
|
4476
|
+
regex_cnt++;
|
|
4477
|
+
}
|
|
4478
|
+
}
|
|
4479
|
+
|
|
4480
|
+
if (regex_cnt == 0)
|
|
4481
|
+
return rd_kafkap_str_new("", 0);
|
|
4482
|
+
|
|
4483
|
+
combined_regex_len +=
|
|
4484
|
+
3 * (regex_cnt - 1); /* 1 for each ')|(' separator */
|
|
4485
|
+
combined_regex_len += 2; /* 2 for enclosing brackets */
|
|
4486
|
+
|
|
4487
|
+
// memory allocation for the combined regex string
|
|
4488
|
+
combined_regex_str = rd_malloc(combined_regex_len);
|
|
4489
|
+
|
|
4490
|
+
// Construct the combined regex string
|
|
4491
|
+
combined_regex_str[0] = '(';
|
|
4492
|
+
for (i = 0; i < rktparlist->cnt; i++) {
|
|
4493
|
+
const rd_kafka_topic_partition_t *rktpar =
|
|
4494
|
+
&(rktparlist->elems[i]);
|
|
4495
|
+
char *topic = rktpar->topic;
|
|
4496
|
+
if (*topic == '^') {
|
|
4497
|
+
if (!is_first_regex) {
|
|
4498
|
+
combined_regex_str[j++] = ')';
|
|
4499
|
+
combined_regex_str[j++] = '|';
|
|
4500
|
+
combined_regex_str[j++] = '(';
|
|
4501
|
+
}
|
|
4502
|
+
while (*topic) {
|
|
4503
|
+
combined_regex_str[j++] = *topic;
|
|
4504
|
+
topic++;
|
|
4505
|
+
}
|
|
4506
|
+
is_first_regex = rd_false;
|
|
4507
|
+
}
|
|
4508
|
+
}
|
|
4509
|
+
combined_regex_str[j++] = ')';
|
|
4510
|
+
combined_regex_str[j] = '\0';
|
|
4511
|
+
|
|
4512
|
+
combined_regex_kstr =
|
|
4513
|
+
rd_kafkap_str_new(combined_regex_str, combined_regex_len - 1);
|
|
4514
|
+
rd_free(combined_regex_str);
|
|
4515
|
+
return combined_regex_kstr;
|
|
4516
|
+
}
|
|
4517
|
+
|
|
4518
|
+
|
|
4519
|
+
/**
 * @brief Reset base sequence for this toppar.
 *
 * See rd_kafka_toppar_pid_change() below.
 *
 * Resets the idempotence ack/err sequence trackers to 0 and installs
 * \p new_base_msgid as the new epoch base.
 *
 * @warning Toppar must be completely drained.
 *
 * @locality toppar handler thread
 * @locks toppar_lock MUST be held.
 */
static void rd_kafka_toppar_reset_base_msgid(rd_kafka_toppar_t *rktp,
                                             uint64_t new_base_msgid) {
        rd_kafka_dbg(
            rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "RESETSEQ",
            "%.*s [%" PRId32
            "] "
            "resetting epoch base seq from %" PRIu64 " to %" PRIu64,
            RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
            rktp->rktp_eos.epoch_base_msgid, new_base_msgid);

        rktp->rktp_eos.next_ack_seq     = 0;
        rktp->rktp_eos.next_err_seq     = 0;
        rktp->rktp_eos.epoch_base_msgid = new_base_msgid;
}
|
|
4543
|
+
|
|
4544
|
+
|
|
4545
|
+
/**
 * @brief Update/change the Producer ID for this toppar.
 *
 * Must only be called when pid is different from the current toppar pid.
 *
 * The epoch base sequence will be set to \p base_msgid, which must be the
 * first message in the partition
 * queue. However, if there are outstanding messages in-flight to the broker
 * we will need to wait for these ProduceRequests to finish (most likely
 * with failure) and have their messages re-enqueued to maintain original order.
 * In this case the pid will not be updated and this function should be
 * called again when there are no outstanding messages.
 *
 * @remark This function must only be called when rktp_xmitq is non-empty.
 *
 * @returns 1 if a new pid was set, else 0.
 *
 * @locality toppar handler thread
 * @locks none
 */
int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp,
                               rd_kafka_pid_t pid,
                               uint64_t base_msgid) {
        int inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight);

        /* Defer the change while requests from the previous epoch are
         * still in flight; caller will retry once they complete. */
        if (unlikely(inflight > 0)) {
                rd_kafka_dbg(
                    rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID",
                    "%.*s [%" PRId32
                    "] will not change %s -> %s yet: "
                    "%d message(s) still in-flight from current "
                    "epoch",
                    RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                    rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid),
                    rd_kafka_pid2str(pid), inflight);
                return 0;
        }

        /* MsgIds are 1-based, so 0 means the xmitq was empty, which
         * violates this function's contract. */
        rd_assert(base_msgid != 0 &&
                  *"BUG: pid_change() must only be called with "
                   "non-empty xmitq");

        rd_kafka_toppar_lock(rktp);
        rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID",
                     "%.*s [%" PRId32
                     "] changed %s -> %s "
                     "with base MsgId %" PRIu64,
                     RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                     rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid),
                     rd_kafka_pid2str(pid), base_msgid);

        rktp->rktp_eos.pid = pid;
        rd_kafka_toppar_reset_base_msgid(rktp, base_msgid);

        rd_kafka_toppar_unlock(rktp);

        return 1;
}
|
|
4603
|
+
|
|
4604
|
+
|
|
4605
|
+
/**
 * @brief Purge messages in partition queues.
 *        Delivery reports will be enqueued for all purged messages, the error
 *        code is set to RD_KAFKA_RESP_ERR__PURGE_QUEUE.
 *
 * @param include_xmit_msgq If executing from the rktp's current broker handler
 *                          thread, also include the xmit message queue.
 *
 * @warning Only to be used with the producer.
 *
 * @returns the number of messages purged
 *
 * @locality any thread.
 * @locks_acquired rd_kafka_toppar_lock()
 * @locks_required none
 */
int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp,
                                 int purge_flags,
                                 rd_bool_t include_xmit_msgq) {
        rd_kafka_t *rk       = rktp->rktp_rkt->rkt_rk;
        rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
        int cnt;

        rd_assert(rk->rk_type == RD_KAFKA_PRODUCER);

        rd_kafka_dbg(rk, TOPIC, "PURGE",
                     "%s [%" PRId32
                     "]: purging queues "
                     "(purge_flags 0x%x, %s xmit_msgq)",
                     rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
                     purge_flags, include_xmit_msgq ? "include" : "exclude");

        if (!(purge_flags & RD_KAFKA_PURGE_F_QUEUE))
                return 0;

        if (include_xmit_msgq) {
                /* xmit_msgq is owned by the toppar handler thread
                 * (broker thread) and requires no locking. */
                rd_assert(rktp->rktp_broker);
                rd_assert(thrd_is_current(rktp->rktp_broker->rkb_thread));
                rd_kafka_msgq_concat(&rkmq, &rktp->rktp_xmit_msgq);
        }

        rd_kafka_toppar_lock(rktp);
        rd_kafka_msgq_concat(&rkmq, &rktp->rktp_msgq);
        cnt = rd_kafka_msgq_len(&rkmq);

        if (cnt > 0 && purge_flags & RD_KAFKA_PURGE_F_ABORT_TXN) {
                /* All messages in-queue are purged
                 * on abort_transaction(). Since these messages
                 * will not be produced (retried) we need to adjust the
                 * idempotence epoch's base msgid to skip the messages. */
                rktp->rktp_eos.epoch_base_msgid += cnt;
                rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_EOS, "ADVBASE",
                             "%.*s [%" PRId32
                             "] "
                             "advancing epoch base msgid to %" PRIu64
                             " due to %d message(s) in aborted transaction",
                             RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
                             rktp->rktp_partition,
                             rktp->rktp_eos.epoch_base_msgid, cnt);
        }
        rd_kafka_toppar_unlock(rktp);

        /* Enqueue delivery reports for all purged messages (done outside
         * the toppar lock). */
        rd_kafka_dr_msgq(rktp->rktp_rkt, &rkmq, RD_KAFKA_RESP_ERR__PURGE_QUEUE);

        return cnt;
}
|
|
4673
|
+
|
|
4674
|
+
|
|
4675
|
+
/**
 * @brief Purge queues for the unassigned (UA) toppars of all known topics.
 *
 * A delivery report with error \c RD_KAFKA_RESP_ERR__PURGE_QUEUE is
 * enqueued for every purged message.
 *
 * @locality application thread
 * @locks none
 */
void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk) {
        rd_kafka_topic_t *rkt;
        int msg_cnt = 0, part_cnt = 0;

        rd_kafka_rdlock(rk);
        TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
                rd_kafka_toppar_t *rktp;
                int r;

                /* Acquire a reference to the topic's UA toppar (if any)
                 * under the topic lock, then drop the topic lock before
                 * operating on the toppar. */
                rd_kafka_topic_rdlock(rkt);
                rktp = rkt->rkt_ua;
                if (rktp)
                        rd_kafka_toppar_keep(rktp);
                rd_kafka_topic_rdunlock(rkt);

                if (unlikely(!rktp))
                        continue;


                rd_kafka_toppar_lock(rktp);

                /* Snapshot the queue length before rd_kafka_dr_msgq()
                 * moves the messages out for delivery reporting. */
                r = rd_kafka_msgq_len(&rktp->rktp_msgq);
                rd_kafka_dr_msgq(rkt, &rktp->rktp_msgq,
                                 RD_KAFKA_RESP_ERR__PURGE_QUEUE);
                rd_kafka_toppar_unlock(rktp);
                /* Release the reference from rd_kafka_toppar_keep() above. */
                rd_kafka_toppar_destroy(rktp);

                if (r > 0) {
                        msg_cnt += r;
                        part_cnt++;
                }
        }
        rd_kafka_rdunlock(rk);

        rd_kafka_dbg(rk, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ",
                     "Purged %i message(s) from %d UA-partition(s)", msg_cnt,
                     part_cnt);
}
|
|
4719
|
+
|
|
4720
|
+
|
|
4721
|
+
/**
 * @brief Destroy a partition leader object passed as an untyped pointer.
 *
 * Generic destructor wrapper suitable where a \c void(*)(void*) free
 * callback is required.
 */
void rd_kafka_partition_leader_destroy_free(void *ptr) {
        rd_kafka_partition_leader_destroy(
            (struct rd_kafka_partition_leader *)ptr);
}
|
|
4725
|
+
|
|
4726
|
+
|
|
4727
|
+
const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos) {
|
|
4728
|
+
static RD_TLS char ret[2][64];
|
|
4729
|
+
static int idx;
|
|
4730
|
+
|
|
4731
|
+
idx = (idx + 1) % 2;
|
|
4732
|
+
|
|
4733
|
+
rd_snprintf(
|
|
4734
|
+
ret[idx], sizeof(ret[idx]), "offset %s (leader epoch %" PRId32 ")",
|
|
4735
|
+
rd_kafka_offset2str(fetchpos.offset), fetchpos.leader_epoch);
|
|
4736
|
+
|
|
4737
|
+
return ret[idx];
|
|
4738
|
+
}
|
|
4739
|
+
|
|
4740
|
+
/** Hash-map type keyed by topic partition element, mapping to an
 *  opaque value pointer; used below for set operations on
 *  topic partition lists. */
typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
                    void *) map_toppar_void_t;
|
|
4742
|
+
|
|
4743
|
+
/**
|
|
4744
|
+
* @brief Calculates \p a ∩ \p b using \p cmp and \p hash .
|
|
4745
|
+
* Ordered following \p a order. Elements are copied from \p a.
|
|
4746
|
+
*/
|
|
4747
|
+
static rd_kafka_topic_partition_list_t *
|
|
4748
|
+
rd_kafka_topic_partition_list_intersection0(
|
|
4749
|
+
rd_kafka_topic_partition_list_t *a,
|
|
4750
|
+
rd_kafka_topic_partition_list_t *b,
|
|
4751
|
+
int(cmp)(const void *_a, const void *_b),
|
|
4752
|
+
unsigned int(hash)(const void *_a)) {
|
|
4753
|
+
rd_kafka_topic_partition_t *rktpar;
|
|
4754
|
+
rd_kafka_topic_partition_list_t *ret =
|
|
4755
|
+
rd_kafka_topic_partition_list_new(a->cnt < b->cnt ? a->cnt
|
|
4756
|
+
: b->cnt);
|
|
4757
|
+
map_toppar_void_t b_map =
|
|
4758
|
+
RD_MAP_INITIALIZER(b->cnt, cmp, hash, NULL, NULL);
|
|
4759
|
+
RD_KAFKA_TPLIST_FOREACH(rktpar, b) {
|
|
4760
|
+
RD_MAP_SET(&b_map, rktpar, rktpar);
|
|
4761
|
+
}
|
|
4762
|
+
RD_KAFKA_TPLIST_FOREACH(rktpar, a) {
|
|
4763
|
+
if ((RD_MAP_GET(&b_map, rktpar) != NULL) == 1) {
|
|
4764
|
+
rd_kafka_topic_partition_list_add_copy(ret, rktpar);
|
|
4765
|
+
}
|
|
4766
|
+
}
|
|
4767
|
+
RD_MAP_DESTROY(&b_map);
|
|
4768
|
+
return ret;
|
|
4769
|
+
}
|
|
4770
|
+
|
|
4771
|
+
/**
|
|
4772
|
+
* @brief Calculates \p a - \p b using \p cmp and \p hash .
|
|
4773
|
+
* Ordered following \p a order. Elements are copied from \p a.
|
|
4774
|
+
*/
|
|
4775
|
+
static rd_kafka_topic_partition_list_t *
|
|
4776
|
+
rd_kafka_topic_partition_list_difference0(rd_kafka_topic_partition_list_t *a,
|
|
4777
|
+
rd_kafka_topic_partition_list_t *b,
|
|
4778
|
+
int(cmp)(const void *_a,
|
|
4779
|
+
const void *_b),
|
|
4780
|
+
unsigned int(hash)(const void *_a)) {
|
|
4781
|
+
rd_kafka_topic_partition_t *rktpar;
|
|
4782
|
+
rd_kafka_topic_partition_list_t *ret =
|
|
4783
|
+
rd_kafka_topic_partition_list_new(a->cnt);
|
|
4784
|
+
map_toppar_void_t b_map =
|
|
4785
|
+
RD_MAP_INITIALIZER(b->cnt, cmp, hash, NULL, NULL);
|
|
4786
|
+
RD_KAFKA_TPLIST_FOREACH(rktpar, b) {
|
|
4787
|
+
RD_MAP_SET(&b_map, rktpar, rktpar);
|
|
4788
|
+
}
|
|
4789
|
+
RD_KAFKA_TPLIST_FOREACH(rktpar, a) {
|
|
4790
|
+
if ((RD_MAP_GET(&b_map, rktpar) != NULL) == 0) {
|
|
4791
|
+
rd_kafka_topic_partition_list_add_copy(ret, rktpar);
|
|
4792
|
+
}
|
|
4793
|
+
}
|
|
4794
|
+
RD_MAP_DESTROY(&b_map);
|
|
4795
|
+
return ret;
|
|
4796
|
+
}
|
|
4797
|
+
|
|
4798
|
+
/**
|
|
4799
|
+
* @brief Calculates \p a ∪ \p b using \p cmp and \p hash .
|
|
4800
|
+
* Ordered following \p a order for elements in \p a
|
|
4801
|
+
* and \p b order for elements only in \p b.
|
|
4802
|
+
* Elements are copied the same way.
|
|
4803
|
+
*/
|
|
4804
|
+
static rd_kafka_topic_partition_list_t *
|
|
4805
|
+
rd_kafka_topic_partition_list_union0(rd_kafka_topic_partition_list_t *a,
|
|
4806
|
+
rd_kafka_topic_partition_list_t *b,
|
|
4807
|
+
int(cmp)(const void *_a, const void *_b),
|
|
4808
|
+
unsigned int(hash)(const void *_a)) {
|
|
4809
|
+
|
|
4810
|
+
rd_kafka_topic_partition_list_t *b_minus_a =
|
|
4811
|
+
rd_kafka_topic_partition_list_difference0(b, a, cmp, hash);
|
|
4812
|
+
rd_kafka_topic_partition_list_t *ret =
|
|
4813
|
+
rd_kafka_topic_partition_list_new(a->cnt + b_minus_a->cnt);
|
|
4814
|
+
|
|
4815
|
+
rd_kafka_topic_partition_list_add_list(ret, a);
|
|
4816
|
+
rd_kafka_topic_partition_list_add_list(ret, b_minus_a);
|
|
4817
|
+
|
|
4818
|
+
rd_kafka_topic_partition_list_destroy(b_minus_a);
|
|
4819
|
+
return ret;
|
|
4820
|
+
}
|
|
4821
|
+
|
|
4822
|
+
/**
|
|
4823
|
+
* @brief Calculates \p a ∩ \p b using topic name and partition id.
|
|
4824
|
+
* Ordered following \p a order. Elements are copied from \p a.
|
|
4825
|
+
*/
|
|
4826
|
+
rd_kafka_topic_partition_list_t *
|
|
4827
|
+
rd_kafka_topic_partition_list_intersection_by_name(
|
|
4828
|
+
rd_kafka_topic_partition_list_t *a,
|
|
4829
|
+
rd_kafka_topic_partition_list_t *b) {
|
|
4830
|
+
return rd_kafka_topic_partition_list_intersection0(
|
|
4831
|
+
a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash);
|
|
4832
|
+
}
|
|
4833
|
+
|
|
4834
|
+
/**
|
|
4835
|
+
* @brief Calculates \p a - \p b using topic name and partition id.
|
|
4836
|
+
* Ordered following \p a order. Elements are copied from \p a.
|
|
4837
|
+
*/
|
|
4838
|
+
rd_kafka_topic_partition_list_t *
|
|
4839
|
+
rd_kafka_topic_partition_list_difference_by_name(
|
|
4840
|
+
rd_kafka_topic_partition_list_t *a,
|
|
4841
|
+
rd_kafka_topic_partition_list_t *b) {
|
|
4842
|
+
return rd_kafka_topic_partition_list_difference0(
|
|
4843
|
+
a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash);
|
|
4844
|
+
}
|
|
4845
|
+
|
|
4846
|
+
/**
|
|
4847
|
+
* @brief Calculates \p a ∪ \p b using topic name and partition id.
|
|
4848
|
+
* Ordered following \p a order for elements in \p a
|
|
4849
|
+
* and \p b order for elements only in \p b.
|
|
4850
|
+
* Elements are copied the same way.
|
|
4851
|
+
*/
|
|
4852
|
+
rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_union_by_name(
|
|
4853
|
+
rd_kafka_topic_partition_list_t *a,
|
|
4854
|
+
rd_kafka_topic_partition_list_t *b) {
|
|
4855
|
+
return rd_kafka_topic_partition_list_union0(
|
|
4856
|
+
a, b, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash);
|
|
4857
|
+
}
|
|
4858
|
+
|
|
4859
|
+
/**
|
|
4860
|
+
* @brief Calculates \p a ∩ \p b using topic id and partition id.
|
|
4861
|
+
* Ordered following \p a order. Elements are copied from \p a.
|
|
4862
|
+
*/
|
|
4863
|
+
rd_kafka_topic_partition_list_t *
|
|
4864
|
+
rd_kafka_topic_partition_list_intersection_by_id(
|
|
4865
|
+
rd_kafka_topic_partition_list_t *a,
|
|
4866
|
+
rd_kafka_topic_partition_list_t *b) {
|
|
4867
|
+
return rd_kafka_topic_partition_list_intersection0(
|
|
4868
|
+
a, b, rd_kafka_topic_partition_by_id_cmp,
|
|
4869
|
+
rd_kafka_topic_partition_hash_by_id);
|
|
4870
|
+
}
|
|
4871
|
+
|
|
4872
|
+
/**
|
|
4873
|
+
* @brief Calculates \p a - \p b using topic id and partition id.
|
|
4874
|
+
* Ordered following \p a order. Elements are copied from \p a.
|
|
4875
|
+
*/
|
|
4876
|
+
rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_difference_by_id(
|
|
4877
|
+
rd_kafka_topic_partition_list_t *a,
|
|
4878
|
+
rd_kafka_topic_partition_list_t *b) {
|
|
4879
|
+
return rd_kafka_topic_partition_list_difference0(
|
|
4880
|
+
a, b, rd_kafka_topic_partition_by_id_cmp,
|
|
4881
|
+
rd_kafka_topic_partition_hash_by_id);
|
|
4882
|
+
}
|
|
4883
|
+
|
|
4884
|
+
/**
|
|
4885
|
+
* @brief Calculates \p a ∪ \p b using topic id and partition id.
|
|
4886
|
+
* Ordered following \p a order for elements in \p a
|
|
4887
|
+
* and \p b order for elements only in \p b.
|
|
4888
|
+
* Elements are copied the same way.
|
|
4889
|
+
*/
|
|
4890
|
+
rd_kafka_topic_partition_list_t *
|
|
4891
|
+
rd_kafka_topic_partition_list_union_by_id(rd_kafka_topic_partition_list_t *a,
|
|
4892
|
+
rd_kafka_topic_partition_list_t *b) {
|
|
4893
|
+
return rd_kafka_topic_partition_list_union0(
|
|
4894
|
+
a, b, rd_kafka_topic_partition_by_id_cmp,
|
|
4895
|
+
rd_kafka_topic_partition_hash_by_id);
|
|
4896
|
+
}
|