@point3/node-rdkafka 3.6.0-1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.txt +20 -0
- package/README.md +636 -0
- package/binding.gyp +154 -0
- package/deps/librdkafka/.clang-format +136 -0
- package/deps/librdkafka/.clang-format-cpp +103 -0
- package/deps/librdkafka/.dir-locals.el +10 -0
- package/deps/librdkafka/.formatignore +33 -0
- package/deps/librdkafka/.gdbmacros +19 -0
- package/deps/librdkafka/.github/CODEOWNERS +1 -0
- package/deps/librdkafka/.github/ISSUE_TEMPLATE +34 -0
- package/deps/librdkafka/.semaphore/run-all-tests.yml +77 -0
- package/deps/librdkafka/.semaphore/semaphore-integration.yml +250 -0
- package/deps/librdkafka/.semaphore/semaphore.yml +378 -0
- package/deps/librdkafka/.semaphore/verify-linux-packages.yml +41 -0
- package/deps/librdkafka/CHANGELOG.md +2208 -0
- package/deps/librdkafka/CMakeLists.txt +291 -0
- package/deps/librdkafka/CODE_OF_CONDUCT.md +46 -0
- package/deps/librdkafka/CONFIGURATION.md +209 -0
- package/deps/librdkafka/CONTRIBUTING.md +431 -0
- package/deps/librdkafka/Doxyfile +2375 -0
- package/deps/librdkafka/INTRODUCTION.md +2481 -0
- package/deps/librdkafka/LICENSE +26 -0
- package/deps/librdkafka/LICENSE.cjson +22 -0
- package/deps/librdkafka/LICENSE.crc32c +28 -0
- package/deps/librdkafka/LICENSE.fnv1a +18 -0
- package/deps/librdkafka/LICENSE.hdrhistogram +27 -0
- package/deps/librdkafka/LICENSE.lz4 +26 -0
- package/deps/librdkafka/LICENSE.murmur2 +25 -0
- package/deps/librdkafka/LICENSE.nanopb +22 -0
- package/deps/librdkafka/LICENSE.opentelemetry +203 -0
- package/deps/librdkafka/LICENSE.pycrc +23 -0
- package/deps/librdkafka/LICENSE.queue +31 -0
- package/deps/librdkafka/LICENSE.regexp +5 -0
- package/deps/librdkafka/LICENSE.snappy +36 -0
- package/deps/librdkafka/LICENSE.tinycthread +26 -0
- package/deps/librdkafka/LICENSE.wingetopt +49 -0
- package/deps/librdkafka/LICENSES.txt +625 -0
- package/deps/librdkafka/Makefile +125 -0
- package/deps/librdkafka/README.md +199 -0
- package/deps/librdkafka/README.win32 +26 -0
- package/deps/librdkafka/STATISTICS.md +624 -0
- package/deps/librdkafka/configure +214 -0
- package/deps/librdkafka/configure.self +331 -0
- package/deps/librdkafka/debian/changelog +111 -0
- package/deps/librdkafka/debian/compat +1 -0
- package/deps/librdkafka/debian/control +71 -0
- package/deps/librdkafka/debian/copyright +99 -0
- package/deps/librdkafka/debian/gbp.conf +9 -0
- package/deps/librdkafka/debian/librdkafka++1.install +1 -0
- package/deps/librdkafka/debian/librdkafka-dev.examples +2 -0
- package/deps/librdkafka/debian/librdkafka-dev.install +9 -0
- package/deps/librdkafka/debian/librdkafka1.docs +5 -0
- package/deps/librdkafka/debian/librdkafka1.install +1 -0
- package/deps/librdkafka/debian/librdkafka1.symbols +135 -0
- package/deps/librdkafka/debian/rules +19 -0
- package/deps/librdkafka/debian/source/format +1 -0
- package/deps/librdkafka/debian/watch +2 -0
- package/deps/librdkafka/dev-conf.sh +123 -0
- package/deps/librdkafka/examples/CMakeLists.txt +79 -0
- package/deps/librdkafka/examples/Makefile +167 -0
- package/deps/librdkafka/examples/README.md +42 -0
- package/deps/librdkafka/examples/alter_consumer_group_offsets.c +338 -0
- package/deps/librdkafka/examples/consumer.c +271 -0
- package/deps/librdkafka/examples/delete_records.c +233 -0
- package/deps/librdkafka/examples/describe_cluster.c +322 -0
- package/deps/librdkafka/examples/describe_consumer_groups.c +455 -0
- package/deps/librdkafka/examples/describe_topics.c +427 -0
- package/deps/librdkafka/examples/elect_leaders.c +317 -0
- package/deps/librdkafka/examples/globals.json +11 -0
- package/deps/librdkafka/examples/idempotent_producer.c +344 -0
- package/deps/librdkafka/examples/incremental_alter_configs.c +347 -0
- package/deps/librdkafka/examples/kafkatest_verifiable_client.cpp +945 -0
- package/deps/librdkafka/examples/list_consumer_group_offsets.c +359 -0
- package/deps/librdkafka/examples/list_consumer_groups.c +365 -0
- package/deps/librdkafka/examples/list_offsets.c +327 -0
- package/deps/librdkafka/examples/misc.c +287 -0
- package/deps/librdkafka/examples/openssl_engine_example.cpp +248 -0
- package/deps/librdkafka/examples/producer.c +251 -0
- package/deps/librdkafka/examples/producer.cpp +228 -0
- package/deps/librdkafka/examples/rdkafka_complex_consumer_example.c +617 -0
- package/deps/librdkafka/examples/rdkafka_complex_consumer_example.cpp +467 -0
- package/deps/librdkafka/examples/rdkafka_consume_batch.cpp +264 -0
- package/deps/librdkafka/examples/rdkafka_example.c +853 -0
- package/deps/librdkafka/examples/rdkafka_example.cpp +679 -0
- package/deps/librdkafka/examples/rdkafka_performance.c +1781 -0
- package/deps/librdkafka/examples/transactions-older-broker.c +668 -0
- package/deps/librdkafka/examples/transactions.c +665 -0
- package/deps/librdkafka/examples/user_scram.c +491 -0
- package/deps/librdkafka/examples/win_ssl_cert_store.cpp +396 -0
- package/deps/librdkafka/lds-gen.py +73 -0
- package/deps/librdkafka/mainpage.doxy +40 -0
- package/deps/librdkafka/mklove/Makefile.base +329 -0
- package/deps/librdkafka/mklove/modules/configure.atomics +144 -0
- package/deps/librdkafka/mklove/modules/configure.base +2484 -0
- package/deps/librdkafka/mklove/modules/configure.builtin +70 -0
- package/deps/librdkafka/mklove/modules/configure.cc +186 -0
- package/deps/librdkafka/mklove/modules/configure.cxx +8 -0
- package/deps/librdkafka/mklove/modules/configure.fileversion +65 -0
- package/deps/librdkafka/mklove/modules/configure.gitversion +29 -0
- package/deps/librdkafka/mklove/modules/configure.good_cflags +18 -0
- package/deps/librdkafka/mklove/modules/configure.host +132 -0
- package/deps/librdkafka/mklove/modules/configure.lib +49 -0
- package/deps/librdkafka/mklove/modules/configure.libcurl +99 -0
- package/deps/librdkafka/mklove/modules/configure.libsasl2 +36 -0
- package/deps/librdkafka/mklove/modules/configure.libssl +147 -0
- package/deps/librdkafka/mklove/modules/configure.libzstd +58 -0
- package/deps/librdkafka/mklove/modules/configure.parseversion +95 -0
- package/deps/librdkafka/mklove/modules/configure.pic +16 -0
- package/deps/librdkafka/mklove/modules/configure.socket +20 -0
- package/deps/librdkafka/mklove/modules/configure.zlib +61 -0
- package/deps/librdkafka/mklove/modules/patches/README.md +8 -0
- package/deps/librdkafka/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch +11 -0
- package/deps/librdkafka/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch +56 -0
- package/deps/librdkafka/packaging/RELEASE.md +319 -0
- package/deps/librdkafka/packaging/alpine/build-alpine.sh +38 -0
- package/deps/librdkafka/packaging/archlinux/PKGBUILD +30 -0
- package/deps/librdkafka/packaging/cmake/Config.cmake.in +37 -0
- package/deps/librdkafka/packaging/cmake/Modules/FindLZ4.cmake +38 -0
- package/deps/librdkafka/packaging/cmake/Modules/FindZSTD.cmake +27 -0
- package/deps/librdkafka/packaging/cmake/Modules/LICENSE.FindZstd +178 -0
- package/deps/librdkafka/packaging/cmake/README.md +38 -0
- package/deps/librdkafka/packaging/cmake/config.h.in +52 -0
- package/deps/librdkafka/packaging/cmake/parseversion.cmake +60 -0
- package/deps/librdkafka/packaging/cmake/rdkafka.pc.in +12 -0
- package/deps/librdkafka/packaging/cmake/try_compile/atomic_32_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/atomic_64_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/c11threads_test.c +14 -0
- package/deps/librdkafka/packaging/cmake/try_compile/crc32c_hw_test.c +27 -0
- package/deps/librdkafka/packaging/cmake/try_compile/dlopen_test.c +11 -0
- package/deps/librdkafka/packaging/cmake/try_compile/libsasl2_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_darwin_test.c +6 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_freebsd_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_gnu_test.c +5 -0
- package/deps/librdkafka/packaging/cmake/try_compile/rand_r_test.c +7 -0
- package/deps/librdkafka/packaging/cmake/try_compile/rdkafka_setup.cmake +122 -0
- package/deps/librdkafka/packaging/cmake/try_compile/regex_test.c +10 -0
- package/deps/librdkafka/packaging/cmake/try_compile/strndup_test.c +5 -0
- package/deps/librdkafka/packaging/cmake/try_compile/sync_32_test.c +8 -0
- package/deps/librdkafka/packaging/cmake/try_compile/sync_64_test.c +8 -0
- package/deps/librdkafka/packaging/cp/README.md +16 -0
- package/deps/librdkafka/packaging/cp/check_features.c +72 -0
- package/deps/librdkafka/packaging/cp/verify-deb.sh +33 -0
- package/deps/librdkafka/packaging/cp/verify-packages.sh +69 -0
- package/deps/librdkafka/packaging/cp/verify-rpm.sh +32 -0
- package/deps/librdkafka/packaging/debian/changelog +66 -0
- package/deps/librdkafka/packaging/debian/compat +1 -0
- package/deps/librdkafka/packaging/debian/control +49 -0
- package/deps/librdkafka/packaging/debian/copyright +84 -0
- package/deps/librdkafka/packaging/debian/docs +5 -0
- package/deps/librdkafka/packaging/debian/gbp.conf +9 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.dirs +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.examples +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.install +6 -0
- package/deps/librdkafka/packaging/debian/librdkafka-dev.substvars +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka.dsc +16 -0
- package/deps/librdkafka/packaging/debian/librdkafka1-dbg.substvars +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.dirs +1 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.install +2 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.postinst.debhelper +5 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.postrm.debhelper +5 -0
- package/deps/librdkafka/packaging/debian/librdkafka1.symbols +64 -0
- package/deps/librdkafka/packaging/debian/rules +19 -0
- package/deps/librdkafka/packaging/debian/source/format +1 -0
- package/deps/librdkafka/packaging/debian/watch +2 -0
- package/deps/librdkafka/packaging/get_version.py +21 -0
- package/deps/librdkafka/packaging/homebrew/README.md +15 -0
- package/deps/librdkafka/packaging/homebrew/brew-update-pr.sh +31 -0
- package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw-static.sh +52 -0
- package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw.sh +21 -0
- package/deps/librdkafka/packaging/mingw-w64/export-variables.sh +13 -0
- package/deps/librdkafka/packaging/mingw-w64/run-tests.sh +6 -0
- package/deps/librdkafka/packaging/mingw-w64/semaphoreci-build.sh +38 -0
- package/deps/librdkafka/packaging/nuget/README.md +84 -0
- package/deps/librdkafka/packaging/nuget/artifact.py +177 -0
- package/deps/librdkafka/packaging/nuget/cleanup-s3.py +143 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip +0 -0
- package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip +0 -0
- package/deps/librdkafka/packaging/nuget/nuget.sh +21 -0
- package/deps/librdkafka/packaging/nuget/nugetpackage.py +278 -0
- package/deps/librdkafka/packaging/nuget/packaging.py +448 -0
- package/deps/librdkafka/packaging/nuget/push-to-nuget.sh +21 -0
- package/deps/librdkafka/packaging/nuget/release.py +167 -0
- package/deps/librdkafka/packaging/nuget/requirements.txt +3 -0
- package/deps/librdkafka/packaging/nuget/staticpackage.py +178 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.nuspec +21 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.props +18 -0
- package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.targets +19 -0
- package/deps/librdkafka/packaging/nuget/zfile/__init__.py +0 -0
- package/deps/librdkafka/packaging/nuget/zfile/zfile.py +98 -0
- package/deps/librdkafka/packaging/rpm/Makefile +92 -0
- package/deps/librdkafka/packaging/rpm/README.md +23 -0
- package/deps/librdkafka/packaging/rpm/el7-x86_64.cfg +40 -0
- package/deps/librdkafka/packaging/rpm/librdkafka.spec +118 -0
- package/deps/librdkafka/packaging/rpm/mock-on-docker.sh +96 -0
- package/deps/librdkafka/packaging/rpm/tests/Makefile +25 -0
- package/deps/librdkafka/packaging/rpm/tests/README.md +8 -0
- package/deps/librdkafka/packaging/rpm/tests/run-test.sh +42 -0
- package/deps/librdkafka/packaging/rpm/tests/test-on-docker.sh +56 -0
- package/deps/librdkafka/packaging/rpm/tests/test.c +77 -0
- package/deps/librdkafka/packaging/rpm/tests/test.cpp +34 -0
- package/deps/librdkafka/packaging/tools/Dockerfile +31 -0
- package/deps/librdkafka/packaging/tools/build-configurations-checks.sh +12 -0
- package/deps/librdkafka/packaging/tools/build-deb-package.sh +64 -0
- package/deps/librdkafka/packaging/tools/build-debian.sh +65 -0
- package/deps/librdkafka/packaging/tools/build-manylinux.sh +68 -0
- package/deps/librdkafka/packaging/tools/build-release-artifacts.sh +139 -0
- package/deps/librdkafka/packaging/tools/distro-build.sh +38 -0
- package/deps/librdkafka/packaging/tools/gh-release-checksums.py +39 -0
- package/deps/librdkafka/packaging/tools/rdutcoverage.sh +25 -0
- package/deps/librdkafka/packaging/tools/requirements.txt +2 -0
- package/deps/librdkafka/packaging/tools/run-in-docker.sh +28 -0
- package/deps/librdkafka/packaging/tools/run-integration-tests.sh +31 -0
- package/deps/librdkafka/packaging/tools/run-style-check.sh +4 -0
- package/deps/librdkafka/packaging/tools/style-format.sh +149 -0
- package/deps/librdkafka/packaging/tools/update_rpcs_max_versions.py +100 -0
- package/deps/librdkafka/service.yml +172 -0
- package/deps/librdkafka/src/CMakeLists.txt +374 -0
- package/deps/librdkafka/src/Makefile +103 -0
- package/deps/librdkafka/src/README.lz4.md +30 -0
- package/deps/librdkafka/src/cJSON.c +2834 -0
- package/deps/librdkafka/src/cJSON.h +398 -0
- package/deps/librdkafka/src/crc32c.c +430 -0
- package/deps/librdkafka/src/crc32c.h +38 -0
- package/deps/librdkafka/src/generate_proto.sh +66 -0
- package/deps/librdkafka/src/librdkafka_cgrp_synch.png +0 -0
- package/deps/librdkafka/src/lz4.c +2727 -0
- package/deps/librdkafka/src/lz4.h +842 -0
- package/deps/librdkafka/src/lz4frame.c +2078 -0
- package/deps/librdkafka/src/lz4frame.h +692 -0
- package/deps/librdkafka/src/lz4frame_static.h +47 -0
- package/deps/librdkafka/src/lz4hc.c +1631 -0
- package/deps/librdkafka/src/lz4hc.h +413 -0
- package/deps/librdkafka/src/nanopb/pb.h +917 -0
- package/deps/librdkafka/src/nanopb/pb_common.c +388 -0
- package/deps/librdkafka/src/nanopb/pb_common.h +49 -0
- package/deps/librdkafka/src/nanopb/pb_decode.c +1727 -0
- package/deps/librdkafka/src/nanopb/pb_decode.h +193 -0
- package/deps/librdkafka/src/nanopb/pb_encode.c +1000 -0
- package/deps/librdkafka/src/nanopb/pb_encode.h +185 -0
- package/deps/librdkafka/src/opentelemetry/common.pb.c +32 -0
- package/deps/librdkafka/src/opentelemetry/common.pb.h +170 -0
- package/deps/librdkafka/src/opentelemetry/metrics.options +2 -0
- package/deps/librdkafka/src/opentelemetry/metrics.pb.c +67 -0
- package/deps/librdkafka/src/opentelemetry/metrics.pb.h +966 -0
- package/deps/librdkafka/src/opentelemetry/resource.pb.c +12 -0
- package/deps/librdkafka/src/opentelemetry/resource.pb.h +58 -0
- package/deps/librdkafka/src/queue.h +850 -0
- package/deps/librdkafka/src/rd.h +584 -0
- package/deps/librdkafka/src/rdaddr.c +255 -0
- package/deps/librdkafka/src/rdaddr.h +202 -0
- package/deps/librdkafka/src/rdatomic.h +230 -0
- package/deps/librdkafka/src/rdavg.h +260 -0
- package/deps/librdkafka/src/rdavl.c +210 -0
- package/deps/librdkafka/src/rdavl.h +250 -0
- package/deps/librdkafka/src/rdbase64.c +200 -0
- package/deps/librdkafka/src/rdbase64.h +43 -0
- package/deps/librdkafka/src/rdbuf.c +1884 -0
- package/deps/librdkafka/src/rdbuf.h +375 -0
- package/deps/librdkafka/src/rdcrc32.c +114 -0
- package/deps/librdkafka/src/rdcrc32.h +170 -0
- package/deps/librdkafka/src/rddl.c +179 -0
- package/deps/librdkafka/src/rddl.h +43 -0
- package/deps/librdkafka/src/rdendian.h +175 -0
- package/deps/librdkafka/src/rdfloat.h +67 -0
- package/deps/librdkafka/src/rdfnv1a.c +113 -0
- package/deps/librdkafka/src/rdfnv1a.h +35 -0
- package/deps/librdkafka/src/rdgz.c +120 -0
- package/deps/librdkafka/src/rdgz.h +46 -0
- package/deps/librdkafka/src/rdhdrhistogram.c +721 -0
- package/deps/librdkafka/src/rdhdrhistogram.h +87 -0
- package/deps/librdkafka/src/rdhttp.c +830 -0
- package/deps/librdkafka/src/rdhttp.h +101 -0
- package/deps/librdkafka/src/rdinterval.h +177 -0
- package/deps/librdkafka/src/rdkafka.c +5505 -0
- package/deps/librdkafka/src/rdkafka.h +10686 -0
- package/deps/librdkafka/src/rdkafka_admin.c +9794 -0
- package/deps/librdkafka/src/rdkafka_admin.h +661 -0
- package/deps/librdkafka/src/rdkafka_assignment.c +1010 -0
- package/deps/librdkafka/src/rdkafka_assignment.h +73 -0
- package/deps/librdkafka/src/rdkafka_assignor.c +1786 -0
- package/deps/librdkafka/src/rdkafka_assignor.h +402 -0
- package/deps/librdkafka/src/rdkafka_aux.c +409 -0
- package/deps/librdkafka/src/rdkafka_aux.h +174 -0
- package/deps/librdkafka/src/rdkafka_background.c +221 -0
- package/deps/librdkafka/src/rdkafka_broker.c +6337 -0
- package/deps/librdkafka/src/rdkafka_broker.h +744 -0
- package/deps/librdkafka/src/rdkafka_buf.c +543 -0
- package/deps/librdkafka/src/rdkafka_buf.h +1525 -0
- package/deps/librdkafka/src/rdkafka_cert.c +576 -0
- package/deps/librdkafka/src/rdkafka_cert.h +62 -0
- package/deps/librdkafka/src/rdkafka_cgrp.c +7587 -0
- package/deps/librdkafka/src/rdkafka_cgrp.h +477 -0
- package/deps/librdkafka/src/rdkafka_conf.c +4880 -0
- package/deps/librdkafka/src/rdkafka_conf.h +732 -0
- package/deps/librdkafka/src/rdkafka_confval.h +97 -0
- package/deps/librdkafka/src/rdkafka_coord.c +623 -0
- package/deps/librdkafka/src/rdkafka_coord.h +132 -0
- package/deps/librdkafka/src/rdkafka_error.c +228 -0
- package/deps/librdkafka/src/rdkafka_error.h +80 -0
- package/deps/librdkafka/src/rdkafka_event.c +502 -0
- package/deps/librdkafka/src/rdkafka_event.h +126 -0
- package/deps/librdkafka/src/rdkafka_feature.c +898 -0
- package/deps/librdkafka/src/rdkafka_feature.h +104 -0
- package/deps/librdkafka/src/rdkafka_fetcher.c +1422 -0
- package/deps/librdkafka/src/rdkafka_fetcher.h +44 -0
- package/deps/librdkafka/src/rdkafka_header.c +220 -0
- package/deps/librdkafka/src/rdkafka_header.h +76 -0
- package/deps/librdkafka/src/rdkafka_idempotence.c +807 -0
- package/deps/librdkafka/src/rdkafka_idempotence.h +144 -0
- package/deps/librdkafka/src/rdkafka_int.h +1260 -0
- package/deps/librdkafka/src/rdkafka_interceptor.c +819 -0
- package/deps/librdkafka/src/rdkafka_interceptor.h +104 -0
- package/deps/librdkafka/src/rdkafka_lz4.c +450 -0
- package/deps/librdkafka/src/rdkafka_lz4.h +49 -0
- package/deps/librdkafka/src/rdkafka_metadata.c +2209 -0
- package/deps/librdkafka/src/rdkafka_metadata.h +345 -0
- package/deps/librdkafka/src/rdkafka_metadata_cache.c +1183 -0
- package/deps/librdkafka/src/rdkafka_mock.c +3661 -0
- package/deps/librdkafka/src/rdkafka_mock.h +610 -0
- package/deps/librdkafka/src/rdkafka_mock_cgrp.c +1876 -0
- package/deps/librdkafka/src/rdkafka_mock_handlers.c +3113 -0
- package/deps/librdkafka/src/rdkafka_mock_int.h +710 -0
- package/deps/librdkafka/src/rdkafka_msg.c +2589 -0
- package/deps/librdkafka/src/rdkafka_msg.h +614 -0
- package/deps/librdkafka/src/rdkafka_msgbatch.h +62 -0
- package/deps/librdkafka/src/rdkafka_msgset.h +98 -0
- package/deps/librdkafka/src/rdkafka_msgset_reader.c +1806 -0
- package/deps/librdkafka/src/rdkafka_msgset_writer.c +1474 -0
- package/deps/librdkafka/src/rdkafka_offset.c +1565 -0
- package/deps/librdkafka/src/rdkafka_offset.h +150 -0
- package/deps/librdkafka/src/rdkafka_op.c +997 -0
- package/deps/librdkafka/src/rdkafka_op.h +858 -0
- package/deps/librdkafka/src/rdkafka_partition.c +4896 -0
- package/deps/librdkafka/src/rdkafka_partition.h +1182 -0
- package/deps/librdkafka/src/rdkafka_pattern.c +228 -0
- package/deps/librdkafka/src/rdkafka_pattern.h +70 -0
- package/deps/librdkafka/src/rdkafka_plugin.c +213 -0
- package/deps/librdkafka/src/rdkafka_plugin.h +41 -0
- package/deps/librdkafka/src/rdkafka_proto.h +736 -0
- package/deps/librdkafka/src/rdkafka_protocol.h +128 -0
- package/deps/librdkafka/src/rdkafka_queue.c +1230 -0
- package/deps/librdkafka/src/rdkafka_queue.h +1220 -0
- package/deps/librdkafka/src/rdkafka_range_assignor.c +1748 -0
- package/deps/librdkafka/src/rdkafka_request.c +7089 -0
- package/deps/librdkafka/src/rdkafka_request.h +732 -0
- package/deps/librdkafka/src/rdkafka_roundrobin_assignor.c +123 -0
- package/deps/librdkafka/src/rdkafka_sasl.c +530 -0
- package/deps/librdkafka/src/rdkafka_sasl.h +63 -0
- package/deps/librdkafka/src/rdkafka_sasl_cyrus.c +722 -0
- package/deps/librdkafka/src/rdkafka_sasl_int.h +89 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.c +1833 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.h +52 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.c +1666 -0
- package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.h +47 -0
- package/deps/librdkafka/src/rdkafka_sasl_plain.c +142 -0
- package/deps/librdkafka/src/rdkafka_sasl_scram.c +858 -0
- package/deps/librdkafka/src/rdkafka_sasl_win32.c +550 -0
- package/deps/librdkafka/src/rdkafka_ssl.c +2129 -0
- package/deps/librdkafka/src/rdkafka_ssl.h +86 -0
- package/deps/librdkafka/src/rdkafka_sticky_assignor.c +4785 -0
- package/deps/librdkafka/src/rdkafka_subscription.c +278 -0
- package/deps/librdkafka/src/rdkafka_telemetry.c +760 -0
- package/deps/librdkafka/src/rdkafka_telemetry.h +52 -0
- package/deps/librdkafka/src/rdkafka_telemetry_decode.c +1053 -0
- package/deps/librdkafka/src/rdkafka_telemetry_decode.h +59 -0
- package/deps/librdkafka/src/rdkafka_telemetry_encode.c +997 -0
- package/deps/librdkafka/src/rdkafka_telemetry_encode.h +301 -0
- package/deps/librdkafka/src/rdkafka_timer.c +402 -0
- package/deps/librdkafka/src/rdkafka_timer.h +117 -0
- package/deps/librdkafka/src/rdkafka_topic.c +2161 -0
- package/deps/librdkafka/src/rdkafka_topic.h +334 -0
- package/deps/librdkafka/src/rdkafka_transport.c +1309 -0
- package/deps/librdkafka/src/rdkafka_transport.h +99 -0
- package/deps/librdkafka/src/rdkafka_transport_int.h +100 -0
- package/deps/librdkafka/src/rdkafka_txnmgr.c +3256 -0
- package/deps/librdkafka/src/rdkafka_txnmgr.h +171 -0
- package/deps/librdkafka/src/rdkafka_zstd.c +226 -0
- package/deps/librdkafka/src/rdkafka_zstd.h +57 -0
- package/deps/librdkafka/src/rdlist.c +576 -0
- package/deps/librdkafka/src/rdlist.h +434 -0
- package/deps/librdkafka/src/rdlog.c +89 -0
- package/deps/librdkafka/src/rdlog.h +41 -0
- package/deps/librdkafka/src/rdmap.c +508 -0
- package/deps/librdkafka/src/rdmap.h +492 -0
- package/deps/librdkafka/src/rdmurmur2.c +167 -0
- package/deps/librdkafka/src/rdmurmur2.h +35 -0
- package/deps/librdkafka/src/rdports.c +61 -0
- package/deps/librdkafka/src/rdports.h +38 -0
- package/deps/librdkafka/src/rdposix.h +250 -0
- package/deps/librdkafka/src/rdrand.c +80 -0
- package/deps/librdkafka/src/rdrand.h +43 -0
- package/deps/librdkafka/src/rdregex.c +156 -0
- package/deps/librdkafka/src/rdregex.h +43 -0
- package/deps/librdkafka/src/rdsignal.h +57 -0
- package/deps/librdkafka/src/rdstring.c +645 -0
- package/deps/librdkafka/src/rdstring.h +98 -0
- package/deps/librdkafka/src/rdsysqueue.h +404 -0
- package/deps/librdkafka/src/rdtime.h +356 -0
- package/deps/librdkafka/src/rdtypes.h +86 -0
- package/deps/librdkafka/src/rdunittest.c +549 -0
- package/deps/librdkafka/src/rdunittest.h +232 -0
- package/deps/librdkafka/src/rdvarint.c +134 -0
- package/deps/librdkafka/src/rdvarint.h +165 -0
- package/deps/librdkafka/src/rdwin32.h +382 -0
- package/deps/librdkafka/src/rdxxhash.c +1030 -0
- package/deps/librdkafka/src/rdxxhash.h +328 -0
- package/deps/librdkafka/src/regexp.c +1352 -0
- package/deps/librdkafka/src/regexp.h +41 -0
- package/deps/librdkafka/src/snappy.c +1866 -0
- package/deps/librdkafka/src/snappy.h +62 -0
- package/deps/librdkafka/src/snappy_compat.h +138 -0
- package/deps/librdkafka/src/statistics_schema.json +444 -0
- package/deps/librdkafka/src/tinycthread.c +932 -0
- package/deps/librdkafka/src/tinycthread.h +503 -0
- package/deps/librdkafka/src/tinycthread_extra.c +199 -0
- package/deps/librdkafka/src/tinycthread_extra.h +212 -0
- package/deps/librdkafka/src/win32_config.h +58 -0
- package/deps/librdkafka/src-cpp/CMakeLists.txt +90 -0
- package/deps/librdkafka/src-cpp/ConfImpl.cpp +84 -0
- package/deps/librdkafka/src-cpp/ConsumerImpl.cpp +244 -0
- package/deps/librdkafka/src-cpp/HandleImpl.cpp +436 -0
- package/deps/librdkafka/src-cpp/HeadersImpl.cpp +48 -0
- package/deps/librdkafka/src-cpp/KafkaConsumerImpl.cpp +296 -0
- package/deps/librdkafka/src-cpp/Makefile +55 -0
- package/deps/librdkafka/src-cpp/MessageImpl.cpp +38 -0
- package/deps/librdkafka/src-cpp/MetadataImpl.cpp +170 -0
- package/deps/librdkafka/src-cpp/ProducerImpl.cpp +197 -0
- package/deps/librdkafka/src-cpp/QueueImpl.cpp +70 -0
- package/deps/librdkafka/src-cpp/README.md +16 -0
- package/deps/librdkafka/src-cpp/RdKafka.cpp +59 -0
- package/deps/librdkafka/src-cpp/TopicImpl.cpp +124 -0
- package/deps/librdkafka/src-cpp/TopicPartitionImpl.cpp +57 -0
- package/deps/librdkafka/src-cpp/rdkafkacpp.h +3797 -0
- package/deps/librdkafka/src-cpp/rdkafkacpp_int.h +1641 -0
- package/deps/librdkafka/tests/0000-unittests.c +72 -0
- package/deps/librdkafka/tests/0001-multiobj.c +102 -0
- package/deps/librdkafka/tests/0002-unkpart.c +244 -0
- package/deps/librdkafka/tests/0003-msgmaxsize.c +173 -0
- package/deps/librdkafka/tests/0004-conf.c +934 -0
- package/deps/librdkafka/tests/0005-order.c +133 -0
- package/deps/librdkafka/tests/0006-symbols.c +163 -0
- package/deps/librdkafka/tests/0007-autotopic.c +136 -0
- package/deps/librdkafka/tests/0008-reqacks.c +179 -0
- package/deps/librdkafka/tests/0009-mock_cluster.c +97 -0
- package/deps/librdkafka/tests/0011-produce_batch.c +753 -0
- package/deps/librdkafka/tests/0012-produce_consume.c +537 -0
- package/deps/librdkafka/tests/0013-null-msgs.c +473 -0
- package/deps/librdkafka/tests/0014-reconsume-191.c +512 -0
- package/deps/librdkafka/tests/0015-offset_seeks.c +172 -0
- package/deps/librdkafka/tests/0016-client_swname.c +181 -0
- package/deps/librdkafka/tests/0017-compression.c +140 -0
- package/deps/librdkafka/tests/0018-cgrp_term.c +338 -0
- package/deps/librdkafka/tests/0019-list_groups.c +289 -0
- package/deps/librdkafka/tests/0020-destroy_hang.c +162 -0
- package/deps/librdkafka/tests/0021-rkt_destroy.c +72 -0
- package/deps/librdkafka/tests/0022-consume_batch.c +279 -0
- package/deps/librdkafka/tests/0025-timers.c +147 -0
- package/deps/librdkafka/tests/0026-consume_pause.c +547 -0
- package/deps/librdkafka/tests/0028-long_topicnames.c +79 -0
- package/deps/librdkafka/tests/0029-assign_offset.c +202 -0
- package/deps/librdkafka/tests/0030-offset_commit.c +589 -0
- package/deps/librdkafka/tests/0031-get_offsets.c +235 -0
- package/deps/librdkafka/tests/0033-regex_subscribe.c +536 -0
- package/deps/librdkafka/tests/0034-offset_reset.c +398 -0
- package/deps/librdkafka/tests/0035-api_version.c +73 -0
- package/deps/librdkafka/tests/0036-partial_fetch.c +87 -0
- package/deps/librdkafka/tests/0037-destroy_hang_local.c +85 -0
- package/deps/librdkafka/tests/0038-performance.c +121 -0
- package/deps/librdkafka/tests/0039-event.c +284 -0
- package/deps/librdkafka/tests/0040-io_event.c +257 -0
- package/deps/librdkafka/tests/0041-fetch_max_bytes.c +97 -0
- package/deps/librdkafka/tests/0042-many_topics.c +252 -0
- package/deps/librdkafka/tests/0043-no_connection.c +77 -0
- package/deps/librdkafka/tests/0044-partition_cnt.c +94 -0
- package/deps/librdkafka/tests/0045-subscribe_update.c +1010 -0
- package/deps/librdkafka/tests/0046-rkt_cache.c +65 -0
- package/deps/librdkafka/tests/0047-partial_buf_tmout.c +98 -0
- package/deps/librdkafka/tests/0048-partitioner.c +283 -0
- package/deps/librdkafka/tests/0049-consume_conn_close.c +162 -0
- package/deps/librdkafka/tests/0050-subscribe_adds.c +145 -0
- package/deps/librdkafka/tests/0051-assign_adds.c +126 -0
- package/deps/librdkafka/tests/0052-msg_timestamps.c +238 -0
- package/deps/librdkafka/tests/0053-stats_cb.cpp +527 -0
- package/deps/librdkafka/tests/0054-offset_time.cpp +236 -0
- package/deps/librdkafka/tests/0055-producer_latency.c +539 -0
- package/deps/librdkafka/tests/0056-balanced_group_mt.c +315 -0
- package/deps/librdkafka/tests/0057-invalid_topic.cpp +112 -0
- package/deps/librdkafka/tests/0058-log.cpp +123 -0
- package/deps/librdkafka/tests/0059-bsearch.cpp +241 -0
- package/deps/librdkafka/tests/0060-op_prio.cpp +163 -0
- package/deps/librdkafka/tests/0061-consumer_lag.cpp +295 -0
- package/deps/librdkafka/tests/0062-stats_event.c +126 -0
- package/deps/librdkafka/tests/0063-clusterid.cpp +180 -0
- package/deps/librdkafka/tests/0064-interceptors.c +481 -0
- package/deps/librdkafka/tests/0065-yield.cpp +140 -0
- package/deps/librdkafka/tests/0066-plugins.cpp +129 -0
- package/deps/librdkafka/tests/0067-empty_topic.cpp +151 -0
- package/deps/librdkafka/tests/0068-produce_timeout.c +136 -0
- package/deps/librdkafka/tests/0069-consumer_add_parts.c +119 -0
- package/deps/librdkafka/tests/0070-null_empty.cpp +197 -0
- package/deps/librdkafka/tests/0072-headers_ut.c +448 -0
- package/deps/librdkafka/tests/0073-headers.c +381 -0
- package/deps/librdkafka/tests/0074-producev.c +87 -0
- package/deps/librdkafka/tests/0075-retry.c +290 -0
- package/deps/librdkafka/tests/0076-produce_retry.c +452 -0
- package/deps/librdkafka/tests/0077-compaction.c +363 -0
- package/deps/librdkafka/tests/0078-c_from_cpp.cpp +96 -0
- package/deps/librdkafka/tests/0079-fork.c +93 -0
- package/deps/librdkafka/tests/0080-admin_ut.c +3095 -0
- package/deps/librdkafka/tests/0081-admin.c +5633 -0
- package/deps/librdkafka/tests/0082-fetch_max_bytes.cpp +137 -0
- package/deps/librdkafka/tests/0083-cb_event.c +233 -0
- package/deps/librdkafka/tests/0084-destroy_flags.c +208 -0
- package/deps/librdkafka/tests/0085-headers.cpp +392 -0
- package/deps/librdkafka/tests/0086-purge.c +368 -0
- package/deps/librdkafka/tests/0088-produce_metadata_timeout.c +162 -0
- package/deps/librdkafka/tests/0089-max_poll_interval.c +511 -0
- package/deps/librdkafka/tests/0090-idempotence.c +171 -0
- package/deps/librdkafka/tests/0091-max_poll_interval_timeout.c +295 -0
- package/deps/librdkafka/tests/0092-mixed_msgver.c +103 -0
- package/deps/librdkafka/tests/0093-holb.c +200 -0
- package/deps/librdkafka/tests/0094-idempotence_msg_timeout.c +231 -0
- package/deps/librdkafka/tests/0095-all_brokers_down.cpp +122 -0
- package/deps/librdkafka/tests/0097-ssl_verify.cpp +658 -0
- package/deps/librdkafka/tests/0098-consumer-txn.cpp +1218 -0
- package/deps/librdkafka/tests/0099-commit_metadata.c +194 -0
- package/deps/librdkafka/tests/0100-thread_interceptors.cpp +195 -0
- package/deps/librdkafka/tests/0101-fetch-from-follower.cpp +446 -0
- package/deps/librdkafka/tests/0102-static_group_rebalance.c +836 -0
- package/deps/librdkafka/tests/0103-transactions.c +1383 -0
- package/deps/librdkafka/tests/0104-fetch_from_follower_mock.c +625 -0
- package/deps/librdkafka/tests/0105-transactions_mock.c +3930 -0
- package/deps/librdkafka/tests/0106-cgrp_sess_timeout.c +318 -0
- package/deps/librdkafka/tests/0107-topic_recreate.c +259 -0
- package/deps/librdkafka/tests/0109-auto_create_topics.cpp +278 -0
- package/deps/librdkafka/tests/0110-batch_size.cpp +182 -0
- package/deps/librdkafka/tests/0111-delay_create_topics.cpp +127 -0
- package/deps/librdkafka/tests/0112-assign_unknown_part.c +87 -0
- package/deps/librdkafka/tests/0113-cooperative_rebalance.cpp +3473 -0
- package/deps/librdkafka/tests/0114-sticky_partitioning.cpp +176 -0
- package/deps/librdkafka/tests/0115-producer_auth.cpp +182 -0
- package/deps/librdkafka/tests/0116-kafkaconsumer_close.cpp +216 -0
- package/deps/librdkafka/tests/0117-mock_errors.c +331 -0
- package/deps/librdkafka/tests/0118-commit_rebalance.c +154 -0
- package/deps/librdkafka/tests/0119-consumer_auth.cpp +167 -0
- package/deps/librdkafka/tests/0120-asymmetric_subscription.c +185 -0
- package/deps/librdkafka/tests/0121-clusterid.c +115 -0
- package/deps/librdkafka/tests/0122-buffer_cleaning_after_rebalance.c +227 -0
- package/deps/librdkafka/tests/0123-connections_max_idle.c +98 -0
- package/deps/librdkafka/tests/0124-openssl_invalid_engine.c +69 -0
- package/deps/librdkafka/tests/0125-immediate_flush.c +144 -0
- package/deps/librdkafka/tests/0126-oauthbearer_oidc.c +528 -0
- package/deps/librdkafka/tests/0127-fetch_queue_backoff.cpp +165 -0
- package/deps/librdkafka/tests/0128-sasl_callback_queue.cpp +125 -0
- package/deps/librdkafka/tests/0129-fetch_aborted_msgs.c +79 -0
- package/deps/librdkafka/tests/0130-store_offsets.c +178 -0
- package/deps/librdkafka/tests/0131-connect_timeout.c +81 -0
- package/deps/librdkafka/tests/0132-strategy_ordering.c +179 -0
- package/deps/librdkafka/tests/0133-ssl_keys.c +150 -0
- package/deps/librdkafka/tests/0134-ssl_provider.c +92 -0
- package/deps/librdkafka/tests/0135-sasl_credentials.cpp +143 -0
- package/deps/librdkafka/tests/0136-resolve_cb.c +181 -0
- package/deps/librdkafka/tests/0137-barrier_batch_consume.c +619 -0
- package/deps/librdkafka/tests/0138-admin_mock.c +281 -0
- package/deps/librdkafka/tests/0139-offset_validation_mock.c +950 -0
- package/deps/librdkafka/tests/0140-commit_metadata.cpp +108 -0
- package/deps/librdkafka/tests/0142-reauthentication.c +515 -0
- package/deps/librdkafka/tests/0143-exponential_backoff_mock.c +552 -0
- package/deps/librdkafka/tests/0144-idempotence_mock.c +373 -0
- package/deps/librdkafka/tests/0145-pause_resume_mock.c +119 -0
- package/deps/librdkafka/tests/0146-metadata_mock.c +505 -0
- package/deps/librdkafka/tests/0147-consumer_group_consumer_mock.c +952 -0
- package/deps/librdkafka/tests/0148-offset_fetch_commit_error_mock.c +563 -0
- package/deps/librdkafka/tests/0149-broker-same-host-port.c +140 -0
- package/deps/librdkafka/tests/0150-telemetry_mock.c +651 -0
- package/deps/librdkafka/tests/0151-purge-brokers.c +566 -0
- package/deps/librdkafka/tests/0152-rebootstrap.c +59 -0
- package/deps/librdkafka/tests/0153-memberid.c +128 -0
- package/deps/librdkafka/tests/1000-unktopic.c +164 -0
- package/deps/librdkafka/tests/8000-idle.cpp +60 -0
- package/deps/librdkafka/tests/8001-fetch_from_follower_mock_manual.c +113 -0
- package/deps/librdkafka/tests/CMakeLists.txt +170 -0
- package/deps/librdkafka/tests/LibrdkafkaTestApp.py +291 -0
- package/deps/librdkafka/tests/Makefile +182 -0
- package/deps/librdkafka/tests/README.md +509 -0
- package/deps/librdkafka/tests/autotest.sh +33 -0
- package/deps/librdkafka/tests/backtrace.gdb +30 -0
- package/deps/librdkafka/tests/broker_version_tests.py +315 -0
- package/deps/librdkafka/tests/buildbox.sh +17 -0
- package/deps/librdkafka/tests/cleanup-checker-tests.sh +20 -0
- package/deps/librdkafka/tests/cluster_testing.py +191 -0
- package/deps/librdkafka/tests/delete-test-topics.sh +56 -0
- package/deps/librdkafka/tests/fixtures/oauthbearer/jwt_assertion_template.json +10 -0
- package/deps/librdkafka/tests/fixtures/ssl/Makefile +8 -0
- package/deps/librdkafka/tests/fixtures/ssl/README.md +13 -0
- package/deps/librdkafka/tests/fixtures/ssl/client.keystore.intermediate.p12 +0 -0
- package/deps/librdkafka/tests/fixtures/ssl/client.keystore.p12 +0 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.intermediate.pem +72 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.pem +50 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.intermediate.key +46 -0
- package/deps/librdkafka/tests/fixtures/ssl/client2.key +46 -0
- package/deps/librdkafka/tests/fixtures/ssl/create_keys.sh +168 -0
- package/deps/librdkafka/tests/fuzzers/Makefile +12 -0
- package/deps/librdkafka/tests/fuzzers/README.md +31 -0
- package/deps/librdkafka/tests/fuzzers/fuzz_regex.c +74 -0
- package/deps/librdkafka/tests/fuzzers/helpers.h +90 -0
- package/deps/librdkafka/tests/gen-ssl-certs.sh +165 -0
- package/deps/librdkafka/tests/interactive_broker_version.py +170 -0
- package/deps/librdkafka/tests/interceptor_test/CMakeLists.txt +16 -0
- package/deps/librdkafka/tests/interceptor_test/Makefile +22 -0
- package/deps/librdkafka/tests/interceptor_test/interceptor_test.c +314 -0
- package/deps/librdkafka/tests/interceptor_test/interceptor_test.h +54 -0
- package/deps/librdkafka/tests/java/IncrementalRebalanceCli.java +97 -0
- package/deps/librdkafka/tests/java/Makefile +13 -0
- package/deps/librdkafka/tests/java/Murmur2Cli.java +46 -0
- package/deps/librdkafka/tests/java/README.md +14 -0
- package/deps/librdkafka/tests/java/TransactionProducerCli.java +162 -0
- package/deps/librdkafka/tests/java/run-class.sh +11 -0
- package/deps/librdkafka/tests/librdkafka.suppressions +483 -0
- package/deps/librdkafka/tests/lz4_manual_test.sh +59 -0
- package/deps/librdkafka/tests/multi-broker-version-test.sh +50 -0
- package/deps/librdkafka/tests/parse-refcnt.sh +43 -0
- package/deps/librdkafka/tests/performance_plot.py +115 -0
- package/deps/librdkafka/tests/plugin_test/Makefile +19 -0
- package/deps/librdkafka/tests/plugin_test/plugin_test.c +58 -0
- package/deps/librdkafka/tests/requirements.txt +2 -0
- package/deps/librdkafka/tests/run-all-tests.sh +79 -0
- package/deps/librdkafka/tests/run-consumer-tests.sh +16 -0
- package/deps/librdkafka/tests/run-producer-tests.sh +16 -0
- package/deps/librdkafka/tests/run-test-batches.py +157 -0
- package/deps/librdkafka/tests/run-test.sh +140 -0
- package/deps/librdkafka/tests/rusage.c +249 -0
- package/deps/librdkafka/tests/sasl_test.py +289 -0
- package/deps/librdkafka/tests/scenarios/README.md +6 -0
- package/deps/librdkafka/tests/scenarios/ak23.json +6 -0
- package/deps/librdkafka/tests/scenarios/default.json +5 -0
- package/deps/librdkafka/tests/scenarios/noautocreate.json +5 -0
- package/deps/librdkafka/tests/sockem.c +801 -0
- package/deps/librdkafka/tests/sockem.h +85 -0
- package/deps/librdkafka/tests/sockem_ctrl.c +145 -0
- package/deps/librdkafka/tests/sockem_ctrl.h +61 -0
- package/deps/librdkafka/tests/test.c +7778 -0
- package/deps/librdkafka/tests/test.conf.example +27 -0
- package/deps/librdkafka/tests/test.h +1028 -0
- package/deps/librdkafka/tests/testcpp.cpp +131 -0
- package/deps/librdkafka/tests/testcpp.h +388 -0
- package/deps/librdkafka/tests/testshared.h +416 -0
- package/deps/librdkafka/tests/tools/README.md +4 -0
- package/deps/librdkafka/tests/tools/stats/README.md +21 -0
- package/deps/librdkafka/tests/tools/stats/filter.jq +42 -0
- package/deps/librdkafka/tests/tools/stats/graph.py +150 -0
- package/deps/librdkafka/tests/tools/stats/requirements.txt +3 -0
- package/deps/librdkafka/tests/tools/stats/to_csv.py +124 -0
- package/deps/librdkafka/tests/trivup/trivup-0.14.0.tar.gz +0 -0
- package/deps/librdkafka/tests/until-fail.sh +87 -0
- package/deps/librdkafka/tests/xxxx-assign_partition.c +122 -0
- package/deps/librdkafka/tests/xxxx-metadata.cpp +159 -0
- package/deps/librdkafka/vcpkg.json +23 -0
- package/deps/librdkafka/win32/README.md +5 -0
- package/deps/librdkafka/win32/build-package.bat +3 -0
- package/deps/librdkafka/win32/build.bat +19 -0
- package/deps/librdkafka/win32/common.vcxproj +84 -0
- package/deps/librdkafka/win32/interceptor_test/interceptor_test.vcxproj +87 -0
- package/deps/librdkafka/win32/librdkafka.autopkg.template +54 -0
- package/deps/librdkafka/win32/librdkafka.master.testing.targets +13 -0
- package/deps/librdkafka/win32/librdkafka.sln +226 -0
- package/deps/librdkafka/win32/librdkafka.vcxproj +276 -0
- package/deps/librdkafka/win32/librdkafkacpp/librdkafkacpp.vcxproj +104 -0
- package/deps/librdkafka/win32/msbuild.ps1 +15 -0
- package/deps/librdkafka/win32/openssl_engine_example/openssl_engine_example.vcxproj +132 -0
- package/deps/librdkafka/win32/package-zip.ps1 +46 -0
- package/deps/librdkafka/win32/packages/repositories.config +4 -0
- package/deps/librdkafka/win32/push-package.bat +4 -0
- package/deps/librdkafka/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj +67 -0
- package/deps/librdkafka/win32/rdkafka_example/rdkafka_example.vcxproj +97 -0
- package/deps/librdkafka/win32/rdkafka_performance/rdkafka_performance.vcxproj +97 -0
- package/deps/librdkafka/win32/setup-msys2.ps1 +47 -0
- package/deps/librdkafka/win32/setup-vcpkg.ps1 +34 -0
- package/deps/librdkafka/win32/tests/test.conf.example +25 -0
- package/deps/librdkafka/win32/tests/tests.vcxproj +253 -0
- package/deps/librdkafka/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj +132 -0
- package/deps/librdkafka/win32/wingetopt.c +564 -0
- package/deps/librdkafka/win32/wingetopt.h +101 -0
- package/deps/librdkafka/win32/wintime.h +33 -0
- package/deps/librdkafka.gyp +62 -0
- package/lib/admin.js +233 -0
- package/lib/client.js +573 -0
- package/lib/error.js +500 -0
- package/lib/index.js +34 -0
- package/lib/kafka-consumer-stream.js +397 -0
- package/lib/kafka-consumer.js +698 -0
- package/lib/producer/high-level-producer.js +323 -0
- package/lib/producer-stream.js +307 -0
- package/lib/producer.js +375 -0
- package/lib/tools/ref-counter.js +52 -0
- package/lib/topic-partition.js +88 -0
- package/lib/topic.js +42 -0
- package/lib/util.js +29 -0
- package/package.json +61 -0
- package/prebuilds/darwin-arm64/@point3+node-rdkafka.node +0 -0
- package/prebuilds/linux-x64/@point3+node-rdkafka.node +0 -0
- package/util/configure.js +30 -0
- package/util/get-env.js +6 -0
- package/util/test-compile.js +11 -0
- package/util/test-producer-delivery.js +100 -0
|
@@ -0,0 +1,2589 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* librdkafka - Apache Kafka C library
|
|
3
|
+
*
|
|
4
|
+
* Copyright (c) 2012-2022, Magnus Edenhill,
|
|
5
|
+
* 2023, Confluent Inc.
|
|
6
|
+
* All rights reserved.
|
|
7
|
+
*
|
|
8
|
+
* Redistribution and use in source and binary forms, with or without
|
|
9
|
+
* modification, are permitted provided that the following conditions are met:
|
|
10
|
+
*
|
|
11
|
+
* 1. Redistributions of source code must retain the above copyright notice,
|
|
12
|
+
* this list of conditions and the following disclaimer.
|
|
13
|
+
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
14
|
+
* this list of conditions and the following disclaimer in the documentation
|
|
15
|
+
* and/or other materials provided with the distribution.
|
|
16
|
+
*
|
|
17
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
18
|
+
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
19
|
+
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
20
|
+
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
21
|
+
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
22
|
+
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
23
|
+
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
24
|
+
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
25
|
+
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
26
|
+
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
27
|
+
* POSSIBILITY OF SUCH DAMAGE.
|
|
28
|
+
*/
|
|
29
|
+
|
|
30
|
+
#include "rd.h"
|
|
31
|
+
#include "rdkafka_int.h"
|
|
32
|
+
#include "rdkafka_msg.h"
|
|
33
|
+
#include "rdkafka_topic.h"
|
|
34
|
+
#include "rdkafka_partition.h"
|
|
35
|
+
#include "rdkafka_interceptor.h"
|
|
36
|
+
#include "rdkafka_header.h"
|
|
37
|
+
#include "rdkafka_idempotence.h"
|
|
38
|
+
#include "rdkafka_txnmgr.h"
|
|
39
|
+
#include "rdkafka_error.h"
|
|
40
|
+
#include "rdcrc32.h"
|
|
41
|
+
#include "rdfnv1a.h"
|
|
42
|
+
#include "rdmurmur2.h"
|
|
43
|
+
#include "rdrand.h"
|
|
44
|
+
#include "rdtime.h"
|
|
45
|
+
#include "rdsysqueue.h"
|
|
46
|
+
#include "rdunittest.h"
|
|
47
|
+
|
|
48
|
+
#include <stdarg.h>
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) {
|
|
52
|
+
if (!rkmessage->err)
|
|
53
|
+
return NULL;
|
|
54
|
+
|
|
55
|
+
if (rkmessage->payload)
|
|
56
|
+
return (const char *)rkmessage->payload;
|
|
57
|
+
|
|
58
|
+
return rd_kafka_err2str(rkmessage->err);
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
const char *
|
|
62
|
+
rd_kafka_message_produce_errstr(const rd_kafka_message_t *rkmessage) {
|
|
63
|
+
if (!rkmessage->err)
|
|
64
|
+
return NULL;
|
|
65
|
+
rd_kafka_msg_t *rkm = (rd_kafka_msg_t *)rkmessage;
|
|
66
|
+
return rkm->rkm_u.producer.errstr;
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
/**
|
|
72
|
+
* @brief Check if producing is allowed.
|
|
73
|
+
*
|
|
74
|
+
* @param errorp If non-NULL and an producing is prohibited a new error_t
|
|
75
|
+
* object will be allocated and returned in this pointer.
|
|
76
|
+
*
|
|
77
|
+
* @returns an error if not allowed, else 0.
|
|
78
|
+
*
|
|
79
|
+
* @remarks Also sets the corresponding errno.
|
|
80
|
+
*/
|
|
81
|
+
static RD_INLINE rd_kafka_resp_err_t
|
|
82
|
+
rd_kafka_check_produce(rd_kafka_t *rk, rd_kafka_error_t **errorp) {
|
|
83
|
+
rd_kafka_resp_err_t err;
|
|
84
|
+
|
|
85
|
+
if (unlikely((err = rd_kafka_fatal_error_code(rk)))) {
|
|
86
|
+
rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__FATAL, ECANCELED);
|
|
87
|
+
if (errorp) {
|
|
88
|
+
rd_kafka_rdlock(rk);
|
|
89
|
+
*errorp = rd_kafka_error_new_fatal(
|
|
90
|
+
err,
|
|
91
|
+
"Producing not allowed since a previous fatal "
|
|
92
|
+
"error was raised: %s",
|
|
93
|
+
rk->rk_fatal.errstr);
|
|
94
|
+
rd_kafka_rdunlock(rk);
|
|
95
|
+
}
|
|
96
|
+
return RD_KAFKA_RESP_ERR__FATAL;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
if (likely(rd_kafka_txn_may_enq_msg(rk)))
|
|
100
|
+
return RD_KAFKA_RESP_ERR_NO_ERROR;
|
|
101
|
+
|
|
102
|
+
/* Transactional state forbids producing */
|
|
103
|
+
rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__STATE, ENOEXEC);
|
|
104
|
+
|
|
105
|
+
if (errorp) {
|
|
106
|
+
rd_kafka_rdlock(rk);
|
|
107
|
+
*errorp = rd_kafka_error_new(
|
|
108
|
+
RD_KAFKA_RESP_ERR__STATE,
|
|
109
|
+
"Producing not allowed in transactional state %s",
|
|
110
|
+
rd_kafka_txn_state2str(rk->rk_eos.txn_state));
|
|
111
|
+
rd_kafka_rdunlock(rk);
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
return RD_KAFKA_RESP_ERR__STATE;
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
/**
 * @brief Destroy a message: undo the curr_msgs accounting, release
 *        headers and the topic refcount, and free the payload and the
 *        message struct itself depending on flags.
 *
 * @param rk Client instance; may be NULL if the message holds a topic
 *           reference through which the instance is reachable.
 */
void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm) {
        const int flags = rkm->rkm_flags;

        // FIXME
        if (flags & RD_KAFKA_MSG_F_ACCOUNT) {
                /* Message was counted by curr_msgs_add(): subtract it. */
                rd_dassert(rk || rkm->rkm_rkmessage.rkt);
                rd_kafka_curr_msgs_sub(rk ? rk : rkm->rkm_rkmessage.rkt->rkt_rk,
                                       1, rkm->rkm_len);
        }

        if (rkm->rkm_headers)
                rd_kafka_headers_destroy(rkm->rkm_headers);

        if (likely(rkm->rkm_rkmessage.rkt != NULL))
                rd_kafka_topic_destroy0(rkm->rkm_rkmessage.rkt);

        if ((flags & RD_KAFKA_MSG_F_FREE) && rkm->rkm_payload)
                rd_free(rkm->rkm_payload);

        /* Must come last: frees the rkm struct itself. */
        if (flags & RD_KAFKA_MSG_F_FREE_RKM)
                rd_free(rkm);
}
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
/**
 * @brief Create a new Producer message, copying the payload as
 *        indicated by msgflags.
 *
 * The message struct, the payload copy (only with RD_KAFKA_MSG_F_COPY)
 * and the key copy (always) are packed into a single allocation, laid
 * out as: [rd_kafka_msg_t][payload][key].
 *
 * @returns the new message (never NULL; allocation failure is not
 *          handled here — presumably rd_malloc aborts on OOM, TODO confirm)
 */
static rd_kafka_msg_t *rd_kafka_msg_new00(rd_kafka_topic_t *rkt,
                                          int32_t partition,
                                          int msgflags,
                                          char *payload,
                                          size_t len,
                                          const void *key,
                                          size_t keylen,
                                          void *msg_opaque) {
        rd_kafka_msg_t *rkm;
        size_t mlen = sizeof(*rkm);
        char *p;

        /* If we are to make a copy of the payload, allocate space for it too */
        if (msgflags & RD_KAFKA_MSG_F_COPY) {
                /* A copied payload is owned by the message, so the
                 * caller-free flag no longer applies. */
                msgflags &= ~RD_KAFKA_MSG_F_FREE;
                mlen += len;
        }

        /* The key is always copied into the same allocation. */
        mlen += keylen;

        /* Note: using rd_malloc here, not rd_calloc, so make sure all fields
         * are properly set up. */
        rkm = rd_malloc(mlen);
        rkm->rkm_err = 0;
        rkm->rkm_flags =
            (RD_KAFKA_MSG_F_PRODUCER | RD_KAFKA_MSG_F_FREE_RKM | msgflags);
        rkm->rkm_len = len;
        rkm->rkm_opaque = msg_opaque;
        /* Hold a topic reference for the lifetime of the message. */
        rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep(rkt);

        rkm->rkm_broker_id = -1;
        rkm->rkm_partition = partition;
        rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID;
        rkm->rkm_timestamp = 0;
        rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
        rkm->rkm_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
        rkm->rkm_headers = NULL;

        /* Write cursor into the trailing space of the allocation. */
        p = (char *)(rkm + 1);

        if (payload && msgflags & RD_KAFKA_MSG_F_COPY) {
                /* Copy payload to space following the ..msg_t */
                rkm->rkm_payload = p;
                memcpy(rkm->rkm_payload, payload, len);
                p += len;

        } else {
                /* Just point to the provided payload. */
                rkm->rkm_payload = payload;
        }

        if (key) {
                /* Key is copied right after the (optional) payload copy. */
                rkm->rkm_key = p;
                rkm->rkm_key_len = keylen;
                memcpy(rkm->rkm_key, key, keylen);
        } else {
                rkm->rkm_key = NULL;
                rkm->rkm_key_len = 0;
        }

        return rkm;
}
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
/**
 * @brief Create a new Producer message: validates sizes, accounts the
 *        message in the instance-wide curr_msgs counters, and triggers
 *        the on_send interceptor chain.
 *
 * @param hdrs Ownership is transferred to the returned message.
 * @param errp Set to the error code on failure (must be non-NULL).
 * @param errnop If non-NULL, set to a matching errno value on failure.
 *
 * @remark Must only be used by producer code.
 *
 * @returns the new message, or NULL on error with *errp (and *errnop,
 *          if provided) set appropriately.
 */
static rd_kafka_msg_t *rd_kafka_msg_new0(rd_kafka_topic_t *rkt,
                                         int32_t force_partition,
                                         int msgflags,
                                         char *payload,
                                         size_t len,
                                         const void *key,
                                         size_t keylen,
                                         void *msg_opaque,
                                         rd_kafka_resp_err_t *errp,
                                         int *errnop,
                                         rd_kafka_headers_t *hdrs,
                                         int64_t timestamp,
                                         rd_ts_t now) {
        rd_kafka_msg_t *rkm;
        size_t hdrs_size = 0;

        /* Normalize NULL payload/key to zero length. */
        if (unlikely(!payload))
                len = 0;
        if (!key)
                keylen = 0;
        if (hdrs)
                hdrs_size = rd_kafka_headers_serialized_size(hdrs);

        /* Reject messages whose on-wire size would exceed the configured
         * maximum, or whose payload/key exceed the protocol's 32-bit
         * length fields. */
        if (unlikely(len > INT32_MAX || keylen > INT32_MAX ||
                     rd_kafka_msg_max_wire_size(keylen, len, hdrs_size) >
                         (size_t)rkt->rkt_rk->rk_conf.max_msg_size)) {
                *errp = RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
                if (errnop)
                        *errnop = EMSGSIZE;
                return NULL;
        }

        /* Account the message against the instance-wide limits;
         * with F_BLOCK this may block until space frees up (temporarily
         * dropping rkt_lock if the caller holds it read-locked). */
        if (msgflags & RD_KAFKA_MSG_F_BLOCK)
                *errp = rd_kafka_curr_msgs_add(
                    rkt->rkt_rk, 1, len, 1 /*block*/,
                    (msgflags & RD_KAFKA_MSG_F_RKT_RDLOCKED) ? &rkt->rkt_lock
                                                             : NULL);
        else
                *errp = rd_kafka_curr_msgs_add(rkt->rkt_rk, 1, len, 0, NULL);

        if (unlikely(*errp)) {
                /* Queue full (or similar): map to ENOBUFS. */
                if (errnop)
                        *errnop = ENOBUFS;
                return NULL;
        }


        /* F_ACCOUNT marks that curr_msgs_sub() must be called on destroy. */
        rkm = rd_kafka_msg_new00(
            rkt, force_partition,
            msgflags | RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */, payload,
            len, key, keylen, msg_opaque);

        memset(&rkm->rkm_u.producer, 0, sizeof(rkm->rkm_u.producer));

        /* Timestamp 0 means "use current wall-clock time". */
        if (timestamp)
                rkm->rkm_timestamp = timestamp;
        else
                rkm->rkm_timestamp = rd_uclock() / 1000;
        rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;

        if (hdrs) {
                /* Message takes ownership of the headers list. */
                rd_dassert(!rkm->rkm_headers);
                rkm->rkm_headers = hdrs;
        }

        rkm->rkm_ts_enq = now;

        /* message.timeout.ms == 0 means "never time out". */
        if (rkt->rkt_conf.message_timeout_ms == 0) {
                rkm->rkm_ts_timeout = INT64_MAX;
        } else {
                rkm->rkm_ts_timeout =
                    now + (int64_t)rkt->rkt_conf.message_timeout_ms * 1000;
        }

        /* Call interceptor chain for on_send */
        rd_kafka_interceptors_on_send(rkt->rkt_rk, &rkm->rkm_rkmessage);

        return rkm;
}
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
/**
|
|
302
|
+
* @brief Produce: creates a new message, runs the partitioner and enqueues
|
|
303
|
+
* into on the selected partition.
|
|
304
|
+
*
|
|
305
|
+
* @returns 0 on success or -1 on error.
|
|
306
|
+
*
|
|
307
|
+
* If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
|
|
308
|
+
* the memory associated with the payload is still the caller's
|
|
309
|
+
* responsibility.
|
|
310
|
+
*
|
|
311
|
+
* @locks none
|
|
312
|
+
*/
|
|
313
|
+
int rd_kafka_msg_new(rd_kafka_topic_t *rkt,
|
|
314
|
+
int32_t force_partition,
|
|
315
|
+
int msgflags,
|
|
316
|
+
char *payload,
|
|
317
|
+
size_t len,
|
|
318
|
+
const void *key,
|
|
319
|
+
size_t keylen,
|
|
320
|
+
void *msg_opaque) {
|
|
321
|
+
rd_kafka_msg_t *rkm;
|
|
322
|
+
rd_kafka_resp_err_t err;
|
|
323
|
+
int errnox;
|
|
324
|
+
|
|
325
|
+
if (unlikely((err = rd_kafka_check_produce(rkt->rkt_rk, NULL))))
|
|
326
|
+
return -1;
|
|
327
|
+
|
|
328
|
+
/* Create message */
|
|
329
|
+
rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, payload, len,
|
|
330
|
+
key, keylen, msg_opaque, &err, &errnox, NULL, 0,
|
|
331
|
+
rd_clock());
|
|
332
|
+
if (unlikely(!rkm)) {
|
|
333
|
+
/* errno is already set by msg_new() */
|
|
334
|
+
rd_kafka_set_last_error(err, errnox);
|
|
335
|
+
return -1;
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
/* Partition the message */
|
|
340
|
+
err = rd_kafka_msg_partitioner(rkt, rkm, 1);
|
|
341
|
+
if (likely(!err)) {
|
|
342
|
+
rd_kafka_set_last_error(0, 0);
|
|
343
|
+
return 0;
|
|
344
|
+
}
|
|
345
|
+
|
|
346
|
+
/* Interceptor: unroll failing messages by triggering on_ack.. */
|
|
347
|
+
rkm->rkm_err = err;
|
|
348
|
+
rd_kafka_interceptors_on_acknowledgement(rkt->rkt_rk,
|
|
349
|
+
&rkm->rkm_rkmessage);
|
|
350
|
+
|
|
351
|
+
/* Handle partitioner failures: it only fails when the application
|
|
352
|
+
* attempts to force a destination partition that does not exist
|
|
353
|
+
* in the cluster. Note we must clear the RD_KAFKA_MSG_F_FREE
|
|
354
|
+
* flag since our contract says we don't free the payload on
|
|
355
|
+
* failure. */
|
|
356
|
+
|
|
357
|
+
rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
|
|
358
|
+
rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
|
|
359
|
+
|
|
360
|
+
/* Translate error codes to errnos. */
|
|
361
|
+
if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
|
|
362
|
+
rd_kafka_set_last_error(err, ESRCH);
|
|
363
|
+
else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
|
|
364
|
+
rd_kafka_set_last_error(err, ENOENT);
|
|
365
|
+
else
|
|
366
|
+
rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */
|
|
367
|
+
|
|
368
|
+
return -1;
|
|
369
|
+
}
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */
/**
 * @brief Produce a single message from an array of rd_kafka_vu_t
 *        field descriptors.
 *
 * @returns NULL on success, or a newly allocated error object on
 *          failure (caller must destroy it).
 */
rd_kafka_error_t *
rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt) {
        /* A stack message is used to accumulate the vu fields before the
         * real message is allocated. */
        rd_kafka_msg_t s_rkm = {
            /* Message defaults */
            .rkm_partition = RD_KAFKA_PARTITION_UA,
            .rkm_timestamp = 0, /* current time */
        };
        rd_kafka_msg_t *rkm = &s_rkm;
        rd_kafka_topic_t *rkt = NULL;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
        rd_kafka_error_t *error = NULL;
        rd_kafka_headers_t *hdrs = NULL;        /* Headers built from VTYPE_HEADER */
        rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */
        int existing = 0;
        size_t i;

        if (unlikely(rd_kafka_check_produce(rk, &error)))
                return error;

        /* Collect all vu fields into the stack message / local state. */
        for (i = 0; i < cnt; i++) {
                const rd_kafka_vu_t *vu = &vus[i];
                switch (vu->vtype) {
                case RD_KAFKA_VTYPE_TOPIC:
                        rkt = rd_kafka_topic_new0(rk, vu->u.cstr, NULL,
                                                  &existing, 1);
                        /* Newly referenced topic: kick off a leader query
                         * so metadata arrives quickly. */
                        if (!existing)
                                rd_kafka_topic_fast_leader_query(
                                    rk, rd_true /* force */);
                        break;

                case RD_KAFKA_VTYPE_RKT:
                        /* Take our own reference on the app's topic. */
                        rkt = rd_kafka_topic_proper(vu->u.rkt);
                        rd_kafka_topic_keep(rkt);
                        break;

                case RD_KAFKA_VTYPE_PARTITION:
                        rkm->rkm_partition = vu->u.i32;
                        break;

                case RD_KAFKA_VTYPE_VALUE:
                        rkm->rkm_payload = vu->u.mem.ptr;
                        rkm->rkm_len = vu->u.mem.size;
                        break;

                case RD_KAFKA_VTYPE_KEY:
                        rkm->rkm_key = vu->u.mem.ptr;
                        rkm->rkm_key_len = vu->u.mem.size;
                        break;

                case RD_KAFKA_VTYPE_OPAQUE:
                        rkm->rkm_opaque = vu->u.ptr;
                        break;

                case RD_KAFKA_VTYPE_MSGFLAGS:
                        rkm->rkm_flags = vu->u.i;
                        break;

                case RD_KAFKA_VTYPE_TIMESTAMP:
                        rkm->rkm_timestamp = vu->u.i64;
                        break;

                case RD_KAFKA_VTYPE_HEADER:
                        /* Individual headers may not be mixed with a
                         * complete app-provided headers list. */
                        if (unlikely(app_hdrs != NULL)) {
                                error = rd_kafka_error_new(
                                    RD_KAFKA_RESP_ERR__CONFLICT,
                                    "VTYPE_HEADER and VTYPE_HEADERS "
                                    "are mutually exclusive");
                                goto err;
                        }

                        /* Lazily create the headers list on first header. */
                        if (unlikely(!hdrs))
                                hdrs = rd_kafka_headers_new(8);

                        err = rd_kafka_header_add(hdrs, vu->u.header.name, -1,
                                                  vu->u.header.val,
                                                  vu->u.header.size);
                        if (unlikely(err)) {
                                error = rd_kafka_error_new(
                                    err, "Failed to add header: %s",
                                    rd_kafka_err2str(err));
                                goto err;
                        }
                        break;

                case RD_KAFKA_VTYPE_HEADERS:
                        if (unlikely(hdrs != NULL)) {
                                error = rd_kafka_error_new(
                                    RD_KAFKA_RESP_ERR__CONFLICT,
                                    "VTYPE_HEADERS and VTYPE_HEADER "
                                    "are mutually exclusive");
                                goto err;
                        }
                        app_hdrs = vu->u.headers;
                        break;

                default:
                        error = rd_kafka_error_new(
                            RD_KAFKA_RESP_ERR__INVALID_ARG,
                            "Unsupported VTYPE %d", (int)vu->vtype);
                        goto err;
                }
        }

        rd_assert(!error);

        if (unlikely(!rkt)) {
                error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
                                           "Topic name or object required");
                goto err;
        }

        /* Allocate the real message from the collected fields.
         * Note: 'rkm' now switches from the stack message to the
         * heap-allocated one. */
        rkm = rd_kafka_msg_new0(
            rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload,
            rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len, rkm->rkm_opaque, &err,
            NULL, app_hdrs ? app_hdrs : hdrs, rkm->rkm_timestamp, rd_clock());

        if (unlikely(err)) {
                error = rd_kafka_error_new(err, "Failed to produce message: %s",
                                           rd_kafka_err2str(err));
                goto err;
        }
        /* 'hdrs' is now owned by 'rkm' */
        hdrs = NULL;

        /* Partition the message */
        err = rd_kafka_msg_partitioner(rkt, rkm, 1);
        if (unlikely(err)) {
                /* Handle partitioner failures: it only fails when
                 * the application attempts to force a destination
                 * partition that does not exist in the cluster. */

                /* Interceptors: Unroll on_send by on_ack.. */
                rkm->rkm_err = err;
                rd_kafka_interceptors_on_acknowledgement(rk,
                                                         &rkm->rkm_rkmessage);

                /* Note we must clear the RD_KAFKA_MSG_F_FREE
                 * flag since our contract says we don't free the payload on
                 * failure. */
                rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;

                /* Deassociate application owned headers from message
                 * since headers remain in application ownership
                 * when producev() fails */
                if (app_hdrs && app_hdrs == rkm->rkm_headers)
                        rkm->rkm_headers = NULL;

                rd_kafka_msg_destroy(rk, rkm);

                error = rd_kafka_error_new(err, "Failed to enqueue message: %s",
                                           rd_kafka_err2str(err));
                goto err;
        }

        /* Drop our local topic reference; the message holds its own. */
        rd_kafka_topic_destroy0(rkt);

        return NULL;

err:
        if (rkt)
                rd_kafka_topic_destroy0(rkt);

        /* Only locally-built headers are freed here; app-provided headers
         * remain owned by the application. */
        if (hdrs)
                rd_kafka_headers_destroy(hdrs);

        rd_assert(error != NULL);
        return error;
}
|
|
541
|
+
|
|
542
|
+
|
|
543
|
+
|
|
544
|
+
/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */
|
|
545
|
+
rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...) {
|
|
546
|
+
va_list ap;
|
|
547
|
+
rd_kafka_msg_t s_rkm = {
|
|
548
|
+
/* Message defaults */
|
|
549
|
+
.rkm_partition = RD_KAFKA_PARTITION_UA,
|
|
550
|
+
.rkm_timestamp = 0, /* current time */
|
|
551
|
+
};
|
|
552
|
+
rd_kafka_msg_t *rkm = &s_rkm;
|
|
553
|
+
rd_kafka_vtype_t vtype;
|
|
554
|
+
rd_kafka_topic_t *rkt = NULL;
|
|
555
|
+
rd_kafka_resp_err_t err;
|
|
556
|
+
rd_kafka_headers_t *hdrs = NULL;
|
|
557
|
+
rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */
|
|
558
|
+
int existing = 0;
|
|
559
|
+
|
|
560
|
+
if (unlikely((err = rd_kafka_check_produce(rk, NULL))))
|
|
561
|
+
return err;
|
|
562
|
+
|
|
563
|
+
va_start(ap, rk);
|
|
564
|
+
while (!err &&
|
|
565
|
+
(vtype = va_arg(ap, rd_kafka_vtype_t)) != RD_KAFKA_VTYPE_END) {
|
|
566
|
+
switch (vtype) {
|
|
567
|
+
case RD_KAFKA_VTYPE_TOPIC:
|
|
568
|
+
rkt = rd_kafka_topic_new0(rk, va_arg(ap, const char *),
|
|
569
|
+
NULL, &existing, 1);
|
|
570
|
+
if (!existing)
|
|
571
|
+
rd_kafka_topic_fast_leader_query(
|
|
572
|
+
rk, rd_true /* force */);
|
|
573
|
+
break;
|
|
574
|
+
|
|
575
|
+
case RD_KAFKA_VTYPE_RKT:
|
|
576
|
+
rkt = rd_kafka_topic_proper(
|
|
577
|
+
va_arg(ap, rd_kafka_topic_t *));
|
|
578
|
+
rd_kafka_topic_keep(rkt);
|
|
579
|
+
break;
|
|
580
|
+
|
|
581
|
+
case RD_KAFKA_VTYPE_PARTITION:
|
|
582
|
+
rkm->rkm_partition = va_arg(ap, int32_t);
|
|
583
|
+
break;
|
|
584
|
+
|
|
585
|
+
case RD_KAFKA_VTYPE_VALUE:
|
|
586
|
+
rkm->rkm_payload = va_arg(ap, void *);
|
|
587
|
+
rkm->rkm_len = va_arg(ap, size_t);
|
|
588
|
+
break;
|
|
589
|
+
|
|
590
|
+
case RD_KAFKA_VTYPE_KEY:
|
|
591
|
+
rkm->rkm_key = va_arg(ap, void *);
|
|
592
|
+
rkm->rkm_key_len = va_arg(ap, size_t);
|
|
593
|
+
break;
|
|
594
|
+
|
|
595
|
+
case RD_KAFKA_VTYPE_OPAQUE:
|
|
596
|
+
rkm->rkm_opaque = va_arg(ap, void *);
|
|
597
|
+
break;
|
|
598
|
+
|
|
599
|
+
case RD_KAFKA_VTYPE_MSGFLAGS:
|
|
600
|
+
rkm->rkm_flags = va_arg(ap, int);
|
|
601
|
+
break;
|
|
602
|
+
|
|
603
|
+
case RD_KAFKA_VTYPE_TIMESTAMP:
|
|
604
|
+
rkm->rkm_timestamp = va_arg(ap, int64_t);
|
|
605
|
+
break;
|
|
606
|
+
|
|
607
|
+
case RD_KAFKA_VTYPE_HEADER: {
|
|
608
|
+
const char *name;
|
|
609
|
+
const void *value;
|
|
610
|
+
ssize_t size;
|
|
611
|
+
|
|
612
|
+
if (unlikely(app_hdrs != NULL)) {
|
|
613
|
+
err = RD_KAFKA_RESP_ERR__CONFLICT;
|
|
614
|
+
break;
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
if (unlikely(!hdrs))
|
|
618
|
+
hdrs = rd_kafka_headers_new(8);
|
|
619
|
+
|
|
620
|
+
name = va_arg(ap, const char *);
|
|
621
|
+
value = va_arg(ap, const void *);
|
|
622
|
+
size = va_arg(ap, ssize_t);
|
|
623
|
+
|
|
624
|
+
err = rd_kafka_header_add(hdrs, name, -1, value, size);
|
|
625
|
+
} break;
|
|
626
|
+
|
|
627
|
+
case RD_KAFKA_VTYPE_HEADERS:
|
|
628
|
+
if (unlikely(hdrs != NULL)) {
|
|
629
|
+
err = RD_KAFKA_RESP_ERR__CONFLICT;
|
|
630
|
+
break;
|
|
631
|
+
}
|
|
632
|
+
app_hdrs = va_arg(ap, rd_kafka_headers_t *);
|
|
633
|
+
break;
|
|
634
|
+
|
|
635
|
+
default:
|
|
636
|
+
err = RD_KAFKA_RESP_ERR__INVALID_ARG;
|
|
637
|
+
break;
|
|
638
|
+
}
|
|
639
|
+
}
|
|
640
|
+
|
|
641
|
+
va_end(ap);
|
|
642
|
+
|
|
643
|
+
if (unlikely(!rkt))
|
|
644
|
+
return RD_KAFKA_RESP_ERR__INVALID_ARG;
|
|
645
|
+
|
|
646
|
+
if (likely(!err))
|
|
647
|
+
rkm = rd_kafka_msg_new0(
|
|
648
|
+
rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload,
|
|
649
|
+
rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len,
|
|
650
|
+
rkm->rkm_opaque, &err, NULL, app_hdrs ? app_hdrs : hdrs,
|
|
651
|
+
rkm->rkm_timestamp, rd_clock());
|
|
652
|
+
|
|
653
|
+
if (unlikely(err)) {
|
|
654
|
+
rd_kafka_topic_destroy0(rkt);
|
|
655
|
+
if (hdrs)
|
|
656
|
+
rd_kafka_headers_destroy(hdrs);
|
|
657
|
+
return err;
|
|
658
|
+
}
|
|
659
|
+
|
|
660
|
+
/* Partition the message */
|
|
661
|
+
err = rd_kafka_msg_partitioner(rkt, rkm, 1);
|
|
662
|
+
if (unlikely(err)) {
|
|
663
|
+
/* Handle partitioner failures: it only fails when
|
|
664
|
+
* the application attempts to force a destination
|
|
665
|
+
* partition that does not exist in the cluster. */
|
|
666
|
+
|
|
667
|
+
/* Interceptors: Unroll on_send by on_ack.. */
|
|
668
|
+
rkm->rkm_err = err;
|
|
669
|
+
rd_kafka_interceptors_on_acknowledgement(rk,
|
|
670
|
+
&rkm->rkm_rkmessage);
|
|
671
|
+
|
|
672
|
+
/* Note we must clear the RD_KAFKA_MSG_F_FREE
|
|
673
|
+
* flag since our contract says we don't free the payload on
|
|
674
|
+
* failure. */
|
|
675
|
+
rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
|
|
676
|
+
|
|
677
|
+
/* Deassociate application owned headers from message
|
|
678
|
+
* since headers remain in application ownership
|
|
679
|
+
* when producev() fails */
|
|
680
|
+
if (app_hdrs && app_hdrs == rkm->rkm_headers)
|
|
681
|
+
rkm->rkm_headers = NULL;
|
|
682
|
+
|
|
683
|
+
rd_kafka_msg_destroy(rk, rkm);
|
|
684
|
+
}
|
|
685
|
+
|
|
686
|
+
rd_kafka_topic_destroy0(rkt);
|
|
687
|
+
|
|
688
|
+
return err;
|
|
689
|
+
}
|
|
690
|
+
|
|
691
|
+
|
|
692
|
+
|
|
693
|
+
/**
|
|
694
|
+
* @brief Produce a single message.
|
|
695
|
+
* @locality any application thread
|
|
696
|
+
* @locks none
|
|
697
|
+
*/
|
|
698
|
+
int rd_kafka_produce(rd_kafka_topic_t *rkt,
|
|
699
|
+
int32_t partition,
|
|
700
|
+
int msgflags,
|
|
701
|
+
void *payload,
|
|
702
|
+
size_t len,
|
|
703
|
+
const void *key,
|
|
704
|
+
size_t keylen,
|
|
705
|
+
void *msg_opaque) {
|
|
706
|
+
return rd_kafka_msg_new(rkt, partition, msgflags, payload, len, key,
|
|
707
|
+
keylen, msg_opaque);
|
|
708
|
+
}
|
|
709
|
+
|
|
710
|
+
|
|
711
|
+
|
|
712
|
+
/**
 * Produce a batch of messages.
 * Returns the number of messages successfully queued for producing.
 * Each message's .err will be set accordingly.
 *
 * Routing: when \p partition is RD_KAFKA_PARTITION_UA, or \p msgflags has
 * RD_KAFKA_MSG_F_PARTITION set, each message is routed individually
 * (partitioner, or the per-message .partition field); otherwise all
 * messages go to the single fixed \p partition.
 */
int rd_kafka_produce_batch(rd_kafka_topic_t *app_rkt,
                           int32_t partition,
                           int msgflags,
                           rd_kafka_message_t *rkmessages,
                           int message_cnt) {
        /* NOTE(review): tmpq is never referenced in this function --
         * looks like a leftover; verify before removing. */
        rd_kafka_msgq_t tmpq = RD_KAFKA_MSGQ_INITIALIZER(tmpq);
        int i;
        int64_t utc_now = rd_uclock() / 1000; /* wall-clock, for msg timestamps */
        rd_ts_t now = rd_clock();             /* monotonic, for enqueue time */
        int good = 0;                         /* successfully queued count */
        int multiple_partitions = (partition == RD_KAFKA_PARTITION_UA ||
                                   (msgflags & RD_KAFKA_MSG_F_PARTITION));
        rd_kafka_resp_err_t all_err;
        rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
        rd_kafka_toppar_t *rktp = NULL;

        /* Propagated per-message below */
        all_err = rd_kafka_check_produce(rkt->rkt_rk, NULL);

        rd_kafka_topic_rdlock(rkt);
        if (!multiple_partitions) {
                /* Single partition: look up the rktp once. */
                rktp = rd_kafka_toppar_get_avail(rkt, partition,
                                                 1 /*ua on miss*/, &all_err);

        } else {
                /* Indicate to lower-level msg_new..() that rkt is locked
                 * so that they may unlock it momentarily if blocking. */
                msgflags |= RD_KAFKA_MSG_F_RKT_RDLOCKED;
        }

        for (i = 0; i < message_cnt; i++) {
                rd_kafka_msg_t *rkm;

                /* Propagate error for all messages. */
                if (unlikely(all_err)) {
                        rkmessages[i].err = all_err;
                        continue;
                }

                /* Create message */
                rkm = rd_kafka_msg_new0(
                    rkt,
                    (msgflags & RD_KAFKA_MSG_F_PARTITION)
                        ? rkmessages[i].partition
                        : partition,
                    msgflags, rkmessages[i].payload, rkmessages[i].len,
                    rkmessages[i].key, rkmessages[i].key_len,
                    rkmessages[i]._private, &rkmessages[i].err, NULL, NULL,
                    utc_now, now);
                if (unlikely(!rkm)) {
                        /* A full queue blocks all subsequent messages too. */
                        if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL)
                                all_err = rkmessages[i].err;
                        continue;
                }

                /* Three cases here:
                 *  partition==UA:            run the partitioner (slow)
                 *  RD_KAFKA_MSG_F_PARTITION: produce message to specified
                 *                            partition
                 *  fixed partition:          simply concatenate the queue
                 *                            to the partition's queue */
                if (multiple_partitions) {
                        if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) {
                                /* Partition the message */
                                rkmessages[i].err = rd_kafka_msg_partitioner(
                                    rkt, rkm, 0 /*already locked*/);
                        } else {
                                /* Re-use cached rktp when consecutive
                                 * messages target the same partition. */
                                if (rktp == NULL || rkm->rkm_partition !=
                                                        rktp->rktp_partition) {
                                        rd_kafka_resp_err_t err;
                                        if (rktp != NULL)
                                                rd_kafka_toppar_destroy(rktp);
                                        rktp = rd_kafka_toppar_get_avail(
                                            rkt, rkm->rkm_partition,
                                            1 /*ua on miss*/, &err);

                                        if (unlikely(!rktp)) {
                                                rkmessages[i].err = err;
                                                continue;
                                        }
                                }
                                rd_kafka_toppar_enq_msg(rktp, rkm, now);

                                if (rd_kafka_is_transactional(rkt->rkt_rk)) {
                                        /* Add partition to transaction */
                                        rd_kafka_txn_add_partition(rktp);
                                }
                        }

                        if (unlikely(rkmessages[i].err)) {
                                /* Interceptors: Unroll on_send by on_ack.. */
                                rd_kafka_interceptors_on_acknowledgement(
                                    rkt->rkt_rk, &rkmessages[i]);

                                rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
                                continue;
                        }


                } else {
                        /* Single destination partition. */
                        rd_kafka_toppar_enq_msg(rktp, rkm, now);
                }

                rkmessages[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
                good++;
        }

        rd_kafka_topic_rdunlock(rkt);

        /* good > 0 in the single-partition path implies rktp != NULL. */
        if (!multiple_partitions && good > 0 &&
            rd_kafka_is_transactional(rkt->rkt_rk) &&
            rktp->rktp_partition != RD_KAFKA_PARTITION_UA) {
                /* Add single destination partition to transaction */
                rd_kafka_txn_add_partition(rktp);
        }

        if (rktp != NULL)
                rd_kafka_toppar_destroy(rktp);

        return good;
}
|
|
840
|
+
|
|
841
|
+
/**
|
|
842
|
+
* @brief Scan \p rkmq for messages that have timed out and remove them from
|
|
843
|
+
* \p rkmq and add to \p timedout queue.
|
|
844
|
+
*
|
|
845
|
+
* @param abs_next_timeout will be set to the next message timeout, or 0
|
|
846
|
+
* if no timeout. Optional, may be NULL.
|
|
847
|
+
*
|
|
848
|
+
* @returns the number of messages timed out.
|
|
849
|
+
*
|
|
850
|
+
* @locality any
|
|
851
|
+
* @locks toppar_lock MUST be held
|
|
852
|
+
*/
|
|
853
|
+
int rd_kafka_msgq_age_scan(rd_kafka_toppar_t *rktp,
|
|
854
|
+
rd_kafka_msgq_t *rkmq,
|
|
855
|
+
rd_kafka_msgq_t *timedout,
|
|
856
|
+
rd_ts_t now,
|
|
857
|
+
rd_ts_t *abs_next_timeout) {
|
|
858
|
+
rd_kafka_msg_t *rkm, *tmp, *first = NULL;
|
|
859
|
+
int cnt = timedout->rkmq_msg_cnt;
|
|
860
|
+
|
|
861
|
+
if (abs_next_timeout)
|
|
862
|
+
*abs_next_timeout = 0;
|
|
863
|
+
|
|
864
|
+
/* Assume messages are added in time sequencial order */
|
|
865
|
+
TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp) {
|
|
866
|
+
/* NOTE: this is not true for the deprecated (and soon removed)
|
|
867
|
+
* LIFO queuing strategy. */
|
|
868
|
+
if (likely(rkm->rkm_ts_timeout > now)) {
|
|
869
|
+
if (abs_next_timeout)
|
|
870
|
+
*abs_next_timeout = rkm->rkm_ts_timeout;
|
|
871
|
+
break;
|
|
872
|
+
}
|
|
873
|
+
|
|
874
|
+
if (!first)
|
|
875
|
+
first = rkm;
|
|
876
|
+
|
|
877
|
+
rd_kafka_msgq_deq(rkmq, rkm, 1);
|
|
878
|
+
rd_kafka_msgq_enq(timedout, rkm);
|
|
879
|
+
}
|
|
880
|
+
|
|
881
|
+
return timedout->rkmq_msg_cnt - cnt;
|
|
882
|
+
}
|
|
883
|
+
|
|
884
|
+
|
|
885
|
+
int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq,
|
|
886
|
+
rd_kafka_msg_t *rkm,
|
|
887
|
+
int (*order_cmp)(const void *, const void *)) {
|
|
888
|
+
TAILQ_INSERT_SORTED(&rkmq->rkmq_msgs, rkm, rd_kafka_msg_t *, rkm_link,
|
|
889
|
+
order_cmp);
|
|
890
|
+
rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len;
|
|
891
|
+
return ++rkmq->rkmq_msg_cnt;
|
|
892
|
+
}
|
|
893
|
+
|
|
894
|
+
int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt,
|
|
895
|
+
rd_kafka_msgq_t *rkmq,
|
|
896
|
+
rd_kafka_msg_t *rkm) {
|
|
897
|
+
rd_dassert(rkm->rkm_u.producer.msgid != 0);
|
|
898
|
+
return rd_kafka_msgq_enq_sorted0(rkmq, rkm,
|
|
899
|
+
rkt->rkt_conf.msg_order_cmp);
|
|
900
|
+
}
|
|
901
|
+
|
|
902
|
+
/**
|
|
903
|
+
* @brief Find the insert before position (i.e., the msg which comes
|
|
904
|
+
* after \p rkm sequencially) for message \p rkm.
|
|
905
|
+
*
|
|
906
|
+
* @param rkmq insert queue.
|
|
907
|
+
* @param start_pos the element in \p rkmq to start scanning at, or NULL
|
|
908
|
+
* to start with the first element.
|
|
909
|
+
* @param rkm message to insert.
|
|
910
|
+
* @param cmp message comparator.
|
|
911
|
+
* @param cntp the accumulated number of messages up to, but not including,
|
|
912
|
+
* the returned insert position. Optional (NULL).
|
|
913
|
+
* Do not use when start_pos is set.
|
|
914
|
+
* @param bytesp the accumulated number of bytes up to, but not inclduing,
|
|
915
|
+
* the returned insert position. Optional (NULL).
|
|
916
|
+
* Do not use when start_pos is set.
|
|
917
|
+
*
|
|
918
|
+
* @remark cntp and bytesp will NOT be accurate when \p start_pos is non-NULL.
|
|
919
|
+
*
|
|
920
|
+
* @returns the insert position element, or NULL if \p rkm should be
|
|
921
|
+
* added at tail of queue.
|
|
922
|
+
*/
|
|
923
|
+
rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq,
|
|
924
|
+
const rd_kafka_msg_t *start_pos,
|
|
925
|
+
const rd_kafka_msg_t *rkm,
|
|
926
|
+
int (*cmp)(const void *, const void *),
|
|
927
|
+
int *cntp,
|
|
928
|
+
int64_t *bytesp) {
|
|
929
|
+
const rd_kafka_msg_t *curr;
|
|
930
|
+
int cnt = 0;
|
|
931
|
+
int64_t bytes = 0;
|
|
932
|
+
|
|
933
|
+
for (curr = start_pos ? start_pos : rd_kafka_msgq_first(rkmq); curr;
|
|
934
|
+
curr = TAILQ_NEXT(curr, rkm_link)) {
|
|
935
|
+
if (cmp(rkm, curr) < 0) {
|
|
936
|
+
if (cntp) {
|
|
937
|
+
*cntp = cnt;
|
|
938
|
+
*bytesp = bytes;
|
|
939
|
+
}
|
|
940
|
+
return (rd_kafka_msg_t *)curr;
|
|
941
|
+
}
|
|
942
|
+
if (cntp) {
|
|
943
|
+
cnt++;
|
|
944
|
+
bytes += rkm->rkm_len + rkm->rkm_key_len;
|
|
945
|
+
}
|
|
946
|
+
}
|
|
947
|
+
|
|
948
|
+
return NULL;
|
|
949
|
+
}
|
|
950
|
+
|
|
951
|
+
|
|
952
|
+
/**
 * @brief Split the original \p leftq into a left and right part,
 *        with element \p first_right being the first element in the
 *        right part (\p rightq).
 *
 * @param cnt is the number of messages up to, but not including \p first_right
 *            in \p leftq, namely the number of messages to remain in
 *            \p leftq after the split.
 * @param bytes is the bytes counterpart to \p cnt.
 */
void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq,
                         rd_kafka_msgq_t *rightq,
                         rd_kafka_msg_t *first_right,
                         int cnt,
                         int64_t bytes) {
        rd_kafka_msg_t *llast;

        /* Splitting at the head would leave leftq empty, which this
         * routine does not support. */
        rd_assert(first_right != TAILQ_FIRST(&leftq->rkmq_msgs));

        /* llast: the last element that remains in leftq. */
        llast = TAILQ_PREV(first_right, rd_kafka_msg_head_s, rkm_link);

        rd_kafka_msgq_init(rightq);

        /* Attach first_right..tail to rightq by raw TAILQ pointer surgery. */
        rightq->rkmq_msgs.tqh_first = first_right;
        rightq->rkmq_msgs.tqh_last = leftq->rkmq_msgs.tqh_last;

        first_right->rkm_link.tqe_prev = &rightq->rkmq_msgs.tqh_first;

        /* Terminate leftq at llast. */
        leftq->rkmq_msgs.tqh_last = &llast->rkm_link.tqe_next;
        llast->rkm_link.tqe_next = NULL;

        /* Counters: rightq receives whatever leftq loses. */
        rightq->rkmq_msg_cnt = leftq->rkmq_msg_cnt - cnt;
        rightq->rkmq_msg_bytes = leftq->rkmq_msg_bytes - bytes;
        leftq->rkmq_msg_cnt = cnt;
        leftq->rkmq_msg_bytes = bytes;

        /* Debug-build ordering verification of both halves. */
        rd_kafka_msgq_verify_order(NULL, leftq, 0, rd_false);
        rd_kafka_msgq_verify_order(NULL, rightq, 0, rd_false);
}
|
|
991
|
+
|
|
992
|
+
|
|
993
|
+
/**
|
|
994
|
+
* @brief Set per-message metadata for all messages in \p rkmq
|
|
995
|
+
*/
|
|
996
|
+
void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq,
|
|
997
|
+
int32_t broker_id,
|
|
998
|
+
int64_t base_offset,
|
|
999
|
+
int64_t timestamp,
|
|
1000
|
+
rd_kafka_msg_status_t status) {
|
|
1001
|
+
rd_kafka_msg_t *rkm;
|
|
1002
|
+
|
|
1003
|
+
TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
|
|
1004
|
+
rkm->rkm_broker_id = broker_id;
|
|
1005
|
+
rkm->rkm_offset = base_offset++;
|
|
1006
|
+
if (timestamp != -1) {
|
|
1007
|
+
rkm->rkm_timestamp = timestamp;
|
|
1008
|
+
rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
|
|
1009
|
+
}
|
|
1010
|
+
|
|
1011
|
+
/* Don't downgrade a message from any form of PERSISTED
|
|
1012
|
+
* to NOT_PERSISTED, since the original cause of indicating
|
|
1013
|
+
* PERSISTED can't be changed.
|
|
1014
|
+
* E.g., a previous ack or in-flight timeout. */
|
|
1015
|
+
if (unlikely(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
|
|
1016
|
+
rkm->rkm_status !=
|
|
1017
|
+
RD_KAFKA_MSG_STATUS_NOT_PERSISTED))
|
|
1018
|
+
continue;
|
|
1019
|
+
|
|
1020
|
+
rkm->rkm_status = status;
|
|
1021
|
+
}
|
|
1022
|
+
}
|
|
1023
|
+
|
|
1024
|
+
|
|
1025
|
+
/**
|
|
1026
|
+
* @brief Move all messages in \p src to \p dst whose msgid <= last_msgid.
|
|
1027
|
+
*
|
|
1028
|
+
* @remark src must be ordered
|
|
1029
|
+
*/
|
|
1030
|
+
void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest,
|
|
1031
|
+
rd_kafka_msgq_t *src,
|
|
1032
|
+
uint64_t last_msgid,
|
|
1033
|
+
rd_kafka_msg_status_t status) {
|
|
1034
|
+
rd_kafka_msg_t *rkm;
|
|
1035
|
+
|
|
1036
|
+
while ((rkm = rd_kafka_msgq_first(src)) &&
|
|
1037
|
+
rkm->rkm_u.producer.msgid <= last_msgid) {
|
|
1038
|
+
rd_kafka_msgq_deq(src, rkm, 1);
|
|
1039
|
+
rd_kafka_msgq_enq(dest, rkm);
|
|
1040
|
+
|
|
1041
|
+
rkm->rkm_status = status;
|
|
1042
|
+
}
|
|
1043
|
+
|
|
1044
|
+
rd_kafka_msgq_verify_order(NULL, dest, 0, rd_false);
|
|
1045
|
+
rd_kafka_msgq_verify_order(NULL, src, 0, rd_false);
|
|
1046
|
+
}
|
|
1047
|
+
|
|
1048
|
+
|
|
1049
|
+
|
|
1050
|
+
/**
 * @brief Random partitioner: picks a uniformly random partition.
 *
 * If the first pick is not currently available, one single re-roll is
 * performed and that pick is returned regardless of its availability
 * (best effort).
 */
int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt,
                                        const void *key,
                                        size_t keylen,
                                        int32_t partition_cnt,
                                        void *rkt_opaque,
                                        void *msg_opaque) {
        int32_t chosen = rd_jitter(0, partition_cnt - 1);

        if (likely(rd_kafka_topic_partition_available(rkt, chosen)))
                return chosen;

        /* Single retry on unavailable partition. */
        return rd_jitter(0, partition_cnt - 1);
}
|
|
1062
|
+
|
|
1063
|
+
/**
 * @brief Consistent partitioner: CRC32 hash of the key, modulo the
 *        partition count. Same key always maps to the same partition
 *        (for a fixed partition count).
 */
int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt,
                                            const void *key,
                                            size_t keylen,
                                            int32_t partition_cnt,
                                            void *rkt_opaque,
                                            void *msg_opaque) {
        return rd_crc32(key, keylen) % partition_cnt;
}
|
|
1071
|
+
|
|
1072
|
+
/**
 * @brief Consistent-random partitioner: CRC32 of the key when a
 *        non-empty key is present, random otherwise.
 */
int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt,
                                                   const void *key,
                                                   size_t keylen,
                                                   int32_t partition_cnt,
                                                   void *rkt_opaque,
                                                   void *msg_opaque) {
        /* Empty key: spread randomly. */
        if (keylen == 0)
                return rd_kafka_msg_partitioner_random(
                    rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);

        return rd_kafka_msg_partitioner_consistent(
            rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
}
|
|
1085
|
+
|
|
1086
|
+
/**
 * @brief Murmur2 partitioner: Java-client-compatible murmur2 hash of the
 *        key (masked to a non-negative value), modulo the partition count.
 */
int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt,
                                         const void *key,
                                         size_t keylen,
                                         int32_t partition_cnt,
                                         void *rkt_opaque,
                                         void *msg_opaque) {
        return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt;
}
|
|
1094
|
+
|
|
1095
|
+
/**
 * @brief Murmur2-random partitioner: murmur2 hash of the key when a key
 *        is present (NULL check, not length), random otherwise.
 */
int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt,
                                                const void *key,
                                                size_t keylen,
                                                int32_t partition_cnt,
                                                void *rkt_opaque,
                                                void *msg_opaque) {
        /* Keyless messages are spread randomly. */
        if (!key)
                return rd_kafka_msg_partitioner_random(
                    rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);

        return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt;
}
|
|
1107
|
+
|
|
1108
|
+
/**
 * @brief FNV-1a partitioner: FNV-1a hash of the key, modulo the
 *        partition count.
 */
int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt,
                                       const void *key,
                                       size_t keylen,
                                       int32_t partition_cnt,
                                       void *rkt_opaque,
                                       void *msg_opaque) {
        return rd_fnv1a(key, keylen) % partition_cnt;
}
|
|
1116
|
+
|
|
1117
|
+
/**
 * @brief FNV-1a-random partitioner: FNV-1a hash of the key when a key is
 *        present (NULL check, not length), random otherwise.
 */
int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt,
                                              const void *key,
                                              size_t keylen,
                                              int32_t partition_cnt,
                                              void *rkt_opaque,
                                              void *msg_opaque) {
        /* Keyless messages are spread randomly. */
        if (!key)
                return rd_kafka_msg_partitioner_random(
                    rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);

        return rd_fnv1a(key, keylen) % partition_cnt;
}
|
|
1129
|
+
|
|
1130
|
+
/**
 * @brief Select or refresh the topic's sticky partition.
 *
 * A new sticky partition is chosen (via the random partitioner) when the
 * sticky linger interval expires; the interval is expedited when the
 * current sticky partition is no longer available.
 *
 * @returns the current sticky partition for \p rkt.
 *
 * NOTE(review): mutates rkt->rkt_sticky_partition; called from
 * rd_kafka_msg_partitioner() under the topic read lock -- confirm that
 * concurrent updates here are acceptable/benign.
 */
int32_t rd_kafka_msg_sticky_partition(rd_kafka_topic_t *rkt,
                                      const void *key,
                                      size_t keylen,
                                      int32_t partition_cnt,
                                      void *rkt_opaque,
                                      void *msg_opaque) {

        /* If the current sticky partition is unavailable, force the
         * interval below to fire so a new partition is picked now. */
        if (!rd_kafka_topic_partition_available(rkt, rkt->rkt_sticky_partition))
                rd_interval_expedite(&rkt->rkt_sticky_intvl, 0);

        /* Linger interval expired: pick a new random sticky partition. */
        if (rd_interval(&rkt->rkt_sticky_intvl,
                        rkt->rkt_rk->rk_conf.sticky_partition_linger_ms * 1000,
                        0) > 0) {
                rkt->rkt_sticky_partition = rd_kafka_msg_partitioner_random(
                    rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
                rd_kafka_dbg(rkt->rkt_rk, TOPIC, "PARTITIONER",
                             "%s [%" PRId32 "] is the new sticky partition",
                             rkt->rkt_topic->str, rkt->rkt_sticky_partition);
        }

        return rkt->rkt_sticky_partition;
}
|
|
1152
|
+
|
|
1153
|
+
/**
 * @brief Assigns a message to a topic partition using a partitioner.
 *
 * @param do_lock if RD_DO_LOCK then acquire topic lock.
 *
 * @returns RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or .._UNKNOWN_TOPIC if
 *          partitioning failed, or 0 on success.
 *
 * @locality any
 * @locks acquires the topic read lock when \p do_lock is set, otherwise
 *        the caller is assumed to hold it.
 */
int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt,
                             rd_kafka_msg_t *rkm,
                             rd_dolock_t do_lock) {
        int32_t partition;
        rd_kafka_toppar_t *rktp_new;
        rd_kafka_resp_err_t err;

        if (do_lock)
                rd_kafka_topic_rdlock(rkt);

        switch (rkt->rkt_state) {
        case RD_KAFKA_TOPIC_S_UNKNOWN:
                /* No metadata received from cluster yet.
                 * Put message in UA partition and re-run partitioner when
                 * cluster comes up. */
                partition = RD_KAFKA_PARTITION_UA;
                break;

        case RD_KAFKA_TOPIC_S_NOTEXISTS:
                /* Topic not found in cluster.
                 * Fail message immediately. */
                err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
                if (do_lock)
                        rd_kafka_topic_rdunlock(rkt);
                return err;

        case RD_KAFKA_TOPIC_S_ERROR:
                /* Topic has permanent error.
                 * Fail message immediately. */
                err = rkt->rkt_err;
                if (do_lock)
                        rd_kafka_topic_rdunlock(rkt);
                return err;

        case RD_KAFKA_TOPIC_S_EXISTS:
                /* Topic exists in cluster. */

                /* Topic exists but has no partitions.
                 * This is usually a transient state following the
                 * auto-creation of a topic. */
                if (unlikely(rkt->rkt_partition_cnt == 0)) {
                        partition = RD_KAFKA_PARTITION_UA;
                        break;
                }

                /* Partition not assigned, run partitioner. */
                if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) {

                        /* Use the sticky partition for keyless messages
                         * (or empty-keyed messages under the default
                         * consistent_random partitioner), unless the purely
                         * random partitioner was configured. */
                        if (!rkt->rkt_conf.random_partitioner &&
                            (!rkm->rkm_key ||
                             (rkm->rkm_key_len == 0 &&
                              rkt->rkt_conf.partitioner ==
                                  rd_kafka_msg_partitioner_consistent_random))) {
                                partition = rd_kafka_msg_sticky_partition(
                                    rkt, rkm->rkm_key, rkm->rkm_key_len,
                                    rkt->rkt_partition_cnt,
                                    rkt->rkt_conf.opaque, rkm->rkm_opaque);
                        } else {
                                partition = rkt->rkt_conf.partitioner(
                                    rkt, rkm->rkm_key, rkm->rkm_key_len,
                                    rkt->rkt_partition_cnt,
                                    rkt->rkt_conf.opaque, rkm->rkm_opaque);
                        }
                } else
                        partition = rkm->rkm_partition;

                /* Check that partition exists. */
                if (partition >= rkt->rkt_partition_cnt) {
                        err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
                        if (do_lock)
                                rd_kafka_topic_rdunlock(rkt);
                        return err;
                }
                break;

        default:
                rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
                break;
        }

        /* Get new partition */
        rktp_new = rd_kafka_toppar_get(rkt, partition, 0);

        if (unlikely(!rktp_new)) {
                /* Unknown topic or partition */
                if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
                        err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
                else
                        err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;

                if (do_lock)
                        rd_kafka_topic_rdunlock(rkt);

                return err;
        }

        rd_atomic64_add(&rktp_new->rktp_c.producer_enq_msgs, 1);

        /* Update message partition */
        if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA)
                rkm->rkm_partition = partition;

        /* Partition is available: enqueue msg on partition's queue */
        rd_kafka_toppar_enq_msg(rktp_new, rkm, rd_clock());
        if (do_lock)
                rd_kafka_topic_rdunlock(rkt);

        if (rktp_new->rktp_partition != RD_KAFKA_PARTITION_UA &&
            rd_kafka_is_transactional(rkt->rkt_rk)) {
                /* Add partition to transaction */
                rd_kafka_txn_add_partition(rktp_new);
        }

        rd_kafka_toppar_destroy(rktp_new); /* from _get() */
        return 0;
}
|
|
1280
|
+
|
|
1281
|
+
|
|
1282
|
+
|
|
1283
|
+
/**
|
|
1284
|
+
* @name Public message type (rd_kafka_message_t)
|
|
1285
|
+
*/
|
|
1286
|
+
/**
 * @brief Destroy a public rkmessage.
 *
 * If the message is bound to an op (consumer path, _private set) the op
 * is destroyed; otherwise the underlying rd_kafka_msg_t is destroyed
 * directly.
 */
void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage) {
        rd_kafka_op_t *rko = (rd_kafka_op_t *)rkmessage->_private;

        if (likely(rko != NULL)) {
                rd_kafka_op_destroy(rko);
                return;
        }

        rd_kafka_msg_destroy(NULL, rd_kafka_message2msg(rkmessage));
}
|
|
1296
|
+
|
|
1297
|
+
|
|
1298
|
+
rd_kafka_message_t *rd_kafka_message_new(void) {
|
|
1299
|
+
rd_kafka_msg_t *rkm = rd_calloc(1, sizeof(*rkm));
|
|
1300
|
+
rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM;
|
|
1301
|
+
rkm->rkm_broker_id = -1;
|
|
1302
|
+
return (rd_kafka_message_t *)rkm;
|
|
1303
|
+
}
|
|
1304
|
+
|
|
1305
|
+
|
|
1306
|
+
/**
 * @brief Set up a rkmessage from an rko for passing to the application.
 * @remark Will trigger on_consume() interceptors if any.
 */
static rd_kafka_message_t *
rd_kafka_message_setup(rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) {
        rd_kafka_topic_t *rkt;
        rd_kafka_toppar_t *rktp = NULL;

        if (rko->rko_type == RD_KAFKA_OP_DR) {
                /* Delivery report: topic comes from the DR op itself. */
                rkt = rko->rko_u.dr.rkt;
        } else {
                /* Other ops: derive topic from the op's toppar, if any. */
                if (rko->rko_rktp) {
                        rktp = rko->rko_rktp;
                        rkt = rktp->rktp_rkt;
                } else
                        rkt = NULL;

                /* Bind the op to the message so
                 * rd_kafka_message_destroy() destroys the op. */
                rkmessage->_private = rko;
        }


        /* Acquire a topic reference for the message, if not already set. */
        if (!rkmessage->rkt && rkt)
                rkmessage->rkt = rd_kafka_topic_keep(rkt);

        if (rktp)
                rkmessage->partition = rktp->rktp_partition;

        /* Do not overwrite a pre-set per-message error. */
        if (!rkmessage->err)
                rkmessage->err = rko->rko_err;

        /* Call on_consume interceptors */
        switch (rko->rko_type) {
        case RD_KAFKA_OP_FETCH:
                if (!rkmessage->err && rkt)
                        rd_kafka_interceptors_on_consume(rkt->rkt_rk,
                                                         rkmessage);
                break;

        default:
                break;
        }

        return rkmessage;
}
|
|
1351
|
+
|
|
1352
|
+
|
|
1353
|
+
|
|
1354
|
+
/**
|
|
1355
|
+
* @brief Get rkmessage from rkm (for EVENT_DR)
|
|
1356
|
+
* @remark Must only be called just prior to passing a dr to the application.
|
|
1357
|
+
*/
|
|
1358
|
+
rd_kafka_message_t *rd_kafka_message_get_from_rkm(rd_kafka_op_t *rko,
|
|
1359
|
+
rd_kafka_msg_t *rkm) {
|
|
1360
|
+
return rd_kafka_message_setup(rko, &rkm->rkm_rkmessage);
|
|
1361
|
+
}
|
|
1362
|
+
|
|
1363
|
+
/**
|
|
1364
|
+
* @brief Convert rko to rkmessage
|
|
1365
|
+
* @remark Must only be called just prior to passing a consumed message
|
|
1366
|
+
* or event to the application.
|
|
1367
|
+
* @remark Will trigger on_consume() interceptors, if any.
|
|
1368
|
+
* @returns a rkmessage (bound to the rko).
|
|
1369
|
+
*/
|
|
1370
|
+
rd_kafka_message_t *rd_kafka_message_get(rd_kafka_op_t *rko) {
|
|
1371
|
+
rd_kafka_message_t *rkmessage;
|
|
1372
|
+
|
|
1373
|
+
if (!rko)
|
|
1374
|
+
return rd_kafka_message_new(); /* empty */
|
|
1375
|
+
|
|
1376
|
+
switch (rko->rko_type) {
|
|
1377
|
+
case RD_KAFKA_OP_FETCH:
|
|
1378
|
+
/* Use embedded rkmessage */
|
|
1379
|
+
rkmessage = &rko->rko_u.fetch.rkm.rkm_rkmessage;
|
|
1380
|
+
break;
|
|
1381
|
+
|
|
1382
|
+
case RD_KAFKA_OP_ERR:
|
|
1383
|
+
case RD_KAFKA_OP_CONSUMER_ERR:
|
|
1384
|
+
rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage;
|
|
1385
|
+
rkmessage->payload = rko->rko_u.err.errstr;
|
|
1386
|
+
rkmessage->len =
|
|
1387
|
+
rkmessage->payload ? strlen(rkmessage->payload) : 0;
|
|
1388
|
+
rkmessage->offset = rko->rko_u.err.offset;
|
|
1389
|
+
break;
|
|
1390
|
+
|
|
1391
|
+
default:
|
|
1392
|
+
rd_kafka_assert(NULL, !*"unhandled optype");
|
|
1393
|
+
RD_NOTREACHED();
|
|
1394
|
+
return NULL;
|
|
1395
|
+
}
|
|
1396
|
+
|
|
1397
|
+
return rd_kafka_message_setup(rko, rkmessage);
|
|
1398
|
+
}
|
|
1399
|
+
|
|
1400
|
+
|
|
1401
|
+
int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage,
|
|
1402
|
+
rd_kafka_timestamp_type_t *tstype) {
|
|
1403
|
+
rd_kafka_msg_t *rkm;
|
|
1404
|
+
|
|
1405
|
+
if (rkmessage->err) {
|
|
1406
|
+
if (tstype)
|
|
1407
|
+
*tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
|
|
1408
|
+
return -1;
|
|
1409
|
+
}
|
|
1410
|
+
|
|
1411
|
+
rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
|
|
1412
|
+
|
|
1413
|
+
if (tstype)
|
|
1414
|
+
*tstype = rkm->rkm_tstype;
|
|
1415
|
+
|
|
1416
|
+
return rkm->rkm_timestamp;
|
|
1417
|
+
}
|
|
1418
|
+
|
|
1419
|
+
|
|
1420
|
+
int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage) {
|
|
1421
|
+
rd_kafka_msg_t *rkm;
|
|
1422
|
+
|
|
1423
|
+
rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
|
|
1424
|
+
|
|
1425
|
+
if (unlikely(!rkm->rkm_ts_enq))
|
|
1426
|
+
return -1;
|
|
1427
|
+
|
|
1428
|
+
return rd_clock() - rkm->rkm_ts_enq;
|
|
1429
|
+
}
|
|
1430
|
+
|
|
1431
|
+
|
|
1432
|
+
int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage) {
|
|
1433
|
+
rd_kafka_msg_t *rkm;
|
|
1434
|
+
|
|
1435
|
+
rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
|
|
1436
|
+
|
|
1437
|
+
return rkm->rkm_broker_id;
|
|
1438
|
+
}
|
|
1439
|
+
|
|
1440
|
+
|
|
1441
|
+
|
|
1442
|
+
/**
 * @brief Parse serialized message headers and populate
 *        rkm->rkm_headers (which must be NULL).
 *
 * @returns NO_ERROR on success, __NOENT when there are no headers,
 *          or __BAD_MSG / a decode error on malformed input.
 */
static rd_kafka_resp_err_t rd_kafka_msg_headers_parse(rd_kafka_msg_t *rkm) {
        rd_kafka_buf_t *rkbuf;
        int64_t HeaderCount;
        /* Consumed by the rd_kafka_buf_read_*() error path (err_parse). */
        const int log_decode_errors = 0;
        rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG;
        int i;
        rd_kafka_headers_t *hdrs = NULL;

        rd_dassert(!rkm->rkm_headers);

        if (RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs) == 0)
                return RD_KAFKA_RESP_ERR__NOENT;

        /* Shadow buffer: parses in place, does not copy the header bytes. */
        rkbuf = rd_kafka_buf_new_shadow(
            rkm->rkm_u.consumer.binhdrs.data,
            RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs), NULL);

        /* NOTE: rd_kafka_buf_read_*() jump to err_parse on decode failure. */
        rd_kafka_buf_read_varint(rkbuf, &HeaderCount);

        if (HeaderCount <= 0) {
                rd_kafka_buf_destroy(rkbuf);
                return RD_KAFKA_RESP_ERR__NOENT;
        } else if (unlikely(HeaderCount > 100000)) {
                /* Sanity cap to reject absurd counts from corrupt input. */
                rd_kafka_buf_destroy(rkbuf);
                return RD_KAFKA_RESP_ERR__BAD_MSG;
        }

        hdrs = rd_kafka_headers_new((size_t)HeaderCount);

        /* Each header: varint KeyLen, Key, varint ValueLen, Value.
         * ValueLen == -1 encodes a NULL value. */
        for (i = 0; (int64_t)i < HeaderCount; i++) {
                int64_t KeyLen, ValueLen;
                const char *Key, *Value;

                rd_kafka_buf_read_varint(rkbuf, &KeyLen);
                rd_kafka_buf_read_ptr(rkbuf, &Key, (size_t)KeyLen);

                rd_kafka_buf_read_varint(rkbuf, &ValueLen);
                if (unlikely(ValueLen == -1))
                        Value = NULL;
                else
                        rd_kafka_buf_read_ptr(rkbuf, &Value, (size_t)ValueLen);

                rd_kafka_header_add(hdrs, Key, (ssize_t)KeyLen, Value,
                                    (ssize_t)ValueLen);
        }

        rkm->rkm_headers = hdrs;

        rd_kafka_buf_destroy(rkbuf);
        return RD_KAFKA_RESP_ERR_NO_ERROR;

err_parse:
        /* Decode failure: clean up buffer and any partially built headers. */
        err = rkbuf->rkbuf_err;
        rd_kafka_buf_destroy(rkbuf);
        if (hdrs)
                rd_kafka_headers_destroy(hdrs);
        return err;
}
|
|
1504
|
+
|
|
1505
|
+
|
|
1506
|
+
|
|
1507
|
+
rd_kafka_resp_err_t
|
|
1508
|
+
rd_kafka_message_headers(const rd_kafka_message_t *rkmessage,
|
|
1509
|
+
rd_kafka_headers_t **hdrsp) {
|
|
1510
|
+
rd_kafka_msg_t *rkm;
|
|
1511
|
+
rd_kafka_resp_err_t err;
|
|
1512
|
+
|
|
1513
|
+
rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
|
|
1514
|
+
|
|
1515
|
+
if (rkm->rkm_headers) {
|
|
1516
|
+
*hdrsp = rkm->rkm_headers;
|
|
1517
|
+
return RD_KAFKA_RESP_ERR_NO_ERROR;
|
|
1518
|
+
}
|
|
1519
|
+
|
|
1520
|
+
/* Producer (rkm_headers will be set if there were any headers) */
|
|
1521
|
+
if (rkm->rkm_flags & RD_KAFKA_MSG_F_PRODUCER)
|
|
1522
|
+
return RD_KAFKA_RESP_ERR__NOENT;
|
|
1523
|
+
|
|
1524
|
+
/* Consumer */
|
|
1525
|
+
|
|
1526
|
+
/* No previously parsed headers, check if the underlying
|
|
1527
|
+
* protocol message had headers and if so, parse them. */
|
|
1528
|
+
if (unlikely(!RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs)))
|
|
1529
|
+
return RD_KAFKA_RESP_ERR__NOENT;
|
|
1530
|
+
|
|
1531
|
+
err = rd_kafka_msg_headers_parse(rkm);
|
|
1532
|
+
if (unlikely(err))
|
|
1533
|
+
return err;
|
|
1534
|
+
|
|
1535
|
+
*hdrsp = rkm->rkm_headers;
|
|
1536
|
+
return RD_KAFKA_RESP_ERR_NO_ERROR;
|
|
1537
|
+
}
|
|
1538
|
+
|
|
1539
|
+
|
|
1540
|
+
rd_kafka_resp_err_t
|
|
1541
|
+
rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage,
|
|
1542
|
+
rd_kafka_headers_t **hdrsp) {
|
|
1543
|
+
rd_kafka_msg_t *rkm;
|
|
1544
|
+
rd_kafka_resp_err_t err;
|
|
1545
|
+
|
|
1546
|
+
err = rd_kafka_message_headers(rkmessage, hdrsp);
|
|
1547
|
+
if (err)
|
|
1548
|
+
return err;
|
|
1549
|
+
|
|
1550
|
+
rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
|
|
1551
|
+
rkm->rkm_headers = NULL;
|
|
1552
|
+
|
|
1553
|
+
return RD_KAFKA_RESP_ERR_NO_ERROR;
|
|
1554
|
+
}
|
|
1555
|
+
|
|
1556
|
+
|
|
1557
|
+
void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage,
|
|
1558
|
+
rd_kafka_headers_t *hdrs) {
|
|
1559
|
+
rd_kafka_msg_t *rkm;
|
|
1560
|
+
|
|
1561
|
+
rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
|
|
1562
|
+
|
|
1563
|
+
if (rkm->rkm_headers) {
|
|
1564
|
+
assert(rkm->rkm_headers != hdrs);
|
|
1565
|
+
rd_kafka_headers_destroy(rkm->rkm_headers);
|
|
1566
|
+
}
|
|
1567
|
+
|
|
1568
|
+
rkm->rkm_headers = hdrs;
|
|
1569
|
+
}
|
|
1570
|
+
|
|
1571
|
+
|
|
1572
|
+
|
|
1573
|
+
rd_kafka_msg_status_t
|
|
1574
|
+
rd_kafka_message_status(const rd_kafka_message_t *rkmessage) {
|
|
1575
|
+
rd_kafka_msg_t *rkm;
|
|
1576
|
+
|
|
1577
|
+
rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
|
|
1578
|
+
|
|
1579
|
+
return rkm->rkm_status;
|
|
1580
|
+
}
|
|
1581
|
+
|
|
1582
|
+
|
|
1583
|
+
/* Public API: returns the leader epoch of a consumed message,
 * or -1 when not applicable/unknown. */
int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage) {
        rd_kafka_msg_t *rkm;
        /* The rkm_u.consumer union member is only valid for messages
         * delivered to a consumer: require a full (non-lightweight)
         * topic object attached to a consumer client instance. */
        if (unlikely(!rkmessage->rkt || rd_kafka_rkt_is_lw(rkmessage->rkt) ||
                     !rkmessage->rkt->rkt_rk ||
                     rkmessage->rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER))
                return -1;

        rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);

        return rkm->rkm_u.consumer.leader_epoch;
}
|
|
1594
|
+
|
|
1595
|
+
|
|
1596
|
+
/**
 * @brief Debug helper: print the contents of \p rkmq to \p fp,
 *        one line per message, prefixed by \p what.
 */
void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq) {
        rd_kafka_msg_t *rkm;
        int seen = 0;

        fprintf(fp, "%s msgq_dump (%d messages, %" PRIusz " bytes):\n", what,
                rd_kafka_msgq_len(rkmq), rd_kafka_msgq_size(rkmq));

        TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
                fprintf(fp,
                        " [%" PRId32 "]@%" PRId64 ": rkm msgid %" PRIu64
                        ": \"%.*s\"\n",
                        rkm->rkm_partition, rkm->rkm_offset,
                        rkm->rkm_u.producer.msgid, (int)rkm->rkm_len,
                        (const char *)rkm->rkm_payload);
                /* Guard against a corrupted/looping list. */
                rd_assert(seen++ < rkmq->rkmq_msg_cnt);
        }
}
|
|
1612
|
+
|
|
1613
|
+
|
|
1614
|
+
|
|
1615
|
+
/**
|
|
1616
|
+
* @brief Destroy resources associated with msgbatch
|
|
1617
|
+
*/
|
|
1618
|
+
void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb) {
|
|
1619
|
+
if (rkmb->rktp) {
|
|
1620
|
+
rd_kafka_toppar_destroy(rkmb->rktp);
|
|
1621
|
+
rkmb->rktp = NULL;
|
|
1622
|
+
}
|
|
1623
|
+
|
|
1624
|
+
rd_assert(RD_KAFKA_MSGQ_EMPTY(&rkmb->msgq));
|
|
1625
|
+
}
|
|
1626
|
+
|
|
1627
|
+
|
|
1628
|
+
/**
|
|
1629
|
+
* @brief Initialize a message batch for the Idempotent Producer.
|
|
1630
|
+
*/
|
|
1631
|
+
void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb,
|
|
1632
|
+
rd_kafka_toppar_t *rktp,
|
|
1633
|
+
rd_kafka_pid_t pid,
|
|
1634
|
+
uint64_t epoch_base_msgid) {
|
|
1635
|
+
memset(rkmb, 0, sizeof(*rkmb));
|
|
1636
|
+
|
|
1637
|
+
rkmb->rktp = rd_kafka_toppar_keep(rktp);
|
|
1638
|
+
|
|
1639
|
+
rd_kafka_msgq_init(&rkmb->msgq);
|
|
1640
|
+
|
|
1641
|
+
rkmb->pid = pid;
|
|
1642
|
+
rkmb->first_seq = -1;
|
|
1643
|
+
rkmb->epoch_base_msgid = epoch_base_msgid;
|
|
1644
|
+
}
|
|
1645
|
+
|
|
1646
|
+
|
|
1647
|
+
/**
|
|
1648
|
+
* @brief Set the first message in the batch. which is used to set
|
|
1649
|
+
* the BaseSequence and keep track of batch reconstruction range.
|
|
1650
|
+
*
|
|
1651
|
+
* @param rkm is the first message in the batch.
|
|
1652
|
+
*/
|
|
1653
|
+
void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb,
|
|
1654
|
+
rd_kafka_msg_t *rkm) {
|
|
1655
|
+
rd_assert(rkmb->first_msgid == 0);
|
|
1656
|
+
|
|
1657
|
+
if (!rd_kafka_pid_valid(rkmb->pid))
|
|
1658
|
+
return;
|
|
1659
|
+
|
|
1660
|
+
rkmb->first_msgid = rkm->rkm_u.producer.msgid;
|
|
1661
|
+
|
|
1662
|
+
/* Our msgid counter is 64-bits, but the
|
|
1663
|
+
* Kafka protocol's sequence is only 31 (signed), so we'll
|
|
1664
|
+
* need to handle wrapping. */
|
|
1665
|
+
rkmb->first_seq = rd_kafka_seq_wrap(rkm->rkm_u.producer.msgid -
|
|
1666
|
+
rkmb->epoch_base_msgid);
|
|
1667
|
+
|
|
1668
|
+
/* Check if there is a stored last message
|
|
1669
|
+
* on the first msg, which means an entire
|
|
1670
|
+
* batch of messages are being retried and
|
|
1671
|
+
* we need to maintain the exact messages
|
|
1672
|
+
* of the original batch.
|
|
1673
|
+
* Simply tracking the last message, on
|
|
1674
|
+
* the first message, is sufficient for now.
|
|
1675
|
+
* Will be 0 if not applicable. */
|
|
1676
|
+
rkmb->last_msgid = rkm->rkm_u.producer.last_msgid;
|
|
1677
|
+
}
|
|
1678
|
+
|
|
1679
|
+
|
|
1680
|
+
|
|
1681
|
+
/**
|
|
1682
|
+
* @brief Message batch is ready to be transmitted.
|
|
1683
|
+
*
|
|
1684
|
+
* @remark This function assumes the batch will be transmitted and increases
|
|
1685
|
+
* the toppar's in-flight count.
|
|
1686
|
+
*/
|
|
1687
|
+
void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb) {
|
|
1688
|
+
rd_kafka_toppar_t *rktp = rkmb->rktp;
|
|
1689
|
+
rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
|
|
1690
|
+
|
|
1691
|
+
/* Keep track of number of requests in-flight per partition,
|
|
1692
|
+
* and the number of partitions with in-flight requests when
|
|
1693
|
+
* idempotent producer - this is used to drain partitions
|
|
1694
|
+
* before resetting the PID. */
|
|
1695
|
+
if (rd_atomic32_add(&rktp->rktp_msgs_inflight,
|
|
1696
|
+
rd_kafka_msgq_len(&rkmb->msgq)) ==
|
|
1697
|
+
rd_kafka_msgq_len(&rkmb->msgq) &&
|
|
1698
|
+
rd_kafka_is_idempotent(rk))
|
|
1699
|
+
rd_kafka_idemp_inflight_toppar_add(rk, rktp);
|
|
1700
|
+
}
|
|
1701
|
+
|
|
1702
|
+
|
|
1703
|
+
|
|
1704
|
+
/**
|
|
1705
|
+
* @brief Allow queue wakeups after \p abstime, or when the
|
|
1706
|
+
* given \p batch_msg_cnt or \p batch_msg_bytes have been reached.
|
|
1707
|
+
*
|
|
1708
|
+
* @param rkmq Queue to monitor and set wakeup parameters on.
|
|
1709
|
+
* @param dest_rkmq Destination queue used to meter current queue depths
|
|
1710
|
+
* and oldest message. May be the same as \p rkmq but is
|
|
1711
|
+
* typically the rktp_xmit_msgq.
|
|
1712
|
+
* @param next_wakeup If non-NULL: update the caller's next scheduler wakeup
|
|
1713
|
+
* according to the wakeup time calculated by this function.
|
|
1714
|
+
* @param now The current time.
|
|
1715
|
+
* @param linger_us The configured queue linger / batching time.
|
|
1716
|
+
* @param batch_msg_cnt Queue threshold before signalling.
|
|
1717
|
+
* @param batch_msg_bytes Queue threshold before signalling.
|
|
1718
|
+
*
|
|
1719
|
+
* @returns true if the wakeup conditions are already met and messages are ready
|
|
1720
|
+
* to be sent, else false.
|
|
1721
|
+
*
|
|
1722
|
+
* @locks_required rd_kafka_toppar_lock()
|
|
1723
|
+
*
|
|
1724
|
+
*
|
|
1725
|
+
* Producer queue and broker thread wake-up behaviour.
|
|
1726
|
+
*
|
|
1727
|
+
* There are contradicting requirements at play here:
|
|
1728
|
+
* - Latency: queued messages must be batched and sent according to
|
|
1729
|
+
* batch size and linger.ms configuration.
|
|
1730
|
+
* - Wakeups: keep the number of thread wake-ups to a minimum to avoid
|
|
1731
|
+
* high CPU utilization and context switching.
|
|
1732
|
+
*
|
|
1733
|
+
* The message queue (rd_kafka_msgq_t) has functionality for the writer (app)
|
|
1734
|
+
* to wake up the reader (broker thread) when there's a new message added.
|
|
1735
|
+
* This wakeup is done thru a combination of cndvar signalling and IO writes
|
|
1736
|
+
* to make sure a thread wakeup is triggered regardless if the broker thread
|
|
1737
|
+
* is blocking on cnd_timedwait() or on IO poll.
|
|
1738
|
+
* When the broker thread is woken up it will scan all the partitions it is
|
|
1739
|
+
* the leader for to check if there are messages to be sent - all according
|
|
1740
|
+
* to the configured batch size and linger.ms - and then decide its next
|
|
1741
|
+
* wait time depending on the lowest remaining linger.ms setting of any
|
|
1742
|
+
* partition with messages enqueued.
|
|
1743
|
+
*
|
|
1744
|
+
* This wait time must also be set as a threshold on the message queue, telling
|
|
1745
|
+
* the writer (app) that it must not trigger a wakeup until the wait time
|
|
1746
|
+
* has expired, or the batch sizes have been exceeded.
|
|
1747
|
+
*
|
|
1748
|
+
* The message queue wakeup time is per partition, while the broker thread
|
|
1749
|
+
* wakeup time is the lowest of all its partitions' wakeup times.
|
|
1750
|
+
*
|
|
1751
|
+
* The per-partition wakeup constraints are calculated and set by
|
|
1752
|
+
* rd_kafka_msgq_allow_wakeup_at() which is called from the broker thread's
|
|
1753
|
+
* per-partition handler.
|
|
1754
|
+
* This function is called each time there are changes to the broker-local
|
|
1755
|
+
* partition transmit queue (rktp_xmit_msgq), such as:
|
|
1756
|
+
* - messages are moved from the partition queue (rktp_msgq) to rktp_xmit_msgq
|
|
1757
|
+
* - messages are moved to a ProduceRequest
|
|
1758
|
+
* - messages are timed out from the rktp_xmit_msgq
|
|
1759
|
+
* - the flushing state changed (rd_kafka_flush() is called or returned).
|
|
1760
|
+
*
|
|
1761
|
+
* If none of these things happen, the broker thread will simply read the
|
|
1762
|
+
* last stored wakeup time for each partition and use that for calculating its
|
|
1763
|
+
* minimum wait time.
|
|
1764
|
+
*
|
|
1765
|
+
*
|
|
1766
|
+
* On the writer side, namely the application calling rd_kafka_produce(), the
|
|
1767
|
+
* followings checks are performed to see if it may trigger a wakeup when
|
|
1768
|
+
* it adds a new message to the partition queue:
|
|
1769
|
+
* - the current time has reached the wakeup time (e.g., remaining linger.ms
|
|
1770
|
+
* has expired), or
|
|
1771
|
+
* - with the new message(s) being added, either the batch.size or
|
|
1772
|
+
* batch.num.messages thresholds have been exceeded, or
|
|
1773
|
+
* - the application is calling rd_kafka_flush(),
|
|
1774
|
+
* - and no wakeup has been signalled yet. This is critical since it may take
|
|
1775
|
+
* some time for the broker thread to do its work we'll want to avoid
|
|
1776
|
+
* flooding it with wakeups. So a wakeup is only sent once per
|
|
1777
|
+
* wakeup period.
|
|
1778
|
+
*/
|
|
1779
|
+
rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq,
|
|
1780
|
+
const rd_kafka_msgq_t *dest_rkmq,
|
|
1781
|
+
rd_ts_t *next_wakeup,
|
|
1782
|
+
rd_ts_t now,
|
|
1783
|
+
rd_ts_t linger_us,
|
|
1784
|
+
int32_t batch_msg_cnt,
|
|
1785
|
+
int64_t batch_msg_bytes) {
|
|
1786
|
+
int32_t msg_cnt = rd_kafka_msgq_len(dest_rkmq);
|
|
1787
|
+
int64_t msg_bytes = rd_kafka_msgq_size(dest_rkmq);
|
|
1788
|
+
|
|
1789
|
+
if (RD_KAFKA_MSGQ_EMPTY(dest_rkmq)) {
|
|
1790
|
+
rkmq->rkmq_wakeup.on_first = rd_true;
|
|
1791
|
+
rkmq->rkmq_wakeup.abstime = now + linger_us;
|
|
1792
|
+
/* Leave next_wakeup untouched since the queue is empty */
|
|
1793
|
+
msg_cnt = 0;
|
|
1794
|
+
msg_bytes = 0;
|
|
1795
|
+
} else {
|
|
1796
|
+
const rd_kafka_msg_t *rkm = rd_kafka_msgq_first(dest_rkmq);
|
|
1797
|
+
|
|
1798
|
+
rkmq->rkmq_wakeup.on_first = rd_false;
|
|
1799
|
+
|
|
1800
|
+
if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) {
|
|
1801
|
+
/* Honour retry.backoff.ms:
|
|
1802
|
+
* wait for backoff to expire */
|
|
1803
|
+
rkmq->rkmq_wakeup.abstime =
|
|
1804
|
+
rkm->rkm_u.producer.ts_backoff;
|
|
1805
|
+
} else {
|
|
1806
|
+
/* Use message's produce() time + linger.ms */
|
|
1807
|
+
rkmq->rkmq_wakeup.abstime =
|
|
1808
|
+
rd_kafka_msg_enq_time(rkm) + linger_us;
|
|
1809
|
+
if (rkmq->rkmq_wakeup.abstime <= now)
|
|
1810
|
+
rkmq->rkmq_wakeup.abstime = now;
|
|
1811
|
+
}
|
|
1812
|
+
|
|
1813
|
+
/* Update the caller's scheduler wakeup time */
|
|
1814
|
+
if (next_wakeup && rkmq->rkmq_wakeup.abstime < *next_wakeup)
|
|
1815
|
+
*next_wakeup = rkmq->rkmq_wakeup.abstime;
|
|
1816
|
+
|
|
1817
|
+
msg_cnt = rd_kafka_msgq_len(dest_rkmq);
|
|
1818
|
+
msg_bytes = rd_kafka_msgq_size(dest_rkmq);
|
|
1819
|
+
}
|
|
1820
|
+
|
|
1821
|
+
/*
|
|
1822
|
+
* If there are more messages or bytes in queue than the batch limits,
|
|
1823
|
+
* or the linger time has been exceeded,
|
|
1824
|
+
* then there is no need for wakeup since the broker thread will
|
|
1825
|
+
* produce those messages as quickly as it can.
|
|
1826
|
+
*/
|
|
1827
|
+
if (msg_cnt >= batch_msg_cnt || msg_bytes >= batch_msg_bytes ||
|
|
1828
|
+
(msg_cnt > 0 && now >= rkmq->rkmq_wakeup.abstime)) {
|
|
1829
|
+
/* Prevent further signalling */
|
|
1830
|
+
rkmq->rkmq_wakeup.signalled = rd_true;
|
|
1831
|
+
|
|
1832
|
+
/* Batch is ready */
|
|
1833
|
+
return rd_true;
|
|
1834
|
+
}
|
|
1835
|
+
|
|
1836
|
+
/* If the current msg or byte count is less than the batch limit
|
|
1837
|
+
* then set the rkmq count to the remaining count or size to
|
|
1838
|
+
* reach the batch limits.
|
|
1839
|
+
* This is for the case where the producer is waiting for more
|
|
1840
|
+
* messages to accumulate into a batch. The wakeup should only
|
|
1841
|
+
* occur once a threshold is reached or the abstime has expired.
|
|
1842
|
+
*/
|
|
1843
|
+
rkmq->rkmq_wakeup.signalled = rd_false;
|
|
1844
|
+
rkmq->rkmq_wakeup.msg_cnt = batch_msg_cnt - msg_cnt;
|
|
1845
|
+
rkmq->rkmq_wakeup.msg_bytes = batch_msg_bytes - msg_bytes;
|
|
1846
|
+
|
|
1847
|
+
return rd_false;
|
|
1848
|
+
}
|
|
1849
|
+
|
|
1850
|
+
|
|
1851
|
+
|
|
1852
|
+
/**
 * @brief Verify order (by msgid) in message queue.
 *        For development use only.
 *
 * @param exp_first_msgid expected msgid of the first message, or 0 to
 *        use the first message's own msgid as the starting point.
 * @param gapless if true msgids must be strictly consecutive,
 *        else they only need to be monotonically non-decreasing.
 *
 * Asserts (rd_assert) if any ordering violation is found.
 */
void rd_kafka_msgq_verify_order0(const char *function,
                                 int line,
                                 const rd_kafka_toppar_t *rktp,
                                 const rd_kafka_msgq_t *rkmq,
                                 uint64_t exp_first_msgid,
                                 rd_bool_t gapless) {
        const rd_kafka_msg_t *rkm;
        uint64_t exp;
        int errcnt = 0;
        int cnt    = 0;
        const char *topic = rktp ? rktp->rktp_rkt->rkt_topic->str : "n/a";
        int32_t partition = rktp ? rktp->rktp_partition : -1;

        if (rd_kafka_msgq_len(rkmq) == 0)
                return;

        if (exp_first_msgid)
                exp = exp_first_msgid;
        else {
                exp = rd_kafka_msgq_first(rkmq)->rkm_u.producer.msgid;
                if (exp == 0) /* message without msgid (e.g., UA partition) */
                        return;
        }

        TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
#if 0
                printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) "
                       "msgid %"PRIu64"\n",
                       function, line,
                       topic, partition,
                       cnt, rkm, rkm->rkm_u.producer.msgid);
#endif
                if (gapless && rkm->rkm_u.producer.msgid != exp) {
                        /* Gapless mode: msgid must match exactly. */
                        printf("%s:%d: %s [%" PRId32
                               "]: rkm #%d (%p) "
                               "msgid %" PRIu64
                               ": "
                               "expected msgid %" PRIu64 "\n",
                               function, line, topic, partition, cnt, rkm,
                               rkm->rkm_u.producer.msgid, exp);
                        errcnt++;
                } else if (!gapless && rkm->rkm_u.producer.msgid < exp) {
                        /* Monotonic mode: msgid may skip but not go back. */
                        printf("%s:%d: %s [%" PRId32
                               "]: rkm #%d (%p) "
                               "msgid %" PRIu64
                               ": "
                               "expected increased msgid >= %" PRIu64 "\n",
                               function, line, topic, partition, cnt, rkm,
                               rkm->rkm_u.producer.msgid, exp);
                        errcnt++;
                } else
                        exp++;

                /* Guard against a corrupted/looping list. */
                if (cnt >= rkmq->rkmq_msg_cnt) {
                        printf("%s:%d: %s [%" PRId32
                               "]: rkm #%d (%p) "
                               "msgid %" PRIu64 ": loop in queue?\n",
                               function, line, topic, partition, cnt, rkm,
                               rkm->rkm_u.producer.msgid);
                        errcnt++;
                        break;
                }

                cnt++;
        }

        rd_assert(!errcnt);
}
|
|
1924
|
+
|
|
1925
|
+
rd_kafka_Produce_result_t *rd_kafka_Produce_result_new(int64_t offset,
|
|
1926
|
+
int64_t timestamp) {
|
|
1927
|
+
rd_kafka_Produce_result_t *ret = rd_calloc(1, sizeof(*ret));
|
|
1928
|
+
ret->offset = offset;
|
|
1929
|
+
ret->timestamp = timestamp;
|
|
1930
|
+
return ret;
|
|
1931
|
+
}
|
|
1932
|
+
|
|
1933
|
+
/**
 * @brief Free a Produce result and all strings it owns (the top-level
 *        errstr and each per-record error string).
 */
void rd_kafka_Produce_result_destroy(rd_kafka_Produce_result_t *result) {
        if (result->record_errors) {
                int32_t i;

                /* Each per-record error owns its errstr copy. */
                for (i = 0; i < result->record_errors_cnt; i++)
                        RD_IF_FREE(result->record_errors[i].errstr, rd_free);

                rd_free(result->record_errors);
        }

        RD_IF_FREE(result->errstr, rd_free);
        rd_free(result);
}
|
|
1944
|
+
|
|
1945
|
+
rd_kafka_Produce_result_t *
|
|
1946
|
+
rd_kafka_Produce_result_copy(const rd_kafka_Produce_result_t *result) {
|
|
1947
|
+
rd_kafka_Produce_result_t *ret = rd_calloc(1, sizeof(*ret));
|
|
1948
|
+
*ret = *result;
|
|
1949
|
+
if (result->errstr)
|
|
1950
|
+
ret->errstr = rd_strdup(result->errstr);
|
|
1951
|
+
if (result->record_errors) {
|
|
1952
|
+
ret->record_errors = rd_calloc(result->record_errors_cnt,
|
|
1953
|
+
sizeof(*result->record_errors));
|
|
1954
|
+
int32_t i;
|
|
1955
|
+
for (i = 0; i < result->record_errors_cnt; i++) {
|
|
1956
|
+
ret->record_errors[i] = result->record_errors[i];
|
|
1957
|
+
if (result->record_errors[i].errstr)
|
|
1958
|
+
ret->record_errors[i].errstr =
|
|
1959
|
+
rd_strdup(result->record_errors[i].errstr);
|
|
1960
|
+
}
|
|
1961
|
+
}
|
|
1962
|
+
return ret;
|
|
1963
|
+
}
|
|
1964
|
+
|
|
1965
|
+
/**
|
|
1966
|
+
* @name Unit tests
|
|
1967
|
+
*/
|
|
1968
|
+
|
|
1969
|
+
/**
|
|
1970
|
+
* @brief Unittest: message allocator
|
|
1971
|
+
*/
|
|
1972
|
+
rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize) {
|
|
1973
|
+
rd_kafka_msg_t *rkm;
|
|
1974
|
+
|
|
1975
|
+
rkm = rd_calloc(1, sizeof(*rkm));
|
|
1976
|
+
rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM;
|
|
1977
|
+
rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID;
|
|
1978
|
+
rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
|
|
1979
|
+
|
|
1980
|
+
if (msgsize) {
|
|
1981
|
+
rd_assert(msgsize <= sizeof(*rkm));
|
|
1982
|
+
rkm->rkm_payload = rkm;
|
|
1983
|
+
rkm->rkm_len = msgsize;
|
|
1984
|
+
}
|
|
1985
|
+
|
|
1986
|
+
return rkm;
|
|
1987
|
+
}
|
|
1988
|
+
|
|
1989
|
+
|
|
1990
|
+
|
|
1991
|
+
/**
|
|
1992
|
+
* @brief Unittest: destroy all messages in queue
|
|
1993
|
+
*/
|
|
1994
|
+
void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq) {
|
|
1995
|
+
rd_kafka_msg_t *rkm, *tmp;
|
|
1996
|
+
|
|
1997
|
+
TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp)
|
|
1998
|
+
rd_kafka_msg_destroy(NULL, rkm);
|
|
1999
|
+
|
|
2000
|
+
|
|
2001
|
+
rd_kafka_msgq_init(rkmq);
|
|
2002
|
+
}
|
|
2003
|
+
|
|
2004
|
+
|
|
2005
|
+
|
|
2006
|
+
/* Unittest helper: verify that the msgids on \p rkmq run from \p first
 * to \p last (direction gives FIFO vs LIFO). With \p req_consecutive
 * each msgid must equal the next expected value, else it only needs to
 * be >= the expected value. Returns non-zero on failure (RD_UT_ASSERT
 * also reports the failure). */
static int ut_verify_msgq_order(const char *what,
                                const rd_kafka_msgq_t *rkmq,
                                uint64_t first,
                                uint64_t last,
                                rd_bool_t req_consecutive) {
        const rd_kafka_msg_t *rkm;
        uint64_t expected = first;
        /* Walk up or down depending on range direction. */
        int incr  = first < last ? +1 : -1;
        int fails = 0;
        int cnt   = 0;

        TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
                if ((req_consecutive &&
                     rkm->rkm_u.producer.msgid != expected) ||
                    (!req_consecutive &&
                     rkm->rkm_u.producer.msgid < expected)) {
                        /* Cap diagnostics at 100 messages. */
                        if (fails++ < 100)
                                RD_UT_SAY("%s: expected msgid %s %" PRIu64
                                          " not %" PRIu64 " at index #%d",
                                          what, req_consecutive ? "==" : ">=",
                                          expected, rkm->rkm_u.producer.msgid,
                                          cnt);
                }

                cnt++;
                expected += incr;

                /* Guard against a corrupted/looping list. */
                if (cnt > rkmq->rkmq_msg_cnt) {
                        RD_UT_SAY("%s: loop in queue?", what);
                        fails++;
                        break;
                }
        }

        RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails);
        return fails;
}
|
|
2043
|
+
|
|
2044
|
+
/**
 * @brief Verify ordering comparator for message queues.
 *
 * Exercises sorted enqueue, pop-to-sendq, and rd_kafka_retry_msgq()
 * re-insertion in both FIFO and LIFO (\p fifo) modes, verifying queue
 * order after each step.
 *
 * @returns 0 on success, 1 on failure.
 */
static int unittest_msgq_order(const char *what,
                               int fifo,
                               int (*cmp)(const void *, const void *)) {
        rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
        rd_kafka_msg_t *rkm;
        rd_kafka_msgq_t sendq, sendq2;
        const size_t msgsize = 100;
        int i;

        RD_UT_SAY("%s: testing in %s mode", what, fifo ? "FIFO" : "LIFO");

        /* Enqueue msgids 1..6 in sorted order. */
        for (i = 1; i <= 6; i++) {
                rkm                       = ut_rd_kafka_msg_new(msgsize);
                rkm->rkm_u.producer.msgid = i;
                rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp);
        }

        if (fifo) {
                if (ut_verify_msgq_order("added", &rkmq, 1, 6, rd_true))
                        return 1;
        } else {
                if (ut_verify_msgq_order("added", &rkmq, 6, 1, rd_true))
                        return 1;
        }

        /* Move 3 messages to "send" queue which we then re-insert
         * in the original queue (i.e., "retry"). */
        rd_kafka_msgq_init(&sendq);
        while (rd_kafka_msgq_len(&sendq) < 3)
                rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));

        if (fifo) {
                if (ut_verify_msgq_order("send removed", &rkmq, 4, 6, rd_true))
                        return 1;

                if (ut_verify_msgq_order("sendq", &sendq, 1, 3, rd_true))
                        return 1;
        } else {
                if (ut_verify_msgq_order("send removed", &rkmq, 3, 1, rd_true))
                        return 1;

                if (ut_verify_msgq_order("sendq", &sendq, 6, 4, rd_true))
                        return 1;
        }

        /* Retry the messages, which moves them back to sendq
         * maintaining the original order, with exponential backoff
         * set to false */
        rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0,
                            RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0,
                            0);

        RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0,
                     "sendq FIFO should be empty, not contain %d messages",
                     rd_kafka_msgq_len(&sendq));

        if (fifo) {
                if (ut_verify_msgq_order("readded", &rkmq, 1, 6, rd_true))
                        return 1;
        } else {
                if (ut_verify_msgq_order("readded", &rkmq, 6, 1, rd_true))
                        return 1;
        }

        /* Move 4 first messages to "send" queue, then
         * retry them with max_retries=1 which should now fail for
         * the 3 first messages that were already retried. */
        rd_kafka_msgq_init(&sendq);
        while (rd_kafka_msgq_len(&sendq) < 4)
                rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));

        if (fifo) {
                if (ut_verify_msgq_order("send removed #2", &rkmq, 5, 6,
                                         rd_true))
                        return 1;

                if (ut_verify_msgq_order("sendq #2", &sendq, 1, 4, rd_true))
                        return 1;
        } else {
                if (ut_verify_msgq_order("send removed #2", &rkmq, 2, 1,
                                         rd_true))
                        return 1;

                if (ut_verify_msgq_order("sendq #2", &sendq, 6, 3, rd_true))
                        return 1;
        }

        /* Retry the messages, which should now keep the 3 first messages
         * on sendq (no more retries) and just number 4 moved back.
         * No exponential backoff applied. */
        rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0,
                            RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0,
                            0);

        if (fifo) {
                if (ut_verify_msgq_order("readded #2", &rkmq, 4, 6, rd_true))
                        return 1;

                if (ut_verify_msgq_order("no more retries", &sendq, 1, 3,
                                         rd_true))
                        return 1;

        } else {
                if (ut_verify_msgq_order("readded #2", &rkmq, 3, 1, rd_true))
                        return 1;

                if (ut_verify_msgq_order("no more retries", &sendq, 6, 4,
                                         rd_true))
                        return 1;
        }

        /* Move all messages back on rkmq without any exponential backoff. */
        rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0,
                            RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0,
                            0);


        /* Move first half of messages to sendq (1,2,3).
         * Move second half of messages to sendq2 (4,5,6).
         * Add new message to rkmq (7).
         * Move first half of messages back on rkmq (1,2,3,7).
         * Move second half back on the rkmq (1,2,3,4,5,6,7). */
        rd_kafka_msgq_init(&sendq);
        rd_kafka_msgq_init(&sendq2);

        while (rd_kafka_msgq_len(&sendq) < 3)
                rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));

        while (rd_kafka_msgq_len(&sendq2) < 3)
                rd_kafka_msgq_enq(&sendq2, rd_kafka_msgq_pop(&rkmq));

        /* i is 7 here, left over from the initial enqueue loop. */
        rkm                       = ut_rd_kafka_msg_new(msgsize);
        rkm->rkm_u.producer.msgid = i;
        rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp);
        /* No exponential backoff applied. */
        rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0,
                            RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0,
                            0);
        /* No exponential backoff applied. */
        rd_kafka_retry_msgq(&rkmq, &sendq2, 0, 1000, 0,
                            RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp, rd_false, 0,
                            0);

        RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0,
                     "sendq FIFO should be empty, not contain %d messages",
                     rd_kafka_msgq_len(&sendq));
        RD_UT_ASSERT(rd_kafka_msgq_len(&sendq2) == 0,
                     "sendq2 FIFO should be empty, not contain %d messages",
                     rd_kafka_msgq_len(&sendq2));

        if (fifo) {
                if (ut_verify_msgq_order("inject", &rkmq, 1, 7, rd_true))
                        return 1;
        } else {
                if (ut_verify_msgq_order("readded #2", &rkmq, 7, 1, rd_true))
                        return 1;
        }

        /* Accounting check: queue byte size must match count * msgsize. */
        RD_UT_ASSERT(rd_kafka_msgq_size(&rkmq) ==
                         rd_kafka_msgq_len(&rkmq) * msgsize,
                     "expected msgq size %" PRIusz ", not %" PRIusz,
                     (size_t)rd_kafka_msgq_len(&rkmq) * msgsize,
                     rd_kafka_msgq_size(&rkmq));


        ut_rd_kafka_msgq_purge(&sendq);
        ut_rd_kafka_msgq_purge(&sendq2);
        ut_rd_kafka_msgq_purge(&rkmq);

        return 0;
}
|
|
2218
|
+
|
|
2219
|
+
/**
 * @brief Verify that rd_kafka_seq_wrap() works.
 *
 * Each table entry maps a 64-bit input to the expected 31-bit
 * (signed int32) wrapped sequence.
 */
static int unittest_msg_seq_wrap(void) {
        static const struct exp {
                int64_t in;  /* input value to rd_kafka_seq_wrap() */
                int32_t out; /* expected wrapped sequence */
        } exp[] = {
            {0, 0},
            {1, 1},
            {(int64_t)INT32_MAX + 2, 1},
            {(int64_t)INT32_MAX + 1, 0},
            {INT32_MAX, INT32_MAX},
            {INT32_MAX - 1, INT32_MAX - 1},
            {INT32_MAX - 2, INT32_MAX - 2},
            {((int64_t)1 << 33) - 2, INT32_MAX - 1},
            {((int64_t)1 << 33) - 1, INT32_MAX},
            {((int64_t)1 << 34), 0},
            {((int64_t)1 << 35) + 3, 3},
            {1710 + 1229, 2939},
            {-1, -1}, /* sentinel: terminates the loop below */
        };
        int i;

        for (i = 0; exp[i].in != -1; i++) {
                int32_t wseq = rd_kafka_seq_wrap(exp[i].in);
                RD_UT_ASSERT(wseq == exp[i].out,
                             "Expected seq_wrap(%" PRId64 ") -> %" PRId32
                             ", not %" PRId32,
                             exp[i].in, exp[i].out, wseq);
        }

        RD_UT_PASS();
}
|
|
2253
|
+
|
|
2254
|
+
|
|
2255
|
+
/**
|
|
2256
|
+
* @brief Populate message queue with message ids from lo..hi (inclusive)
|
|
2257
|
+
*/
|
|
2258
|
+
static void ut_msgq_populate(rd_kafka_msgq_t *rkmq,
|
|
2259
|
+
uint64_t lo,
|
|
2260
|
+
uint64_t hi,
|
|
2261
|
+
size_t msgsize) {
|
|
2262
|
+
uint64_t i;
|
|
2263
|
+
|
|
2264
|
+
for (i = lo; i <= hi; i++) {
|
|
2265
|
+
rd_kafka_msg_t *rkm = ut_rd_kafka_msg_new(msgsize);
|
|
2266
|
+
rkm->rkm_u.producer.msgid = i;
|
|
2267
|
+
rd_kafka_msgq_enq(rkmq, rkm);
|
|
2268
|
+
}
|
|
2269
|
+
}
|
|
2270
|
+
|
|
2271
|
+
|
|
2272
|
+
/** Inclusive msgid range used by the msgq insert unit tests. */
struct ut_msg_range {
        uint64_t lo; /**< First msgid in range (inclusive) */
        uint64_t hi; /**< Last msgid in range (inclusive) */
};
|
|
2276
|
+
|
|
2277
|
+
/**
 * @brief Verify that msgq insert sorts are optimized. Issue #2508.
 *        All source ranges are combined into a single queue before insert.
 *
 * @param what Human-readable test description for log output.
 * @param max_us_per_msg Fail (or warn, in slow mode) if the measured
 *                       per-message insert cost exceeds this many microseconds.
 * @param ret_us_per_msg If non-NULL, receives the measured us/msg.
 * @param src_ranges Ranges enqueued into the source queue;
 *                   terminated by an entry with hi == 0.
 * @param dest_ranges Ranges enqueued into the destination queue;
 *                    terminated by an entry with hi == 0.
 *
 * @returns 0 on success, 1 on failure (via RD_UT_ASSERT / order check).
 */
static int
unittest_msgq_insert_all_sort(const char *what,
                              double max_us_per_msg,
                              double *ret_us_per_msg,
                              const struct ut_msg_range *src_ranges,
                              const struct ut_msg_range *dest_ranges) {
        rd_kafka_msgq_t destq, srcq;
        int i;
        /* Overall msgid bounds across both queues, used for order check. */
        uint64_t lo = UINT64_MAX, hi = 0;
        uint64_t cnt         = 0; /* Total message count (src + dest). */
        const size_t msgsize = 100;
        size_t totsize       = 0; /* Expected total byte size of destq. */
        rd_ts_t ts;
        double us_per_msg;

        RD_UT_SAY("Testing msgq insert (all) efficiency: %s", what);

        rd_kafka_msgq_init(&destq);
        rd_kafka_msgq_init(&srcq);

        /* Populate the source queue with all source ranges and track
         * bounds, count and expected size. */
        for (i = 0; src_ranges[i].hi > 0; i++) {
                uint64_t this_cnt;

                ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi,
                                 msgsize);
                if (src_ranges[i].lo < lo)
                        lo = src_ranges[i].lo;
                if (src_ranges[i].hi > hi)
                        hi = src_ranges[i].hi;
                this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1;
                cnt += this_cnt;
                totsize += msgsize * (size_t)this_cnt;
        }

        /* Populate the destination queue likewise. */
        for (i = 0; dest_ranges[i].hi > 0; i++) {
                uint64_t this_cnt;

                ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi,
                                 msgsize);
                if (dest_ranges[i].lo < lo)
                        lo = dest_ranges[i].lo;
                if (dest_ranges[i].hi > hi)
                        hi = dest_ranges[i].hi;
                this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1;
                cnt += this_cnt;
                totsize += msgsize * (size_t)this_cnt;
        }

        RD_UT_SAY("Begin insert of %d messages into destq with %d messages",
                  rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq));

        /* Time a single bulk insert of the entire srcq into destq. */
        ts = rd_clock();
        rd_kafka_msgq_insert_msgq(&destq, &srcq, rd_kafka_msg_cmp_msgid);
        ts         = rd_clock() - ts;
        us_per_msg = (double)ts / (double)cnt;

        RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts, us_per_msg);

        /* The insert must move everything: srcq drained, destq holds all. */
        RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0,
                     "srcq should be empty, but contains %d messages",
                     rd_kafka_msgq_len(&srcq));
        RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt,
                     "destq should contain %d messages, not %d", (int)cnt,
                     rd_kafka_msgq_len(&destq));

        /* Verify destq is sorted by msgid within [lo..hi]. */
        if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false))
                return 1;

        RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize,
                     "expected destq size to be %" PRIusz
                     " bytes, not %" PRIusz,
                     totsize, rd_kafka_msgq_size(&destq));

        ut_rd_kafka_msgq_purge(&srcq);
        ut_rd_kafka_msgq_purge(&destq);

        /* Enforce the performance budget, but only warn when running
         * in slow mode (e.g. instrumented builds) where timing is noisy. */
        if (!rd_unittest_slow)
                RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001),
                             "maximum us/msg exceeded: %.4f > %.4f us/msg",
                             us_per_msg, max_us_per_msg);
        else if (us_per_msg > max_us_per_msg + 0.0001)
                RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg",
                           us_per_msg, max_us_per_msg);

        if (ret_us_per_msg)
                *ret_us_per_msg = us_per_msg;

        RD_UT_PASS();
}
|
|
2370
|
+
|
|
2371
|
+
|
|
2372
|
+
/**
|
|
2373
|
+
* @brief Verify that msgq insert sorts are optimized. Issue #2508.
|
|
2374
|
+
* Inserts each source range individually.
|
|
2375
|
+
*/
|
|
2376
|
+
static int
|
|
2377
|
+
unittest_msgq_insert_each_sort(const char *what,
|
|
2378
|
+
double max_us_per_msg,
|
|
2379
|
+
double *ret_us_per_msg,
|
|
2380
|
+
const struct ut_msg_range *src_ranges,
|
|
2381
|
+
const struct ut_msg_range *dest_ranges) {
|
|
2382
|
+
rd_kafka_msgq_t destq;
|
|
2383
|
+
int i;
|
|
2384
|
+
uint64_t lo = UINT64_MAX, hi = 0;
|
|
2385
|
+
uint64_t cnt = 0;
|
|
2386
|
+
uint64_t scnt = 0;
|
|
2387
|
+
const size_t msgsize = 100;
|
|
2388
|
+
size_t totsize = 0;
|
|
2389
|
+
double us_per_msg;
|
|
2390
|
+
rd_ts_t accum_ts = 0;
|
|
2391
|
+
|
|
2392
|
+
RD_UT_SAY("Testing msgq insert (each) efficiency: %s", what);
|
|
2393
|
+
|
|
2394
|
+
rd_kafka_msgq_init(&destq);
|
|
2395
|
+
|
|
2396
|
+
for (i = 0; dest_ranges[i].hi > 0; i++) {
|
|
2397
|
+
uint64_t this_cnt;
|
|
2398
|
+
|
|
2399
|
+
ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi,
|
|
2400
|
+
msgsize);
|
|
2401
|
+
if (dest_ranges[i].lo < lo)
|
|
2402
|
+
lo = dest_ranges[i].lo;
|
|
2403
|
+
if (dest_ranges[i].hi > hi)
|
|
2404
|
+
hi = dest_ranges[i].hi;
|
|
2405
|
+
this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1;
|
|
2406
|
+
cnt += this_cnt;
|
|
2407
|
+
totsize += msgsize * (size_t)this_cnt;
|
|
2408
|
+
}
|
|
2409
|
+
|
|
2410
|
+
|
|
2411
|
+
for (i = 0; src_ranges[i].hi > 0; i++) {
|
|
2412
|
+
rd_kafka_msgq_t srcq;
|
|
2413
|
+
uint64_t this_cnt;
|
|
2414
|
+
rd_ts_t ts;
|
|
2415
|
+
|
|
2416
|
+
rd_kafka_msgq_init(&srcq);
|
|
2417
|
+
|
|
2418
|
+
ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi,
|
|
2419
|
+
msgsize);
|
|
2420
|
+
if (src_ranges[i].lo < lo)
|
|
2421
|
+
lo = src_ranges[i].lo;
|
|
2422
|
+
if (src_ranges[i].hi > hi)
|
|
2423
|
+
hi = src_ranges[i].hi;
|
|
2424
|
+
this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1;
|
|
2425
|
+
cnt += this_cnt;
|
|
2426
|
+
scnt += this_cnt;
|
|
2427
|
+
totsize += msgsize * (size_t)this_cnt;
|
|
2428
|
+
|
|
2429
|
+
RD_UT_SAY(
|
|
2430
|
+
"Begin insert of %d messages into destq with "
|
|
2431
|
+
"%d messages",
|
|
2432
|
+
rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq));
|
|
2433
|
+
|
|
2434
|
+
ts = rd_clock();
|
|
2435
|
+
rd_kafka_msgq_insert_msgq(&destq, &srcq,
|
|
2436
|
+
rd_kafka_msg_cmp_msgid);
|
|
2437
|
+
ts = rd_clock() - ts;
|
|
2438
|
+
accum_ts += ts;
|
|
2439
|
+
|
|
2440
|
+
RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts,
|
|
2441
|
+
(double)ts / (double)this_cnt);
|
|
2442
|
+
|
|
2443
|
+
RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0,
|
|
2444
|
+
"srcq should be empty, but contains %d messages",
|
|
2445
|
+
rd_kafka_msgq_len(&srcq));
|
|
2446
|
+
RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt,
|
|
2447
|
+
"destq should contain %d messages, not %d",
|
|
2448
|
+
(int)cnt, rd_kafka_msgq_len(&destq));
|
|
2449
|
+
|
|
2450
|
+
if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false))
|
|
2451
|
+
return 1;
|
|
2452
|
+
|
|
2453
|
+
RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize,
|
|
2454
|
+
"expected destq size to be %" PRIusz
|
|
2455
|
+
" bytes, not %" PRIusz,
|
|
2456
|
+
totsize, rd_kafka_msgq_size(&destq));
|
|
2457
|
+
|
|
2458
|
+
ut_rd_kafka_msgq_purge(&srcq);
|
|
2459
|
+
}
|
|
2460
|
+
|
|
2461
|
+
ut_rd_kafka_msgq_purge(&destq);
|
|
2462
|
+
|
|
2463
|
+
us_per_msg = (double)accum_ts / (double)scnt;
|
|
2464
|
+
|
|
2465
|
+
RD_UT_SAY("Total: %.4fus/msg over %" PRId64 " messages in %" PRId64
|
|
2466
|
+
"us",
|
|
2467
|
+
us_per_msg, scnt, accum_ts);
|
|
2468
|
+
|
|
2469
|
+
if (!rd_unittest_slow)
|
|
2470
|
+
RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001),
|
|
2471
|
+
"maximum us/msg exceeded: %.4f > %.4f us/msg",
|
|
2472
|
+
us_per_msg, max_us_per_msg);
|
|
2473
|
+
else if (us_per_msg > max_us_per_msg + 0.0001)
|
|
2474
|
+
RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg",
|
|
2475
|
+
us_per_msg, max_us_per_msg);
|
|
2476
|
+
|
|
2477
|
+
|
|
2478
|
+
if (ret_us_per_msg)
|
|
2479
|
+
*ret_us_per_msg = us_per_msg;
|
|
2480
|
+
|
|
2481
|
+
RD_UT_PASS();
|
|
2482
|
+
}
|
|
2483
|
+
|
|
2484
|
+
|
|
2485
|
+
|
|
2486
|
+
/**
 * @brief Run both the "insert all" and "insert each" msgq sort tests
 *        over the same source/destination ranges.
 *
 * @param ret_us_per_msg If non-NULL, receives the worse (larger) of the
 *                       two measured us/msg values.
 *
 * @returns 0 on success, or the first non-zero sub-test result.
 */
static int unittest_msgq_insert_sort(const char *what,
                                     double max_us_per_msg,
                                     double *ret_us_per_msg,
                                     const struct ut_msg_range *src_ranges,
                                     const struct ut_msg_range *dest_ranges) {
        double us_all  = 0.0;
        double us_each = 0.0;
        int rc;

        rc = unittest_msgq_insert_all_sort(what, max_us_per_msg, &us_all,
                                           src_ranges, dest_ranges);
        if (rc)
                return rc;

        rc = unittest_msgq_insert_each_sort(what, max_us_per_msg, &us_each,
                                            src_ranges, dest_ranges);
        if (rc)
                return rc;

        /* Report the slower of the two measurements. */
        if (ret_us_per_msg)
                *ret_us_per_msg = RD_MAX(us_all, us_each);

        return 0;
}
|
|
2512
|
+
|
|
2513
|
+
|
|
2514
|
+
/**
 * @brief Message unit test entry point: runs ordering, seq-wrap and
 *        insert-sort performance tests.
 *
 * @returns the number of failed sub-tests (0 on success).
 */
int unittest_msg(void) {
        int fails              = 0;
        double insert_baseline = 0.0;

        fails += unittest_msgq_order("FIFO", 1, rd_kafka_msg_cmp_msgid);
        fails += unittest_msg_seq_wrap();

        /* Measure a baseline insert cost on a tiny workload; the huge
         * max_us_per_msg effectively disables the budget check here. */
        fails += unittest_msgq_insert_sort(
            "get baseline insert time", 100000.0, &insert_baseline,
            (const struct ut_msg_range[]) {{1, 1}, {3, 3}, {0, 0}},
            (const struct ut_msg_range[]) {{2, 2}, {4, 4}, {0, 0}});

        /* Allow some wiggle room in baseline time. */
        if (insert_baseline < 0.1)
                insert_baseline = 0.2;
        insert_baseline *= 3;

        fails += unittest_msgq_insert_sort(
            "single-message ranges", insert_baseline, NULL,
            (const struct ut_msg_range[]) {
                {2, 2}, {4, 4}, {9, 9}, {33692864, 33692864}, {0, 0}},
            (const struct ut_msg_range[]) {{1, 1},
                                           {3, 3},
                                           {5, 5},
                                           {10, 10},
                                           {33692865, 33692865},
                                           {0, 0}});
        if (rd_unittest_with_valgrind) {
                /* Millions of messages are too slow under Valgrind. */
                RD_UT_WARN(
                    "Skipping large message range test "
                    "when using Valgrind");
        } else {
                fails += unittest_msgq_insert_sort(
                    "many messages", insert_baseline, NULL,
                    (const struct ut_msg_range[]) {{100000, 200000},
                                                   {400000, 450000},
                                                   {900000, 920000},
                                                   {33692864, 33751992},
                                                   {33906868, 33993690},
                                                   {40000000, 44000000},
                                                   {0, 0}},
                    (const struct ut_msg_range[]) {{1, 199},
                                                   {350000, 360000},
                                                   {500000, 500010},
                                                   {1000000, 1000200},
                                                   {33751993, 33906867},
                                                   {50000001, 50000001},
                                                   {0, 0}});
        }
        /* Regression ranges from the original issue #2508 report. */
        fails += unittest_msgq_insert_sort(
            "issue #2508", insert_baseline, NULL,
            (const struct ut_msg_range[]) {
                {33692864, 33751992}, {33906868, 33993690}, {0, 0}},
            (const struct ut_msg_range[]) {{33751993, 33906867}, {0, 0}});

        /* The standard case where all of the srcq
         * goes after the destq.
         * Create a big destq and a number of small srcqs.
         * Should not result in O(n) scans to find the insert position. */
        fails += unittest_msgq_insert_sort(
            "issue #2450 (v1.2.1 regression)", insert_baseline, NULL,
            (const struct ut_msg_range[]) {{200000, 200001},
                                           {200002, 200006},
                                           {200009, 200012},
                                           {200015, 200016},
                                           {200020, 200022},
                                           {200030, 200090},
                                           {200091, 200092},
                                           {200093, 200094},
                                           {200095, 200096},
                                           {200097, 200099},
                                           {0, 0}},
            (const struct ut_msg_range[]) {{1, 199999}, {0, 0}});

        return fails;
}
|