@point3/node-rdkafka 3.6.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (707)
  1. package/LICENSE.txt +20 -0
  2. package/README.md +636 -0
  3. package/binding.gyp +154 -0
  4. package/deps/librdkafka/.clang-format +136 -0
  5. package/deps/librdkafka/.clang-format-cpp +103 -0
  6. package/deps/librdkafka/.dir-locals.el +10 -0
  7. package/deps/librdkafka/.formatignore +33 -0
  8. package/deps/librdkafka/.gdbmacros +19 -0
  9. package/deps/librdkafka/.github/CODEOWNERS +1 -0
  10. package/deps/librdkafka/.github/ISSUE_TEMPLATE +34 -0
  11. package/deps/librdkafka/.semaphore/run-all-tests.yml +77 -0
  12. package/deps/librdkafka/.semaphore/semaphore-integration.yml +250 -0
  13. package/deps/librdkafka/.semaphore/semaphore.yml +378 -0
  14. package/deps/librdkafka/.semaphore/verify-linux-packages.yml +41 -0
  15. package/deps/librdkafka/CHANGELOG.md +2208 -0
  16. package/deps/librdkafka/CMakeLists.txt +291 -0
  17. package/deps/librdkafka/CODE_OF_CONDUCT.md +46 -0
  18. package/deps/librdkafka/CONFIGURATION.md +209 -0
  19. package/deps/librdkafka/CONTRIBUTING.md +431 -0
  20. package/deps/librdkafka/Doxyfile +2375 -0
  21. package/deps/librdkafka/INTRODUCTION.md +2481 -0
  22. package/deps/librdkafka/LICENSE +26 -0
  23. package/deps/librdkafka/LICENSE.cjson +22 -0
  24. package/deps/librdkafka/LICENSE.crc32c +28 -0
  25. package/deps/librdkafka/LICENSE.fnv1a +18 -0
  26. package/deps/librdkafka/LICENSE.hdrhistogram +27 -0
  27. package/deps/librdkafka/LICENSE.lz4 +26 -0
  28. package/deps/librdkafka/LICENSE.murmur2 +25 -0
  29. package/deps/librdkafka/LICENSE.nanopb +22 -0
  30. package/deps/librdkafka/LICENSE.opentelemetry +203 -0
  31. package/deps/librdkafka/LICENSE.pycrc +23 -0
  32. package/deps/librdkafka/LICENSE.queue +31 -0
  33. package/deps/librdkafka/LICENSE.regexp +5 -0
  34. package/deps/librdkafka/LICENSE.snappy +36 -0
  35. package/deps/librdkafka/LICENSE.tinycthread +26 -0
  36. package/deps/librdkafka/LICENSE.wingetopt +49 -0
  37. package/deps/librdkafka/LICENSES.txt +625 -0
  38. package/deps/librdkafka/Makefile +125 -0
  39. package/deps/librdkafka/README.md +199 -0
  40. package/deps/librdkafka/README.win32 +26 -0
  41. package/deps/librdkafka/STATISTICS.md +624 -0
  42. package/deps/librdkafka/configure +214 -0
  43. package/deps/librdkafka/configure.self +331 -0
  44. package/deps/librdkafka/debian/changelog +111 -0
  45. package/deps/librdkafka/debian/compat +1 -0
  46. package/deps/librdkafka/debian/control +71 -0
  47. package/deps/librdkafka/debian/copyright +99 -0
  48. package/deps/librdkafka/debian/gbp.conf +9 -0
  49. package/deps/librdkafka/debian/librdkafka++1.install +1 -0
  50. package/deps/librdkafka/debian/librdkafka-dev.examples +2 -0
  51. package/deps/librdkafka/debian/librdkafka-dev.install +9 -0
  52. package/deps/librdkafka/debian/librdkafka1.docs +5 -0
  53. package/deps/librdkafka/debian/librdkafka1.install +1 -0
  54. package/deps/librdkafka/debian/librdkafka1.symbols +135 -0
  55. package/deps/librdkafka/debian/rules +19 -0
  56. package/deps/librdkafka/debian/source/format +1 -0
  57. package/deps/librdkafka/debian/watch +2 -0
  58. package/deps/librdkafka/dev-conf.sh +123 -0
  59. package/deps/librdkafka/examples/CMakeLists.txt +79 -0
  60. package/deps/librdkafka/examples/Makefile +167 -0
  61. package/deps/librdkafka/examples/README.md +42 -0
  62. package/deps/librdkafka/examples/alter_consumer_group_offsets.c +338 -0
  63. package/deps/librdkafka/examples/consumer.c +271 -0
  64. package/deps/librdkafka/examples/delete_records.c +233 -0
  65. package/deps/librdkafka/examples/describe_cluster.c +322 -0
  66. package/deps/librdkafka/examples/describe_consumer_groups.c +455 -0
  67. package/deps/librdkafka/examples/describe_topics.c +427 -0
  68. package/deps/librdkafka/examples/elect_leaders.c +317 -0
  69. package/deps/librdkafka/examples/globals.json +11 -0
  70. package/deps/librdkafka/examples/idempotent_producer.c +344 -0
  71. package/deps/librdkafka/examples/incremental_alter_configs.c +347 -0
  72. package/deps/librdkafka/examples/kafkatest_verifiable_client.cpp +945 -0
  73. package/deps/librdkafka/examples/list_consumer_group_offsets.c +359 -0
  74. package/deps/librdkafka/examples/list_consumer_groups.c +365 -0
  75. package/deps/librdkafka/examples/list_offsets.c +327 -0
  76. package/deps/librdkafka/examples/misc.c +287 -0
  77. package/deps/librdkafka/examples/openssl_engine_example.cpp +248 -0
  78. package/deps/librdkafka/examples/producer.c +251 -0
  79. package/deps/librdkafka/examples/producer.cpp +228 -0
  80. package/deps/librdkafka/examples/rdkafka_complex_consumer_example.c +617 -0
  81. package/deps/librdkafka/examples/rdkafka_complex_consumer_example.cpp +467 -0
  82. package/deps/librdkafka/examples/rdkafka_consume_batch.cpp +264 -0
  83. package/deps/librdkafka/examples/rdkafka_example.c +853 -0
  84. package/deps/librdkafka/examples/rdkafka_example.cpp +679 -0
  85. package/deps/librdkafka/examples/rdkafka_performance.c +1781 -0
  86. package/deps/librdkafka/examples/transactions-older-broker.c +668 -0
  87. package/deps/librdkafka/examples/transactions.c +665 -0
  88. package/deps/librdkafka/examples/user_scram.c +491 -0
  89. package/deps/librdkafka/examples/win_ssl_cert_store.cpp +396 -0
  90. package/deps/librdkafka/lds-gen.py +73 -0
  91. package/deps/librdkafka/mainpage.doxy +40 -0
  92. package/deps/librdkafka/mklove/Makefile.base +329 -0
  93. package/deps/librdkafka/mklove/modules/configure.atomics +144 -0
  94. package/deps/librdkafka/mklove/modules/configure.base +2484 -0
  95. package/deps/librdkafka/mklove/modules/configure.builtin +70 -0
  96. package/deps/librdkafka/mklove/modules/configure.cc +186 -0
  97. package/deps/librdkafka/mklove/modules/configure.cxx +8 -0
  98. package/deps/librdkafka/mklove/modules/configure.fileversion +65 -0
  99. package/deps/librdkafka/mklove/modules/configure.gitversion +29 -0
  100. package/deps/librdkafka/mklove/modules/configure.good_cflags +18 -0
  101. package/deps/librdkafka/mklove/modules/configure.host +132 -0
  102. package/deps/librdkafka/mklove/modules/configure.lib +49 -0
  103. package/deps/librdkafka/mklove/modules/configure.libcurl +99 -0
  104. package/deps/librdkafka/mklove/modules/configure.libsasl2 +36 -0
  105. package/deps/librdkafka/mklove/modules/configure.libssl +147 -0
  106. package/deps/librdkafka/mklove/modules/configure.libzstd +58 -0
  107. package/deps/librdkafka/mklove/modules/configure.parseversion +95 -0
  108. package/deps/librdkafka/mklove/modules/configure.pic +16 -0
  109. package/deps/librdkafka/mklove/modules/configure.socket +20 -0
  110. package/deps/librdkafka/mklove/modules/configure.zlib +61 -0
  111. package/deps/librdkafka/mklove/modules/patches/README.md +8 -0
  112. package/deps/librdkafka/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch +11 -0
  113. package/deps/librdkafka/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch +56 -0
  114. package/deps/librdkafka/packaging/RELEASE.md +319 -0
  115. package/deps/librdkafka/packaging/alpine/build-alpine.sh +38 -0
  116. package/deps/librdkafka/packaging/archlinux/PKGBUILD +30 -0
  117. package/deps/librdkafka/packaging/cmake/Config.cmake.in +37 -0
  118. package/deps/librdkafka/packaging/cmake/Modules/FindLZ4.cmake +38 -0
  119. package/deps/librdkafka/packaging/cmake/Modules/FindZSTD.cmake +27 -0
  120. package/deps/librdkafka/packaging/cmake/Modules/LICENSE.FindZstd +178 -0
  121. package/deps/librdkafka/packaging/cmake/README.md +38 -0
  122. package/deps/librdkafka/packaging/cmake/config.h.in +52 -0
  123. package/deps/librdkafka/packaging/cmake/parseversion.cmake +60 -0
  124. package/deps/librdkafka/packaging/cmake/rdkafka.pc.in +12 -0
  125. package/deps/librdkafka/packaging/cmake/try_compile/atomic_32_test.c +8 -0
  126. package/deps/librdkafka/packaging/cmake/try_compile/atomic_64_test.c +8 -0
  127. package/deps/librdkafka/packaging/cmake/try_compile/c11threads_test.c +14 -0
  128. package/deps/librdkafka/packaging/cmake/try_compile/crc32c_hw_test.c +27 -0
  129. package/deps/librdkafka/packaging/cmake/try_compile/dlopen_test.c +11 -0
  130. package/deps/librdkafka/packaging/cmake/try_compile/libsasl2_test.c +7 -0
  131. package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_darwin_test.c +6 -0
  132. package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_freebsd_test.c +7 -0
  133. package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_gnu_test.c +5 -0
  134. package/deps/librdkafka/packaging/cmake/try_compile/rand_r_test.c +7 -0
  135. package/deps/librdkafka/packaging/cmake/try_compile/rdkafka_setup.cmake +122 -0
  136. package/deps/librdkafka/packaging/cmake/try_compile/regex_test.c +10 -0
  137. package/deps/librdkafka/packaging/cmake/try_compile/strndup_test.c +5 -0
  138. package/deps/librdkafka/packaging/cmake/try_compile/sync_32_test.c +8 -0
  139. package/deps/librdkafka/packaging/cmake/try_compile/sync_64_test.c +8 -0
  140. package/deps/librdkafka/packaging/cp/README.md +16 -0
  141. package/deps/librdkafka/packaging/cp/check_features.c +72 -0
  142. package/deps/librdkafka/packaging/cp/verify-deb.sh +33 -0
  143. package/deps/librdkafka/packaging/cp/verify-packages.sh +69 -0
  144. package/deps/librdkafka/packaging/cp/verify-rpm.sh +32 -0
  145. package/deps/librdkafka/packaging/debian/changelog +66 -0
  146. package/deps/librdkafka/packaging/debian/compat +1 -0
  147. package/deps/librdkafka/packaging/debian/control +49 -0
  148. package/deps/librdkafka/packaging/debian/copyright +84 -0
  149. package/deps/librdkafka/packaging/debian/docs +5 -0
  150. package/deps/librdkafka/packaging/debian/gbp.conf +9 -0
  151. package/deps/librdkafka/packaging/debian/librdkafka-dev.dirs +2 -0
  152. package/deps/librdkafka/packaging/debian/librdkafka-dev.examples +2 -0
  153. package/deps/librdkafka/packaging/debian/librdkafka-dev.install +6 -0
  154. package/deps/librdkafka/packaging/debian/librdkafka-dev.substvars +1 -0
  155. package/deps/librdkafka/packaging/debian/librdkafka.dsc +16 -0
  156. package/deps/librdkafka/packaging/debian/librdkafka1-dbg.substvars +1 -0
  157. package/deps/librdkafka/packaging/debian/librdkafka1.dirs +1 -0
  158. package/deps/librdkafka/packaging/debian/librdkafka1.install +2 -0
  159. package/deps/librdkafka/packaging/debian/librdkafka1.postinst.debhelper +5 -0
  160. package/deps/librdkafka/packaging/debian/librdkafka1.postrm.debhelper +5 -0
  161. package/deps/librdkafka/packaging/debian/librdkafka1.symbols +64 -0
  162. package/deps/librdkafka/packaging/debian/rules +19 -0
  163. package/deps/librdkafka/packaging/debian/source/format +1 -0
  164. package/deps/librdkafka/packaging/debian/watch +2 -0
  165. package/deps/librdkafka/packaging/get_version.py +21 -0
  166. package/deps/librdkafka/packaging/homebrew/README.md +15 -0
  167. package/deps/librdkafka/packaging/homebrew/brew-update-pr.sh +31 -0
  168. package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw-static.sh +52 -0
  169. package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw.sh +21 -0
  170. package/deps/librdkafka/packaging/mingw-w64/export-variables.sh +13 -0
  171. package/deps/librdkafka/packaging/mingw-w64/run-tests.sh +6 -0
  172. package/deps/librdkafka/packaging/mingw-w64/semaphoreci-build.sh +38 -0
  173. package/deps/librdkafka/packaging/nuget/README.md +84 -0
  174. package/deps/librdkafka/packaging/nuget/artifact.py +177 -0
  175. package/deps/librdkafka/packaging/nuget/cleanup-s3.py +143 -0
  176. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip +0 -0
  177. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip +0 -0
  178. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip +0 -0
  179. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip +0 -0
  180. package/deps/librdkafka/packaging/nuget/nuget.sh +21 -0
  181. package/deps/librdkafka/packaging/nuget/nugetpackage.py +278 -0
  182. package/deps/librdkafka/packaging/nuget/packaging.py +448 -0
  183. package/deps/librdkafka/packaging/nuget/push-to-nuget.sh +21 -0
  184. package/deps/librdkafka/packaging/nuget/release.py +167 -0
  185. package/deps/librdkafka/packaging/nuget/requirements.txt +3 -0
  186. package/deps/librdkafka/packaging/nuget/staticpackage.py +178 -0
  187. package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.nuspec +21 -0
  188. package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.props +18 -0
  189. package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.targets +19 -0
  190. package/deps/librdkafka/packaging/nuget/zfile/__init__.py +0 -0
  191. package/deps/librdkafka/packaging/nuget/zfile/zfile.py +98 -0
  192. package/deps/librdkafka/packaging/rpm/Makefile +92 -0
  193. package/deps/librdkafka/packaging/rpm/README.md +23 -0
  194. package/deps/librdkafka/packaging/rpm/el7-x86_64.cfg +40 -0
  195. package/deps/librdkafka/packaging/rpm/librdkafka.spec +118 -0
  196. package/deps/librdkafka/packaging/rpm/mock-on-docker.sh +96 -0
  197. package/deps/librdkafka/packaging/rpm/tests/Makefile +25 -0
  198. package/deps/librdkafka/packaging/rpm/tests/README.md +8 -0
  199. package/deps/librdkafka/packaging/rpm/tests/run-test.sh +42 -0
  200. package/deps/librdkafka/packaging/rpm/tests/test-on-docker.sh +56 -0
  201. package/deps/librdkafka/packaging/rpm/tests/test.c +77 -0
  202. package/deps/librdkafka/packaging/rpm/tests/test.cpp +34 -0
  203. package/deps/librdkafka/packaging/tools/Dockerfile +31 -0
  204. package/deps/librdkafka/packaging/tools/build-configurations-checks.sh +12 -0
  205. package/deps/librdkafka/packaging/tools/build-deb-package.sh +64 -0
  206. package/deps/librdkafka/packaging/tools/build-debian.sh +65 -0
  207. package/deps/librdkafka/packaging/tools/build-manylinux.sh +68 -0
  208. package/deps/librdkafka/packaging/tools/build-release-artifacts.sh +139 -0
  209. package/deps/librdkafka/packaging/tools/distro-build.sh +38 -0
  210. package/deps/librdkafka/packaging/tools/gh-release-checksums.py +39 -0
  211. package/deps/librdkafka/packaging/tools/rdutcoverage.sh +25 -0
  212. package/deps/librdkafka/packaging/tools/requirements.txt +2 -0
  213. package/deps/librdkafka/packaging/tools/run-in-docker.sh +28 -0
  214. package/deps/librdkafka/packaging/tools/run-integration-tests.sh +31 -0
  215. package/deps/librdkafka/packaging/tools/run-style-check.sh +4 -0
  216. package/deps/librdkafka/packaging/tools/style-format.sh +149 -0
  217. package/deps/librdkafka/packaging/tools/update_rpcs_max_versions.py +100 -0
  218. package/deps/librdkafka/service.yml +172 -0
  219. package/deps/librdkafka/src/CMakeLists.txt +374 -0
  220. package/deps/librdkafka/src/Makefile +103 -0
  221. package/deps/librdkafka/src/README.lz4.md +30 -0
  222. package/deps/librdkafka/src/cJSON.c +2834 -0
  223. package/deps/librdkafka/src/cJSON.h +398 -0
  224. package/deps/librdkafka/src/crc32c.c +430 -0
  225. package/deps/librdkafka/src/crc32c.h +38 -0
  226. package/deps/librdkafka/src/generate_proto.sh +66 -0
  227. package/deps/librdkafka/src/librdkafka_cgrp_synch.png +0 -0
  228. package/deps/librdkafka/src/lz4.c +2727 -0
  229. package/deps/librdkafka/src/lz4.h +842 -0
  230. package/deps/librdkafka/src/lz4frame.c +2078 -0
  231. package/deps/librdkafka/src/lz4frame.h +692 -0
  232. package/deps/librdkafka/src/lz4frame_static.h +47 -0
  233. package/deps/librdkafka/src/lz4hc.c +1631 -0
  234. package/deps/librdkafka/src/lz4hc.h +413 -0
  235. package/deps/librdkafka/src/nanopb/pb.h +917 -0
  236. package/deps/librdkafka/src/nanopb/pb_common.c +388 -0
  237. package/deps/librdkafka/src/nanopb/pb_common.h +49 -0
  238. package/deps/librdkafka/src/nanopb/pb_decode.c +1727 -0
  239. package/deps/librdkafka/src/nanopb/pb_decode.h +193 -0
  240. package/deps/librdkafka/src/nanopb/pb_encode.c +1000 -0
  241. package/deps/librdkafka/src/nanopb/pb_encode.h +185 -0
  242. package/deps/librdkafka/src/opentelemetry/common.pb.c +32 -0
  243. package/deps/librdkafka/src/opentelemetry/common.pb.h +170 -0
  244. package/deps/librdkafka/src/opentelemetry/metrics.options +2 -0
  245. package/deps/librdkafka/src/opentelemetry/metrics.pb.c +67 -0
  246. package/deps/librdkafka/src/opentelemetry/metrics.pb.h +966 -0
  247. package/deps/librdkafka/src/opentelemetry/resource.pb.c +12 -0
  248. package/deps/librdkafka/src/opentelemetry/resource.pb.h +58 -0
  249. package/deps/librdkafka/src/queue.h +850 -0
  250. package/deps/librdkafka/src/rd.h +584 -0
  251. package/deps/librdkafka/src/rdaddr.c +255 -0
  252. package/deps/librdkafka/src/rdaddr.h +202 -0
  253. package/deps/librdkafka/src/rdatomic.h +230 -0
  254. package/deps/librdkafka/src/rdavg.h +260 -0
  255. package/deps/librdkafka/src/rdavl.c +210 -0
  256. package/deps/librdkafka/src/rdavl.h +250 -0
  257. package/deps/librdkafka/src/rdbase64.c +200 -0
  258. package/deps/librdkafka/src/rdbase64.h +43 -0
  259. package/deps/librdkafka/src/rdbuf.c +1884 -0
  260. package/deps/librdkafka/src/rdbuf.h +375 -0
  261. package/deps/librdkafka/src/rdcrc32.c +114 -0
  262. package/deps/librdkafka/src/rdcrc32.h +170 -0
  263. package/deps/librdkafka/src/rddl.c +179 -0
  264. package/deps/librdkafka/src/rddl.h +43 -0
  265. package/deps/librdkafka/src/rdendian.h +175 -0
  266. package/deps/librdkafka/src/rdfloat.h +67 -0
  267. package/deps/librdkafka/src/rdfnv1a.c +113 -0
  268. package/deps/librdkafka/src/rdfnv1a.h +35 -0
  269. package/deps/librdkafka/src/rdgz.c +120 -0
  270. package/deps/librdkafka/src/rdgz.h +46 -0
  271. package/deps/librdkafka/src/rdhdrhistogram.c +721 -0
  272. package/deps/librdkafka/src/rdhdrhistogram.h +87 -0
  273. package/deps/librdkafka/src/rdhttp.c +830 -0
  274. package/deps/librdkafka/src/rdhttp.h +101 -0
  275. package/deps/librdkafka/src/rdinterval.h +177 -0
  276. package/deps/librdkafka/src/rdkafka.c +5505 -0
  277. package/deps/librdkafka/src/rdkafka.h +10686 -0
  278. package/deps/librdkafka/src/rdkafka_admin.c +9794 -0
  279. package/deps/librdkafka/src/rdkafka_admin.h +661 -0
  280. package/deps/librdkafka/src/rdkafka_assignment.c +1010 -0
  281. package/deps/librdkafka/src/rdkafka_assignment.h +73 -0
  282. package/deps/librdkafka/src/rdkafka_assignor.c +1786 -0
  283. package/deps/librdkafka/src/rdkafka_assignor.h +402 -0
  284. package/deps/librdkafka/src/rdkafka_aux.c +409 -0
  285. package/deps/librdkafka/src/rdkafka_aux.h +174 -0
  286. package/deps/librdkafka/src/rdkafka_background.c +221 -0
  287. package/deps/librdkafka/src/rdkafka_broker.c +6337 -0
  288. package/deps/librdkafka/src/rdkafka_broker.h +744 -0
  289. package/deps/librdkafka/src/rdkafka_buf.c +543 -0
  290. package/deps/librdkafka/src/rdkafka_buf.h +1525 -0
  291. package/deps/librdkafka/src/rdkafka_cert.c +576 -0
  292. package/deps/librdkafka/src/rdkafka_cert.h +62 -0
  293. package/deps/librdkafka/src/rdkafka_cgrp.c +7587 -0
  294. package/deps/librdkafka/src/rdkafka_cgrp.h +477 -0
  295. package/deps/librdkafka/src/rdkafka_conf.c +4880 -0
  296. package/deps/librdkafka/src/rdkafka_conf.h +732 -0
  297. package/deps/librdkafka/src/rdkafka_confval.h +97 -0
  298. package/deps/librdkafka/src/rdkafka_coord.c +623 -0
  299. package/deps/librdkafka/src/rdkafka_coord.h +132 -0
  300. package/deps/librdkafka/src/rdkafka_error.c +228 -0
  301. package/deps/librdkafka/src/rdkafka_error.h +80 -0
  302. package/deps/librdkafka/src/rdkafka_event.c +502 -0
  303. package/deps/librdkafka/src/rdkafka_event.h +126 -0
  304. package/deps/librdkafka/src/rdkafka_feature.c +898 -0
  305. package/deps/librdkafka/src/rdkafka_feature.h +104 -0
  306. package/deps/librdkafka/src/rdkafka_fetcher.c +1422 -0
  307. package/deps/librdkafka/src/rdkafka_fetcher.h +44 -0
  308. package/deps/librdkafka/src/rdkafka_header.c +220 -0
  309. package/deps/librdkafka/src/rdkafka_header.h +76 -0
  310. package/deps/librdkafka/src/rdkafka_idempotence.c +807 -0
  311. package/deps/librdkafka/src/rdkafka_idempotence.h +144 -0
  312. package/deps/librdkafka/src/rdkafka_int.h +1260 -0
  313. package/deps/librdkafka/src/rdkafka_interceptor.c +819 -0
  314. package/deps/librdkafka/src/rdkafka_interceptor.h +104 -0
  315. package/deps/librdkafka/src/rdkafka_lz4.c +450 -0
  316. package/deps/librdkafka/src/rdkafka_lz4.h +49 -0
  317. package/deps/librdkafka/src/rdkafka_metadata.c +2209 -0
  318. package/deps/librdkafka/src/rdkafka_metadata.h +345 -0
  319. package/deps/librdkafka/src/rdkafka_metadata_cache.c +1183 -0
  320. package/deps/librdkafka/src/rdkafka_mock.c +3661 -0
  321. package/deps/librdkafka/src/rdkafka_mock.h +610 -0
  322. package/deps/librdkafka/src/rdkafka_mock_cgrp.c +1876 -0
  323. package/deps/librdkafka/src/rdkafka_mock_handlers.c +3113 -0
  324. package/deps/librdkafka/src/rdkafka_mock_int.h +710 -0
  325. package/deps/librdkafka/src/rdkafka_msg.c +2589 -0
  326. package/deps/librdkafka/src/rdkafka_msg.h +614 -0
  327. package/deps/librdkafka/src/rdkafka_msgbatch.h +62 -0
  328. package/deps/librdkafka/src/rdkafka_msgset.h +98 -0
  329. package/deps/librdkafka/src/rdkafka_msgset_reader.c +1806 -0
  330. package/deps/librdkafka/src/rdkafka_msgset_writer.c +1474 -0
  331. package/deps/librdkafka/src/rdkafka_offset.c +1565 -0
  332. package/deps/librdkafka/src/rdkafka_offset.h +150 -0
  333. package/deps/librdkafka/src/rdkafka_op.c +997 -0
  334. package/deps/librdkafka/src/rdkafka_op.h +858 -0
  335. package/deps/librdkafka/src/rdkafka_partition.c +4896 -0
  336. package/deps/librdkafka/src/rdkafka_partition.h +1182 -0
  337. package/deps/librdkafka/src/rdkafka_pattern.c +228 -0
  338. package/deps/librdkafka/src/rdkafka_pattern.h +70 -0
  339. package/deps/librdkafka/src/rdkafka_plugin.c +213 -0
  340. package/deps/librdkafka/src/rdkafka_plugin.h +41 -0
  341. package/deps/librdkafka/src/rdkafka_proto.h +736 -0
  342. package/deps/librdkafka/src/rdkafka_protocol.h +128 -0
  343. package/deps/librdkafka/src/rdkafka_queue.c +1230 -0
  344. package/deps/librdkafka/src/rdkafka_queue.h +1220 -0
  345. package/deps/librdkafka/src/rdkafka_range_assignor.c +1748 -0
  346. package/deps/librdkafka/src/rdkafka_request.c +7089 -0
  347. package/deps/librdkafka/src/rdkafka_request.h +732 -0
  348. package/deps/librdkafka/src/rdkafka_roundrobin_assignor.c +123 -0
  349. package/deps/librdkafka/src/rdkafka_sasl.c +530 -0
  350. package/deps/librdkafka/src/rdkafka_sasl.h +63 -0
  351. package/deps/librdkafka/src/rdkafka_sasl_cyrus.c +722 -0
  352. package/deps/librdkafka/src/rdkafka_sasl_int.h +89 -0
  353. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.c +1833 -0
  354. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.h +52 -0
  355. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.c +1666 -0
  356. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.h +47 -0
  357. package/deps/librdkafka/src/rdkafka_sasl_plain.c +142 -0
  358. package/deps/librdkafka/src/rdkafka_sasl_scram.c +858 -0
  359. package/deps/librdkafka/src/rdkafka_sasl_win32.c +550 -0
  360. package/deps/librdkafka/src/rdkafka_ssl.c +2129 -0
  361. package/deps/librdkafka/src/rdkafka_ssl.h +86 -0
  362. package/deps/librdkafka/src/rdkafka_sticky_assignor.c +4785 -0
  363. package/deps/librdkafka/src/rdkafka_subscription.c +278 -0
  364. package/deps/librdkafka/src/rdkafka_telemetry.c +760 -0
  365. package/deps/librdkafka/src/rdkafka_telemetry.h +52 -0
  366. package/deps/librdkafka/src/rdkafka_telemetry_decode.c +1053 -0
  367. package/deps/librdkafka/src/rdkafka_telemetry_decode.h +59 -0
  368. package/deps/librdkafka/src/rdkafka_telemetry_encode.c +997 -0
  369. package/deps/librdkafka/src/rdkafka_telemetry_encode.h +301 -0
  370. package/deps/librdkafka/src/rdkafka_timer.c +402 -0
  371. package/deps/librdkafka/src/rdkafka_timer.h +117 -0
  372. package/deps/librdkafka/src/rdkafka_topic.c +2161 -0
  373. package/deps/librdkafka/src/rdkafka_topic.h +334 -0
  374. package/deps/librdkafka/src/rdkafka_transport.c +1309 -0
  375. package/deps/librdkafka/src/rdkafka_transport.h +99 -0
  376. package/deps/librdkafka/src/rdkafka_transport_int.h +100 -0
  377. package/deps/librdkafka/src/rdkafka_txnmgr.c +3256 -0
  378. package/deps/librdkafka/src/rdkafka_txnmgr.h +171 -0
  379. package/deps/librdkafka/src/rdkafka_zstd.c +226 -0
  380. package/deps/librdkafka/src/rdkafka_zstd.h +57 -0
  381. package/deps/librdkafka/src/rdlist.c +576 -0
  382. package/deps/librdkafka/src/rdlist.h +434 -0
  383. package/deps/librdkafka/src/rdlog.c +89 -0
  384. package/deps/librdkafka/src/rdlog.h +41 -0
  385. package/deps/librdkafka/src/rdmap.c +508 -0
  386. package/deps/librdkafka/src/rdmap.h +492 -0
  387. package/deps/librdkafka/src/rdmurmur2.c +167 -0
  388. package/deps/librdkafka/src/rdmurmur2.h +35 -0
  389. package/deps/librdkafka/src/rdports.c +61 -0
  390. package/deps/librdkafka/src/rdports.h +38 -0
  391. package/deps/librdkafka/src/rdposix.h +250 -0
  392. package/deps/librdkafka/src/rdrand.c +80 -0
  393. package/deps/librdkafka/src/rdrand.h +43 -0
  394. package/deps/librdkafka/src/rdregex.c +156 -0
  395. package/deps/librdkafka/src/rdregex.h +43 -0
  396. package/deps/librdkafka/src/rdsignal.h +57 -0
  397. package/deps/librdkafka/src/rdstring.c +645 -0
  398. package/deps/librdkafka/src/rdstring.h +98 -0
  399. package/deps/librdkafka/src/rdsysqueue.h +404 -0
  400. package/deps/librdkafka/src/rdtime.h +356 -0
  401. package/deps/librdkafka/src/rdtypes.h +86 -0
  402. package/deps/librdkafka/src/rdunittest.c +549 -0
  403. package/deps/librdkafka/src/rdunittest.h +232 -0
  404. package/deps/librdkafka/src/rdvarint.c +134 -0
  405. package/deps/librdkafka/src/rdvarint.h +165 -0
  406. package/deps/librdkafka/src/rdwin32.h +382 -0
  407. package/deps/librdkafka/src/rdxxhash.c +1030 -0
  408. package/deps/librdkafka/src/rdxxhash.h +328 -0
  409. package/deps/librdkafka/src/regexp.c +1352 -0
  410. package/deps/librdkafka/src/regexp.h +41 -0
  411. package/deps/librdkafka/src/snappy.c +1866 -0
  412. package/deps/librdkafka/src/snappy.h +62 -0
  413. package/deps/librdkafka/src/snappy_compat.h +138 -0
  414. package/deps/librdkafka/src/statistics_schema.json +444 -0
  415. package/deps/librdkafka/src/tinycthread.c +932 -0
  416. package/deps/librdkafka/src/tinycthread.h +503 -0
  417. package/deps/librdkafka/src/tinycthread_extra.c +199 -0
  418. package/deps/librdkafka/src/tinycthread_extra.h +212 -0
  419. package/deps/librdkafka/src/win32_config.h +58 -0
  420. package/deps/librdkafka/src-cpp/CMakeLists.txt +90 -0
  421. package/deps/librdkafka/src-cpp/ConfImpl.cpp +84 -0
  422. package/deps/librdkafka/src-cpp/ConsumerImpl.cpp +244 -0
  423. package/deps/librdkafka/src-cpp/HandleImpl.cpp +436 -0
  424. package/deps/librdkafka/src-cpp/HeadersImpl.cpp +48 -0
  425. package/deps/librdkafka/src-cpp/KafkaConsumerImpl.cpp +296 -0
  426. package/deps/librdkafka/src-cpp/Makefile +55 -0
  427. package/deps/librdkafka/src-cpp/MessageImpl.cpp +38 -0
  428. package/deps/librdkafka/src-cpp/MetadataImpl.cpp +170 -0
  429. package/deps/librdkafka/src-cpp/ProducerImpl.cpp +197 -0
  430. package/deps/librdkafka/src-cpp/QueueImpl.cpp +70 -0
  431. package/deps/librdkafka/src-cpp/README.md +16 -0
  432. package/deps/librdkafka/src-cpp/RdKafka.cpp +59 -0
  433. package/deps/librdkafka/src-cpp/TopicImpl.cpp +124 -0
  434. package/deps/librdkafka/src-cpp/TopicPartitionImpl.cpp +57 -0
  435. package/deps/librdkafka/src-cpp/rdkafkacpp.h +3797 -0
  436. package/deps/librdkafka/src-cpp/rdkafkacpp_int.h +1641 -0
  437. package/deps/librdkafka/tests/0000-unittests.c +72 -0
  438. package/deps/librdkafka/tests/0001-multiobj.c +102 -0
  439. package/deps/librdkafka/tests/0002-unkpart.c +244 -0
  440. package/deps/librdkafka/tests/0003-msgmaxsize.c +173 -0
  441. package/deps/librdkafka/tests/0004-conf.c +934 -0
  442. package/deps/librdkafka/tests/0005-order.c +133 -0
  443. package/deps/librdkafka/tests/0006-symbols.c +163 -0
  444. package/deps/librdkafka/tests/0007-autotopic.c +136 -0
  445. package/deps/librdkafka/tests/0008-reqacks.c +179 -0
  446. package/deps/librdkafka/tests/0009-mock_cluster.c +97 -0
  447. package/deps/librdkafka/tests/0011-produce_batch.c +753 -0
  448. package/deps/librdkafka/tests/0012-produce_consume.c +537 -0
  449. package/deps/librdkafka/tests/0013-null-msgs.c +473 -0
  450. package/deps/librdkafka/tests/0014-reconsume-191.c +512 -0
  451. package/deps/librdkafka/tests/0015-offset_seeks.c +172 -0
  452. package/deps/librdkafka/tests/0016-client_swname.c +181 -0
  453. package/deps/librdkafka/tests/0017-compression.c +140 -0
  454. package/deps/librdkafka/tests/0018-cgrp_term.c +338 -0
  455. package/deps/librdkafka/tests/0019-list_groups.c +289 -0
  456. package/deps/librdkafka/tests/0020-destroy_hang.c +162 -0
  457. package/deps/librdkafka/tests/0021-rkt_destroy.c +72 -0
  458. package/deps/librdkafka/tests/0022-consume_batch.c +279 -0
  459. package/deps/librdkafka/tests/0025-timers.c +147 -0
  460. package/deps/librdkafka/tests/0026-consume_pause.c +547 -0
  461. package/deps/librdkafka/tests/0028-long_topicnames.c +79 -0
  462. package/deps/librdkafka/tests/0029-assign_offset.c +202 -0
  463. package/deps/librdkafka/tests/0030-offset_commit.c +589 -0
  464. package/deps/librdkafka/tests/0031-get_offsets.c +235 -0
  465. package/deps/librdkafka/tests/0033-regex_subscribe.c +536 -0
  466. package/deps/librdkafka/tests/0034-offset_reset.c +398 -0
  467. package/deps/librdkafka/tests/0035-api_version.c +73 -0
  468. package/deps/librdkafka/tests/0036-partial_fetch.c +87 -0
  469. package/deps/librdkafka/tests/0037-destroy_hang_local.c +85 -0
  470. package/deps/librdkafka/tests/0038-performance.c +121 -0
  471. package/deps/librdkafka/tests/0039-event.c +284 -0
  472. package/deps/librdkafka/tests/0040-io_event.c +257 -0
  473. package/deps/librdkafka/tests/0041-fetch_max_bytes.c +97 -0
  474. package/deps/librdkafka/tests/0042-many_topics.c +252 -0
  475. package/deps/librdkafka/tests/0043-no_connection.c +77 -0
  476. package/deps/librdkafka/tests/0044-partition_cnt.c +94 -0
  477. package/deps/librdkafka/tests/0045-subscribe_update.c +1010 -0
  478. package/deps/librdkafka/tests/0046-rkt_cache.c +65 -0
  479. package/deps/librdkafka/tests/0047-partial_buf_tmout.c +98 -0
  480. package/deps/librdkafka/tests/0048-partitioner.c +283 -0
  481. package/deps/librdkafka/tests/0049-consume_conn_close.c +162 -0
  482. package/deps/librdkafka/tests/0050-subscribe_adds.c +145 -0
  483. package/deps/librdkafka/tests/0051-assign_adds.c +126 -0
  484. package/deps/librdkafka/tests/0052-msg_timestamps.c +238 -0
  485. package/deps/librdkafka/tests/0053-stats_cb.cpp +527 -0
  486. package/deps/librdkafka/tests/0054-offset_time.cpp +236 -0
  487. package/deps/librdkafka/tests/0055-producer_latency.c +539 -0
  488. package/deps/librdkafka/tests/0056-balanced_group_mt.c +315 -0
  489. package/deps/librdkafka/tests/0057-invalid_topic.cpp +112 -0
  490. package/deps/librdkafka/tests/0058-log.cpp +123 -0
  491. package/deps/librdkafka/tests/0059-bsearch.cpp +241 -0
  492. package/deps/librdkafka/tests/0060-op_prio.cpp +163 -0
  493. package/deps/librdkafka/tests/0061-consumer_lag.cpp +295 -0
  494. package/deps/librdkafka/tests/0062-stats_event.c +126 -0
  495. package/deps/librdkafka/tests/0063-clusterid.cpp +180 -0
  496. package/deps/librdkafka/tests/0064-interceptors.c +481 -0
  497. package/deps/librdkafka/tests/0065-yield.cpp +140 -0
  498. package/deps/librdkafka/tests/0066-plugins.cpp +129 -0
  499. package/deps/librdkafka/tests/0067-empty_topic.cpp +151 -0
  500. package/deps/librdkafka/tests/0068-produce_timeout.c +136 -0
  501. package/deps/librdkafka/tests/0069-consumer_add_parts.c +119 -0
  502. package/deps/librdkafka/tests/0070-null_empty.cpp +197 -0
  503. package/deps/librdkafka/tests/0072-headers_ut.c +448 -0
  504. package/deps/librdkafka/tests/0073-headers.c +381 -0
  505. package/deps/librdkafka/tests/0074-producev.c +87 -0
  506. package/deps/librdkafka/tests/0075-retry.c +290 -0
  507. package/deps/librdkafka/tests/0076-produce_retry.c +452 -0
  508. package/deps/librdkafka/tests/0077-compaction.c +363 -0
  509. package/deps/librdkafka/tests/0078-c_from_cpp.cpp +96 -0
  510. package/deps/librdkafka/tests/0079-fork.c +93 -0
  511. package/deps/librdkafka/tests/0080-admin_ut.c +3095 -0
  512. package/deps/librdkafka/tests/0081-admin.c +5633 -0
  513. package/deps/librdkafka/tests/0082-fetch_max_bytes.cpp +137 -0
  514. package/deps/librdkafka/tests/0083-cb_event.c +233 -0
  515. package/deps/librdkafka/tests/0084-destroy_flags.c +208 -0
  516. package/deps/librdkafka/tests/0085-headers.cpp +392 -0
  517. package/deps/librdkafka/tests/0086-purge.c +368 -0
  518. package/deps/librdkafka/tests/0088-produce_metadata_timeout.c +162 -0
  519. package/deps/librdkafka/tests/0089-max_poll_interval.c +511 -0
  520. package/deps/librdkafka/tests/0090-idempotence.c +171 -0
  521. package/deps/librdkafka/tests/0091-max_poll_interval_timeout.c +295 -0
  522. package/deps/librdkafka/tests/0092-mixed_msgver.c +103 -0
  523. package/deps/librdkafka/tests/0093-holb.c +200 -0
  524. package/deps/librdkafka/tests/0094-idempotence_msg_timeout.c +231 -0
  525. package/deps/librdkafka/tests/0095-all_brokers_down.cpp +122 -0
  526. package/deps/librdkafka/tests/0097-ssl_verify.cpp +658 -0
  527. package/deps/librdkafka/tests/0098-consumer-txn.cpp +1218 -0
  528. package/deps/librdkafka/tests/0099-commit_metadata.c +194 -0
  529. package/deps/librdkafka/tests/0100-thread_interceptors.cpp +195 -0
  530. package/deps/librdkafka/tests/0101-fetch-from-follower.cpp +446 -0
  531. package/deps/librdkafka/tests/0102-static_group_rebalance.c +836 -0
  532. package/deps/librdkafka/tests/0103-transactions.c +1383 -0
  533. package/deps/librdkafka/tests/0104-fetch_from_follower_mock.c +625 -0
  534. package/deps/librdkafka/tests/0105-transactions_mock.c +3930 -0
  535. package/deps/librdkafka/tests/0106-cgrp_sess_timeout.c +318 -0
  536. package/deps/librdkafka/tests/0107-topic_recreate.c +259 -0
  537. package/deps/librdkafka/tests/0109-auto_create_topics.cpp +278 -0
  538. package/deps/librdkafka/tests/0110-batch_size.cpp +182 -0
  539. package/deps/librdkafka/tests/0111-delay_create_topics.cpp +127 -0
  540. package/deps/librdkafka/tests/0112-assign_unknown_part.c +87 -0
  541. package/deps/librdkafka/tests/0113-cooperative_rebalance.cpp +3473 -0
  542. package/deps/librdkafka/tests/0114-sticky_partitioning.cpp +176 -0
  543. package/deps/librdkafka/tests/0115-producer_auth.cpp +182 -0
  544. package/deps/librdkafka/tests/0116-kafkaconsumer_close.cpp +216 -0
  545. package/deps/librdkafka/tests/0117-mock_errors.c +331 -0
  546. package/deps/librdkafka/tests/0118-commit_rebalance.c +154 -0
  547. package/deps/librdkafka/tests/0119-consumer_auth.cpp +167 -0
  548. package/deps/librdkafka/tests/0120-asymmetric_subscription.c +185 -0
  549. package/deps/librdkafka/tests/0121-clusterid.c +115 -0
  550. package/deps/librdkafka/tests/0122-buffer_cleaning_after_rebalance.c +227 -0
  551. package/deps/librdkafka/tests/0123-connections_max_idle.c +98 -0
  552. package/deps/librdkafka/tests/0124-openssl_invalid_engine.c +69 -0
  553. package/deps/librdkafka/tests/0125-immediate_flush.c +144 -0
  554. package/deps/librdkafka/tests/0126-oauthbearer_oidc.c +528 -0
  555. package/deps/librdkafka/tests/0127-fetch_queue_backoff.cpp +165 -0
  556. package/deps/librdkafka/tests/0128-sasl_callback_queue.cpp +125 -0
  557. package/deps/librdkafka/tests/0129-fetch_aborted_msgs.c +79 -0
  558. package/deps/librdkafka/tests/0130-store_offsets.c +178 -0
  559. package/deps/librdkafka/tests/0131-connect_timeout.c +81 -0
  560. package/deps/librdkafka/tests/0132-strategy_ordering.c +179 -0
  561. package/deps/librdkafka/tests/0133-ssl_keys.c +150 -0
  562. package/deps/librdkafka/tests/0134-ssl_provider.c +92 -0
  563. package/deps/librdkafka/tests/0135-sasl_credentials.cpp +143 -0
  564. package/deps/librdkafka/tests/0136-resolve_cb.c +181 -0
  565. package/deps/librdkafka/tests/0137-barrier_batch_consume.c +619 -0
  566. package/deps/librdkafka/tests/0138-admin_mock.c +281 -0
  567. package/deps/librdkafka/tests/0139-offset_validation_mock.c +950 -0
  568. package/deps/librdkafka/tests/0140-commit_metadata.cpp +108 -0
  569. package/deps/librdkafka/tests/0142-reauthentication.c +515 -0
  570. package/deps/librdkafka/tests/0143-exponential_backoff_mock.c +552 -0
  571. package/deps/librdkafka/tests/0144-idempotence_mock.c +373 -0
  572. package/deps/librdkafka/tests/0145-pause_resume_mock.c +119 -0
  573. package/deps/librdkafka/tests/0146-metadata_mock.c +505 -0
  574. package/deps/librdkafka/tests/0147-consumer_group_consumer_mock.c +952 -0
  575. package/deps/librdkafka/tests/0148-offset_fetch_commit_error_mock.c +563 -0
  576. package/deps/librdkafka/tests/0149-broker-same-host-port.c +140 -0
  577. package/deps/librdkafka/tests/0150-telemetry_mock.c +651 -0
  578. package/deps/librdkafka/tests/0151-purge-brokers.c +566 -0
  579. package/deps/librdkafka/tests/0152-rebootstrap.c +59 -0
  580. package/deps/librdkafka/tests/0153-memberid.c +128 -0
  581. package/deps/librdkafka/tests/1000-unktopic.c +164 -0
  582. package/deps/librdkafka/tests/8000-idle.cpp +60 -0
  583. package/deps/librdkafka/tests/8001-fetch_from_follower_mock_manual.c +113 -0
  584. package/deps/librdkafka/tests/CMakeLists.txt +170 -0
  585. package/deps/librdkafka/tests/LibrdkafkaTestApp.py +291 -0
  586. package/deps/librdkafka/tests/Makefile +182 -0
  587. package/deps/librdkafka/tests/README.md +509 -0
  588. package/deps/librdkafka/tests/autotest.sh +33 -0
  589. package/deps/librdkafka/tests/backtrace.gdb +30 -0
  590. package/deps/librdkafka/tests/broker_version_tests.py +315 -0
  591. package/deps/librdkafka/tests/buildbox.sh +17 -0
  592. package/deps/librdkafka/tests/cleanup-checker-tests.sh +20 -0
  593. package/deps/librdkafka/tests/cluster_testing.py +191 -0
  594. package/deps/librdkafka/tests/delete-test-topics.sh +56 -0
  595. package/deps/librdkafka/tests/fixtures/oauthbearer/jwt_assertion_template.json +10 -0
  596. package/deps/librdkafka/tests/fixtures/ssl/Makefile +8 -0
  597. package/deps/librdkafka/tests/fixtures/ssl/README.md +13 -0
  598. package/deps/librdkafka/tests/fixtures/ssl/client.keystore.intermediate.p12 +0 -0
  599. package/deps/librdkafka/tests/fixtures/ssl/client.keystore.p12 +0 -0
  600. package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.intermediate.pem +72 -0
  601. package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.pem +50 -0
  602. package/deps/librdkafka/tests/fixtures/ssl/client2.intermediate.key +46 -0
  603. package/deps/librdkafka/tests/fixtures/ssl/client2.key +46 -0
  604. package/deps/librdkafka/tests/fixtures/ssl/create_keys.sh +168 -0
  605. package/deps/librdkafka/tests/fuzzers/Makefile +12 -0
  606. package/deps/librdkafka/tests/fuzzers/README.md +31 -0
  607. package/deps/librdkafka/tests/fuzzers/fuzz_regex.c +74 -0
  608. package/deps/librdkafka/tests/fuzzers/helpers.h +90 -0
  609. package/deps/librdkafka/tests/gen-ssl-certs.sh +165 -0
  610. package/deps/librdkafka/tests/interactive_broker_version.py +170 -0
  611. package/deps/librdkafka/tests/interceptor_test/CMakeLists.txt +16 -0
  612. package/deps/librdkafka/tests/interceptor_test/Makefile +22 -0
  613. package/deps/librdkafka/tests/interceptor_test/interceptor_test.c +314 -0
  614. package/deps/librdkafka/tests/interceptor_test/interceptor_test.h +54 -0
  615. package/deps/librdkafka/tests/java/IncrementalRebalanceCli.java +97 -0
  616. package/deps/librdkafka/tests/java/Makefile +13 -0
  617. package/deps/librdkafka/tests/java/Murmur2Cli.java +46 -0
  618. package/deps/librdkafka/tests/java/README.md +14 -0
  619. package/deps/librdkafka/tests/java/TransactionProducerCli.java +162 -0
  620. package/deps/librdkafka/tests/java/run-class.sh +11 -0
  621. package/deps/librdkafka/tests/librdkafka.suppressions +483 -0
  622. package/deps/librdkafka/tests/lz4_manual_test.sh +59 -0
  623. package/deps/librdkafka/tests/multi-broker-version-test.sh +50 -0
  624. package/deps/librdkafka/tests/parse-refcnt.sh +43 -0
  625. package/deps/librdkafka/tests/performance_plot.py +115 -0
  626. package/deps/librdkafka/tests/plugin_test/Makefile +19 -0
  627. package/deps/librdkafka/tests/plugin_test/plugin_test.c +58 -0
  628. package/deps/librdkafka/tests/requirements.txt +2 -0
  629. package/deps/librdkafka/tests/run-all-tests.sh +79 -0
  630. package/deps/librdkafka/tests/run-consumer-tests.sh +16 -0
  631. package/deps/librdkafka/tests/run-producer-tests.sh +16 -0
  632. package/deps/librdkafka/tests/run-test-batches.py +157 -0
  633. package/deps/librdkafka/tests/run-test.sh +140 -0
  634. package/deps/librdkafka/tests/rusage.c +249 -0
  635. package/deps/librdkafka/tests/sasl_test.py +289 -0
  636. package/deps/librdkafka/tests/scenarios/README.md +6 -0
  637. package/deps/librdkafka/tests/scenarios/ak23.json +6 -0
  638. package/deps/librdkafka/tests/scenarios/default.json +5 -0
  639. package/deps/librdkafka/tests/scenarios/noautocreate.json +5 -0
  640. package/deps/librdkafka/tests/sockem.c +801 -0
  641. package/deps/librdkafka/tests/sockem.h +85 -0
  642. package/deps/librdkafka/tests/sockem_ctrl.c +145 -0
  643. package/deps/librdkafka/tests/sockem_ctrl.h +61 -0
  644. package/deps/librdkafka/tests/test.c +7778 -0
  645. package/deps/librdkafka/tests/test.conf.example +27 -0
  646. package/deps/librdkafka/tests/test.h +1028 -0
  647. package/deps/librdkafka/tests/testcpp.cpp +131 -0
  648. package/deps/librdkafka/tests/testcpp.h +388 -0
  649. package/deps/librdkafka/tests/testshared.h +416 -0
  650. package/deps/librdkafka/tests/tools/README.md +4 -0
  651. package/deps/librdkafka/tests/tools/stats/README.md +21 -0
  652. package/deps/librdkafka/tests/tools/stats/filter.jq +42 -0
  653. package/deps/librdkafka/tests/tools/stats/graph.py +150 -0
  654. package/deps/librdkafka/tests/tools/stats/requirements.txt +3 -0
  655. package/deps/librdkafka/tests/tools/stats/to_csv.py +124 -0
  656. package/deps/librdkafka/tests/trivup/trivup-0.14.0.tar.gz +0 -0
  657. package/deps/librdkafka/tests/until-fail.sh +87 -0
  658. package/deps/librdkafka/tests/xxxx-assign_partition.c +122 -0
  659. package/deps/librdkafka/tests/xxxx-metadata.cpp +159 -0
  660. package/deps/librdkafka/vcpkg.json +23 -0
  661. package/deps/librdkafka/win32/README.md +5 -0
  662. package/deps/librdkafka/win32/build-package.bat +3 -0
  663. package/deps/librdkafka/win32/build.bat +19 -0
  664. package/deps/librdkafka/win32/common.vcxproj +84 -0
  665. package/deps/librdkafka/win32/interceptor_test/interceptor_test.vcxproj +87 -0
  666. package/deps/librdkafka/win32/librdkafka.autopkg.template +54 -0
  667. package/deps/librdkafka/win32/librdkafka.master.testing.targets +13 -0
  668. package/deps/librdkafka/win32/librdkafka.sln +226 -0
  669. package/deps/librdkafka/win32/librdkafka.vcxproj +276 -0
  670. package/deps/librdkafka/win32/librdkafkacpp/librdkafkacpp.vcxproj +104 -0
  671. package/deps/librdkafka/win32/msbuild.ps1 +15 -0
  672. package/deps/librdkafka/win32/openssl_engine_example/openssl_engine_example.vcxproj +132 -0
  673. package/deps/librdkafka/win32/package-zip.ps1 +46 -0
  674. package/deps/librdkafka/win32/packages/repositories.config +4 -0
  675. package/deps/librdkafka/win32/push-package.bat +4 -0
  676. package/deps/librdkafka/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj +67 -0
  677. package/deps/librdkafka/win32/rdkafka_example/rdkafka_example.vcxproj +97 -0
  678. package/deps/librdkafka/win32/rdkafka_performance/rdkafka_performance.vcxproj +97 -0
  679. package/deps/librdkafka/win32/setup-msys2.ps1 +47 -0
  680. package/deps/librdkafka/win32/setup-vcpkg.ps1 +34 -0
  681. package/deps/librdkafka/win32/tests/test.conf.example +25 -0
  682. package/deps/librdkafka/win32/tests/tests.vcxproj +253 -0
  683. package/deps/librdkafka/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj +132 -0
  684. package/deps/librdkafka/win32/wingetopt.c +564 -0
  685. package/deps/librdkafka/win32/wingetopt.h +101 -0
  686. package/deps/librdkafka/win32/wintime.h +33 -0
  687. package/deps/librdkafka.gyp +62 -0
  688. package/lib/admin.js +233 -0
  689. package/lib/client.js +573 -0
  690. package/lib/error.js +500 -0
  691. package/lib/index.js +34 -0
  692. package/lib/kafka-consumer-stream.js +397 -0
  693. package/lib/kafka-consumer.js +698 -0
  694. package/lib/producer/high-level-producer.js +323 -0
  695. package/lib/producer-stream.js +307 -0
  696. package/lib/producer.js +375 -0
  697. package/lib/tools/ref-counter.js +52 -0
  698. package/lib/topic-partition.js +88 -0
  699. package/lib/topic.js +42 -0
  700. package/lib/util.js +29 -0
  701. package/package.json +61 -0
  702. package/prebuilds/darwin-arm64/@point3+node-rdkafka.node +0 -0
  703. package/prebuilds/linux-x64/@point3+node-rdkafka.node +0 -0
  704. package/util/configure.js +30 -0
  705. package/util/get-env.js +6 -0
  706. package/util/test-compile.js +11 -0
  707. package/util/test-producer-delivery.js +100 -0
@@ -0,0 +1,4785 @@
1
+ /*
2
+ * librdkafka - The Apache Kafka C/C++ library
3
+ *
4
+ * Copyright (c) 2020-2022, Magnus Edenhill
5
+ * 2023, Confluent Inc.
6
+ * All rights reserved.
7
+ *
8
+ * Redistribution and use in source and binary forms, with or without
9
+ * modification, are permitted provided that the following conditions are met:
10
+ *
11
+ * 1. Redistributions of source code must retain the above copyright notice,
12
+ * this list of conditions and the following disclaimer.
13
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ * this list of conditions and the following disclaimer in the documentation
15
+ * and/or other materials provided with the distribution.
16
+ *
17
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27
+ * POSSIBILITY OF SUCH DAMAGE.
28
+ */
29
+
30
+
31
+ #include "rdkafka_int.h"
32
+ #include "rdkafka_assignor.h"
33
+ #include "rdkafka_request.h"
34
+ #include "rdmap.h"
35
+ #include "rdunittest.h"
36
+
37
+ #include <stdarg.h>
38
+ #include <math.h> /* abs() */
39
+
40
+ /**
41
+ * @name KIP-54 and KIP-341 Sticky assignor.
42
+ *
43
+ * Closely mimicking the official Apache Kafka AbstractStickyAssignor
44
+ * implementation.
45
+ */
46
+
47
+ /** FIXME
48
+ * Remaining:
49
+ * isSticky() -- used by tests
50
+ */
51
+
52
+
53
/** @brief Assignor state carried over from the last rebalance,
 *         used to keep assignments sticky across generations. */
typedef struct rd_kafka_sticky_assignor_state_s {
        /** The member's assignment as of the previous rebalance. */
        rd_kafka_topic_partition_list_t *prev_assignment;
        /** Rebalance generation id the assignment was made in. */
        int32_t generation_id;
} rd_kafka_sticky_assignor_state_t;
58
+
59
+
60
+
61
+ /**
+ * Auxiliary glue types
+ */
64
+
65
/**
 * @struct ConsumerPair_t represents a pair of consumer member ids involved in
 *         a partition reassignment, indicating the source consumer a partition
 *         is moving from and the destination consumer the same partition is
 *         moving to.
 *
 * @sa PartitionMovements_t
 */
typedef struct ConsumerPair_s {
        const char *src; /**< Source member id (may be NULL) */
        const char *dst; /**< Destination member id (may be NULL) */
} ConsumerPair_t;
77
+
78
+
79
+ static ConsumerPair_t *ConsumerPair_new(const char *src, const char *dst) {
80
+ ConsumerPair_t *cpair;
81
+
82
+ cpair = rd_malloc(sizeof(*cpair));
83
+ cpair->src = src ? rd_strdup(src) : NULL;
84
+ cpair->dst = dst ? rd_strdup(dst) : NULL;
85
+
86
+ return cpair;
87
+ }
88
+
89
+
90
/**
 * @brief Destructor for a ConsumerPair_t (used as rd_map free callback).
 *
 * Frees the duplicated member id strings (if set) and the pair itself.
 */
static void ConsumerPair_free(void *p) {
        ConsumerPair_t *cpair = p;
        if (cpair->src)
                rd_free((void *)cpair->src);
        if (cpair->dst)
                rd_free((void *)cpair->dst);
        rd_free(cpair);
}
98
+
99
+ static int ConsumerPair_cmp(const void *_a, const void *_b) {
100
+ const ConsumerPair_t *a = _a, *b = _b;
101
+ int r = strcmp(a->src ? a->src : "", b->src ? b->src : "");
102
+ if (r)
103
+ return r;
104
+ return strcmp(a->dst ? a->dst : "", b->dst ? b->dst : "");
105
+ }
106
+
107
+
108
+ static unsigned int ConsumerPair_hash(const void *_a) {
109
+ const ConsumerPair_t *a = _a;
110
+ return 31 * (a->src ? rd_map_str_hash(a->src) : 1) +
111
+ (a->dst ? rd_map_str_hash(a->dst) : 1);
112
+ }
113
+
114
+
115
+
116
/** @brief A (consumer member id, rebalance generation) pair. */
typedef struct ConsumerGenerationPair_s {
        const char *consumer; /**< Memory owned by caller */
        int generation;       /**< Rebalance generation id */
} ConsumerGenerationPair_t;
120
+
121
+ static void ConsumerGenerationPair_destroy(void *ptr) {
122
+ ConsumerGenerationPair_t *cgpair = ptr;
123
+ rd_free(cgpair);
124
+ }
125
+
126
+ /**
127
+ * @param consumer This memory will be referenced, not copied, and thus must
128
+ * outlive the ConsumerGenerationPair_t object.
129
+ */
130
+ static ConsumerGenerationPair_t *
131
+ ConsumerGenerationPair_new(const char *consumer, int generation) {
132
+ ConsumerGenerationPair_t *cgpair = rd_malloc(sizeof(*cgpair));
133
+ cgpair->consumer = consumer;
134
+ cgpair->generation = generation;
135
+ return cgpair;
136
+ }
137
+
138
+ static int ConsumerGenerationPair_cmp_generation(const void *_a,
139
+ const void *_b) {
140
+ const ConsumerGenerationPair_t *a = _a, *b = _b;
141
+ return a->generation - b->generation;
142
+ }
143
+
144
+
145
+
146
/**
 * Hash map types.
 *
 * Naming convention is:
 *   map_<keytype>_<valuetype>_t
 *
 * Where the keytype and valuetype are spoken names of the types and
 * not the specific C types (since that'd be too long).
 */
/** member id -> partition list */
typedef RD_MAP_TYPE(const char *,
                    rd_kafka_topic_partition_list_t *) map_str_toppar_list_t;

/** partition -> member id */
typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
                    const char *) map_toppar_str_t;

/** partition -> generic list */
typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
                    rd_list_t *) map_toppar_list_t;

/** partition -> internal partition metadata (rack info) */
typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
                    rd_kafka_metadata_partition_internal_t *) map_toppar_mdpi_t;

/** partition -> (consumer, generation) pair */
typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
                    ConsumerGenerationPair_t *) map_toppar_cgpair_t;

/** partition -> (src consumer, dst consumer) pair */
typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
                    ConsumerPair_t *) map_toppar_cpair_t;

/** (src consumer, dst consumer) pair -> partition list */
typedef RD_MAP_TYPE(const ConsumerPair_t *,
                    rd_kafka_topic_partition_list_t *) map_cpair_toppar_list_t;

/* map<string, map<ConsumerPair*, topic_partition_list_t*>> */
typedef RD_MAP_TYPE(const char *,
                    map_cpair_toppar_list_t *) map_str_map_cpair_toppar_list_t;

/** string -> string (e.g. member id -> rack id) */
typedef RD_MAP_TYPE(const char *, const char *) map_str_str_t;
181
+
182
+
183
+ /** Glue type helpers */
184
+
185
+ static map_cpair_toppar_list_t *map_cpair_toppar_list_t_new(void) {
186
+ map_cpair_toppar_list_t *map = rd_calloc(1, sizeof(*map));
187
+
188
+ RD_MAP_INIT(map, 0, ConsumerPair_cmp, ConsumerPair_hash, NULL,
189
+ rd_kafka_topic_partition_list_destroy_free);
190
+
191
+ return map;
192
+ }
193
+
194
+ static void map_cpair_toppar_list_t_free(void *ptr) {
195
+ map_cpair_toppar_list_t *map = ptr;
196
+ RD_MAP_DESTROY(map);
197
+ rd_free(map);
198
+ }
199
+
200
+
201
/** @struct Convenience struct for storing consumer/rack and toppar/rack
 *          mappings. */
typedef struct {
        /** A map of member_id -> rack_id pairs
         *  (both strings referenced, not copied). */
        map_str_str_t member_id_to_rack_id;
        /* A map of topic partition (owned copy) to
         * rd_kafka_metadata_partition_internal_t (referenced). */
        map_toppar_mdpi_t toppar_to_mdpi;
} rd_kafka_rack_info_t;
209
+
210
/**
 * @brief Initialize a rd_kafka_rack_info_t from the group members and
 *        topic metadata.
 *
 * @param topics Assignor topics providing members and partition metadata.
 * @param topic_cnt Number of elements in \p topics.
 * @param mdi Internal cluster metadata, used to decide whether rack-aware
 *            assignment applies.
 *
 * This struct is for convenience/easy grouping, and as a consequence, we avoid
 * copying values. Thus, it is intended to be used within the lifetime of this
 * function's arguments.
 *
 * @return New rack info object, or NULL if rack-aware assignment is not
 *         in use (not an error).
 */
static rd_kafka_rack_info_t *
rd_kafka_rack_info_new(rd_kafka_assignor_topic_t **topics,
                       size_t topic_cnt,
                       const rd_kafka_metadata_internal_t *mdi) {
        int i;
        size_t t;
        rd_kafka_group_member_t *rkgm;
        rd_kafka_rack_info_t *rkri = rd_calloc(1, sizeof(rd_kafka_rack_info_t));

        if (!rd_kafka_use_rack_aware_assignment(topics, topic_cnt, mdi)) {
                /* Free everything immediately, we aren't using rack aware
                   assignment, this struct is not applicable. */
                rd_free(rkri);
                return NULL;
        }

        /* Both maps only reference or partially own their contents:
         * no destructors for the referenced member/rack strings or the
         * metadata values. */
        rkri->member_id_to_rack_id = (map_str_str_t)RD_MAP_INITIALIZER(
            0, rd_map_str_cmp, rd_map_str_hash,
            NULL /* refs members.rkgm_member_id */,
            NULL /* refs members.rkgm_rack_id */);
        rkri->toppar_to_mdpi = (map_toppar_mdpi_t)RD_MAP_INITIALIZER(
            0, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
            rd_kafka_topic_partition_destroy_free, NULL);

        for (t = 0; t < topic_cnt; t++) {
                /* Record each member's rack id. */
                RD_LIST_FOREACH(rkgm, &topics[t]->members, i) {
                        RD_MAP_SET(&rkri->member_id_to_rack_id,
                                   rkgm->rkgm_member_id->str,
                                   rkgm->rkgm_rack_id->str);
                }

                /* Map each partition of this topic to its internal
                 * metadata (the map owns only the partition key copy). */
                for (i = 0; i < topics[t]->metadata->partition_cnt; i++) {
                        rd_kafka_topic_partition_t *rkpart =
                            rd_kafka_topic_partition_new(
                                topics[t]->metadata->topic, i);
                        RD_MAP_SET(
                            &rkri->toppar_to_mdpi, rkpart,
                            &topics[t]->metadata_internal->partitions[i]);
                }
        }

        return rkri;
}
266
+
267
+ /* Destroy a rd_kafka_rack_info_t. */
268
+ static void rd_kafka_rack_info_destroy(rd_kafka_rack_info_t *rkri) {
269
+ if (!rkri)
270
+ return;
271
+
272
+ RD_MAP_DESTROY(&rkri->member_id_to_rack_id);
273
+ RD_MAP_DESTROY(&rkri->toppar_to_mdpi);
274
+
275
+ rd_free(rkri);
276
+ }
277
+
278
+
279
+ /* Convenience function to bsearch inside the racks of a
280
+ * rd_kafka_metadata_partition_internal_t. */
281
+ static char *rd_kafka_partition_internal_find_rack(
282
+ rd_kafka_metadata_partition_internal_t *mdpi,
283
+ const char *rack) {
284
+ char **partition_racks = mdpi->racks;
285
+ size_t cnt = mdpi->racks_cnt;
286
+
287
+ void *res =
288
+ bsearch(&rack, partition_racks, cnt, sizeof(char *), rd_strcmp3);
289
+
290
+ if (res)
291
+ return *(char **)res;
292
+ return NULL;
293
+ }
294
+
295
+
296
+ /* Computes whether there is a rack mismatch between the rack of the consumer
297
+ * and the topic partition/any of its replicas. */
298
+ static rd_bool_t
299
+ rd_kafka_racks_mismatch(rd_kafka_rack_info_t *rkri,
300
+ const char *consumer,
301
+ const rd_kafka_topic_partition_t *topic_partition) {
302
+ const char *consumer_rack;
303
+ rd_kafka_metadata_partition_internal_t *mdpi;
304
+
305
+ if (rkri == NULL) /* Not using rack aware assignment */
306
+ return rd_false;
307
+
308
+ consumer_rack = RD_MAP_GET(&rkri->member_id_to_rack_id, consumer);
309
+
310
+ mdpi = RD_MAP_GET(&rkri->toppar_to_mdpi, topic_partition);
311
+
312
+ return consumer_rack != NULL &&
313
+ (mdpi == NULL ||
314
+ !rd_kafka_partition_internal_find_rack(mdpi, consumer_rack));
315
+ }
316
+
317
/**
 * @struct Provides current state of partition movements between consumers
 *         for each topic, and possible movements for each partition.
 */
typedef struct PartitionMovements_s {
        /** partition -> (src,dst) consumer pair of its latest movement.
         *  Owns the ConsumerPair_t values. */
        map_toppar_cpair_t partitionMovements;
        /** topic name -> { (src,dst) pair -> partitions moved between
         *  that pair }. Pair keys reference partitionMovements' values. */
        map_str_map_cpair_toppar_list_t partitionMovementsByTopic;
} PartitionMovements_t;
325
+
326
+
327
/**
 * @brief Initialize \p pmov, pre-sizing its maps based on \p topic_cnt.
 */
static void PartitionMovements_init(PartitionMovements_t *pmov,
                                    size_t topic_cnt) {
        /* ConsumerPair_t values are owned (and freed) by this map. */
        RD_MAP_INIT(&pmov->partitionMovements, topic_cnt * 3,
                    rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
                    NULL, ConsumerPair_free);

        /* Topic-name keys are referenced; per-topic map values are owned. */
        RD_MAP_INIT(&pmov->partitionMovementsByTopic, topic_cnt, rd_map_str_cmp,
                    rd_map_str_hash, NULL, map_cpair_toppar_list_t_free);
}
336
+
337
/**
 * @brief Destroy the maps in \p pmov (the struct itself is not freed).
 *
 * The by-topic map is destroyed first: its ConsumerPair_t keys reference
 * values owned by \c partitionMovements, which is destroyed last.
 */
static void PartitionMovements_destroy(PartitionMovements_t *pmov) {
        RD_MAP_DESTROY(&pmov->partitionMovementsByTopic);
        RD_MAP_DESTROY(&pmov->partitionMovements);
}
341
+
342
+
343
/**
 * @brief Remove the per-topic movement bookkeeping for \p toppar.
 *
 * Deletes \p toppar from the partition list of its recorded (src,dst)
 * consumer pair and prunes the pair entry and topic entry once empty.
 *
 * @remark The entry in \c pmov->partitionMovements itself is NOT deleted
 *         here; the caller overwrites or reuses it.
 *
 * @returns the ConsumerPair_t the partition was recorded as moving
 *          between (ownership remains with \c pmov->partitionMovements).
 */
static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition(
    PartitionMovements_t *pmov,
    const rd_kafka_topic_partition_t *toppar) {

        ConsumerPair_t *cpair;
        map_cpair_toppar_list_t *partitionMovementsForThisTopic;
        rd_kafka_topic_partition_list_t *plist;

        /* A movement record must exist for this partition. */
        cpair = RD_MAP_GET(&pmov->partitionMovements, toppar);
        rd_assert(cpair);

        partitionMovementsForThisTopic =
            RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic);

        plist = RD_MAP_GET(partitionMovementsForThisTopic, cpair);
        rd_assert(plist);

        rd_kafka_topic_partition_list_del(plist, toppar->topic,
                                          toppar->partition);
        /* Prune empty containers so stale (src,dst) pairs and topics
         * don't linger in the by-topic map. */
        if (plist->cnt == 0)
                RD_MAP_DELETE(partitionMovementsForThisTopic, cpair);
        if (RD_MAP_IS_EMPTY(partitionMovementsForThisTopic))
                RD_MAP_DELETE(&pmov->partitionMovementsByTopic, toppar->topic);

        return cpair;
}
369
+
370
/**
 * @brief Record that \p toppar moved between the consumers in \p cpair.
 *
 * Ownership of \p cpair is taken by \c pmov->partitionMovements
 * (whose value destructor is ConsumerPair_free); the by-topic map
 * references the same pair as a key.
 */
static void PartitionMovements_addPartitionMovementRecord(
    PartitionMovements_t *pmov,
    const rd_kafka_topic_partition_t *toppar,
    ConsumerPair_t *cpair) {
        map_cpair_toppar_list_t *partitionMovementsForThisTopic;
        rd_kafka_topic_partition_list_t *plist;

        RD_MAP_SET(&pmov->partitionMovements, toppar, cpair);

        /* Lazily create the per-topic pair map. */
        partitionMovementsForThisTopic =
            RD_MAP_GET_OR_SET(&pmov->partitionMovementsByTopic, toppar->topic,
                              map_cpair_toppar_list_t_new());

        /* Lazily create the partition list for this (src,dst) pair. */
        plist = RD_MAP_GET_OR_SET(partitionMovementsForThisTopic, cpair,
                                  rd_kafka_topic_partition_list_new(16));

        rd_kafka_topic_partition_list_add(plist, toppar->topic,
                                          toppar->partition);
}
389
+
390
/**
 * @brief Record the movement of \p toppar from \p old_consumer to
 *        \p new_consumer.
 *
 * If the partition already moved earlier in this rebalance (A -> B),
 * a subsequent move B -> C is collapsed into a single A -> C record;
 * if it moves back to its original consumer (B -> A) no new record is
 * added.
 */
static void
PartitionMovements_movePartition(PartitionMovements_t *pmov,
                                 const rd_kafka_topic_partition_t *toppar,
                                 const char *old_consumer,
                                 const char *new_consumer) {

        if (RD_MAP_GET(&pmov->partitionMovements, toppar)) {
                /* This partition has previously moved */
                ConsumerPair_t *existing_cpair;

                existing_cpair =
                    PartitionMovements_removeMovementRecordOfPartition(pmov,
                                                                       toppar);

                /* The current owner must be the destination of the
                 * previously recorded movement. */
                rd_assert(!rd_strcmp(existing_cpair->dst, old_consumer));

                if (rd_strcmp(existing_cpair->src, new_consumer)) {
                        /* Partition is not moving back to its
                         * previous consumer */
                        PartitionMovements_addPartitionMovementRecord(
                            pmov, toppar,
                            ConsumerPair_new(existing_cpair->src,
                                             new_consumer));
                }
        } else {
                PartitionMovements_addPartitionMovementRecord(
                    pmov, toppar, ConsumerPair_new(old_consumer, new_consumer));
        }
}
419
+
420
+ static const rd_kafka_topic_partition_t *
421
+ PartitionMovements_getTheActualPartitionToBeMoved(
422
+ PartitionMovements_t *pmov,
423
+ const rd_kafka_topic_partition_t *toppar,
424
+ const char *oldConsumer,
425
+ const char *newConsumer) {
426
+
427
+ ConsumerPair_t *cpair;
428
+ ConsumerPair_t reverse_cpair = {.src = newConsumer, .dst = oldConsumer};
429
+ map_cpair_toppar_list_t *partitionMovementsForThisTopic;
430
+ rd_kafka_topic_partition_list_t *plist;
431
+
432
+ if (!RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic))
433
+ return toppar;
434
+
435
+ cpair = RD_MAP_GET(&pmov->partitionMovements, toppar);
436
+ if (cpair) {
437
+ /* This partition has previously moved */
438
+ rd_assert(!rd_strcmp(oldConsumer, cpair->dst));
439
+
440
+ oldConsumer = cpair->src;
441
+ }
442
+
443
+ partitionMovementsForThisTopic =
444
+ RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic);
445
+
446
+ plist = RD_MAP_GET(partitionMovementsForThisTopic, &reverse_cpair);
447
+ if (!plist)
448
+ return toppar;
449
+
450
+ return &plist->elems[0];
451
+ }
452
+
453
#if FIXME /* NOTE: FIXME is not defined, so this block is compiled out
           * (undefined identifiers evaluate to 0 in #if). Kept for
           * eventual parity with the Java AbstractStickyAssignorTest. */

/** @brief Cycle detection placeholder: currently always reports a cycle. */
static rd_bool_t hasCycles(map_cpair_toppar_list_t *pairs) {
        return rd_true; // FIXME
}

/**
 * @brief Verify that no partition-movement cycles exist, i.e. that the
 *        assignment is truly sticky. Logs the offending consumer pairs
 *        per topic when violated.
 *
 * @remark This method is only used by the AbstractStickyAssignorTest
 *         in the Java client.
 */
static rd_bool_t PartitionMovements_isSticky(rd_kafka_t *rk,
                                             PartitionMovements_t *pmov) {
        const char *topic;
        map_cpair_toppar_list_t *topicMovementPairs;

        RD_MAP_FOREACH(topic, topicMovementPairs,
                       &pmov->partitionMovementsByTopic) {
                if (hasCycles(topicMovementPairs)) {
                        const ConsumerPair_t *cpair;
                        const rd_kafka_topic_partition_list_t *partitions;

                        rd_kafka_log(
                            rk, LOG_ERR, "STICKY",
                            "Sticky assignor: Stickiness is violated for "
                            "topic %s: partition movements for this topic "
                            "occurred among the following consumers: ",
                            topic);
                        RD_MAP_FOREACH(cpair, partitions, topicMovementPairs) {
                                rd_kafka_log(rk, LOG_ERR, "STICKY", " %s -> %s",
                                             cpair->src, cpair->dst);
                        }

                        if (partitions)
                                ; /* Avoid unused warning */

                        return rd_false;
                }
        }

        return rd_true;
}
#endif
495
+
496
+
497
+ /**
498
+ * @brief Comparator to sort ascendingly by rd_map_elem_t object value as
499
+ * topic partition list count, or by member id if the list count is
500
+ * identical.
501
+ * Used to sort sortedCurrentSubscriptions list.
502
+ *
503
+ * elem.key is the consumer member id string,
504
+ * elem.value is the partition list.
505
+ */
506
+ static int sort_by_map_elem_val_toppar_list_cnt(const void *_a,
507
+ const void *_b) {
508
+ const rd_map_elem_t *a = _a, *b = _b;
509
+ const rd_kafka_topic_partition_list_t *al = a->value, *bl = b->value;
510
+ int r = al->cnt - bl->cnt;
511
+ if (r)
512
+ return r;
513
+ return strcmp((const char *)a->key, (const char *)b->key);
514
+ }
515
+
516
+
517
/**
 * @brief Assign partition to the most eligible consumer.
 *
 * The assignment should improve the overall balance of the partition
 * assignments to consumers.
 *
 * @returns true if partition was assigned, false otherwise.
 */
static rd_bool_t
maybeAssignPartition(const rd_kafka_topic_partition_t *partition,
                     rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
                     map_str_toppar_list_t *currentAssignment,
                     map_str_toppar_list_t *consumer2AllPotentialPartitions,
                     map_toppar_str_t *currentPartitionConsumer,
                     rd_kafka_rack_info_t *rkri) {
        const rd_map_elem_t *elem;
        int i;

        /* sortedCurrentSubscriptions is ordered ascendingly by current
         * assignment count, so the first eligible consumer found is the
         * least loaded one. */
        RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
                const char *consumer = (const char *)elem->key;
                const rd_kafka_topic_partition_list_t *partitions;

                partitions =
                    RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
                /* Skip consumers that cannot be assigned this partition. */
                if (!rd_kafka_topic_partition_list_find(
                        partitions, partition->topic, partition->partition))
                        continue;
                /* Skip consumers whose rack matches none of the
                 * partition's replica racks. (The rkri != NULL test is
                 * redundant with the helper's own guard; kept as-is.) */
                if (rkri != NULL &&
                    rd_kafka_racks_mismatch(rkri, consumer, partition))
                        continue;

                rd_kafka_topic_partition_list_add(
                    RD_MAP_GET(currentAssignment, consumer), partition->topic,
                    partition->partition);

                /* Track reverse mapping; the map owns the partition copy. */
                RD_MAP_SET(currentPartitionConsumer,
                           rd_kafka_topic_partition_copy(partition), consumer);

                /* Re-sort sortedCurrentSubscriptions since this consumer's
                 * assignment count has increased.
                 * This is an O(N) operation since it is a single shuffle. */
                rd_list_sort(sortedCurrentSubscriptions,
                             sort_by_map_elem_val_toppar_list_cnt);
                return rd_true;
        }
        return rd_false;
}
563
+
564
+ /**
565
+ * @returns true if the partition has two or more potential consumers.
566
+ */
567
+ static RD_INLINE rd_bool_t partitionCanParticipateInReassignment(
568
+ const rd_kafka_topic_partition_t *partition,
569
+ map_toppar_list_t *partition2AllPotentialConsumers) {
570
+ rd_list_t *consumers;
571
+
572
+ if (!(consumers =
573
+ RD_MAP_GET(partition2AllPotentialConsumers, partition)))
574
+ return rd_false;
575
+
576
+ return rd_list_cnt(consumers) >= 2;
577
+ }
578
+
579
+
580
+ /**
581
+ * @returns true if consumer can participate in reassignment based on
582
+ * its current assignment.
583
+ */
584
+ static RD_INLINE rd_bool_t consumerCanParticipateInReassignment(
585
+ rd_kafka_t *rk,
586
+ const char *consumer,
587
+ map_str_toppar_list_t *currentAssignment,
588
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
589
+ map_toppar_list_t *partition2AllPotentialConsumers) {
590
+ const rd_kafka_topic_partition_list_t *currentPartitions =
591
+ RD_MAP_GET(currentAssignment, consumer);
592
+ int currentAssignmentSize = currentPartitions->cnt;
593
+ int maxAssignmentSize =
594
+ RD_MAP_GET(consumer2AllPotentialPartitions, consumer)->cnt;
595
+ int i;
596
+
597
+ /* FIXME: And then what? Is this a local error? If so, assert. */
598
+ if (currentAssignmentSize > maxAssignmentSize)
599
+ rd_kafka_log(rk, LOG_ERR, "STICKY",
600
+ "Sticky assignor error: "
601
+ "Consumer %s is assigned more partitions (%d) "
602
+ "than the maximum possible (%d)",
603
+ consumer, currentAssignmentSize,
604
+ maxAssignmentSize);
605
+
606
+ /* If a consumer is not assigned all its potential partitions it is
607
+ * subject to reassignment. */
608
+ if (currentAssignmentSize < maxAssignmentSize)
609
+ return rd_true;
610
+
611
+ /* If any of the partitions assigned to a consumer is subject to
612
+ * reassignment the consumer itself is subject to reassignment. */
613
+ for (i = 0; i < currentPartitions->cnt; i++) {
614
+ const rd_kafka_topic_partition_t *partition =
615
+ &currentPartitions->elems[i];
616
+
617
+ if (partitionCanParticipateInReassignment(
618
+ partition, partition2AllPotentialConsumers))
619
+ return rd_true;
620
+ }
621
+
622
+ return rd_false;
623
+ }
624
+
625
+
626
+ /**
627
+ * @brief Process moving partition from old consumer to new consumer.
628
+ */
629
+ static void processPartitionMovement(
630
+ rd_kafka_t *rk,
631
+ PartitionMovements_t *partitionMovements,
632
+ const rd_kafka_topic_partition_t *partition,
633
+ const char *newConsumer,
634
+ map_str_toppar_list_t *currentAssignment,
635
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
636
+ map_toppar_str_t *currentPartitionConsumer) {
637
+
638
+ const char *oldConsumer =
639
+ RD_MAP_GET(currentPartitionConsumer, partition);
640
+
641
+ PartitionMovements_movePartition(partitionMovements, partition,
642
+ oldConsumer, newConsumer);
643
+
644
+ rd_kafka_topic_partition_list_add(
645
+ RD_MAP_GET(currentAssignment, newConsumer), partition->topic,
646
+ partition->partition);
647
+
648
+ rd_kafka_topic_partition_list_del(
649
+ RD_MAP_GET(currentAssignment, oldConsumer), partition->topic,
650
+ partition->partition);
651
+
652
+ RD_MAP_SET(currentPartitionConsumer,
653
+ rd_kafka_topic_partition_copy(partition), newConsumer);
654
+
655
+ /* Re-sort after assignment count has changed. */
656
+ rd_list_sort(sortedCurrentSubscriptions,
657
+ sort_by_map_elem_val_toppar_list_cnt);
658
+
659
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
660
+ "%s [%" PRId32 "] %sassigned to %s (from %s)",
661
+ partition->topic, partition->partition,
662
+ oldConsumer ? "re" : "", newConsumer,
663
+ oldConsumer ? oldConsumer : "(none)");
664
+ }
665
+
666
+
667
+ /**
668
+ * @brief Reassign \p partition to \p newConsumer
669
+ */
670
+ static void reassignPartitionToConsumer(
671
+ rd_kafka_t *rk,
672
+ PartitionMovements_t *partitionMovements,
673
+ const rd_kafka_topic_partition_t *partition,
674
+ map_str_toppar_list_t *currentAssignment,
675
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
676
+ map_toppar_str_t *currentPartitionConsumer,
677
+ const char *newConsumer) {
678
+
679
+ const char *consumer = RD_MAP_GET(currentPartitionConsumer, partition);
680
+ const rd_kafka_topic_partition_t *partitionToBeMoved;
681
+
682
+ /* Find the correct partition movement considering
683
+ * the stickiness requirement. */
684
+ partitionToBeMoved = PartitionMovements_getTheActualPartitionToBeMoved(
685
+ partitionMovements, partition, consumer, newConsumer);
686
+
687
+ processPartitionMovement(rk, partitionMovements, partitionToBeMoved,
688
+ newConsumer, currentAssignment,
689
+ sortedCurrentSubscriptions,
690
+ currentPartitionConsumer);
691
+ }
692
+
693
+ /**
694
+ * @brief Reassign \p partition to an eligible new consumer.
695
+ */
696
+ static void
697
+ reassignPartition(rd_kafka_t *rk,
698
+ PartitionMovements_t *partitionMovements,
699
+ const rd_kafka_topic_partition_t *partition,
700
+ map_str_toppar_list_t *currentAssignment,
701
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
702
+ map_toppar_str_t *currentPartitionConsumer,
703
+ map_str_toppar_list_t *consumer2AllPotentialPartitions) {
704
+
705
+ const rd_map_elem_t *elem;
706
+ int i;
707
+
708
+ /* Find the new consumer */
709
+ RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
710
+ const char *newConsumer = (const char *)elem->key;
711
+
712
+ if (rd_kafka_topic_partition_list_find(
713
+ RD_MAP_GET(consumer2AllPotentialPartitions,
714
+ newConsumer),
715
+ partition->topic, partition->partition)) {
716
+ reassignPartitionToConsumer(
717
+ rk, partitionMovements, partition,
718
+ currentAssignment, sortedCurrentSubscriptions,
719
+ currentPartitionConsumer, newConsumer);
720
+
721
+ return;
722
+ }
723
+ }
724
+
725
+ rd_assert(!*"reassignPartition(): no new consumer found");
726
+ }
727
+
728
+
729
+
730
/**
 * @brief Determine if the current assignment is balanced.
 *
 * @param currentAssignment the assignment whose balance needs to be checked
 * @param sortedCurrentSubscriptions an ascending sorted set of consumers based
 *                                   on how many topic partitions are already
 *                                   assigned to them
 * @param consumer2AllPotentialPartitions a mapping of all consumers to all
 *                                        potential topic partitions that can be
 *                                        assigned to them.
 *                                        This parameter is called
 *                                        allSubscriptions in the Java
 *                                        implementation, but we choose this
 *                                        name to be more consistent with its
 *                                        use elsewhere in the code.
 * @param partition2AllPotentialConsumers a mapping of all partitions to
 *                                        all potential consumers.
 *
 * @returns true if the given assignment is balanced; false otherwise
 */
static rd_bool_t
isBalanced(rd_kafka_t *rk,
           map_str_toppar_list_t *currentAssignment,
           const rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
           map_str_toppar_list_t *consumer2AllPotentialPartitions,
           map_toppar_list_t *partition2AllPotentialConsumers) {

        /* sortedCurrentSubscriptions is sorted ascendingly by assignment
         * count, so the first/last elements hold the minimum/maximum
         * per-consumer partition counts. */
        int minimum = ((const rd_kafka_topic_partition_list_t
                            *)((const rd_map_elem_t *)rd_list_first(
                                   sortedCurrentSubscriptions))
                           ->value)
                          ->cnt;
        int maximum = ((const rd_kafka_topic_partition_list_t
                            *)((const rd_map_elem_t *)rd_list_last(
                                   sortedCurrentSubscriptions))
                           ->value)
                          ->cnt;

        /* Iterators */
        const rd_kafka_topic_partition_list_t *partitions;
        const char *consumer;
        const rd_map_elem_t *elem;
        int i;

        /* The assignment is balanced if minimum and maximum numbers of
         * partitions assigned to consumers differ by at most one. */
        if (minimum >= maximum - 1) {
                rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
                             "Assignment is balanced: "
                             "minimum %d and maximum %d partitions assigned "
                             "to each consumer",
                             minimum, maximum);
                return rd_true;
        }

        /* Mapping from partitions to the consumer assigned to them.
         * Keys and values are borrowed (not copied), so destroying this
         * map frees nothing. */
        map_toppar_str_t allPartitions = RD_MAP_INITIALIZER(
            RD_MAP_CNT(partition2AllPotentialConsumers),
            rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
            NULL /* references currentAssignment */,
            NULL /* references currentAssignment */);

        /* Create a mapping from partitions to the consumer assigned to them */
        RD_MAP_FOREACH(consumer, partitions, currentAssignment) {

                for (i = 0; i < partitions->cnt; i++) {
                        const rd_kafka_topic_partition_t *partition =
                            &partitions->elems[i];
                        const char *existing;
                        /* A partition appearing twice indicates a bug
                         * upstream of this check; log but keep going with
                         * the latest owner. */
                        if ((existing = RD_MAP_GET(&allPartitions, partition)))
                                rd_kafka_log(rk, LOG_ERR, "STICKY",
                                             "Sticky assignor: %s [%" PRId32
                                             "] "
                                             "is assigned to more than one "
                                             "consumer (%s and %s)",
                                             partition->topic,
                                             partition->partition, existing,
                                             consumer);

                        RD_MAP_SET(&allPartitions, partition, consumer);
                }
        }


        /* For each consumer that does not have all the topic partitions it
         * can get make sure none of the topic partitions it could but did
         * not get cannot be moved to it, because that would break the balance.
         *
         * Note: Since sortedCurrentSubscriptions elements are pointers to
         *       currentAssignment's element we get both the consumer
         *       and partition list in elem here. */
        RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
                int j;
                const char *consumer = (const char *)elem->key;
                const rd_kafka_topic_partition_list_t *potentialTopicPartitions;
                const rd_kafka_topic_partition_list_t *consumerPartitions;

                consumerPartitions =
                    (const rd_kafka_topic_partition_list_t *)elem->value;

                potentialTopicPartitions =
                    RD_MAP_GET(consumer2AllPotentialPartitions, consumer);

                /* Skip if this consumer already has all the topic partitions
                 * it can get. */
                if (consumerPartitions->cnt == potentialTopicPartitions->cnt)
                        continue;

                /* Otherwise make sure it can't get any more partitions */

                for (j = 0; j < potentialTopicPartitions->cnt; j++) {
                        const rd_kafka_topic_partition_t *partition =
                            &potentialTopicPartitions->elems[j];
                        const char *otherConsumer;
                        int otherConsumerPartitionCount;

                        /* Already owned by this consumer: not a candidate
                         * for a balancing move. */
                        if (rd_kafka_topic_partition_list_find(
                                consumerPartitions, partition->topic,
                                partition->partition))
                                continue;

                        otherConsumer = RD_MAP_GET(&allPartitions, partition);
                        otherConsumerPartitionCount =
                            RD_MAP_GET(currentAssignment, otherConsumer)->cnt;

                        /* If the current owner holds strictly more
                         * partitions than this consumer, moving the
                         * partition here would improve balance, so the
                         * assignment is not balanced. */
                        if (consumerPartitions->cnt <
                            otherConsumerPartitionCount) {
                                rd_kafka_dbg(
                                    rk, ASSIGNOR, "STICKY",
                                    "%s [%" PRId32
                                    "] can be moved from "
                                    "consumer %s (%d partition(s)) to "
                                    "consumer %s (%d partition(s)) "
                                    "for a more balanced assignment",
                                    partition->topic, partition->partition,
                                    otherConsumer, otherConsumerPartitionCount,
                                    consumer, consumerPartitions->cnt);
                                RD_MAP_DESTROY(&allPartitions);
                                return rd_false;
                        }
                }
        }

        RD_MAP_DESTROY(&allPartitions);
        return rd_true;
}
876
+
877
+
878
/**
 * @brief Perform reassignment.
 *
 *        Repeatedly walks the reassignable partitions and moves each one
 *        to a better-suited consumer, until a full pass makes no change
 *        or the assignment becomes balanced.
 *
 * @returns true if reassignment was performed.
 */
static rd_bool_t
performReassignments(rd_kafka_t *rk,
                     PartitionMovements_t *partitionMovements,
                     rd_kafka_topic_partition_list_t *reassignablePartitions,
                     map_str_toppar_list_t *currentAssignment,
                     map_toppar_cgpair_t *prevAssignment,
                     rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
                     map_str_toppar_list_t *consumer2AllPotentialPartitions,
                     map_toppar_list_t *partition2AllPotentialConsumers,
                     map_toppar_str_t *currentPartitionConsumer,
                     rd_kafka_rack_info_t *rkri) {
        rd_bool_t reassignmentPerformed = rd_false;
        rd_bool_t modified, saveIsBalanced = rd_false;
        int iterations = 0;

        /* Repeat reassignment until no partition can be moved to
         * improve the balance. */
        do {
                int i;

                iterations++;

                modified = rd_false;

                /* Reassign all reassignable partitions (starting from the
                 * partition with least potential consumers and if needed)
                 * until the full list is processed or a balance is achieved. */

                for (i = 0; i < reassignablePartitions->cnt &&
                            !isBalanced(rk, currentAssignment,
                                        sortedCurrentSubscriptions,
                                        consumer2AllPotentialPartitions,
                                        partition2AllPotentialConsumers);
                     i++) {
                        const rd_kafka_topic_partition_t *partition =
                            &reassignablePartitions->elems[i];
                        const rd_list_t *consumers = RD_MAP_GET(
                            partition2AllPotentialConsumers, partition);
                        const char *consumer, *otherConsumer;
                        const ConsumerGenerationPair_t *prevcgp;
                        const rd_kafka_topic_partition_list_t *currAssignment;
                        int j;
                        rd_bool_t found_rack;
                        const char *consumer_rack = NULL;
                        rd_kafka_metadata_partition_internal_t *mdpi = NULL;

                        /* FIXME: Is this a local error/bug? If so, assert */
                        if (rd_list_cnt(consumers) <= 1)
                                rd_kafka_log(
                                    rk, LOG_ERR, "STICKY",
                                    "Sticky assignor: expected more than "
                                    "one potential consumer for partition "
                                    "%s [%" PRId32 "]",
                                    partition->topic, partition->partition);

                        /* The partition must have a current consumer */
                        consumer =
                            RD_MAP_GET(currentPartitionConsumer, partition);
                        rd_assert(consumer);

                        currAssignment =
                            RD_MAP_GET(currentAssignment, consumer);
                        prevcgp = RD_MAP_GET(prevAssignment, partition);

                        /* Stickiness first: if the partition's
                         * previous-generation owner now has at least two
                         * fewer partitions than the current owner, move it
                         * back there. */
                        if (prevcgp &&
                            currAssignment->cnt >
                                RD_MAP_GET(currentAssignment, prevcgp->consumer)
                                        ->cnt +
                                    1) {
                                reassignPartitionToConsumer(
                                    rk, partitionMovements, partition,
                                    currentAssignment,
                                    sortedCurrentSubscriptions,
                                    currentPartitionConsumer,
                                    prevcgp->consumer);
                                reassignmentPerformed = rd_true;
                                modified = rd_true;
                                continue;
                        }

                        /* Check if a better-suited consumer exists for the
                         * partition; if so, reassign it. Use consumer within
                         * rack if possible. */
                        if (rkri) {
                                consumer_rack = RD_MAP_GET(
                                    &rkri->member_id_to_rack_id, consumer);
                                mdpi = RD_MAP_GET(&rkri->toppar_to_mdpi,
                                                  partition);
                        }
                        found_rack = rd_false;

                        /* Rack-aware pass: only attempted when the current
                         * owner's rack actually hosts a replica of this
                         * partition. */
                        if (consumer_rack != NULL && mdpi != NULL &&
                            mdpi->racks_cnt > 0 &&
                            rd_kafka_partition_internal_find_rack(
                                mdpi, consumer_rack)) {
                                RD_LIST_FOREACH(otherConsumer, consumers, j) {
                                        /* No need for rkri == NULL check, that
                                         * is guaranteed if we're inside this if
                                         * block. */
                                        const char *other_consumer_rack =
                                            RD_MAP_GET(
                                                &rkri->member_id_to_rack_id,
                                                otherConsumer);

                                        if (other_consumer_rack == NULL ||
                                            !rd_kafka_partition_internal_find_rack(
                                                mdpi, other_consumer_rack))
                                                continue;

                                        /* Only move if it strictly improves
                                         * balance (target has at least two
                                         * fewer partitions). */
                                        if (currAssignment->cnt <=
                                            RD_MAP_GET(currentAssignment,
                                                       otherConsumer)
                                                    ->cnt +
                                                1)
                                                continue;

                                        reassignPartition(
                                            rk, partitionMovements, partition,
                                            currentAssignment,
                                            sortedCurrentSubscriptions,
                                            currentPartitionConsumer,
                                            consumer2AllPotentialPartitions);

                                        reassignmentPerformed = rd_true;
                                        modified = rd_true;
                                        found_rack = rd_true;
                                        break;
                                }
                        }

                        if (found_rack) {
                                continue;
                        }

                        /* Rack-agnostic pass: move to any consumer with at
                         * least two fewer partitions than the current
                         * owner. */
                        RD_LIST_FOREACH(otherConsumer, consumers, j) {
                                if (consumer == otherConsumer)
                                        continue;

                                if (currAssignment->cnt <=
                                    RD_MAP_GET(currentAssignment, otherConsumer)
                                            ->cnt +
                                        1)
                                        continue;

                                reassignPartition(
                                    rk, partitionMovements, partition,
                                    currentAssignment,
                                    sortedCurrentSubscriptions,
                                    currentPartitionConsumer,
                                    consumer2AllPotentialPartitions);

                                reassignmentPerformed = rd_true;
                                modified = rd_true;
                                break;
                        }
                }

                /* An early exit from the for loop means isBalanced()
                 * returned true. */
                if (i < reassignablePartitions->cnt)
                        saveIsBalanced = rd_true;

        } while (modified);

        rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
                     "Reassignment %sperformed after %d iteration(s) of %d "
                     "reassignable partition(s)%s",
                     reassignmentPerformed ? "" : "not ", iterations,
                     reassignablePartitions->cnt,
                     saveIsBalanced ? ": assignment is balanced" : "");

        return reassignmentPerformed;
}
1054
+
1055
+
1056
+ /**
1057
+ * @returns the balance score of the given assignment, as the sum of assigned
1058
+ * partitions size difference of all consumer pairs.
1059
+ *
1060
+ * A perfectly balanced assignment (with all consumers getting the same number
1061
+ * of partitions) has a balance score of 0.
1062
+ *
1063
+ * Lower balance score indicates a more balanced assignment.
1064
+ * FIXME: should be called imbalance score then?
1065
+ */
1066
+ static int getBalanceScore(map_str_toppar_list_t *assignment) {
1067
+ const char *consumer;
1068
+ const rd_kafka_topic_partition_list_t *partitions;
1069
+ int *sizes;
1070
+ int cnt = 0;
1071
+ int score = 0;
1072
+ int i, next;
1073
+
1074
+ /* If there is just a single consumer the assignment will be balanced */
1075
+ if (RD_MAP_CNT(assignment) < 2)
1076
+ return 0;
1077
+
1078
+ sizes = rd_malloc(sizeof(*sizes) * RD_MAP_CNT(assignment));
1079
+
1080
+ RD_MAP_FOREACH(consumer, partitions, assignment)
1081
+ sizes[cnt++] = partitions->cnt;
1082
+
1083
+ for (next = 0; next < cnt; next++)
1084
+ for (i = next + 1; i < cnt; i++)
1085
+ score += abs(sizes[next] - sizes[i]);
1086
+
1087
+ rd_free(sizes);
1088
+
1089
+ if (consumer)
1090
+ ; /* Avoid unused warning */
1091
+
1092
+ return score;
1093
+ }
1094
+
1095
+ static void maybeAssign(rd_kafka_topic_partition_list_t *unassignedPartitions,
1096
+ map_toppar_list_t *partition2AllPotentialConsumers,
1097
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
1098
+ map_str_toppar_list_t *currentAssignment,
1099
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
1100
+ map_toppar_str_t *currentPartitionConsumer,
1101
+ rd_bool_t removeAssigned,
1102
+ rd_kafka_rack_info_t *rkri) {
1103
+ int i;
1104
+ const rd_kafka_topic_partition_t *partition;
1105
+
1106
+ for (i = 0; i < unassignedPartitions->cnt; i++) {
1107
+ partition = &unassignedPartitions->elems[i];
1108
+ rd_bool_t assigned;
1109
+
1110
+ /* Skip if there is no potential consumer for the partition.
1111
+ * FIXME: How could this be? */
1112
+ if (rd_list_empty(RD_MAP_GET(partition2AllPotentialConsumers,
1113
+ partition))) {
1114
+ rd_dassert(!*"sticky assignor bug");
1115
+ continue;
1116
+ }
1117
+
1118
+ assigned = maybeAssignPartition(
1119
+ partition, sortedCurrentSubscriptions, currentAssignment,
1120
+ consumer2AllPotentialPartitions, currentPartitionConsumer,
1121
+ rkri);
1122
+ if (assigned && removeAssigned) {
1123
+ rd_kafka_topic_partition_list_del_by_idx(
1124
+ unassignedPartitions, i);
1125
+ i--; /* Since the current element was
1126
+ * removed we need the next for
1127
+ * loop iteration to stay at the
1128
+ * same index. */
1129
+ }
1130
+ }
1131
+ }
1132
+
1133
/**
 * @brief Balance the current assignment using the data structures
 *        created in assign_cb().
 *
 *        Steps: assign still-unassigned partitions, narrow the
 *        reassignment scope to movable partitions/consumers, perform
 *        reassignments, and revert if the balance score did not improve.
 */
static void balance(rd_kafka_t *rk,
                    PartitionMovements_t *partitionMovements,
                    map_str_toppar_list_t *currentAssignment,
                    map_toppar_cgpair_t *prevAssignment,
                    rd_kafka_topic_partition_list_t *sortedPartitions,
                    rd_kafka_topic_partition_list_t *unassignedPartitions,
                    rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
                    map_str_toppar_list_t *consumer2AllPotentialPartitions,
                    map_toppar_list_t *partition2AllPotentialConsumers,
                    map_toppar_str_t *currentPartitionConsumer,
                    rd_bool_t revocationRequired,
                    rd_kafka_rack_info_t *rkri) {

        /* If the consumer with most assignments (thus the last element
         * in the ascendingly ordered sortedCurrentSubscriptions list) has
         * zero partitions assigned it means there is no current assignment
         * for any consumer and the group is thus initializing for the first
         * time. */
        rd_bool_t initializing = ((const rd_kafka_topic_partition_list_t
                                       *)((const rd_map_elem_t *)rd_list_last(
                                              sortedCurrentSubscriptions))
                                      ->value)
                                     ->cnt == 0;
        rd_bool_t reassignmentPerformed = rd_false;

        map_str_toppar_list_t fixedAssignments =
            RD_MAP_INITIALIZER(RD_MAP_CNT(partition2AllPotentialConsumers),
                               rd_map_str_cmp,
                               rd_map_str_hash,
                               NULL,
                               NULL /* Will transfer ownership of the list
                                     * to currentAssignment at the end of
                                     * this function. */);

        /* Deep-copy snapshots used to revert if the rebalance does not
         * improve the balance score. */
        map_str_toppar_list_t preBalanceAssignment = RD_MAP_INITIALIZER(
            RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash,
            NULL /* references currentAssignment */,
            rd_kafka_topic_partition_list_destroy_free);
        map_toppar_str_t preBalancePartitionConsumers = RD_MAP_INITIALIZER(
            RD_MAP_CNT(partition2AllPotentialConsumers),
            rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
            rd_kafka_topic_partition_destroy_free,
            NULL /* refs currentPartitionConsumer */);
        int newScore, oldScore;
        /* Iterator variables */
        const rd_kafka_topic_partition_t *partition;
        const void *ignore;
        const rd_map_elem_t *elem;
        int i;
        rd_kafka_topic_partition_list_t *leftoverUnassignedPartitions;
        rd_bool_t leftoverUnassignedPartitions_allocated = rd_false;

        leftoverUnassignedPartitions =
            unassignedPartitions; /* copy on write. */

        if (rkri != NULL && RD_MAP_CNT(&rkri->member_id_to_rack_id) != 0) {
                leftoverUnassignedPartitions_allocated = rd_true;
                /* Since maybeAssign is called twice, we keep track of those
                 * partitions which the first call has taken care of already,
                 * but we don't want to modify the original
                 * unassignedPartitions. */
                leftoverUnassignedPartitions =
                    rd_kafka_topic_partition_list_copy(unassignedPartitions);
                /* First pass: rack-aware assignment (removeAssigned=true
                 * so the second pass only sees what is still left). */
                maybeAssign(leftoverUnassignedPartitions,
                            partition2AllPotentialConsumers,
                            sortedCurrentSubscriptions, currentAssignment,
                            consumer2AllPotentialPartitions,
                            currentPartitionConsumer, rd_true, rkri);
        }
        /* Second pass: rack-agnostic assignment of whatever remains. */
        maybeAssign(leftoverUnassignedPartitions,
                    partition2AllPotentialConsumers, sortedCurrentSubscriptions,
                    currentAssignment, consumer2AllPotentialPartitions,
                    currentPartitionConsumer, rd_false, NULL);

        if (leftoverUnassignedPartitions_allocated)
                rd_kafka_topic_partition_list_destroy(
                    leftoverUnassignedPartitions);


        /* Narrow down the reassignment scope to only those partitions that can
         * actually be reassigned. */
        RD_MAP_FOREACH(partition, ignore, partition2AllPotentialConsumers) {
                if (partitionCanParticipateInReassignment(
                        partition, partition2AllPotentialConsumers))
                        continue;

                rd_kafka_topic_partition_list_del(
                    sortedPartitions, partition->topic, partition->partition);
                rd_kafka_topic_partition_list_del(unassignedPartitions,
                                                  partition->topic,
                                                  partition->partition);
        }

        if (ignore)
                ; /* Avoid unused warning */


        /* Narrow down the reassignment scope to only those consumers that are
         * subject to reassignment.
         * Non-participating consumers are parked in fixedAssignments and
         * re-added after balancing. */
        RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
                const char *consumer = (const char *)elem->key;
                rd_kafka_topic_partition_list_t *partitions;

                if (consumerCanParticipateInReassignment(
                        rk, consumer, currentAssignment,
                        consumer2AllPotentialPartitions,
                        partition2AllPotentialConsumers))
                        continue;

                rd_list_remove_elem(sortedCurrentSubscriptions, i);
                i--; /* Since the current element is removed we need
                      * to rewind the iterator. */

                partitions = rd_kafka_topic_partition_list_copy(
                    RD_MAP_GET(currentAssignment, consumer));
                RD_MAP_DELETE(currentAssignment, consumer);

                RD_MAP_SET(&fixedAssignments, consumer, partitions);
        }


        rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
                     "Prepared balanced reassignment for %d consumers, "
                     "%d available partition(s) where of %d are unassigned "
                     "(initializing=%s, revocationRequired=%s, "
                     "%d fixed assignments)",
                     (int)RD_MAP_CNT(consumer2AllPotentialPartitions),
                     sortedPartitions->cnt, unassignedPartitions->cnt,
                     initializing ? "true" : "false",
                     revocationRequired ? "true" : "false",
                     (int)RD_MAP_CNT(&fixedAssignments));

        /* Create a deep copy of the current assignment so we can revert to it
         * if we do not get a more balanced assignment later. */
        RD_MAP_COPY(&preBalanceAssignment, currentAssignment,
                    NULL /* just reference the key */,
                    (rd_map_copy_t *)rd_kafka_topic_partition_list_copy);
        RD_MAP_COPY(&preBalancePartitionConsumers, currentPartitionConsumer,
                    rd_kafka_topic_partition_copy_void,
                    NULL /* references assign_cb(members) fields */);


        /* If we don't already need to revoke something due to subscription
         * changes, first try to balance by only moving newly added partitions.
         */
        if (!revocationRequired && unassignedPartitions->cnt > 0)
                performReassignments(rk, partitionMovements,
                                     unassignedPartitions, currentAssignment,
                                     prevAssignment, sortedCurrentSubscriptions,
                                     consumer2AllPotentialPartitions,
                                     partition2AllPotentialConsumers,
                                     currentPartitionConsumer, rkri);

        /* Then balance over the full set of (sorted) reassignable
         * partitions. */
        reassignmentPerformed = performReassignments(
            rk, partitionMovements, sortedPartitions, currentAssignment,
            prevAssignment, sortedCurrentSubscriptions,
            consumer2AllPotentialPartitions, partition2AllPotentialConsumers,
            currentPartitionConsumer, rkri);

        /* If we are not preserving existing assignments and we have made
         * changes to the current assignment make sure we are getting a more
         * balanced assignment; otherwise, revert to previous assignment. */

        if (!initializing && reassignmentPerformed &&
            (newScore = getBalanceScore(currentAssignment)) >=
                (oldScore = getBalanceScore(&preBalanceAssignment))) {

                rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
                             "Reassignment performed but keeping previous "
                             "assignment since balance score did not improve: "
                             "new score %d (%d consumers) vs "
                             "old score %d (%d consumers): "
                             "lower score is better",
                             newScore, (int)RD_MAP_CNT(currentAssignment),
                             oldScore, (int)RD_MAP_CNT(&preBalanceAssignment));

                RD_MAP_COPY(
                    currentAssignment, &preBalanceAssignment,
                    NULL /* just reference the key */,
                    (rd_map_copy_t *)rd_kafka_topic_partition_list_copy);

                RD_MAP_CLEAR(currentPartitionConsumer);
                RD_MAP_COPY(currentPartitionConsumer,
                            &preBalancePartitionConsumers,
                            rd_kafka_topic_partition_copy_void,
                            NULL /* references assign_cb(members) fields */);
        }

        RD_MAP_DESTROY(&preBalancePartitionConsumers);
        RD_MAP_DESTROY(&preBalanceAssignment);

        /* Add the fixed assignments (those that could not change) back. */
        if (!RD_MAP_IS_EMPTY(&fixedAssignments)) {
                const rd_map_elem_t *elem;

                RD_MAP_FOREACH_ELEM(elem, &fixedAssignments.rmap) {
                        const char *consumer = elem->key;
                        rd_kafka_topic_partition_list_t *partitions =
                            (rd_kafka_topic_partition_list_t *)elem->value;

                        /* Ownership of the partition list transfers back
                         * to currentAssignment here (fixedAssignments was
                         * created with a NULL value destructor). */
                        RD_MAP_SET(currentAssignment, consumer, partitions);

                        rd_list_add(sortedCurrentSubscriptions, (void *)elem);
                }

                /* Re-sort */
                rd_list_sort(sortedCurrentSubscriptions,
                             sort_by_map_elem_val_toppar_list_cnt);
        }

        RD_MAP_DESTROY(&fixedAssignments);
}
1348
+
1349
+
1350
+
1351
+ /**
1352
+ * @brief Populate subscriptions, current and previous assignments based on the
1353
+ * \p members assignments.
1354
+ */
1355
+ static void prepopulateCurrentAssignments(
1356
+ rd_kafka_t *rk,
1357
+ rd_kafka_group_member_t *members,
1358
+ size_t member_cnt,
1359
+ map_str_toppar_list_t *subscriptions,
1360
+ map_str_toppar_list_t *currentAssignment,
1361
+ map_toppar_cgpair_t *prevAssignment,
1362
+ map_toppar_str_t *currentPartitionConsumer,
1363
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
1364
+ size_t estimated_partition_cnt) {
1365
+
1366
+ /* We need to process subscriptions' user data with each consumer's
1367
+ * reported generation in mind.
1368
+ * Higher generations overwrite lower generations in case of a conflict.
1369
+ * Conflicts will only exist if user data is for different generations.
1370
+ */
1371
+
1372
+ /* For each partition we create a sorted list (by generation) of
1373
+ * its consumers. */
1374
+ RD_MAP_LOCAL_INITIALIZER(
1375
+ sortedPartitionConsumersByGeneration, member_cnt * 10 /* FIXME */,
1376
+ const rd_kafka_topic_partition_t *,
1377
+ /* List of ConsumerGenerationPair_t */
1378
+ rd_list_t *, rd_kafka_topic_partition_cmp,
1379
+ rd_kafka_topic_partition_hash, NULL, rd_list_destroy_free);
1380
+ const rd_kafka_topic_partition_t *partition;
1381
+ rd_list_t *consumers;
1382
+ int i;
1383
+
1384
+ /* For each partition that is currently assigned to the group members
1385
+ * add the member and its generation to
1386
+ * sortedPartitionConsumersByGeneration (which is sorted afterwards)
1387
+ * indexed by the partition. */
1388
+ for (i = 0; i < (int)member_cnt; i++) {
1389
+ rd_kafka_group_member_t *consumer = &members[i];
1390
+ int j;
1391
+
1392
+ RD_MAP_SET(subscriptions, consumer->rkgm_member_id->str,
1393
+ consumer->rkgm_subscription);
1394
+
1395
+ RD_MAP_SET(currentAssignment, consumer->rkgm_member_id->str,
1396
+ rd_kafka_topic_partition_list_new(10));
1397
+
1398
+ RD_MAP_SET(consumer2AllPotentialPartitions,
1399
+ consumer->rkgm_member_id->str,
1400
+ rd_kafka_topic_partition_list_new(
1401
+ (int)estimated_partition_cnt));
1402
+
1403
+ if (!consumer->rkgm_owned)
1404
+ continue;
1405
+
1406
+ for (j = 0; j < (int)consumer->rkgm_owned->cnt; j++) {
1407
+ partition = &consumer->rkgm_owned->elems[j];
1408
+
1409
+ consumers = RD_MAP_GET_OR_SET(
1410
+ &sortedPartitionConsumersByGeneration, partition,
1411
+ rd_list_new(10, ConsumerGenerationPair_destroy));
1412
+
1413
+ rd_list_add(consumers,
1414
+ ConsumerGenerationPair_new(
1415
+ consumer->rkgm_member_id->str,
1416
+ consumer->rkgm_generation));
1417
+
1418
+ RD_MAP_SET(currentPartitionConsumer,
1419
+ rd_kafka_topic_partition_copy(partition),
1420
+ consumer->rkgm_member_id->str);
1421
+ }
1422
+ }
1423
+
1424
+ /* Populate currentAssignment and prevAssignment.
1425
+ * prevAssignment holds the prior ConsumerGenerationPair_t
1426
+ * (before current) of each partition. */
1427
+ RD_MAP_FOREACH(partition, consumers,
1428
+ &sortedPartitionConsumersByGeneration) {
1429
+ /* current and previous are the last two consumers
1430
+ * of each partition, and found is used to check for duplicate
1431
+ * consumers of same generation. */
1432
+ ConsumerGenerationPair_t *current, *previous, *found;
1433
+ rd_kafka_topic_partition_list_t *partitions;
1434
+
1435
+ /* Sort the per-partition consumers list by generation */
1436
+ rd_list_sort(consumers, ConsumerGenerationPair_cmp_generation);
1437
+
1438
+ /* In case a partition is claimed by multiple consumers with the
1439
+ * same generation, invalidate it for all such consumers, and
1440
+ * log an error for this situation. */
1441
+ if ((found = rd_list_find_duplicate(
1442
+ consumers, ConsumerGenerationPair_cmp_generation))) {
1443
+ const char *consumer1, *consumer2;
1444
+ int idx = rd_list_index(
1445
+ consumers, found,
1446
+ ConsumerGenerationPair_cmp_generation);
1447
+ consumer1 = ((ConsumerGenerationPair_t *)rd_list_elem(
1448
+ consumers, idx))
1449
+ ->consumer;
1450
+ consumer2 = ((ConsumerGenerationPair_t *)rd_list_elem(
1451
+ consumers, idx + 1))
1452
+ ->consumer;
1453
+
1454
+ RD_MAP_DELETE(currentPartitionConsumer, partition);
1455
+
1456
+ rd_kafka_log(
1457
+ rk, LOG_ERR, "STICKY",
1458
+ "Sticky assignor: Found multiple consumers %s and "
1459
+ "%s claiming the same topic partition %s:%d in the "
1460
+ "same generation %d, this will be invalidated and "
1461
+ "removed from their previous assignment.",
1462
+ consumer1, consumer2, partition->topic,
1463
+ partition->partition, found->generation);
1464
+ continue;
1465
+ }
1466
+
1467
+ /* Add current (highest generation) consumer
1468
+ * to currentAssignment. */
1469
+ current = rd_list_last(consumers);
1470
+ partitions = RD_MAP_GET(currentAssignment, current->consumer);
1471
+ rd_kafka_topic_partition_list_add(partitions, partition->topic,
1472
+ partition->partition);
1473
+
1474
+ /* Add previous (next highest generation) consumer, if any,
1475
+ * to prevAssignment. */
1476
+ if (rd_list_cnt(consumers) >= 2 &&
1477
+ (previous =
1478
+ rd_list_elem(consumers, rd_list_cnt(consumers) - 2)))
1479
+ RD_MAP_SET(
1480
+ prevAssignment,
1481
+ rd_kafka_topic_partition_copy(partition),
1482
+ ConsumerGenerationPair_new(previous->consumer,
1483
+ previous->generation));
1484
+ }
1485
+
1486
+ RD_MAP_DESTROY(&sortedPartitionConsumersByGeneration);
1487
+ }
1488
+
1489
+
1490
/**
 * @brief Populate maps for potential partitions per consumer and vice-versa.
 *
 * @param atopic Eligible topic (subscribed to by at least one member and
 *               present in metadata) whose partitions are added to the maps.
 * @param partition2AllPotentialConsumers Updated in place: each partition of
 *        \p atopic maps to an rd_list_t of member-id strings that may be
 *        assigned that partition. Map keys are copies owned by the map.
 * @param consumer2AllPotentialPartitions Updated in place: each subscribing
 *        member-id maps to the partition list it may be assigned. The entry
 *        for each member must already exist (asserted below).
 * @param estimated_partition_cnt Sizing hint for newly created lists.
 */
static void
populatePotentialMaps(const rd_kafka_assignor_topic_t *atopic,
                      map_toppar_list_t *partition2AllPotentialConsumers,
                      map_str_toppar_list_t *consumer2AllPotentialPartitions,
                      size_t estimated_partition_cnt) {
        int i;
        const rd_kafka_group_member_t *rkgm;

        /* for each eligible (subscribed and available) topic (\p atopic):
         *   for each member subscribing to that topic:
         *     and for each partition of that topic:
         *        add consumer and partition to:
         *         partition2AllPotentialConsumers
         *         consumer2AllPotentialPartitions
         */

        RD_LIST_FOREACH(rkgm, &atopic->members, i) {
                const char *consumer = rkgm->rkgm_member_id->str;
                rd_kafka_topic_partition_list_t *partitions =
                    RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
                int j;

                /* Entry was created up-front by the caller for every member. */
                rd_assert(partitions != NULL);

                for (j = 0; j < atopic->metadata->partition_cnt; j++) {
                        rd_kafka_topic_partition_t *partition;
                        rd_list_t *consumers;

                        /* consumer2AllPotentialPartitions[consumer] += part */
                        partition = rd_kafka_topic_partition_list_add(
                            partitions, atopic->metadata->topic,
                            atopic->metadata->partitions[j].id);

                        /* partition2AllPotentialConsumers[part] += consumer */
                        if (!(consumers =
                                  RD_MAP_GET(partition2AllPotentialConsumers,
                                             partition))) {
                                /* NOTE(review): consumer list pre-sized from
                                 * the partition-count estimate — presumably a
                                 * rough heuristic; confirm intent. */
                                consumers = rd_list_new(
                                    RD_MAX(2, (int)estimated_partition_cnt / 2),
                                    NULL);
                                /* Key is a copy owned (and later freed) by
                                 * the map. */
                                RD_MAP_SET(
                                    partition2AllPotentialConsumers,
                                    rd_kafka_topic_partition_copy(partition),
                                    consumers);
                        }
                        /* List only references the member-id string. */
                        rd_list_add(consumers, (void *)consumer);
                }
        }
}
1542
+
1543
+
1544
+ /**
1545
+ * @returns true if all consumers have identical subscriptions based on
1546
+ * the currently available topics and partitions.
1547
+ *
1548
+ * @remark The Java code checks both partition2AllPotentialConsumers and
1549
+ * and consumer2AllPotentialPartitions but since these maps
1550
+ * are symmetrical we only check one of them.
1551
+ * ^ FIXME, but we do.
1552
+ */
1553
+ static rd_bool_t areSubscriptionsIdentical(
1554
+ map_toppar_list_t *partition2AllPotentialConsumers,
1555
+ map_str_toppar_list_t *consumer2AllPotentialPartitions) {
1556
+ const void *ignore;
1557
+ const rd_list_t *lcurr, *lprev = NULL;
1558
+ const rd_kafka_topic_partition_list_t *pcurr, *pprev = NULL;
1559
+
1560
+ RD_MAP_FOREACH(ignore, lcurr, partition2AllPotentialConsumers) {
1561
+ if (lprev && rd_list_cmp(lcurr, lprev, rd_map_str_cmp))
1562
+ return rd_false;
1563
+ lprev = lcurr;
1564
+ }
1565
+
1566
+ RD_MAP_FOREACH(ignore, pcurr, consumer2AllPotentialPartitions) {
1567
+ if (pprev && rd_kafka_topic_partition_list_cmp(
1568
+ pcurr, pprev, rd_kafka_topic_partition_cmp))
1569
+ return rd_false;
1570
+ pprev = pcurr;
1571
+ }
1572
+
1573
+ if (ignore) /* Avoid unused warning */
1574
+ ;
1575
+
1576
+ return rd_true;
1577
+ }
1578
+
1579
+
1580
+ /**
1581
+ * @brief Comparator to sort an rd_kafka_topic_partition_list_t in ascending
1582
+ * order by the number of list elements in the .opaque field, or
1583
+ * secondarily by the topic name.
1584
+ * Used by sortPartitions().
1585
+ */
1586
+ static int
1587
+ toppar_sort_by_list_cnt(const void *_a, const void *_b, void *opaque) {
1588
+ const rd_kafka_topic_partition_t *a = _a, *b = _b;
1589
+ const rd_list_t *al = a->opaque, *bl = b->opaque;
1590
+ int r = rd_list_cnt(al) - rd_list_cnt(bl); /* ascending order */
1591
+ if (r)
1592
+ return r;
1593
+ return rd_kafka_topic_partition_cmp(a, b);
1594
+ }
1595
+
1596
+
1597
+ /**
1598
+ * @brief Sort valid partitions so they are processed in the potential
1599
+ * reassignment phase in the proper order that causes minimal partition
1600
+ * movement among consumers (hence honouring maximal stickiness).
1601
+ *
1602
+ * @returns The result of the partitions sort.
1603
+ */
1604
+ static rd_kafka_topic_partition_list_t *
1605
+ sortPartitions(rd_kafka_t *rk,
1606
+ map_str_toppar_list_t *currentAssignment,
1607
+ map_toppar_cgpair_t *prevAssignment,
1608
+ rd_bool_t isFreshAssignment,
1609
+ map_toppar_list_t *partition2AllPotentialConsumers,
1610
+ map_str_toppar_list_t *consumer2AllPotentialPartitions) {
1611
+
1612
+ rd_kafka_topic_partition_list_t *sortedPartitions;
1613
+ map_str_toppar_list_t assignments = RD_MAP_INITIALIZER(
1614
+ RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash,
1615
+ NULL, rd_kafka_topic_partition_list_destroy_free);
1616
+ rd_kafka_topic_partition_list_t *partitions;
1617
+ const rd_kafka_topic_partition_t *partition;
1618
+ const rd_list_t *consumers;
1619
+ const char *consumer;
1620
+ rd_list_t sortedConsumers; /* element is the (rd_map_elem_t *) from
1621
+ * assignments. */
1622
+ const rd_map_elem_t *elem;
1623
+ rd_bool_t wasEmpty;
1624
+ int i;
1625
+
1626
+ sortedPartitions = rd_kafka_topic_partition_list_new(
1627
+ (int)RD_MAP_CNT(partition2AllPotentialConsumers));
1628
+ ;
1629
+
1630
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
1631
+ "Sort %d partitions in %s assignment",
1632
+ (int)RD_MAP_CNT(partition2AllPotentialConsumers),
1633
+ isFreshAssignment ? "fresh" : "existing");
1634
+
1635
+ if (isFreshAssignment ||
1636
+ !areSubscriptionsIdentical(partition2AllPotentialConsumers,
1637
+ consumer2AllPotentialPartitions)) {
1638
+ /* Create an ascending sorted list of partitions based on
1639
+ * how many consumers can potentially use them. */
1640
+ RD_MAP_FOREACH(partition, consumers,
1641
+ partition2AllPotentialConsumers) {
1642
+ rd_kafka_topic_partition_list_add(sortedPartitions,
1643
+ partition->topic,
1644
+ partition->partition)
1645
+ ->opaque = (void *)consumers;
1646
+ }
1647
+
1648
+ rd_kafka_topic_partition_list_sort(
1649
+ sortedPartitions, toppar_sort_by_list_cnt, NULL);
1650
+
1651
+ RD_MAP_DESTROY(&assignments);
1652
+
1653
+ return sortedPartitions;
1654
+ }
1655
+
1656
+ /* If this is a reassignment and the subscriptions are identical
1657
+ * then we just need to list partitions in a round robin fashion
1658
+ * (from consumers with most assigned partitions to those
1659
+ * with least assigned partitions). */
1660
+
1661
+ /* Create an ascending sorted list of consumers by valid
1662
+ * partition count. The list element is the `rd_map_elem_t *`
1663
+ * of the assignments map. This allows us to get a sorted list
1664
+ * of consumers without too much data duplication. */
1665
+ rd_list_init(&sortedConsumers, (int)RD_MAP_CNT(currentAssignment),
1666
+ NULL);
1667
+
1668
+ RD_MAP_FOREACH(consumer, partitions, currentAssignment) {
1669
+ rd_kafka_topic_partition_list_t *partitions2;
1670
+
1671
+ /* Sort assigned partitions for consistency (during tests) */
1672
+ rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);
1673
+
1674
+ partitions2 =
1675
+ rd_kafka_topic_partition_list_new(partitions->cnt);
1676
+
1677
+ for (i = 0; i < partitions->cnt; i++) {
1678
+ partition = &partitions->elems[i];
1679
+
1680
+ /* Only add partitions from the current assignment
1681
+ * that still exist. */
1682
+ if (RD_MAP_GET(partition2AllPotentialConsumers,
1683
+ partition))
1684
+ rd_kafka_topic_partition_list_add(
1685
+ partitions2, partition->topic,
1686
+ partition->partition);
1687
+ }
1688
+
1689
+ if (partitions2->cnt > 0) {
1690
+ elem = RD_MAP_SET(&assignments, consumer, partitions2);
1691
+ rd_list_add(&sortedConsumers, (void *)elem);
1692
+ } else
1693
+ rd_kafka_topic_partition_list_destroy(partitions2);
1694
+ }
1695
+
1696
+ /* Sort consumers */
1697
+ rd_list_sort(&sortedConsumers, sort_by_map_elem_val_toppar_list_cnt);
1698
+
1699
+ /* At this point sortedConsumers contains an ascending-sorted list
1700
+ * of consumers based on how many valid partitions are currently
1701
+ * assigned to them. */
1702
+
1703
+ while (!rd_list_empty(&sortedConsumers)) {
1704
+ /* Take consumer with most partitions */
1705
+ const rd_map_elem_t *elem = rd_list_last(&sortedConsumers);
1706
+ const char *consumer = (const char *)elem->key;
1707
+ /* Currently assigned partitions to this consumer */
1708
+ rd_kafka_topic_partition_list_t *remainingPartitions =
1709
+ RD_MAP_GET(&assignments, consumer);
1710
+ /* Partitions that were assigned to a different consumer
1711
+ * last time */
1712
+ rd_kafka_topic_partition_list_t *prevPartitions =
1713
+ rd_kafka_topic_partition_list_new(
1714
+ (int)RD_MAP_CNT(prevAssignment));
1715
+ rd_bool_t reSort = rd_true;
1716
+
1717
+ /* From the partitions that had a different consumer before,
1718
+ * keep only those that are assigned to this consumer now. */
1719
+ for (i = 0; i < remainingPartitions->cnt; i++) {
1720
+ partition = &remainingPartitions->elems[i];
1721
+ if (RD_MAP_GET(prevAssignment, partition))
1722
+ rd_kafka_topic_partition_list_add(
1723
+ prevPartitions, partition->topic,
1724
+ partition->partition);
1725
+ }
1726
+
1727
+ if (prevPartitions->cnt > 0) {
1728
+ /* If there is a partition of this consumer that was
1729
+ * assigned to another consumer before, then mark
1730
+ * it as a good option for reassignment. */
1731
+ partition = &prevPartitions->elems[0];
1732
+
1733
+ rd_kafka_topic_partition_list_del(remainingPartitions,
1734
+ partition->topic,
1735
+ partition->partition);
1736
+
1737
+ rd_kafka_topic_partition_list_add(sortedPartitions,
1738
+ partition->topic,
1739
+ partition->partition);
1740
+
1741
+ rd_kafka_topic_partition_list_del_by_idx(prevPartitions,
1742
+ 0);
1743
+
1744
+ } else if (remainingPartitions->cnt > 0) {
1745
+ /* Otherwise mark any other one of the current
1746
+ * partitions as a reassignment candidate. */
1747
+ partition = &remainingPartitions->elems[0];
1748
+
1749
+ rd_kafka_topic_partition_list_add(sortedPartitions,
1750
+ partition->topic,
1751
+ partition->partition);
1752
+
1753
+ rd_kafka_topic_partition_list_del_by_idx(
1754
+ remainingPartitions, 0);
1755
+ } else {
1756
+ rd_list_remove_elem(&sortedConsumers,
1757
+ rd_list_cnt(&sortedConsumers) - 1);
1758
+ /* No need to re-sort the list (below) */
1759
+ reSort = rd_false;
1760
+ }
1761
+
1762
+ rd_kafka_topic_partition_list_destroy(prevPartitions);
1763
+
1764
+ if (reSort) {
1765
+ /* Re-sort the list to keep the consumer with the most
1766
+ * partitions at the end of the list.
1767
+ * This should be an O(N) operation given it is at most
1768
+ * a single shuffle. */
1769
+ rd_list_sort(&sortedConsumers,
1770
+ sort_by_map_elem_val_toppar_list_cnt);
1771
+ }
1772
+ }
1773
+
1774
+
1775
+ wasEmpty = !sortedPartitions->cnt;
1776
+
1777
+ RD_MAP_FOREACH(partition, consumers, partition2AllPotentialConsumers)
1778
+ rd_kafka_topic_partition_list_upsert(sortedPartitions, partition->topic,
1779
+ partition->partition);
1780
+
1781
+ /* If all partitions were added in the foreach loop just above
1782
+ * it means there is no order to retain from the sorderConsumer loop
1783
+ * below and we sort the partitions according to their topic+partition
1784
+ * to get consistent results (mainly in tests). */
1785
+ if (wasEmpty)
1786
+ rd_kafka_topic_partition_list_sort(sortedPartitions, NULL,
1787
+ NULL);
1788
+
1789
+ rd_list_destroy(&sortedConsumers);
1790
+ RD_MAP_DESTROY(&assignments);
1791
+
1792
+ return sortedPartitions;
1793
+ }
1794
+
1795
+
1796
+ /**
1797
+ * @brief Transfer currentAssignment to members array.
1798
+ */
1799
+ static void assignToMembers(map_str_toppar_list_t *currentAssignment,
1800
+ rd_kafka_group_member_t *members,
1801
+ size_t member_cnt) {
1802
+ size_t i;
1803
+
1804
+ for (i = 0; i < member_cnt; i++) {
1805
+ rd_kafka_group_member_t *rkgm = &members[i];
1806
+ const rd_kafka_topic_partition_list_t *partitions =
1807
+ RD_MAP_GET(currentAssignment, rkgm->rkgm_member_id->str);
1808
+ if (rkgm->rkgm_assignment)
1809
+ rd_kafka_topic_partition_list_destroy(
1810
+ rkgm->rkgm_assignment);
1811
+ rkgm->rkgm_assignment =
1812
+ rd_kafka_topic_partition_list_copy(partitions);
1813
+ }
1814
+ }
1815
+
1816
+
1817
/**
 * @brief KIP-54 and KIP-341/FIXME sticky assignor.
 *
 * This code is closely mimicking the AK Java AbstractStickyAssignor.assign().
 *
 * High-level flow:
 *  1. Rebuild current/previous assignments from each member's owned
 *     partitions (prepopulateCurrentAssignments()).
 *  2. Build the potential partition<->consumer maps per eligible topic
 *     (populatePotentialMaps()).
 *  3. Sort partitions to minimize movement (sortPartitions()).
 *  4. Prune assignments that are no longer valid: removed consumers,
 *     vanished partitions, subscription or rack mismatches.
 *  5. balance() the unassigned partitions across consumers.
 *  6. Copy the final currentAssignment into each member's rkgm_assignment
 *     (assignToMembers()).
 *
 * @returns RD_KAFKA_RESP_ERR_NO_ERROR (this implementation has no
 *          failure path; \p errstr is unused).
 */
rd_kafka_resp_err_t
rd_kafka_sticky_assignor_assign_cb(rd_kafka_t *rk,
                                   const rd_kafka_assignor_t *rkas,
                                   const char *member_id,
                                   const rd_kafka_metadata_t *metadata,
                                   rd_kafka_group_member_t *members,
                                   size_t member_cnt,
                                   rd_kafka_assignor_topic_t **eligible_topics,
                                   size_t eligible_topic_cnt,
                                   char *errstr,
                                   size_t errstr_size,
                                   void *opaque) {
        /* FIXME: Let the cgrp pass the actual eligible partition count */
        size_t partition_cnt = member_cnt * 10; /* FIXME: rough estimate used
                                                 * only as map/list pre-sizing
                                                 * hint. */
        const rd_kafka_metadata_internal_t *mdi =
            rd_kafka_metadata_get_internal(metadata);

        /* Rack info is NULL when rack-aware assignment is not applicable. */
        rd_kafka_rack_info_t *rkri =
            rd_kafka_rack_info_new(eligible_topics, eligible_topic_cnt, mdi);

        /* Map of subscriptions. This is \p member turned into a map. */
        map_str_toppar_list_t subscriptions =
            RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
                               NULL /* refs members.rkgm_member_id */,
                               NULL /* refs members.rkgm_subscription */);

        /* Map member to current assignment */
        map_str_toppar_list_t currentAssignment =
            RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
                               NULL /* refs members.rkgm_member_id */,
                               rd_kafka_topic_partition_list_destroy_free);

        /* Map partition to ConsumerGenerationPair */
        map_toppar_cgpair_t prevAssignment =
            RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp,
                               rd_kafka_topic_partition_hash,
                               rd_kafka_topic_partition_destroy_free,
                               ConsumerGenerationPair_destroy);

        /* Partition assignment movements between consumers */
        PartitionMovements_t partitionMovements;

        rd_bool_t isFreshAssignment;

        /* Mapping of all topic partitions to all consumers that can be
         * assigned to them.
         * Value is an rd_list_t* with elements referencing the \p members
         * \c rkgm_member_id->str. */
        map_toppar_list_t partition2AllPotentialConsumers = RD_MAP_INITIALIZER(
            partition_cnt, rd_kafka_topic_partition_cmp,
            rd_kafka_topic_partition_hash,
            rd_kafka_topic_partition_destroy_free, rd_list_destroy_free);

        /* Mapping of all consumers to all potential topic partitions that
         * can be assigned to them. */
        map_str_toppar_list_t consumer2AllPotentialPartitions =
            RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
                               NULL,
                               rd_kafka_topic_partition_list_destroy_free);

        /* Mapping of partition to current consumer. */
        map_toppar_str_t currentPartitionConsumer =
            RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp,
                               rd_kafka_topic_partition_hash,
                               rd_kafka_topic_partition_destroy_free,
                               NULL /* refs members.rkgm_member_id->str */);

        rd_kafka_topic_partition_list_t *sortedPartitions;
        rd_kafka_topic_partition_list_t *unassignedPartitions;
        rd_list_t sortedCurrentSubscriptions;

        rd_bool_t revocationRequired = rd_false;

        /* Iteration variables */
        const char *consumer;
        rd_kafka_topic_partition_list_t *partitions;
        const rd_map_elem_t *elem;
        int i;

        /* Initialize PartitionMovements */
        PartitionMovements_init(&partitionMovements, eligible_topic_cnt);

        /* Prepopulate current and previous assignments */
        prepopulateCurrentAssignments(
            rk, members, member_cnt, &subscriptions, &currentAssignment,
            &prevAssignment, &currentPartitionConsumer,
            &consumer2AllPotentialPartitions, partition_cnt);

        /* Fresh means no member reported any owned partitions. */
        isFreshAssignment = RD_MAP_IS_EMPTY(&currentAssignment);

        /* Populate partition2AllPotentialConsumers and
         * consumer2AllPotentialPartitions maps by each eligible topic. */
        for (i = 0; i < (int)eligible_topic_cnt; i++)
                populatePotentialMaps(
                    eligible_topics[i], &partition2AllPotentialConsumers,
                    &consumer2AllPotentialPartitions, partition_cnt);


        /* Sort valid partitions to minimize partition movements. */
        sortedPartitions = sortPartitions(
            rk, &currentAssignment, &prevAssignment, isFreshAssignment,
            &partition2AllPotentialConsumers, &consumer2AllPotentialPartitions);


        /* All partitions that need to be assigned (initially set to all
         * partitions but adjusted in the following loop) */
        unassignedPartitions =
            rd_kafka_topic_partition_list_copy(sortedPartitions);

        if (rkri)
                rd_kafka_dbg(rk, CGRP, "STICKY",
                             "Sticky assignor: using rack aware assignment.");

        RD_MAP_FOREACH(consumer, partitions, &currentAssignment) {
                if (!RD_MAP_GET(&subscriptions, consumer)) {
                        /* If a consumer that existed before
                         * (and had some partition assignments) is now removed,
                         * remove it from currentAssignment and its
                         * partitions from currentPartitionConsumer */

                        rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
                                     "Removing now non-existent consumer %s "
                                     "with %d previously assigned partitions",
                                     consumer, partitions->cnt);


                        for (i = 0; i < partitions->cnt; i++) {
                                const rd_kafka_topic_partition_t *partition =
                                    &partitions->elems[i];
                                RD_MAP_DELETE(&currentPartitionConsumer,
                                              partition);
                        }

                        /* FIXME: The delete could be optimized by passing the
                         *        underlying elem_t. */
                        RD_MAP_DELETE(&currentAssignment, consumer);

                } else {
                        /* Otherwise (the consumer still exists) */

                        for (i = 0; i < partitions->cnt; i++) {
                                const rd_kafka_topic_partition_t *partition =
                                    &partitions->elems[i];
                                rd_bool_t remove_part = rd_false;

                                if (!RD_MAP_GET(
                                        &partition2AllPotentialConsumers,
                                        partition)) {
                                        /* If this partition of this consumer
                                         * no longer exists remove it from
                                         * currentAssignment of the consumer */
                                        remove_part = rd_true;
                                        RD_MAP_DELETE(&currentPartitionConsumer,
                                                      partition);

                                } else if (!rd_kafka_topic_partition_list_find(
                                               RD_MAP_GET(&subscriptions,
                                                          consumer),
                                               partition->topic,
                                               RD_KAFKA_PARTITION_UA) ||
                                           rd_kafka_racks_mismatch(
                                               rkri, consumer, partition)) {
                                        /* If this partition cannot remain
                                         * assigned to its current consumer
                                         * because the consumer is no longer
                                         * subscribed to its topic, or racks
                                         * don't match for rack-aware
                                         * assignment, remove it from the
                                         * currentAssignment of the consumer. */
                                        remove_part = rd_true;
                                        revocationRequired = rd_true;
                                } else {
                                        /* Otherwise, remove the topic partition
                                         * from those that need to be assigned
                                         * only if its current consumer is still
                                         * subscribed to its topic (because it
                                         * is already assigned and we would want
                                         * to preserve that assignment as much
                                         * as possible). */
                                        rd_kafka_topic_partition_list_del(
                                            unassignedPartitions,
                                            partition->topic,
                                            partition->partition);
                                }

                                if (remove_part) {
                                        rd_kafka_topic_partition_list_del_by_idx(
                                            partitions, i);
                                        i--; /* Since the current element was
                                              * removed we need the next for
                                              * loop iteration to stay at the
                                              * same index. */
                                }
                        }
                }
        }


        /* At this point we have preserved all valid topic partition to consumer
         * assignments and removed all invalid topic partitions and invalid
         * consumers.
         * Now we need to assign unassignedPartitions to consumers so that the
         * topic partition assignments are as balanced as possible. */

        /* An ascending sorted list of consumers based on how many topic
         * partitions are already assigned to them. The list element is
         * referencing the rd_map_elem_t* from the currentAssignment map. */
        rd_list_init(&sortedCurrentSubscriptions,
                     (int)RD_MAP_CNT(&currentAssignment), NULL);

        RD_MAP_FOREACH_ELEM(elem, &currentAssignment.rmap)
        rd_list_add(&sortedCurrentSubscriptions, (void *)elem);

        rd_list_sort(&sortedCurrentSubscriptions,
                     sort_by_map_elem_val_toppar_list_cnt);

        /* Balance the available partitions across consumers */
        balance(rk, &partitionMovements, &currentAssignment, &prevAssignment,
                sortedPartitions, unassignedPartitions,
                &sortedCurrentSubscriptions, &consumer2AllPotentialPartitions,
                &partition2AllPotentialConsumers, &currentPartitionConsumer,
                revocationRequired, rkri);

        /* Transfer currentAssignment (now updated) to each member's
         * assignment. */
        assignToMembers(&currentAssignment, members, member_cnt);


        rd_list_destroy(&sortedCurrentSubscriptions);

        PartitionMovements_destroy(&partitionMovements);

        rd_kafka_topic_partition_list_destroy(unassignedPartitions);
        rd_kafka_topic_partition_list_destroy(sortedPartitions);
        rd_kafka_rack_info_destroy(rkri);

        RD_MAP_DESTROY(&currentPartitionConsumer);
        RD_MAP_DESTROY(&consumer2AllPotentialPartitions);
        RD_MAP_DESTROY(&partition2AllPotentialConsumers);
        RD_MAP_DESTROY(&prevAssignment);
        RD_MAP_DESTROY(&currentAssignment);
        RD_MAP_DESTROY(&subscriptions);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
2067
+
2068
+
2069
+
2070
+ /** @brief FIXME docstring */
2071
+ static void rd_kafka_sticky_assignor_on_assignment_cb(
2072
+ const rd_kafka_assignor_t *rkas,
2073
+ void **assignor_state,
2074
+ const rd_kafka_topic_partition_list_t *partitions,
2075
+ const rd_kafkap_bytes_t *assignment_userdata,
2076
+ const rd_kafka_consumer_group_metadata_t *rkcgm) {
2077
+ rd_kafka_sticky_assignor_state_t *state =
2078
+ (rd_kafka_sticky_assignor_state_t *)*assignor_state;
2079
+
2080
+ if (!state)
2081
+ state = rd_calloc(1, sizeof(*state));
2082
+ else
2083
+ rd_kafka_topic_partition_list_destroy(state->prev_assignment);
2084
+
2085
+ state->prev_assignment = rd_kafka_topic_partition_list_copy(partitions);
2086
+ state->generation_id = rkcgm->generation_id;
2087
+
2088
+ *assignor_state = state;
2089
+ }
2090
+
2091
/**
 * @brief Build the JoinGroup member metadata for the sticky assignor,
 *        embedding the previous assignment and generation id as UserData
 *        so the leader can honour stickiness across rebalances.
 *
 * If no assignor state exists yet (first join), the metadata is built with
 * NULL UserData and generation -1.
 *
 * @returns A newly allocated member metadata blob (caller frees).
 */
static rd_kafkap_bytes_t *rd_kafka_sticky_assignor_get_metadata(
    const rd_kafka_assignor_t *rkas,
    void *assignor_state,
    const rd_list_t *topics,
    const rd_kafka_topic_partition_list_t *owned_partitions,
    const rd_kafkap_str_t *rack_id) {
        rd_kafka_sticky_assignor_state_t *state;
        rd_kafka_buf_t *rkbuf;
        rd_kafkap_bytes_t *metadata;
        rd_kafkap_bytes_t *kbytes;
        size_t len;

        /*
         * UserData (Version: 1) => [previous_assignment] generation
         *   previous_assignment => topic [partitions]
         *     topic => STRING
         *     partitions => partition
         *       partition => INT32
         *   generation => INT32
         *
         * If there is no previous assignment, UserData is NULL.
         */


        if (!assignor_state) {
                return rd_kafka_consumer_protocol_member_metadata_new(
                    topics, NULL, 0, owned_partitions, -1 /* generation */,
                    rack_id);
        }

        state = (rd_kafka_sticky_assignor_state_t *)assignor_state;

        rkbuf = rd_kafka_buf_new(1, 100);
        /* State always carries an assignment once it exists (see
         * on_assignment_cb). */
        rd_assert(state->prev_assignment != NULL);
        const rd_kafka_topic_partition_field_t fields[] = {
            RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
            RD_KAFKA_TOPIC_PARTITION_FIELD_END};
        /* Serialize previous assignment per the UserData v1 layout above. */
        rd_kafka_buf_write_topic_partitions(
            rkbuf, state->prev_assignment, rd_false /*skip invalid offsets*/,
            rd_false /*any offset*/, rd_false /*don't use topic id*/,
            rd_true /*use topic name*/, fields);
        rd_kafka_buf_write_i32(rkbuf, state->generation_id);

        /* Get binary buffer and allocate a new Kafka Bytes with a copy. */
        rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
        len = rd_slice_remains(&rkbuf->rkbuf_reader);
        kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len);
        rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len);
        rd_kafka_buf_destroy(rkbuf);

        metadata = rd_kafka_consumer_protocol_member_metadata_new(
            topics, kbytes->data, kbytes->len, owned_partitions,
            state->generation_id, rack_id);

        rd_kafkap_bytes_destroy(kbytes);

        return metadata;
}
2150
+
2151
+
2152
+ /**
2153
+ * @brief Destroy assignor state
2154
+ */
2155
+ static void rd_kafka_sticky_assignor_state_destroy(void *assignor_state) {
2156
+ rd_kafka_sticky_assignor_state_t *state =
2157
+ (rd_kafka_sticky_assignor_state_t *)assignor_state;
2158
+
2159
+ rd_assert(assignor_state);
2160
+
2161
+ rd_kafka_topic_partition_list_destroy(state->prev_assignment);
2162
+ rd_free(state);
2163
+ }
2164
+
2165
+
2166
+
2167
+ /**
2168
+ * @name Sticky assignor unit tests
2169
+ *
2170
+ *
2171
+ * These are based on AbstractStickyAssignorTest.java
2172
+ *
2173
+ *
2174
+ *
2175
+ */
2176
+
2177
/* All possible racks used in tests, as well as several common rack configs used
 * by consumers */
static rd_kafkap_str_t
    *ALL_RACKS[7]; /* initialized before starting the unit tests. */
/* Per-consumer rack configurations: each element is an index into ALL_RACKS.
 * NOTE(review): index 6 presumably maps to a NULL/absent rack (cf. the
 * RACKS_NULL name) -- confirm where ALL_RACKS is initialized. */
static int RACKS_INITIAL[] = {0, 1, 2};
static int RACKS_NULL[] = {6, 6, 6};
static int RACKS_FINAL[] = {4, 5, 6};
static int RACKS_ONE_NULL[] = {6, 4, 5};
2185
+
2186
+ /* Helper to get consumer rack based on the index of the consumer. */
2187
+ static rd_kafkap_str_t *
2188
+ ut_get_consumer_rack(int idx,
2189
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2190
+ const int cycle_size =
2191
+ (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK
2192
+ ? RD_ARRAYSIZE(ALL_RACKS)
2193
+ : 3);
2194
+ return (ALL_RACKS[idx % cycle_size]);
2195
+ }
2196
+
2197
+ /* Helper to populate a member's owned partitions (accepted as variadic), and
2198
+ * generation. */
2199
+ static void
2200
+ ut_populate_member_owned_partitions_generation(rd_kafka_group_member_t *rkgm,
2201
+ int generation,
2202
+ size_t partition_cnt,
2203
+ ...) {
2204
+ va_list ap;
2205
+ size_t i;
2206
+
2207
+ if (rkgm->rkgm_owned)
2208
+ rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);
2209
+ rkgm->rkgm_owned = rd_kafka_topic_partition_list_new(partition_cnt);
2210
+
2211
+ va_start(ap, partition_cnt);
2212
+ for (i = 0; i < partition_cnt; i++) {
2213
+ char *topic = va_arg(ap, char *);
2214
+ int partition = va_arg(ap, int);
2215
+ rd_kafka_topic_partition_list_add(rkgm->rkgm_owned, topic,
2216
+ partition);
2217
+ }
2218
+ va_end(ap);
2219
+
2220
+ rkgm->rkgm_generation = generation;
2221
+ }
2222
+
2223
+ /* Helper to create topic partition list from a variadic list of topic,
2224
+ * partition pairs. */
2225
+ static rd_kafka_topic_partition_list_t **
2226
+ ut_create_topic_partition_lists(size_t list_cnt, ...) {
2227
+ va_list ap;
2228
+ size_t i;
2229
+ rd_kafka_topic_partition_list_t **lists =
2230
+ rd_calloc(list_cnt, sizeof(rd_kafka_topic_partition_list_t *));
2231
+
2232
+ va_start(ap, list_cnt);
2233
+ for (i = 0; i < list_cnt; i++) {
2234
+ const char *topic;
2235
+ lists[i] = rd_kafka_topic_partition_list_new(0);
2236
+ while ((topic = va_arg(ap, const char *))) {
2237
+ int partition = va_arg(ap, int);
2238
+ rd_kafka_topic_partition_list_add(lists[i], topic,
2239
+ partition);
2240
+ }
2241
+ }
2242
+ va_end(ap);
2243
+
2244
+ return lists;
2245
+ }
2246
+
2247
/* Unit test: a single consumer subscribed to "topic1" receives an empty
 * assignment when the cluster metadata contains no topics. */
static int
ut_testOneConsumerNoTopic(rd_kafka_t *rk,
                          const rd_kafka_assignor_t *rkas,
                          rd_kafka_assignor_ut_rack_config_t parametrization) {
        rd_kafka_resp_err_t err;
        char errstr[512];
        rd_kafka_metadata_t *metadata;
        rd_kafka_group_member_t members[1];

        /* Not applicable without broker racks. */
        if (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) {
                RD_UT_PASS();
        }


        /* Metadata with zero topics. */
        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
                                       0);

        ut_initMemberConditionalRack(&members[0], "consumer1",
                                     ut_get_consumer_rack(0, parametrization),
                                     parametrization, "topic1", NULL);

        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
                                    RD_ARRAYSIZE(members), errstr,
                                    sizeof(errstr));
        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);

        /* Expect no partitions assigned. */
        verifyAssignment(&members[0], NULL);
        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
        isFullyBalanced(members, RD_ARRAYSIZE(members));

        rd_kafka_group_member_clear(&members[0]);
        ut_destroy_metadata(metadata);

        RD_UT_PASS();
}
2283
+
2284
+
2285
/* Unit test: a single consumer subscribed to "topic1" receives an empty
 * assignment when the topic exists in metadata but has zero partitions. */
static int ut_testOneConsumerNonexistentTopic(
    rd_kafka_t *rk,
    const rd_kafka_assignor_t *rkas,
    rd_kafka_assignor_ut_rack_config_t parametrization) {
        rd_kafka_resp_err_t err;
        char errstr[512];
        rd_kafka_metadata_t *metadata;
        rd_kafka_group_member_t members[1];

        /* Not applicable without broker racks. */
        if (parametrization == RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK) {
                RD_UT_PASS();
        }

        /* "topic1" exists but with 0 partitions. */
        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
                                       1, "topic1", 0);

        ut_initMemberConditionalRack(&members[0], "consumer1",
                                     ut_get_consumer_rack(0, parametrization),
                                     parametrization, "topic1", NULL);

        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
                                    RD_ARRAYSIZE(members), errstr,
                                    sizeof(errstr));
        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);

        /* Expect no partitions assigned. */
        verifyAssignment(&members[0], NULL);
        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
        isFullyBalanced(members, RD_ARRAYSIZE(members));

        rd_kafka_group_member_clear(&members[0]);
        ut_destroy_metadata(metadata);

        RD_UT_PASS();
}
2320
+
2321
+
2322
+
2323
+ static int
2324
+ ut_testOneConsumerOneTopic(rd_kafka_t *rk,
2325
+ const rd_kafka_assignor_t *rkas,
2326
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2327
+ rd_kafka_resp_err_t err;
2328
+ char errstr[512];
2329
+ rd_kafka_metadata_t *metadata;
2330
+ rd_kafka_group_member_t members[1];
2331
+
2332
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2333
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2334
+ 1, "topic1", 3);
2335
+
2336
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2337
+ ut_get_consumer_rack(0, parametrization),
2338
+ parametrization, "topic1", NULL);
2339
+
2340
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2341
+ RD_ARRAYSIZE(members), errstr,
2342
+ sizeof(errstr));
2343
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2344
+
2345
+ RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 3,
2346
+ "expected assignment of 3 partitions, got %d partition(s)",
2347
+ members[0].rkgm_assignment->cnt);
2348
+
2349
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
2350
+ NULL);
2351
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2352
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2353
+
2354
+ rd_kafka_group_member_clear(&members[0]);
2355
+ ut_destroy_metadata(metadata);
2356
+
2357
+ RD_UT_PASS();
2358
+ }
2359
+
2360
+
2361
+ static int ut_testOnlyAssignsPartitionsFromSubscribedTopics(
2362
+ rd_kafka_t *rk,
2363
+ const rd_kafka_assignor_t *rkas,
2364
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2365
+ rd_kafka_resp_err_t err;
2366
+ char errstr[512];
2367
+ rd_kafka_metadata_t *metadata;
2368
+ rd_kafka_group_member_t members[1];
2369
+
2370
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2371
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2372
+ 2, "topic1", 3, "topic2", 3);
2373
+
2374
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2375
+ ut_get_consumer_rack(0, parametrization),
2376
+ parametrization, "topic1", NULL);
2377
+
2378
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2379
+ RD_ARRAYSIZE(members), errstr,
2380
+ sizeof(errstr));
2381
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2382
+
2383
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
2384
+ NULL);
2385
+
2386
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2387
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2388
+
2389
+ rd_kafka_group_member_clear(&members[0]);
2390
+ ut_destroy_metadata(metadata);
2391
+
2392
+ RD_UT_PASS();
2393
+ }
2394
+
2395
+
2396
+ static int ut_testOneConsumerMultipleTopics(
2397
+ rd_kafka_t *rk,
2398
+ const rd_kafka_assignor_t *rkas,
2399
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2400
+ rd_kafka_resp_err_t err;
2401
+ char errstr[512];
2402
+ rd_kafka_metadata_t *metadata;
2403
+ rd_kafka_group_member_t members[1];
2404
+
2405
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2406
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2407
+ 2, "topic1", 1, "topic2", 2);
2408
+
2409
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2410
+ ut_get_consumer_rack(0, parametrization),
2411
+ parametrization, "topic1", "topic2", NULL);
2412
+
2413
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2414
+ RD_ARRAYSIZE(members), errstr,
2415
+ sizeof(errstr));
2416
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2417
+
2418
+ verifyAssignment(&members[0], "topic1", 0, "topic2", 0, "topic2", 1,
2419
+ NULL);
2420
+
2421
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2422
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2423
+
2424
+ rd_kafka_group_member_clear(&members[0]);
2425
+ ut_destroy_metadata(metadata);
2426
+
2427
+ RD_UT_PASS();
2428
+ }
2429
+
2430
+ static int ut_testTwoConsumersOneTopicOnePartition(
2431
+ rd_kafka_t *rk,
2432
+ const rd_kafka_assignor_t *rkas,
2433
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2434
+ rd_kafka_resp_err_t err;
2435
+ char errstr[512];
2436
+ rd_kafka_metadata_t *metadata;
2437
+ rd_kafka_group_member_t members[2];
2438
+
2439
+
2440
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2441
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2442
+ 1, "topic1", 1);
2443
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2444
+ ut_get_consumer_rack(0, parametrization),
2445
+ parametrization, "topic1", NULL);
2446
+ ut_initMemberConditionalRack(&members[1], "consumer2",
2447
+ ut_get_consumer_rack(1, parametrization),
2448
+ parametrization, "topic1", NULL);
2449
+
2450
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2451
+ RD_ARRAYSIZE(members), errstr,
2452
+ sizeof(errstr));
2453
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2454
+
2455
+ verifyAssignment(&members[0], "topic1", 0, NULL);
2456
+ verifyAssignment(&members[1], NULL);
2457
+
2458
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2459
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2460
+
2461
+ rd_kafka_group_member_clear(&members[0]);
2462
+ rd_kafka_group_member_clear(&members[1]);
2463
+ ut_destroy_metadata(metadata);
2464
+
2465
+ RD_UT_PASS();
2466
+ }
2467
+
2468
+
2469
+ static int ut_testTwoConsumersOneTopicTwoPartitions(
2470
+ rd_kafka_t *rk,
2471
+ const rd_kafka_assignor_t *rkas,
2472
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2473
+ rd_kafka_resp_err_t err;
2474
+ char errstr[512];
2475
+ rd_kafka_metadata_t *metadata;
2476
+ rd_kafka_group_member_t members[2];
2477
+
2478
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2479
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2480
+ 1, "topic1", 2);
2481
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2482
+ ut_get_consumer_rack(0, parametrization),
2483
+ parametrization, "topic1", NULL);
2484
+ ut_initMemberConditionalRack(&members[1], "consumer2",
2485
+ ut_get_consumer_rack(1, parametrization),
2486
+ parametrization, "topic1", NULL);
2487
+
2488
+
2489
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2490
+ RD_ARRAYSIZE(members), errstr,
2491
+ sizeof(errstr));
2492
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2493
+
2494
+ verifyAssignment(&members[0], "topic1", 0, NULL);
2495
+ verifyAssignment(&members[1], "topic1", 1, NULL);
2496
+
2497
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2498
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2499
+
2500
+ rd_kafka_group_member_clear(&members[0]);
2501
+ rd_kafka_group_member_clear(&members[1]);
2502
+ ut_destroy_metadata(metadata);
2503
+
2504
+ RD_UT_PASS();
2505
+ }
2506
+
2507
+
2508
+ static int ut_testMultipleConsumersMixedTopicSubscriptions(
2509
+ rd_kafka_t *rk,
2510
+ const rd_kafka_assignor_t *rkas,
2511
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2512
+
2513
+ rd_kafka_resp_err_t err;
2514
+ char errstr[512];
2515
+ rd_kafka_metadata_t *metadata;
2516
+ rd_kafka_group_member_t members[3];
2517
+
2518
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2519
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2520
+ 2, "topic1", 3, "topic2", 2);
2521
+
2522
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2523
+ ut_get_consumer_rack(0, parametrization),
2524
+ parametrization, "topic1", NULL);
2525
+ ut_initMemberConditionalRack(&members[1], "consumer2",
2526
+ ut_get_consumer_rack(1, parametrization),
2527
+ parametrization, "topic1", "topic2", NULL);
2528
+ ut_initMemberConditionalRack(&members[2], "consumer3",
2529
+ ut_get_consumer_rack(2, parametrization),
2530
+ parametrization, "topic1", NULL);
2531
+
2532
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2533
+ RD_ARRAYSIZE(members), errstr,
2534
+ sizeof(errstr));
2535
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2536
+
2537
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL);
2538
+ verifyAssignment(&members[1], "topic2", 0, "topic2", 1, NULL);
2539
+ verifyAssignment(&members[2], "topic1", 1, NULL);
2540
+
2541
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2542
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2543
+
2544
+ rd_kafka_group_member_clear(&members[0]);
2545
+ rd_kafka_group_member_clear(&members[1]);
2546
+ rd_kafka_group_member_clear(&members[2]);
2547
+ ut_destroy_metadata(metadata);
2548
+
2549
+ RD_UT_PASS();
2550
+ }
2551
+
2552
+
2553
+ static int ut_testTwoConsumersTwoTopicsSixPartitions(
2554
+ rd_kafka_t *rk,
2555
+ const rd_kafka_assignor_t *rkas,
2556
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2557
+ rd_kafka_resp_err_t err;
2558
+ char errstr[512];
2559
+ rd_kafka_metadata_t *metadata;
2560
+ rd_kafka_group_member_t members[2];
2561
+
2562
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2563
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2564
+ 2, "topic1", 3, "topic2", 3);
2565
+
2566
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2567
+ ut_get_consumer_rack(0, parametrization),
2568
+ parametrization, "topic1", "topic2", NULL);
2569
+ ut_initMemberConditionalRack(&members[1], "consumer2",
2570
+ ut_get_consumer_rack(1, parametrization),
2571
+ parametrization, "topic1", "topic2", NULL);
2572
+
2573
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2574
+ RD_ARRAYSIZE(members), errstr,
2575
+ sizeof(errstr));
2576
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2577
+
2578
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
2579
+ NULL);
2580
+ verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2,
2581
+ NULL);
2582
+
2583
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2584
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2585
+
2586
+ rd_kafka_group_member_clear(&members[0]);
2587
+ rd_kafka_group_member_clear(&members[1]);
2588
+ ut_destroy_metadata(metadata);
2589
+
2590
+ RD_UT_PASS();
2591
+ }
2592
+
2593
+
2594
+ static int ut_testAddRemoveConsumerOneTopic(
2595
+ rd_kafka_t *rk,
2596
+ const rd_kafka_assignor_t *rkas,
2597
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2598
+ rd_kafka_resp_err_t err;
2599
+ char errstr[512];
2600
+ rd_kafka_metadata_t *metadata;
2601
+ rd_kafka_group_member_t members[2];
2602
+
2603
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2604
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2605
+ 1, "topic1", 3);
2606
+
2607
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2608
+ ut_get_consumer_rack(0, parametrization),
2609
+ parametrization, "topic1", NULL);
2610
+
2611
+
2612
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1,
2613
+ errstr, sizeof(errstr));
2614
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2615
+
2616
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
2617
+ NULL);
2618
+
2619
+ verifyValidityAndBalance(members, 1, metadata);
2620
+ isFullyBalanced(members, 1);
2621
+
2622
+ /* Add consumer2 */
2623
+ ut_initMemberConditionalRack(&members[1], "consumer2",
2624
+ ut_get_consumer_rack(1, parametrization),
2625
+ parametrization, "topic1", NULL);
2626
+
2627
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2628
+ RD_ARRAYSIZE(members), errstr,
2629
+ sizeof(errstr));
2630
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2631
+
2632
+ verifyAssignment(&members[0], "topic1", 1, "topic1", 2, NULL);
2633
+ verifyAssignment(&members[1], "topic1", 0, NULL);
2634
+
2635
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2636
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2637
+ // FIXME: isSticky();
2638
+
2639
+
2640
+ /* Remove consumer1 */
2641
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 1,
2642
+ errstr, sizeof(errstr));
2643
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2644
+
2645
+ verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2,
2646
+ NULL);
2647
+
2648
+ verifyValidityAndBalance(&members[1], 1, metadata);
2649
+ isFullyBalanced(&members[1], 1);
2650
+ // FIXME: isSticky();
2651
+
2652
+ rd_kafka_group_member_clear(&members[0]);
2653
+ rd_kafka_group_member_clear(&members[1]);
2654
+ ut_destroy_metadata(metadata);
2655
+
2656
+ RD_UT_PASS();
2657
+ }
2658
+
2659
+ /**
2660
+ * This unit test performs sticky assignment for a scenario that round robin
2661
+ * assignor handles poorly.
2662
+ * Topics (partitions per topic):
2663
+ * - topic1 (2), topic2 (1), topic3 (2), topic4 (1), topic5 (2)
2664
+ * Subscriptions:
2665
+ * - consumer1: topic1, topic2, topic3, topic4, topic5
2666
+ * - consumer2: topic1, topic3, topic5
2667
+ * - consumer3: topic1, topic3, topic5
2668
+ * - consumer4: topic1, topic2, topic3, topic4, topic5
2669
+ * Round Robin Assignment Result:
2670
+ * - consumer1: topic1-0, topic3-0, topic5-0
2671
+ * - consumer2: topic1-1, topic3-1, topic5-1
2672
+ * - consumer3:
2673
+ * - consumer4: topic2-0, topic4-0
2674
+ * Sticky Assignment Result:
2675
+ * - consumer1: topic2-0, topic3-0
2676
+ * - consumer2: topic1-0, topic3-1
2677
+ * - consumer3: topic1-1, topic5-0
2678
+ * - consumer4: topic4-0, topic5-1
2679
+ */
2680
+ static int ut_testPoorRoundRobinAssignmentScenario(
2681
+ rd_kafka_t *rk,
2682
+ const rd_kafka_assignor_t *rkas,
2683
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2684
+ rd_kafka_resp_err_t err;
2685
+ char errstr[512];
2686
+ rd_kafka_metadata_t *metadata;
2687
+ rd_kafka_group_member_t members[4];
2688
+
2689
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2690
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2691
+ 5, "topic1", 2, "topic2", 1, "topic3", 2,
2692
+ "topic4", 1, "topic5", 2);
2693
+
2694
+
2695
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2696
+ ut_get_consumer_rack(0, parametrization),
2697
+ parametrization, "topic1", "topic2",
2698
+ "topic3", "topic4", "topic5", NULL);
2699
+ ut_initMemberConditionalRack(
2700
+ &members[1], "consumer2", ut_get_consumer_rack(1, parametrization),
2701
+ parametrization, "topic1", "topic3", "topic5", NULL);
2702
+ ut_initMemberConditionalRack(
2703
+ &members[2], "consumer3", ut_get_consumer_rack(2, parametrization),
2704
+ parametrization, "topic1", "topic3", "topic5", NULL);
2705
+ ut_initMemberConditionalRack(&members[3], "consumer4",
2706
+ ut_get_consumer_rack(3, parametrization),
2707
+ parametrization, "topic1", "topic2",
2708
+ "topic3", "topic4", "topic5", NULL);
2709
+
2710
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2711
+ RD_ARRAYSIZE(members), errstr,
2712
+ sizeof(errstr));
2713
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2714
+
2715
+ verifyAssignment(&members[0], "topic2", 0, "topic3", 0, NULL);
2716
+ verifyAssignment(&members[1], "topic1", 0, "topic3", 1, NULL);
2717
+ verifyAssignment(&members[2], "topic1", 1, "topic5", 0, NULL);
2718
+ verifyAssignment(&members[3], "topic4", 0, "topic5", 1, NULL);
2719
+
2720
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2721
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2722
+
2723
+ rd_kafka_group_member_clear(&members[0]);
2724
+ rd_kafka_group_member_clear(&members[1]);
2725
+ rd_kafka_group_member_clear(&members[2]);
2726
+ rd_kafka_group_member_clear(&members[3]);
2727
+ ut_destroy_metadata(metadata);
2728
+
2729
+ RD_UT_PASS();
2730
+ }
2731
+
2732
+
2733
+
2734
+ static int ut_testAddRemoveTopicTwoConsumers(
2735
+ rd_kafka_t *rk,
2736
+ const rd_kafka_assignor_t *rkas,
2737
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2738
+ rd_kafka_resp_err_t err;
2739
+ char errstr[512];
2740
+ rd_kafka_metadata_t *metadata;
2741
+ rd_kafka_group_member_t members[2];
2742
+
2743
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2744
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2745
+ 1, "topic1", 3);
2746
+
2747
+ ut_initMemberConditionalRack(&members[0], "consumer1",
2748
+ ut_get_consumer_rack(0, parametrization),
2749
+ parametrization, "topic1", "topic2", NULL);
2750
+ ut_initMemberConditionalRack(&members[1], "consumer2",
2751
+ ut_get_consumer_rack(1, parametrization),
2752
+ parametrization, "topic1", "topic2", NULL);
2753
+
2754
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2755
+ RD_ARRAYSIZE(members), errstr,
2756
+ sizeof(errstr));
2757
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2758
+
2759
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL);
2760
+ verifyAssignment(&members[1], "topic1", 1, NULL);
2761
+
2762
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2763
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2764
+
2765
+ /*
2766
+ * Add topic2
2767
+ */
2768
+ RD_UT_SAY("Adding topic2");
2769
+ ut_destroy_metadata(metadata);
2770
+
2771
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2772
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2773
+ 2, "topic1", 3, "topic2", 3);
2774
+
2775
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2776
+ RD_ARRAYSIZE(members), errstr,
2777
+ sizeof(errstr));
2778
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2779
+
2780
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
2781
+ NULL);
2782
+ verifyAssignment(&members[1], "topic1", 1, "topic2", 2, "topic2", 0,
2783
+ NULL);
2784
+
2785
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2786
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2787
+ // FIXME: isSticky();
2788
+
2789
+
2790
+ /*
2791
+ * Remove topic1
2792
+ */
2793
+ RD_UT_SAY("Removing topic1");
2794
+ ut_destroy_metadata(metadata);
2795
+
2796
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2797
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2798
+ 1, "topic2", 3);
2799
+
2800
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2801
+ RD_ARRAYSIZE(members), errstr,
2802
+ sizeof(errstr));
2803
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2804
+
2805
+ verifyAssignment(&members[0], "topic2", 1, NULL);
2806
+ verifyAssignment(&members[1], "topic2", 0, "topic2", 2, NULL);
2807
+
2808
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
2809
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
2810
+ // FIXME: isSticky();
2811
+
2812
+ rd_kafka_group_member_clear(&members[0]);
2813
+ rd_kafka_group_member_clear(&members[1]);
2814
+ ut_destroy_metadata(metadata);
2815
+
2816
+ RD_UT_PASS();
2817
+ }
2818
+
2819
+
2820
+ static int ut_testReassignmentAfterOneConsumerLeaves(
2821
+ rd_kafka_t *rk,
2822
+ const rd_kafka_assignor_t *rkas,
2823
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2824
+ rd_kafka_resp_err_t err;
2825
+ char errstr[512];
2826
+ rd_kafka_metadata_t *metadata;
2827
+ rd_kafka_group_member_t members[19];
2828
+ int member_cnt = RD_ARRAYSIZE(members);
2829
+ rd_kafka_metadata_topic_t mt[19];
2830
+ int topic_cnt = RD_ARRAYSIZE(mt);
2831
+ int i;
2832
+
2833
+ for (i = 0; i < topic_cnt; i++) {
2834
+ char topic[10];
2835
+ rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
2836
+ rd_strdupa(&mt[i].topic, topic);
2837
+ mt[i].partition_cnt = i + 1;
2838
+ }
2839
+
2840
+ ut_initMetadataConditionalRack0(&metadata, 3, 3, ALL_RACKS,
2841
+ RD_ARRAYSIZE(ALL_RACKS),
2842
+ parametrization, mt, topic_cnt);
2843
+
2844
+ for (i = 1; i <= member_cnt; i++) {
2845
+ char name[20];
2846
+ rd_kafka_topic_partition_list_t *subscription =
2847
+ rd_kafka_topic_partition_list_new(i);
2848
+ int j;
2849
+ for (j = 1; j <= i; j++) {
2850
+ char topic[16];
2851
+ rd_snprintf(topic, sizeof(topic), "topic%d", j);
2852
+ rd_kafka_topic_partition_list_add(
2853
+ subscription, topic, RD_KAFKA_PARTITION_UA);
2854
+ }
2855
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
2856
+
2857
+ ut_initMemberConditionalRack(
2858
+ &members[i - 1], name,
2859
+ ut_get_consumer_rack(i, parametrization), parametrization,
2860
+ NULL);
2861
+
2862
+ rd_kafka_topic_partition_list_destroy(
2863
+ members[i - 1].rkgm_subscription);
2864
+ members[i - 1].rkgm_subscription = subscription;
2865
+ }
2866
+
2867
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2868
+ member_cnt, errstr, sizeof(errstr));
2869
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2870
+
2871
+ verifyValidityAndBalance(members, member_cnt, metadata);
2872
+
2873
+
2874
+ /*
2875
+ * Remove consumer10.
2876
+ */
2877
+ rd_kafka_group_member_clear(&members[9]);
2878
+ memmove(&members[9], &members[10],
2879
+ sizeof(*members) * (member_cnt - 10));
2880
+ member_cnt--;
2881
+
2882
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2883
+ member_cnt, errstr, sizeof(errstr));
2884
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2885
+
2886
+ verifyValidityAndBalance(members, member_cnt, metadata);
2887
+ // FIXME: isSticky();
2888
+
2889
+ for (i = 0; i < member_cnt; i++)
2890
+ rd_kafka_group_member_clear(&members[i]);
2891
+ ut_destroy_metadata(metadata);
2892
+
2893
+ RD_UT_PASS();
2894
+ }
2895
+
2896
+
2897
+ static int ut_testReassignmentAfterOneConsumerAdded(
2898
+ rd_kafka_t *rk,
2899
+ const rd_kafka_assignor_t *rkas,
2900
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2901
+ rd_kafka_resp_err_t err;
2902
+ char errstr[512];
2903
+ rd_kafka_metadata_t *metadata;
2904
+ rd_kafka_group_member_t members[9];
2905
+ int member_cnt = RD_ARRAYSIZE(members);
2906
+ int i;
2907
+
2908
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
2909
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
2910
+ 1, "topic1", 20);
2911
+
2912
+ for (i = 1; i <= member_cnt; i++) {
2913
+ char name[20];
2914
+ rd_kafka_topic_partition_list_t *subscription =
2915
+ rd_kafka_topic_partition_list_new(1);
2916
+ rd_kafka_topic_partition_list_add(subscription, "topic1",
2917
+ RD_KAFKA_PARTITION_UA);
2918
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
2919
+ ut_initMemberConditionalRack(
2920
+ &members[i - 1], name,
2921
+ ut_get_consumer_rack(i, parametrization), parametrization,
2922
+ NULL);
2923
+ rd_kafka_topic_partition_list_destroy(
2924
+ members[i - 1].rkgm_subscription);
2925
+ members[i - 1].rkgm_subscription = subscription;
2926
+ }
2927
+
2928
+ member_cnt--; /* Skip one consumer */
2929
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2930
+ member_cnt, errstr, sizeof(errstr));
2931
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2932
+
2933
+ verifyValidityAndBalance(members, member_cnt, metadata);
2934
+
2935
+
2936
+ /*
2937
+ * Add consumer.
2938
+ */
2939
+ member_cnt++;
2940
+
2941
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2942
+ member_cnt, errstr, sizeof(errstr));
2943
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
2944
+
2945
+ verifyValidityAndBalance(members, member_cnt, metadata);
2946
+ // FIXME: isSticky();
2947
+
2948
+ for (i = 0; i < member_cnt; i++)
2949
+ rd_kafka_group_member_clear(&members[i]);
2950
+ ut_destroy_metadata(metadata);
2951
+
2952
+ RD_UT_PASS();
2953
+ }
2954
+
2955
+
2956
+ static int
2957
+ ut_testSameSubscriptions(rd_kafka_t *rk,
2958
+ const rd_kafka_assignor_t *rkas,
2959
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
2960
+ rd_kafka_resp_err_t err;
2961
+ char errstr[512];
2962
+ rd_kafka_metadata_t *metadata;
2963
+ rd_kafka_group_member_t members[9];
2964
+ int member_cnt = RD_ARRAYSIZE(members);
2965
+ rd_kafka_metadata_topic_t mt[15];
2966
+ int topic_cnt = RD_ARRAYSIZE(mt);
2967
+ rd_kafka_topic_partition_list_t *subscription =
2968
+ rd_kafka_topic_partition_list_new(topic_cnt);
2969
+ int i;
2970
+
2971
+ for (i = 0; i < topic_cnt; i++) {
2972
+ char topic[10];
2973
+ rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
2974
+ rd_strdupa(&mt[i].topic, topic);
2975
+ mt[i].partition_cnt = i + 1;
2976
+ rd_kafka_topic_partition_list_add(subscription, topic,
2977
+ RD_KAFKA_PARTITION_UA);
2978
+ }
2979
+
2980
+ ut_initMetadataConditionalRack0(&metadata, 3, 3, ALL_RACKS,
2981
+ RD_ARRAYSIZE(ALL_RACKS),
2982
+ parametrization, mt, topic_cnt);
2983
+
2984
+ for (i = 1; i <= member_cnt; i++) {
2985
+ char name[16];
2986
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
2987
+ ut_initMemberConditionalRack(
2988
+ &members[i - 1], name,
2989
+ ut_get_consumer_rack(i, parametrization), parametrization,
2990
+ NULL);
2991
+ rd_kafka_topic_partition_list_destroy(
2992
+ members[i - 1].rkgm_subscription);
2993
+ members[i - 1].rkgm_subscription =
2994
+ rd_kafka_topic_partition_list_copy(subscription);
2995
+ }
2996
+
2997
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
2998
+ member_cnt, errstr, sizeof(errstr));
2999
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3000
+
3001
+ verifyValidityAndBalance(members, member_cnt, metadata);
3002
+
3003
+ /*
3004
+ * Remove consumer5
3005
+ */
3006
+ rd_kafka_group_member_clear(&members[5]);
3007
+ memmove(&members[5], &members[6], sizeof(*members) * (member_cnt - 6));
3008
+ member_cnt--;
3009
+
3010
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3011
+ member_cnt, errstr, sizeof(errstr));
3012
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3013
+
3014
+ verifyValidityAndBalance(members, member_cnt, metadata);
3015
+ // FIXME: isSticky();
3016
+
3017
+ for (i = 0; i < member_cnt; i++)
3018
+ rd_kafka_group_member_clear(&members[i]);
3019
+ ut_destroy_metadata(metadata);
3020
+ rd_kafka_topic_partition_list_destroy(subscription);
3021
+
3022
+ RD_UT_PASS();
3023
+ }
3024
+
3025
+
3026
+ static int ut_testLargeAssignmentWithMultipleConsumersLeaving(
3027
+ rd_kafka_t *rk,
3028
+ const rd_kafka_assignor_t *rkas,
3029
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3030
+ if (rd_unittest_with_valgrind)
3031
+ RD_UT_SKIP(
3032
+ "Skipping large assignment test when using Valgrind");
3033
+
3034
+ rd_kafka_resp_err_t err;
3035
+ char errstr[512];
3036
+ rd_kafka_metadata_t *metadata;
3037
+ rd_kafka_group_member_t members[200];
3038
+ int member_cnt = RD_ARRAYSIZE(members);
3039
+ rd_kafka_metadata_topic_t mt[40];
3040
+ int topic_cnt = RD_ARRAYSIZE(mt);
3041
+ int i;
3042
+
3043
+ for (i = 0; i < topic_cnt; i++) {
3044
+ char topic[10];
3045
+ rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
3046
+ rd_strdupa(&mt[i].topic, topic);
3047
+ mt[i].partition_cnt = i + 1;
3048
+ }
3049
+
3050
+ ut_initMetadataConditionalRack0(&metadata, 3, 3, ALL_RACKS,
3051
+ RD_ARRAYSIZE(ALL_RACKS),
3052
+ parametrization, mt, topic_cnt);
3053
+
3054
+ for (i = 0; i < member_cnt; i++) {
3055
+ /* Java tests use a random set, this is more deterministic. */
3056
+ int sub_cnt = ((i + 1) * 17) % topic_cnt;
3057
+ rd_kafka_topic_partition_list_t *subscription =
3058
+ rd_kafka_topic_partition_list_new(sub_cnt);
3059
+ char name[16];
3060
+ int j;
3061
+
3062
+ /* Subscribe to a subset of topics */
3063
+ for (j = 0; j < sub_cnt; j++)
3064
+ rd_kafka_topic_partition_list_add(
3065
+ subscription, metadata->topics[j].topic,
3066
+ RD_KAFKA_PARTITION_UA);
3067
+
3068
+ rd_snprintf(name, sizeof(name), "consumer%d", i + 1);
3069
+ ut_initMemberConditionalRack(
3070
+ &members[i], name, ut_get_consumer_rack(i, parametrization),
3071
+ parametrization, NULL);
3072
+
3073
+ rd_kafka_topic_partition_list_destroy(
3074
+ members[i].rkgm_subscription);
3075
+ members[i].rkgm_subscription = subscription;
3076
+ }
3077
+
3078
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3079
+ member_cnt, errstr, sizeof(errstr));
3080
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3081
+
3082
+ verifyValidityAndBalance(members, member_cnt, metadata);
3083
+
3084
+ /*
3085
+ * Remove every 4th consumer (~50)
3086
+ */
3087
+ for (i = member_cnt - 1; i >= 0; i -= 4) {
3088
+ rd_kafka_group_member_clear(&members[i]);
3089
+ memmove(&members[i], &members[i + 1],
3090
+ sizeof(*members) * (member_cnt - (i + 1)));
3091
+ member_cnt--;
3092
+ }
3093
+
3094
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3095
+ member_cnt, errstr, sizeof(errstr));
3096
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3097
+
3098
+ verifyValidityAndBalance(members, member_cnt, metadata);
3099
+ // FIXME: isSticky();
3100
+
3101
+ for (i = 0; i < member_cnt; i++)
3102
+ rd_kafka_group_member_clear(&members[i]);
3103
+ ut_destroy_metadata(metadata);
3104
+
3105
+ RD_UT_PASS();
3106
+ }
3107
+
3108
+
3109
+ static int
3110
+ ut_testNewSubscription(rd_kafka_t *rk,
3111
+ const rd_kafka_assignor_t *rkas,
3112
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3113
+ rd_kafka_resp_err_t err;
3114
+ char errstr[512];
3115
+ rd_kafka_metadata_t *metadata;
3116
+ rd_kafka_group_member_t members[3];
3117
+ int member_cnt = RD_ARRAYSIZE(members);
3118
+ int i;
3119
+
3120
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3121
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3122
+ 5, "topic1", 1, "topic2", 2, "topic3", 3,
3123
+ "topic4", 4, "topic5", 5);
3124
+
3125
+ for (i = 0; i < member_cnt; i++) {
3126
+ char name[16];
3127
+ int j;
3128
+
3129
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
3130
+ ut_initMemberConditionalRack(
3131
+ &members[i], name, ut_get_consumer_rack(i, parametrization),
3132
+ parametrization, NULL);
3133
+
3134
+ rd_kafka_topic_partition_list_destroy(
3135
+ members[i].rkgm_subscription);
3136
+ members[i].rkgm_subscription =
3137
+ rd_kafka_topic_partition_list_new(5);
3138
+
3139
+ for (j = metadata->topic_cnt - (1 + i); j >= 0; j--)
3140
+ rd_kafka_topic_partition_list_add(
3141
+ members[i].rkgm_subscription,
3142
+ metadata->topics[j].topic, RD_KAFKA_PARTITION_UA);
3143
+ }
3144
+
3145
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3146
+ RD_ARRAYSIZE(members), errstr,
3147
+ sizeof(errstr));
3148
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3149
+
3150
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3151
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
3152
+
3153
+ /*
3154
+ * Add topic1 to consumer1's subscription
3155
+ */
3156
+ RD_UT_SAY("Adding topic1 to consumer1");
3157
+ rd_kafka_topic_partition_list_add(members[0].rkgm_subscription,
3158
+ "topic1", RD_KAFKA_PARTITION_UA);
3159
+
3160
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3161
+ RD_ARRAYSIZE(members), errstr,
3162
+ sizeof(errstr));
3163
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3164
+
3165
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3166
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
3167
+ // FIXME: isSticky();
3168
+
3169
+ for (i = 0; i < member_cnt; i++)
3170
+ rd_kafka_group_member_clear(&members[i]);
3171
+ ut_destroy_metadata(metadata);
3172
+
3173
+ RD_UT_PASS();
3174
+ }
3175
+
3176
+
3177
+ static int ut_testMoveExistingAssignments(
3178
+ rd_kafka_t *rk,
3179
+ const rd_kafka_assignor_t *rkas,
3180
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3181
+ rd_kafka_resp_err_t err;
3182
+ char errstr[512];
3183
+ rd_kafka_metadata_t *metadata;
3184
+ rd_kafka_group_member_t members[4];
3185
+ int member_cnt = RD_ARRAYSIZE(members);
3186
+ rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT;
3187
+ int i;
3188
+ int fails = 0;
3189
+
3190
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3191
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3192
+ 1, "topic1", 3);
3193
+
3194
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3195
+ ut_get_consumer_rack(0, parametrization),
3196
+ parametrization, "topic1", NULL);
3197
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3198
+ ut_get_consumer_rack(1, parametrization),
3199
+ parametrization, "topic1", NULL);
3200
+ ut_initMemberConditionalRack(&members[2], "consumer3",
3201
+ ut_get_consumer_rack(2, parametrization),
3202
+ parametrization, "topic1", NULL);
3203
+ ut_initMemberConditionalRack(&members[3], "consumer4",
3204
+ ut_get_consumer_rack(3, parametrization),
3205
+ parametrization, "topic1", NULL);
3206
+
3207
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3208
+ member_cnt, errstr, sizeof(errstr));
3209
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3210
+
3211
+ verifyValidityAndBalance(members, member_cnt, metadata);
3212
+
3213
+ for (i = 0; i < member_cnt; i++) {
3214
+ if (members[i].rkgm_assignment->cnt > 1) {
3215
+ RD_UT_WARN("%s assigned %d partitions, expected <= 1",
3216
+ members[i].rkgm_member_id->str,
3217
+ members[i].rkgm_assignment->cnt);
3218
+ fails++;
3219
+ } else if (members[i].rkgm_assignment->cnt == 1) {
3220
+ assignments[i] = rd_kafka_topic_partition_list_copy(
3221
+ members[i].rkgm_assignment);
3222
+ }
3223
+ }
3224
+
3225
+ /*
3226
+ * Remove potential group leader consumer1
3227
+ */
3228
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1],
3229
+ member_cnt - 1, errstr, sizeof(errstr));
3230
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3231
+
3232
+ verifyValidityAndBalance(&members[1], member_cnt - 1, metadata);
3233
+ // FIXME: isSticky()
3234
+
3235
+ for (i = 1; i < member_cnt; i++) {
3236
+ if (members[i].rkgm_assignment->cnt != 1) {
3237
+ RD_UT_WARN("%s assigned %d partitions, expected 1",
3238
+ members[i].rkgm_member_id->str,
3239
+ members[i].rkgm_assignment->cnt);
3240
+ fails++;
3241
+ } else if (assignments[i] &&
3242
+ !rd_kafka_topic_partition_list_find(
3243
+ assignments[i],
3244
+ members[i].rkgm_assignment->elems[0].topic,
3245
+ members[i]
3246
+ .rkgm_assignment->elems[0]
3247
+ .partition)) {
3248
+ RD_UT_WARN(
3249
+ "Stickiness was not honored for %s, "
3250
+ "%s [%" PRId32 "] not in previous assignment",
3251
+ members[i].rkgm_member_id->str,
3252
+ members[i].rkgm_assignment->elems[0].topic,
3253
+ members[i].rkgm_assignment->elems[0].partition);
3254
+ fails++;
3255
+ }
3256
+ }
3257
+
3258
+ RD_UT_ASSERT(!fails, "See previous errors");
3259
+
3260
+
3261
+ for (i = 0; i < member_cnt; i++) {
3262
+ rd_kafka_group_member_clear(&members[i]);
3263
+ if (assignments[i])
3264
+ rd_kafka_topic_partition_list_destroy(assignments[i]);
3265
+ }
3266
+ ut_destroy_metadata(metadata);
3267
+
3268
+ RD_UT_PASS();
3269
+ }
3270
+
3271
+
3272
+ /* The original version of this test diverged from the Java implementation in
3273
+ * what it was testing. It's not certain whether it was by mistake, or by
3274
+ * design, but the new version matches the Java implementation, and the old one
3275
+ * is retained as well, since it provides extra coverage.
3276
+ */
3277
+ static int ut_testMoveExistingAssignments_j(
3278
+ rd_kafka_t *rk,
3279
+ const rd_kafka_assignor_t *rkas,
3280
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3281
+ rd_kafka_resp_err_t err;
3282
+ char errstr[512];
3283
+ rd_kafka_metadata_t *metadata;
3284
+ rd_kafka_group_member_t members[3];
3285
+ int member_cnt = RD_ARRAYSIZE(members);
3286
+ rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT;
3287
+ int i;
3288
+
3289
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3290
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3291
+ 6, "topic1", 1, "topic2", 1, "topic3", 1,
3292
+ "topic4", 1, "topic5", 1, "topic6", 1);
3293
+
3294
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3295
+ ut_get_consumer_rack(0, parametrization),
3296
+ parametrization, "topic1", "topic2", NULL);
3297
+ ut_populate_member_owned_partitions_generation(
3298
+ &members[0], 1 /* generation */, 1, "topic1", 0);
3299
+
3300
+ ut_initMemberConditionalRack(
3301
+ &members[1], "consumer2", ut_get_consumer_rack(1, parametrization),
3302
+ parametrization, "topic1", "topic2", "topic3", "topic4", NULL);
3303
+ ut_populate_member_owned_partitions_generation(
3304
+ &members[1], 1 /* generation */, 2, "topic2", 0, "topic3", 0);
3305
+
3306
+ ut_initMemberConditionalRack(&members[2], "consumer3",
3307
+ ut_get_consumer_rack(2, parametrization),
3308
+ parametrization, "topic2", "topic3",
3309
+ "topic4", "topic5", "topic6", NULL);
3310
+ ut_populate_member_owned_partitions_generation(
3311
+ &members[2], 1 /* generation */, 3, "topic4", 0, "topic5", 0,
3312
+ "topic6", 0);
3313
+
3314
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3315
+ member_cnt, errstr, sizeof(errstr));
3316
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3317
+
3318
+ verifyValidityAndBalance(members, member_cnt, metadata);
3319
+
3320
+ for (i = 0; i < member_cnt; i++) {
3321
+ rd_kafka_group_member_clear(&members[i]);
3322
+ if (assignments[i])
3323
+ rd_kafka_topic_partition_list_destroy(assignments[i]);
3324
+ }
3325
+ ut_destroy_metadata(metadata);
3326
+
3327
+ RD_UT_PASS();
3328
+ }
3329
+
3330
+
3331
+ static int
3332
+ ut_testStickiness(rd_kafka_t *rk,
3333
+ const rd_kafka_assignor_t *rkas,
3334
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3335
+ rd_kafka_resp_err_t err;
3336
+ char errstr[512];
3337
+ rd_kafka_metadata_t *metadata;
3338
+ rd_kafka_group_member_t members[3];
3339
+ int member_cnt = RD_ARRAYSIZE(members);
3340
+ int i;
3341
+
3342
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3343
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3344
+ 6, "topic1", 1, "topic2", 1, "topic3", 1,
3345
+ "topic4", 1, "topic5", 1, "topic6", 1);
3346
+
3347
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3348
+ ut_get_consumer_rack(0, parametrization),
3349
+ parametrization, "topic1", "topic2", NULL);
3350
+ rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment);
3351
+ members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(1);
3352
+ rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
3353
+ 0);
3354
+
3355
+ ut_initMemberConditionalRack(
3356
+ &members[1], "consumer2", ut_get_consumer_rack(1, parametrization),
3357
+ parametrization, "topic1", "topic2", "topic3", "topic4", NULL);
3358
+ rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment);
3359
+ members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
3360
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic2",
3361
+ 0);
3362
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic3",
3363
+ 0);
3364
+
3365
+ ut_initMemberConditionalRack(
3366
+ &members[2], "consumer3", ut_get_consumer_rack(1, parametrization),
3367
+ parametrization, "topic4", "topic5", "topic6", NULL);
3368
+ rd_kafka_topic_partition_list_destroy(members[2].rkgm_assignment);
3369
+ members[2].rkgm_assignment = rd_kafka_topic_partition_list_new(3);
3370
+ rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic4",
3371
+ 0);
3372
+ rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic5",
3373
+ 0);
3374
+ rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic6",
3375
+ 0);
3376
+
3377
+
3378
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3379
+ member_cnt, errstr, sizeof(errstr));
3380
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3381
+
3382
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3383
+
3384
+ for (i = 0; i < member_cnt; i++)
3385
+ rd_kafka_group_member_clear(&members[i]);
3386
+ ut_destroy_metadata(metadata);
3387
+
3388
+ RD_UT_PASS();
3389
+ }
3390
+
3391
+
3392
+ /* The original version of this test diverged from the Java implementation in
3393
+ * what it was testing. It's not certain whether it was by mistake, or by
3394
+ * design, but the new version matches the Java implementation, and the old one
3395
+ * is retained as well, for extra coverage.
3396
+ */
3397
+ static int
3398
+ ut_testStickiness_j(rd_kafka_t *rk,
3399
+ const rd_kafka_assignor_t *rkas,
3400
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3401
+ rd_kafka_resp_err_t err;
3402
+ char errstr[512];
3403
+ rd_kafka_metadata_t *metadata;
3404
+ rd_kafka_group_member_t members[4];
3405
+ int member_cnt = RD_ARRAYSIZE(members);
3406
+ int i;
3407
+ rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT;
3408
+ int fails = 0;
3409
+
3410
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3411
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3412
+ 1, "topic1", 3);
3413
+
3414
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3415
+ ut_get_consumer_rack(0, parametrization),
3416
+ parametrization, "topic1", NULL);
3417
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3418
+ ut_get_consumer_rack(1, parametrization),
3419
+ parametrization, "topic1", NULL);
3420
+ ut_initMemberConditionalRack(&members[2], "consumer3",
3421
+ ut_get_consumer_rack(2, parametrization),
3422
+ parametrization, "topic1", NULL);
3423
+ ut_initMemberConditionalRack(&members[3], "consumer4",
3424
+ ut_get_consumer_rack(3, parametrization),
3425
+ parametrization, "topic1", NULL);
3426
+
3427
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3428
+ member_cnt, errstr, sizeof(errstr));
3429
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3430
+
3431
+ verifyValidityAndBalance(members, member_cnt, metadata);
3432
+
3433
+ for (i = 0; i < member_cnt; i++) {
3434
+ if (members[i].rkgm_assignment->cnt > 1) {
3435
+ RD_UT_WARN("%s assigned %d partitions, expected <= 1",
3436
+ members[i].rkgm_member_id->str,
3437
+ members[i].rkgm_assignment->cnt);
3438
+ fails++;
3439
+ } else if (members[i].rkgm_assignment->cnt == 1) {
3440
+ assignments[i] = rd_kafka_topic_partition_list_copy(
3441
+ members[i].rkgm_assignment);
3442
+ }
3443
+ }
3444
+
3445
+ /*
3446
+ * Remove potential group leader consumer1, by starting members at
3447
+ * index 1.
3448
+ * Owned partitions of the members are already set to the assignment by
3449
+ * verifyValidityAndBalance above to simulate the fact that the assignor
3450
+ * has already run once.
3451
+ */
3452
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1],
3453
+ member_cnt - 1, errstr, sizeof(errstr));
3454
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3455
+
3456
+ verifyValidityAndBalance(&members[1], member_cnt - 1, metadata);
3457
+ // FIXME: isSticky()
3458
+
3459
+ for (i = 1; i < member_cnt; i++) {
3460
+ if (members[i].rkgm_assignment->cnt != 1) {
3461
+ RD_UT_WARN("%s assigned %d partitions, expected 1",
3462
+ members[i].rkgm_member_id->str,
3463
+ members[i].rkgm_assignment->cnt);
3464
+ fails++;
3465
+ } else if (assignments[i] &&
3466
+ !rd_kafka_topic_partition_list_find(
3467
+ assignments[i],
3468
+ members[i].rkgm_assignment->elems[0].topic,
3469
+ members[i]
3470
+ .rkgm_assignment->elems[0]
3471
+ .partition)) {
3472
+ RD_UT_WARN(
3473
+ "Stickiness was not honored for %s, "
3474
+ "%s [%" PRId32 "] not in previous assignment",
3475
+ members[i].rkgm_member_id->str,
3476
+ members[i].rkgm_assignment->elems[0].topic,
3477
+ members[i].rkgm_assignment->elems[0].partition);
3478
+ fails++;
3479
+ }
3480
+ }
3481
+
3482
+ RD_UT_ASSERT(!fails, "See previous errors");
3483
+
3484
+
3485
+ for (i = 0; i < member_cnt; i++) {
3486
+ rd_kafka_group_member_clear(&members[i]);
3487
+ if (assignments[i])
3488
+ rd_kafka_topic_partition_list_destroy(assignments[i]);
3489
+ }
3490
+ ut_destroy_metadata(metadata);
3491
+
3492
+ RD_UT_PASS();
3493
+ }
3494
+
3495
+
3496
+ /**
3497
+ * @brief Verify stickiness across three rebalances.
3498
+ */
3499
+ static int
3500
+ ut_testStickiness2(rd_kafka_t *rk,
3501
+ const rd_kafka_assignor_t *rkas,
3502
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3503
+ rd_kafka_resp_err_t err;
3504
+ char errstr[512];
3505
+ rd_kafka_metadata_t *metadata;
3506
+ rd_kafka_group_member_t members[3];
3507
+ int member_cnt = RD_ARRAYSIZE(members);
3508
+ int i;
3509
+
3510
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3511
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3512
+ 1, "topic1", 6);
3513
+
3514
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3515
+ ut_get_consumer_rack(0, parametrization),
3516
+ parametrization, "topic1", NULL);
3517
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3518
+ ut_get_consumer_rack(1, parametrization),
3519
+ parametrization, "topic1", NULL);
3520
+ ut_initMemberConditionalRack(&members[2], "consumer3",
3521
+ ut_get_consumer_rack(2, parametrization),
3522
+ parametrization, "topic1", NULL);
3523
+
3524
+ /* Just consumer1 */
3525
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1,
3526
+ errstr, sizeof(errstr));
3527
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3528
+
3529
+ verifyValidityAndBalance(members, 1, metadata);
3530
+ isFullyBalanced(members, 1);
3531
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
3532
+ "topic1", 3, "topic1", 4, "topic1", 5, NULL);
3533
+
3534
+ /* consumer1 and consumer2 */
3535
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 2,
3536
+ errstr, sizeof(errstr));
3537
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3538
+
3539
+ verifyValidityAndBalance(members, 2, metadata);
3540
+ isFullyBalanced(members, 2);
3541
+ verifyAssignment(&members[0], "topic1", 3, "topic1", 4, "topic1", 5,
3542
+ NULL);
3543
+ verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2,
3544
+ NULL);
3545
+
3546
+ /* Run it twice, should be stable. */
3547
+ for (i = 0; i < 2; i++) {
3548
+ /* consumer1, consumer2, and consumer3 */
3549
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata,
3550
+ members, 3, errstr, sizeof(errstr));
3551
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3552
+
3553
+ verifyValidityAndBalance(members, 3, metadata);
3554
+ isFullyBalanced(members, 3);
3555
+ verifyAssignment(&members[0], "topic1", 4, "topic1", 5, NULL);
3556
+ verifyAssignment(&members[1], "topic1", 1, "topic1", 2, NULL);
3557
+ verifyAssignment(&members[2], "topic1", 0, "topic1", 3, NULL);
3558
+ }
3559
+
3560
+ /* Remove consumer1 */
3561
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 2,
3562
+ errstr, sizeof(errstr));
3563
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3564
+
3565
+ verifyValidityAndBalance(&members[1], 2, metadata);
3566
+ isFullyBalanced(&members[1], 2);
3567
+ verifyAssignment(&members[1], "topic1", 1, "topic1", 2, "topic1", 5,
3568
+ NULL);
3569
+ verifyAssignment(&members[2], "topic1", 0, "topic1", 3, "topic1", 4,
3570
+ NULL);
3571
+
3572
+ /* Remove consumer2 */
3573
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[2], 1,
3574
+ errstr, sizeof(errstr));
3575
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3576
+
3577
+ verifyValidityAndBalance(&members[2], 1, metadata);
3578
+ isFullyBalanced(&members[2], 1);
3579
+ verifyAssignment(&members[2], "topic1", 0, "topic1", 1, "topic1", 2,
3580
+ "topic1", 3, "topic1", 4, "topic1", 5, NULL);
3581
+
3582
+ for (i = 0; i < member_cnt; i++)
3583
+ rd_kafka_group_member_clear(&members[i]);
3584
+ ut_destroy_metadata(metadata);
3585
+
3586
+ RD_UT_PASS();
3587
+ }
3588
+
3589
+
3590
+ static int ut_testAssignmentUpdatedForDeletedTopic(
3591
+ rd_kafka_t *rk,
3592
+ const rd_kafka_assignor_t *rkas,
3593
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3594
+ rd_kafka_resp_err_t err;
3595
+ char errstr[512];
3596
+ rd_kafka_metadata_t *metadata;
3597
+ rd_kafka_group_member_t members[1];
3598
+
3599
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3600
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3601
+ 2, "topic1", 1, "topic3", 100);
3602
+
3603
+ ut_initMemberConditionalRack(
3604
+ &members[0], "consumer1", ut_get_consumer_rack(0, parametrization),
3605
+ parametrization, "topic1", "topic2", "topic3", NULL);
3606
+
3607
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3608
+ RD_ARRAYSIZE(members), errstr,
3609
+ sizeof(errstr));
3610
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3611
+
3612
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3613
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
3614
+
3615
+ RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 + 100,
3616
+ "Expected %d assigned partitions, not %d", 1 + 100,
3617
+ members[0].rkgm_assignment->cnt);
3618
+
3619
+ rd_kafka_group_member_clear(&members[0]);
3620
+ ut_destroy_metadata(metadata);
3621
+
3622
+ RD_UT_PASS();
3623
+ }
3624
+
3625
+
3626
+ static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted(
3627
+ rd_kafka_t *rk,
3628
+ const rd_kafka_assignor_t *rkas,
3629
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3630
+ rd_kafka_resp_err_t err;
3631
+ char errstr[512];
3632
+ rd_kafka_metadata_t *metadata;
3633
+ rd_kafka_group_member_t members[1];
3634
+
3635
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3636
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3637
+ 1, "topic1", 3);
3638
+
3639
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3640
+ ut_get_consumer_rack(0, parametrization),
3641
+ parametrization, "topic1", NULL);
3642
+
3643
+
3644
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3645
+ RD_ARRAYSIZE(members), errstr,
3646
+ sizeof(errstr));
3647
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3648
+
3649
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3650
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
3651
+
3652
+ /*
3653
+ * Remove topic
3654
+ */
3655
+ ut_destroy_metadata(metadata);
3656
+ metadata = rd_kafka_metadata_new_topic_mock(NULL, 0, -1, 0);
3657
+
3658
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3659
+ RD_ARRAYSIZE(members), errstr,
3660
+ sizeof(errstr));
3661
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3662
+
3663
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3664
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
3665
+
3666
+ rd_kafka_group_member_clear(&members[0]);
3667
+ ut_destroy_metadata(metadata);
3668
+
3669
+ RD_UT_PASS();
3670
+ }
3671
+
3672
+
3673
+ static int ut_testConflictingPreviousAssignments(
3674
+ rd_kafka_t *rk,
3675
+ const rd_kafka_assignor_t *rkas,
3676
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3677
+ rd_kafka_resp_err_t err;
3678
+ char errstr[512];
3679
+ rd_kafka_metadata_t *metadata;
3680
+ rd_kafka_group_member_t members[2];
3681
+ int member_cnt = RD_ARRAYSIZE(members);
3682
+ int i;
3683
+
3684
+ // FIXME: removed from Java test suite, and fails for us, why, why?
3685
+ // NOTE: rack-awareness changes aren't made to this test because of
3686
+ // the FIXME above.
3687
+ RD_UT_PASS();
3688
+
3689
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 2);
3690
+
3691
+ /* Both consumer and consumer2 have both partitions assigned */
3692
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
3693
+ rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment);
3694
+ members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
3695
+ rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
3696
+ 0);
3697
+ rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
3698
+ 1);
3699
+
3700
+ ut_init_member(&members[1], "consumer2", "topic1", NULL);
3701
+ rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment);
3702
+ members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
3703
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
3704
+ 0);
3705
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
3706
+ 1);
3707
+
3708
+
3709
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3710
+ member_cnt, errstr, sizeof(errstr));
3711
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3712
+
3713
+ RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 &&
3714
+ members[1].rkgm_assignment->cnt == 1,
3715
+ "Expected consumers to have 1 partition each, "
3716
+ "not %d and %d",
3717
+ members[0].rkgm_assignment->cnt,
3718
+ members[1].rkgm_assignment->cnt);
3719
+ RD_UT_ASSERT(members[0].rkgm_assignment->elems[0].partition !=
3720
+ members[1].rkgm_assignment->elems[0].partition,
3721
+ "Expected consumers to have different partitions "
3722
+ "assigned, not same partition %" PRId32,
3723
+ members[0].rkgm_assignment->elems[0].partition);
3724
+
3725
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3726
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
3727
+ /* FIXME: isSticky() */
3728
+
3729
+ for (i = 0; i < member_cnt; i++)
3730
+ rd_kafka_group_member_clear(&members[i]);
3731
+ ut_destroy_metadata(metadata);
3732
+
3733
+ RD_UT_PASS();
3734
+ }
3735
+
3736
+ /* testReassignmentWithRandomSubscriptionsAndChanges is not ported
3737
+ * from Java since random tests don't provide meaningful test coverage. */
3738
+
3739
+
3740
+ static int ut_testAllConsumersReachExpectedQuotaAndAreConsideredFilled(
3741
+ rd_kafka_t *rk,
3742
+ const rd_kafka_assignor_t *rkas,
3743
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3744
+ rd_kafka_resp_err_t err;
3745
+ char errstr[512];
3746
+ rd_kafka_metadata_t *metadata;
3747
+ rd_kafka_group_member_t members[3];
3748
+ int member_cnt = RD_ARRAYSIZE(members);
3749
+ int i;
3750
+
3751
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3752
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3753
+ 1, "topic1", 4);
3754
+
3755
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3756
+ ut_get_consumer_rack(0, parametrization),
3757
+ parametrization, "topic1", NULL);
3758
+ ut_populate_member_owned_partitions_generation(
3759
+ &members[0], 1 /* generation */, 2, "topic1", 0, "topic1", 1);
3760
+
3761
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3762
+ ut_get_consumer_rack(1, parametrization),
3763
+ parametrization, "topic1", NULL);
3764
+ ut_populate_member_owned_partitions_generation(
3765
+ &members[1], 1 /* generation */, 1, "topic1", 2);
3766
+
3767
+ ut_initMemberConditionalRack(&members[2], "consumer3",
3768
+ ut_get_consumer_rack(2, parametrization),
3769
+ parametrization, "topic1", NULL);
3770
+
3771
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3772
+ member_cnt, errstr, sizeof(errstr));
3773
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3774
+
3775
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3776
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, NULL);
3777
+ verifyAssignment(&members[1], "topic1", 2, NULL);
3778
+ verifyAssignment(&members[2], "topic1", 3, NULL);
3779
+
3780
+ for (i = 0; i < member_cnt; i++)
3781
+ rd_kafka_group_member_clear(&members[i]);
3782
+ ut_destroy_metadata(metadata);
3783
+
3784
+ RD_UT_PASS();
3785
+ }
3786
+
3787
+
3788
+ static int ut_testOwnedPartitionsAreInvalidatedForConsumerWithStaleGeneration(
3789
+ rd_kafka_t *rk,
3790
+ const rd_kafka_assignor_t *rkas,
3791
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3792
+ rd_kafka_resp_err_t err;
3793
+ char errstr[512];
3794
+ rd_kafka_metadata_t *metadata;
3795
+ rd_kafka_group_member_t members[2];
3796
+ int member_cnt = RD_ARRAYSIZE(members);
3797
+ int i;
3798
+ int current_generation = 10;
3799
+
3800
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3801
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3802
+ 2, "topic1", 3, "topic2", 3);
3803
+
3804
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3805
+ ut_get_consumer_rack(0, parametrization),
3806
+ parametrization, "topic1", "topic2", NULL);
3807
+ ut_populate_member_owned_partitions_generation(
3808
+ &members[0], current_generation, 3, "topic1", 0, "topic1", 2,
3809
+ "topic2", 1);
3810
+
3811
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3812
+ ut_get_consumer_rack(1, parametrization),
3813
+ parametrization, "topic1", "topic2", NULL);
3814
+ ut_populate_member_owned_partitions_generation(
3815
+ &members[1], current_generation - 1, 3, "topic1", 0, "topic1", 2,
3816
+ "topic2", 1);
3817
+
3818
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3819
+ member_cnt, errstr, sizeof(errstr));
3820
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3821
+
3822
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3823
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
3824
+ NULL);
3825
+ verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2,
3826
+ NULL);
3827
+
3828
+
3829
+ for (i = 0; i < member_cnt; i++)
3830
+ rd_kafka_group_member_clear(&members[i]);
3831
+ ut_destroy_metadata(metadata);
3832
+
3833
+ RD_UT_PASS();
3834
+ }
3835
+
3836
+ static int ut_testOwnedPartitionsAreInvalidatedForConsumerWithNoGeneration(
3837
+ rd_kafka_t *rk,
3838
+ const rd_kafka_assignor_t *rkas,
3839
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3840
+ rd_kafka_resp_err_t err;
3841
+ char errstr[512];
3842
+ rd_kafka_metadata_t *metadata;
3843
+ rd_kafka_group_member_t members[2];
3844
+ int member_cnt = RD_ARRAYSIZE(members);
3845
+ int i;
3846
+ int current_generation = 10;
3847
+
3848
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3849
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3850
+ 2, "topic1", 3, "topic2", 3);
3851
+
3852
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3853
+ ut_get_consumer_rack(0, parametrization),
3854
+ parametrization, "topic1", "topic2", NULL);
3855
+ ut_populate_member_owned_partitions_generation(
3856
+ &members[0], current_generation, 3, "topic1", 0, "topic1", 2,
3857
+ "topic2", 1);
3858
+
3859
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3860
+ ut_get_consumer_rack(1, parametrization),
3861
+ parametrization, "topic1", "topic2", NULL);
3862
+ ut_populate_member_owned_partitions_generation(
3863
+ &members[1], -1 /* default generation*/, 3, "topic1", 0, "topic1",
3864
+ 2, "topic2", 1);
3865
+
3866
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3867
+ member_cnt, errstr, sizeof(errstr));
3868
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3869
+
3870
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3871
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
3872
+ NULL);
3873
+ verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2,
3874
+ NULL);
3875
+
3876
+
3877
+ for (i = 0; i < member_cnt; i++)
3878
+ rd_kafka_group_member_clear(&members[i]);
3879
+ ut_destroy_metadata(metadata);
3880
+
3881
+ RD_UT_PASS();
3882
+ }
3883
+
3884
+ static int
3885
+ ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration(
3886
+ rd_kafka_t *rk,
3887
+ const rd_kafka_assignor_t *rkas,
3888
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3889
+ rd_kafka_resp_err_t err;
3890
+ char errstr[512];
3891
+ rd_kafka_metadata_t *metadata;
3892
+ rd_kafka_group_member_t members[3];
3893
+ int member_cnt = RD_ARRAYSIZE(members);
3894
+ int i;
3895
+
3896
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3897
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3898
+ 1, "topic1", 3);
3899
+
3900
+ // partition topic-0 is owned by multiple consumers
3901
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3902
+ ut_get_consumer_rack(0, parametrization),
3903
+ parametrization, "topic1", NULL);
3904
+ ut_populate_member_owned_partitions_generation(
3905
+ &members[0], 1 /* generation */, 2, "topic1", 0, "topic1", 1);
3906
+
3907
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3908
+ ut_get_consumer_rack(1, parametrization),
3909
+ parametrization, "topic1", NULL);
3910
+ ut_populate_member_owned_partitions_generation(
3911
+ &members[1], 1 /* generation */, 2, "topic1", 0, "topic1", 2);
3912
+
3913
+ ut_initMemberConditionalRack(&members[2], "consumer3",
3914
+ ut_get_consumer_rack(2, parametrization),
3915
+ parametrization, "topic1", NULL);
3916
+
3917
+
3918
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3919
+ member_cnt, errstr, sizeof(errstr));
3920
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3921
+
3922
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3923
+ verifyAssignment(&members[0], "topic1", 1, NULL);
3924
+ verifyAssignment(&members[1], "topic1", 2, NULL);
3925
+ verifyAssignment(&members[2], "topic1", 0, NULL);
3926
+
3927
+ for (i = 0; i < member_cnt; i++)
3928
+ rd_kafka_group_member_clear(&members[i]);
3929
+ ut_destroy_metadata(metadata);
3930
+
3931
+ RD_UT_PASS();
3932
+ }
3933
+
3934
+
3935
+ /* In Java, there is a way to check what partition transferred ownership.
3936
+ * We don't have anything like that for our UTs, so in lieu of that, this
3937
+ * test is added along with the previous test to make sure that we move the
3938
+ * right partition. Our solution in case of two consumers owning the same
3939
+ * partitions with the same generation id was differing from the Java
3940
+ * implementation earlier. (Check #4252.) */
3941
+ static int
3942
+ ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration2(
3943
+ rd_kafka_t *rk,
3944
+ const rd_kafka_assignor_t *rkas,
3945
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
3946
+ rd_kafka_resp_err_t err;
3947
+ char errstr[512];
3948
+ rd_kafka_metadata_t *metadata;
3949
+ rd_kafka_group_member_t members[3];
3950
+ int member_cnt = RD_ARRAYSIZE(members);
3951
+ int i;
3952
+
3953
+ ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
3954
+ RD_ARRAYSIZE(ALL_RACKS), parametrization,
3955
+ 1, "topic1", 3);
3956
+
3957
+ // partition topic-0 is owned by multiple consumers
3958
+ ut_initMemberConditionalRack(&members[0], "consumer1",
3959
+ ut_get_consumer_rack(0, parametrization),
3960
+ parametrization, "topic1", NULL);
3961
+ ut_populate_member_owned_partitions_generation(
3962
+ &members[0], 1 /* generation */, 2, "topic1", 0, "topic1", 1);
3963
+
3964
+ ut_initMemberConditionalRack(&members[1], "consumer2",
3965
+ ut_get_consumer_rack(1, parametrization),
3966
+ parametrization, "topic1", NULL);
3967
+ ut_populate_member_owned_partitions_generation(
3968
+ &members[1], 1 /* generation */, 2, "topic1", 1, "topic1", 2);
3969
+
3970
+ ut_initMemberConditionalRack(&members[2], "consumer3",
3971
+ ut_get_consumer_rack(2, parametrization),
3972
+ parametrization, "topic1", NULL);
3973
+
3974
+
3975
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
3976
+ member_cnt, errstr, sizeof(errstr));
3977
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
3978
+
3979
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
3980
+ verifyAssignment(&members[0], "topic1", 0, NULL);
3981
+ verifyAssignment(&members[1], "topic1", 2, NULL);
3982
+ verifyAssignment(&members[2], "topic1", 1, NULL);
3983
+
3984
+ for (i = 0; i < member_cnt; i++)
3985
+ rd_kafka_group_member_clear(&members[i]);
3986
+ ut_destroy_metadata(metadata);
3987
+
3988
+ RD_UT_PASS();
3989
+ }
3990
+
3991
+
3992
/**
 * @brief When the same partition is claimed as owned by members that
 *        reported different generations, the claim from the highest
 *        generation must win.
 *
 * consumer1 (generation 10) and consumer3 (generation 8) both claim
 * topic3 partition 0; consumer2 (generation 9) and consumer3 both claim
 * topic2 partition 1. The higher-generation owner must keep the
 * partition in each conflict.
 */
static int ut_testEnsurePartitionsAssignedToHighestGeneration(
    rd_kafka_t *rk,
    const rd_kafka_assignor_t *rkas,
    rd_kafka_assignor_ut_rack_config_t parametrization) {
        rd_kafka_resp_err_t err;
        char errstr[512];
        rd_kafka_metadata_t *metadata;
        rd_kafka_group_member_t members[3];
        int member_cnt        = RD_ARRAYSIZE(members);
        int i;
        int currentGeneration = 10;

        /* Three topics with three partitions each. */
        ut_initMetadataConditionalRack(
            &metadata, 3, 3, ALL_RACKS, RD_ARRAYSIZE(ALL_RACKS),
            parametrization, 3, "topic1", 3, "topic2", 3, "topic3", 3);

        /* consumer1: newest generation, owns partition 0 of each topic. */
        ut_initMemberConditionalRack(
            &members[0], "consumer1", ut_get_consumer_rack(0, parametrization),
            parametrization, "topic1", "topic2", "topic3", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[0], currentGeneration, 3, "topic1", 0, "topic2", 0,
            "topic3", 0);


        /* consumer2: one generation behind, owns partition 1 of each topic. */
        ut_initMemberConditionalRack(
            &members[1], "consumer2", ut_get_consumer_rack(1, parametrization),
            parametrization, "topic1", "topic2", "topic3", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[1], currentGeneration - 1, 3, "topic1", 1, "topic2", 1,
            "topic3", 1);


        /* consumer3: oldest generation; its claims on topic2-1 (also claimed
         * by consumer2) and topic3-0 (also claimed by consumer1) are stale
         * and must be invalidated. */
        ut_initMemberConditionalRack(
            &members[2], "consumer3", ut_get_consumer_rack(2, parametrization),
            parametrization, "topic1", "topic2", "topic3", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[2], currentGeneration - 2, 3, "topic2", 1, "topic3", 0,
            "topic3", 2);

        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
                                    member_cnt, errstr, sizeof(errstr));
        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
        /* Higher-generation members keep their partitions; consumer3 is
         * assigned the remaining partition of each topic. */
        verifyAssignment(&members[0], "topic1", 0, "topic2", 0, "topic3", 0,
                         NULL);
        verifyAssignment(&members[1], "topic1", 1, "topic2", 1, "topic3", 1,
                         NULL);
        verifyAssignment(&members[2], "topic1", 2, "topic2", 2, "topic3", 2,
                         NULL);

        verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);

        for (i = 0; i < member_cnt; i++)
                rd_kafka_group_member_clear(&members[i]);
        ut_destroy_metadata(metadata);

        RD_UT_PASS();
}
4049
+
4050
+
4051
/**
 * @brief A member that joins with the default generation (-1) and no owned
 *        partitions must not cause partitions to be taken away from members
 *        that already own them: the newcomer only receives the partitions
 *        nobody else claims.
 */
static int ut_testNoReassignmentOnCurrentMembers(
    rd_kafka_t *rk,
    const rd_kafka_assignor_t *rkas,
    rd_kafka_assignor_ut_rack_config_t parametrization) {
        rd_kafka_resp_err_t err;
        char errstr[512];
        rd_kafka_metadata_t *metadata;
        rd_kafka_group_member_t members[4];
        int member_cnt        = RD_ARRAYSIZE(members);
        int i;
        int currentGeneration = 10;

        /* Four topics with three partitions each. */
        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
                                       4, "topic0", 3, "topic1", 3, "topic2", 3,
                                       "topic3", 3);

        /* consumer1: fresh member, default generation, owns nothing. */
        ut_initMemberConditionalRack(
            &members[0], "consumer1", ut_get_consumer_rack(0, parametrization),
            parametrization, "topic0", "topic1", "topic2", "topic3", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[0], -1 /* default generation */, 0);

        /* consumer2..consumer4: existing members with owned partitions from
         * progressively older generations (no overlapping claims). */
        ut_initMemberConditionalRack(
            &members[1], "consumer2", ut_get_consumer_rack(1, parametrization),
            parametrization, "topic0", "topic1", "topic2", "topic3", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[1], currentGeneration - 1, 3, "topic0", 0, "topic2", 0,
            "topic1", 0);

        ut_initMemberConditionalRack(
            &members[2], "consumer3", ut_get_consumer_rack(2, parametrization),
            parametrization, "topic0", "topic1", "topic2", "topic3", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[2], currentGeneration - 2, 3, "topic3", 2, "topic2", 2,
            "topic1", 1);

        ut_initMemberConditionalRack(
            &members[3], "consumer4", ut_get_consumer_rack(3, parametrization),
            parametrization, "topic0", "topic1", "topic2", "topic3", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[3], currentGeneration - 3, 3, "topic3", 1, "topic0", 1,
            "topic0", 2);

        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
                                    member_cnt, errstr, sizeof(errstr));
        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);

        verifyValidityAndBalance(members, member_cnt, metadata);
        /* The newcomer only picks up the three partitions left unclaimed
         * by the existing members. */
        verifyAssignment(&members[0], "topic1", 2, "topic2", 1, "topic3", 0,
                         NULL);

        for (i = 0; i < member_cnt; i++)
                rd_kafka_group_member_clear(&members[i]);
        ut_destroy_metadata(metadata);

        RD_UT_PASS();
}
4109
+
4110
+
4111
/**
 * @brief When two members claim the same partitions from different
 *        generations, the stale (lower-generation) claims are invalidated
 *        and the lower-generation member is assigned other partitions.
 *
 * Both members claim topic1-0 and topic2-1; consumer1 (generation 10)
 * keeps them, while consumer2 (generation 8) keeps only its unconflicted
 * claim topic2-2.
 */
static int
ut_testOwnedPartitionsAreInvalidatedForConsumerWithMultipleGeneration(
    rd_kafka_t *rk,
    const rd_kafka_assignor_t *rkas,
    rd_kafka_assignor_ut_rack_config_t parametrization) {
        rd_kafka_resp_err_t err;
        char errstr[512];
        rd_kafka_metadata_t *metadata;
        rd_kafka_group_member_t members[2];
        int member_cnt        = RD_ARRAYSIZE(members);
        int i;
        int currentGeneration = 10;

        /* Two topics with three partitions each. */
        ut_initMetadataConditionalRack(&metadata, 3, 3, ALL_RACKS,
                                       RD_ARRAYSIZE(ALL_RACKS), parametrization,
                                       2, "topic1", 3, "topic2", 3);

        /* consumer1: current generation. */
        ut_initMemberConditionalRack(&members[0], "consumer1",
                                     ut_get_consumer_rack(0, parametrization),
                                     parametrization, "topic1", "topic2", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[0], currentGeneration, 3, "topic1", 0, "topic2", 1,
            "topic1", 1);

        /* consumer2: two generations behind; topic1-0 and topic2-1 conflict
         * with consumer1's newer claims. */
        ut_initMemberConditionalRack(&members[1], "consumer2",
                                     ut_get_consumer_rack(1, parametrization),
                                     parametrization, "topic1", "topic2", NULL);
        ut_populate_member_owned_partitions_generation(
            &members[1], currentGeneration - 2, 3, "topic1", 0, "topic2", 1,
            "topic2", 2);

        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
                                    member_cnt, errstr, sizeof(errstr));
        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);

        verifyValidityAndBalance(members, member_cnt, metadata);
        /* consumer1 keeps all of its claims; consumer2 keeps topic2-2 and
         * receives the remaining partitions. */
        verifyAssignment(&members[0], "topic1", 0, "topic2", 1, "topic1", 1,
                         NULL);
        verifyAssignment(&members[1], "topic1", 2, "topic2", 2, "topic2", 0,
                         NULL);

        for (i = 0; i < member_cnt; i++)
                rd_kafka_group_member_clear(&members[i]);
        ut_destroy_metadata(metadata);

        RD_UT_PASS();
}
4158
+
4159
/* Helper for setting up metadata and members, and running the assignor, and
 * verifying validity and balance of the assignment. Does not check the results
 * of the assignment on a per member basis.
 *
 * @param members             Array of \p member_cnt members; initialized here
 *                            iff \p initialize_members is true.
 * @param replication_factor  Replication factor for the mock metadata.
 * @param num_broker_racks    Number of distinct broker racks; when > 0 the
 *                            broker count is replication_factor *
 *                            num_broker_racks, else replication_factor.
 * @param topics/partitions   \p topic_cnt topic names and partition counts.
 * @param subscriptions(_count) Per-member subscription lists.
 * @param consumer_racks      Per-member index into ALL_RACKS.
 * @param owned_tp_list       Optional per-member owned-partition lists; when
 *                            non-NULL for a member, a copy replaces that
 *                            member's rkgm_owned.
 * @param metadata            Optional out-parameter. When NULL, the metadata
 *                            created here is destroyed before returning;
 *                            otherwise the caller owns it.
 */
static int
setupRackAwareAssignment0(rd_kafka_t *rk,
                          const rd_kafka_assignor_t *rkas,
                          rd_kafka_group_member_t *members,
                          size_t member_cnt,
                          int replication_factor,
                          int num_broker_racks,
                          size_t topic_cnt,
                          char *topics[],
                          int *partitions,
                          int *subscriptions_count,
                          char **subscriptions[],
                          int *consumer_racks,
                          rd_kafka_topic_partition_list_t **owned_tp_list,
                          rd_bool_t initialize_members,
                          rd_kafka_metadata_t **metadata) {
        rd_kafka_resp_err_t err;
        char errstr[512];
        rd_kafka_metadata_t *metadata_local = NULL;

        size_t i              = 0;
        const int num_brokers = num_broker_racks > 0
                                    ? replication_factor * num_broker_racks
                                    : replication_factor;
        if (!metadata)
                metadata = &metadata_local;

        /* The member naming for tests is consumerN where N is a single
         * character. */
        rd_assert(member_cnt <= 9);

        *metadata = rd_kafka_metadata_new_topic_with_partition_replicas_mock(
            replication_factor, num_brokers, topics, partitions, topic_cnt);
        ut_populate_internal_broker_metadata(
            rd_kafka_metadata_get_internal(*metadata), num_broker_racks,
            ALL_RACKS, RD_ARRAYSIZE(ALL_RACKS));
        ut_populate_internal_topic_metadata(
            rd_kafka_metadata_get_internal(*metadata));

        for (i = 0; initialize_members && i < member_cnt; i++) {
                char member_id[10];
                snprintf(member_id, 10, "consumer%d", (int)(i + 1));
                ut_init_member_with_rack(
                    &members[i], member_id, ALL_RACKS[consumer_racks[i]],
                    subscriptions[i], subscriptions_count[i]);

                if (!owned_tp_list || !owned_tp_list[i])
                        continue;

                /* Replace any owned list set by initialization with the
                 * caller-supplied one. */
                if (members[i].rkgm_owned)
                        rd_kafka_topic_partition_list_destroy(
                            members[i].rkgm_owned);

                members[i].rkgm_owned =
                    rd_kafka_topic_partition_list_copy(owned_tp_list[i]);
        }

        err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, *metadata, members,
                                    member_cnt, errstr, sizeof(errstr));
        RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);

        /* Note that verifyValidityAndBalance also sets rkgm_owned for each
         * member to rkgm_assignment, so if the members are used without
         * clearing, in another assignor_run, the result should be stable. */
        verifyValidityAndBalance(members, member_cnt, *metadata);

        if (metadata_local)
                ut_destroy_metadata(metadata_local);
        return 0;
}
4233
+
4234
+ static int
4235
+ setupRackAwareAssignment(rd_kafka_t *rk,
4236
+ const rd_kafka_assignor_t *rkas,
4237
+ rd_kafka_group_member_t *members,
4238
+ size_t member_cnt,
4239
+ int replication_factor,
4240
+ int num_broker_racks,
4241
+ size_t topic_cnt,
4242
+ char *topics[],
4243
+ int *partitions,
4244
+ int *subscriptions_count,
4245
+ char **subscriptions[],
4246
+ int *consumer_racks,
4247
+ rd_kafka_topic_partition_list_t **owned_tp_list,
4248
+ rd_bool_t initialize_members) {
4249
+ return setupRackAwareAssignment0(
4250
+ rk, rkas, members, member_cnt, replication_factor, num_broker_racks,
4251
+ topic_cnt, topics, partitions, subscriptions_count, subscriptions,
4252
+ consumer_racks, owned_tp_list, initialize_members, NULL);
4253
+ }
4254
+
4255
/* Helper for testing cases where rack-aware assignment should not be triggered,
 * and assignment should be the same as the pre-rack-aware assignor. Each case
 * is run twice, once with owned partitions set to empty, and in the second
 * case, with owned partitions set to the result of the previous run, to check
 * that the assignment is stable. The variadic arguments are the expected
 * per-member assignments, passed through to verifyMultipleAssignment(). */
#define verifyNonRackAwareAssignment(rk, rkas, members, member_cnt, topic_cnt, \
                                     topics, partitions, subscriptions_count,  \
                                     subscriptions, ...)                       \
        do {                                                                   \
                size_t idx       = 0;                                          \
                int init_members = 1;                                          \
                rd_kafka_metadata_t *metadata;                                 \
                                                                               \
                /* num_broker_racks = 0, implies that brokers have no          \
                 * configured racks. */                                        \
                for (init_members = 1; init_members >= 0; init_members--) {    \
                        setupRackAwareAssignment(                              \
                            rk, rkas, members, member_cnt, 3, 0, topic_cnt,    \
                            topics, partitions, subscriptions_count,           \
                            subscriptions, RACKS_INITIAL, NULL, init_members); \
                        verifyMultipleAssignment(members, member_cnt,          \
                                                 __VA_ARGS__);                 \
                }                                                              \
                for (idx = 0; idx < member_cnt; idx++)                         \
                        rd_kafka_group_member_clear(&members[idx]);            \
                /* consumer_racks = RACKS_NULL implies that consumers have no  \
                 * racks. */                                                   \
                for (init_members = 1; init_members >= 0; init_members--) {    \
                        setupRackAwareAssignment(                              \
                            rk, rkas, members, member_cnt, 3, 3, topic_cnt,    \
                            topics, partitions, subscriptions_count,           \
                            subscriptions, RACKS_NULL, NULL, init_members);    \
                        verifyMultipleAssignment(members, member_cnt,          \
                                                 __VA_ARGS__);                 \
                }                                                              \
                for (idx = 0; idx < member_cnt; idx++)                         \
                        rd_kafka_group_member_clear(&members[idx]);            \
                /* replication_factor = 3 and num_broker_racks = 3 means that  \
                 * all partitions are replicated on all racks.*/               \
                for (init_members = 1; init_members >= 0; init_members--) {    \
                        setupRackAwareAssignment0(                             \
                            rk, rkas, members, member_cnt, 3, 3, topic_cnt,    \
                            topics, partitions, subscriptions_count,           \
                            subscriptions, RACKS_INITIAL, NULL, init_members,  \
                            &metadata);                                        \
                        verifyMultipleAssignment(members, member_cnt,          \
                                                 __VA_ARGS__);                 \
                        verifyNumPartitionsWithRackMismatch(                   \
                            metadata, members, RD_ARRAYSIZE(members), 0);      \
                        ut_destroy_metadata(metadata);                         \
                }                                                              \
                for (idx = 0; idx < member_cnt; idx++)                         \
                        rd_kafka_group_member_clear(&members[idx]);            \
                /* replication_factor = 4 and num_broker_racks = 4 means that  \
                 * all partitions are replicated on all racks. */              \
                for (init_members = 1; init_members >= 0; init_members--) {    \
                        setupRackAwareAssignment0(                             \
                            rk, rkas, members, member_cnt, 4, 4, topic_cnt,    \
                            topics, partitions, subscriptions_count,           \
                            subscriptions, RACKS_INITIAL, NULL, init_members,  \
                            &metadata);                                        \
                        verifyMultipleAssignment(members, member_cnt,          \
                                                 __VA_ARGS__);                 \
                        verifyNumPartitionsWithRackMismatch(                   \
                            metadata, members, RD_ARRAYSIZE(members), 0);      \
                        ut_destroy_metadata(metadata);                         \
                }                                                              \
                for (idx = 0; idx < member_cnt; idx++)                         \
                        rd_kafka_group_member_clear(&members[idx]);            \
                /* There's no overlap between broker racks and consumer racks, \
                 * since num_broker_racks = 3, they'll be picked from a,b,c    \
                 * and consumer racks are d,e,f. */                            \
                for (init_members = 1; init_members >= 0; init_members--) {    \
                        setupRackAwareAssignment(                              \
                            rk, rkas, members, member_cnt, 3, 3, topic_cnt,    \
                            topics, partitions, subscriptions_count,           \
                            subscriptions, RACKS_FINAL, NULL, init_members);   \
                        verifyMultipleAssignment(members, member_cnt,          \
                                                 __VA_ARGS__);                 \
                }                                                              \
                for (idx = 0; idx < member_cnt; idx++)                         \
                        rd_kafka_group_member_clear(&members[idx]);            \
                /* There's no overlap between broker racks and consumer racks, \
                 * since num_broker_racks = 3, they'll be picked from a,b,c    \
                 * and consumer racks are d,e,NULL. */                         \
                for (init_members = 1; init_members >= 0; init_members--) {    \
                        setupRackAwareAssignment(                              \
                            rk, rkas, members, member_cnt, 3, 3, topic_cnt,    \
                            topics, partitions, subscriptions_count,           \
                            subscriptions, RACKS_ONE_NULL, NULL,               \
                            init_members);                                     \
                        verifyMultipleAssignment(members, member_cnt,          \
                                                 __VA_ARGS__);                 \
                }                                                              \
                for (idx = 0; idx < member_cnt; idx++)                         \
                        rd_kafka_group_member_clear(&members[idx]);            \
        } while (0)
4352
+
4353
+
4354
+ static int ut_testRackAwareAssignmentWithUniformSubscription(
4355
+ rd_kafka_t *rk,
4356
+ const rd_kafka_assignor_t *rkas,
4357
+ rd_kafka_assignor_ut_rack_config_t parametrization) {
4358
+ char *topics[] = {"t1", "t2", "t3"};
4359
+ int partitions[] = {6, 7, 2};
4360
+ rd_kafka_group_member_t members[3];
4361
+ size_t member_cnt = RD_ARRAYSIZE(members);
4362
+ size_t i = 0;
4363
+ int subscriptions_count[] = {3, 3, 3};
4364
+ char **subscriptions[] = {topics, topics, topics};
4365
+ int init_members = 0;
4366
+ rd_kafka_topic_partition_list_t **owned;
4367
+ rd_kafka_metadata_t *metadata;
4368
+
4369
+ if (parametrization !=
4370
+ RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) {
4371
+ RD_UT_PASS();
4372
+ }
4373
+
4374
+ verifyNonRackAwareAssignment(
4375
+ rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics),
4376
+ topics, partitions, subscriptions_count, subscriptions,
4377
+ /* consumer1 */
4378
+ "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
4379
+ /* consumer2 */
4380
+ "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
4381
+ /* consumer3 */
4382
+ "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
4383
+
4384
+ /* Verify assignment is rack-aligned for lower replication factor where
4385
+ * brokers have a subset of partitions */
4386
+ for (init_members = 1; init_members >= 0; init_members--) {
4387
+ setupRackAwareAssignment0(
4388
+ rk, rkas, members, RD_ARRAYSIZE(members), 1, 3,
4389
+ RD_ARRAYSIZE(topics), topics, partitions,
4390
+ subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
4391
+ init_members, &metadata);
4392
+ verifyMultipleAssignment(
4393
+ members, RD_ARRAYSIZE(members),
4394
+ /* consumer1 */
4395
+ "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
4396
+ /* consumer2 */
4397
+ "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
4398
+ /* consumer3 */
4399
+ "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
4400
+ verifyNumPartitionsWithRackMismatch(metadata, members,
4401
+ RD_ARRAYSIZE(members), 0);
4402
+ ut_destroy_metadata(metadata);
4403
+ }
4404
+ for (i = 0; i < RD_ARRAYSIZE(members); i++)
4405
+ rd_kafka_group_member_clear(&members[i]);
4406
+
4407
+
4408
+ for (init_members = 1; init_members >= 0; init_members--) {
4409
+ setupRackAwareAssignment0(
4410
+ rk, rkas, members, RD_ARRAYSIZE(members), 2, 3,
4411
+ RD_ARRAYSIZE(topics), topics, partitions,
4412
+ subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
4413
+ init_members, &metadata);
4414
+ verifyMultipleAssignment(
4415
+ members, RD_ARRAYSIZE(members),
4416
+ /* consumer1 */
4417
+ "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
4418
+ /* consumer2 */
4419
+ "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
4420
+ /* consumer3 */
4421
+ "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
4422
+ verifyNumPartitionsWithRackMismatch(metadata, members,
4423
+ RD_ARRAYSIZE(members), 0);
4424
+ ut_destroy_metadata(metadata);
4425
+ }
4426
+ for (i = 0; i < RD_ARRAYSIZE(members); i++)
4427
+ rd_kafka_group_member_clear(&members[i]);
4428
+
4429
+ /* One consumer on a rack with no partitions. We allocate with
4430
+ * misaligned rack to this consumer to maintain balance. */
4431
+ for (init_members = 1; init_members >= 0; init_members--) {
4432
+ setupRackAwareAssignment0(
4433
+ rk, rkas, members, RD_ARRAYSIZE(members), 3, 2,
4434
+ RD_ARRAYSIZE(topics), topics, partitions,
4435
+ subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
4436
+ init_members, &metadata);
4437
+ verifyMultipleAssignment(
4438
+ members, RD_ARRAYSIZE(members),
4439
+ /* consumer1 */
4440
+ "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
4441
+ /* consumer2 */
4442
+ "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
4443
+ /* consumer3 */
4444
+ "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
4445
+ verifyNumPartitionsWithRackMismatch(metadata, members,
4446
+ RD_ARRAYSIZE(members), 5);
4447
+ ut_destroy_metadata(metadata);
4448
+ }
4449
+ for (i = 0; i < RD_ARRAYSIZE(members); i++)
4450
+ rd_kafka_group_member_clear(&members[i]);
4451
+
4452
+ /* Verify that rack-awareness is improved if already owned partitions
4453
+ * are misaligned */
4454
+ owned = ut_create_topic_partition_lists(
4455
+ 3,
4456
+ /* consumer1 */
4457
+ "t1", 0, "t1", 1, "t1", 2, "t1", 3, "t1", 4, NULL,
4458
+ /* consumer2 */
4459
+ "t1", 5, "t2", 0, "t2", 1, "t2", 2, "t2", 3, NULL,
4460
+ /* consumer3 */
4461
+ "t2", 4, "t2", 5, "t2", 6, "t3", 0, "t3", 1, NULL);
4462
+
4463
+ setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1,
4464
+ 3, RD_ARRAYSIZE(topics), topics, partitions,
4465
+ subscriptions_count, subscriptions,
4466
+ RACKS_INITIAL, owned, rd_true, &metadata);
4467
+ verifyMultipleAssignment(
4468
+ members, RD_ARRAYSIZE(members),
4469
+ /* consumer1 */
4470
+ "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
4471
+ /* consumer2 */
4472
+ "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
4473
+ /* consumer3 */
4474
+ "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
4475
+ verifyNumPartitionsWithRackMismatch(metadata, members,
4476
+ RD_ARRAYSIZE(members), 0);
4477
+ ut_destroy_metadata(metadata);
4478
+
4479
+ for (i = 0; i < RD_ARRAYSIZE(members); i++)
4480
+ rd_kafka_group_member_clear(&members[i]);
4481
+ for (i = 0; i < member_cnt; i++)
4482
+ rd_kafka_topic_partition_list_destroy(owned[i]);
4483
+ rd_free(owned);
4484
+
4485
+
4486
+ /* Verify that stickiness is retained when racks match */
4487
+ owned = ut_create_topic_partition_lists(
4488
+ 3,
4489
+ /* consumer1 */
4490
+ "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
4491
+ /* consumer2 */
4492
+ "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
4493
+ /* consumer3 */
4494
+ "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
4495
+
4496
+ /* This test deviates slightly from Java, in that we test with two
4497
+ * additional replication factors, 1 and 2, which are not tested in
4498
+ * Java. This is because in Java, there is a way to turn rack aware
4499
+ * logic on or off for tests. We don't have that, and to test with rack
4500
+ * aware logic, we need to change something, in this case, the
4501
+ * replication factor. */
4502
+ for (i = 1; i <= 3; i++) {
4503
+ setupRackAwareAssignment0(
4504
+ rk, rkas, members, RD_ARRAYSIZE(members),
4505
+ i /* replication factor */, 3, RD_ARRAYSIZE(topics), topics,
4506
+ partitions, subscriptions_count, subscriptions,
4507
+ RACKS_INITIAL, owned, rd_true, &metadata);
4508
+ verifyMultipleAssignment(
4509
+ members, RD_ARRAYSIZE(members),
4510
+ /* consumer1 */
4511
+ "t1", 0, "t1", 3, "t2", 0, "t2", 3, "t2", 6, NULL,
4512
+ /* consumer2 */
4513
+ "t1", 1, "t1", 4, "t2", 1, "t2", 4, "t3", 0, NULL,
4514
+ /* consumer3 */
4515
+ "t1", 2, "t1", 5, "t2", 2, "t2", 5, "t3", 1, NULL);
4516
+ verifyNumPartitionsWithRackMismatch(metadata, members,
4517
+ RD_ARRAYSIZE(members), 0);
4518
+
4519
+ for (i = 0; i < RD_ARRAYSIZE(members); i++)
4520
+ rd_kafka_group_member_clear(&members[i]);
4521
+ ut_destroy_metadata(metadata);
4522
+ }
4523
+
4524
+ for (i = 0; i < member_cnt; i++)
4525
+ rd_kafka_topic_partition_list_destroy(owned[i]);
4526
+ rd_free(owned);
4527
+
4528
+ RD_UT_PASS();
4529
+ }
4530
+
4531
+
4532
/**
 * @brief Rack-aware assignment where consumer3 subscribes only to a subset
 *        of topics (t1, t3) while the others subscribe to all three.
 *
 * Checks non-rack-aware fallbacks, rack-aligned assignment for lower
 * replication factors, balance when one consumer's rack holds no partitions,
 * and that misaligned owned partitions are improved.
 * Only runs for the BROKER_AND_CONSUMER_RACK parametrization.
 */
static int ut_testRackAwareAssignmentWithNonEqualSubscription(
    rd_kafka_t *rk,
    const rd_kafka_assignor_t *rkas,
    rd_kafka_assignor_ut_rack_config_t parametrization) {
        char *topics[]   = {"t1", "t2", "t3"};
        char *topics0[]  = {"t1", "t3"};
        int partitions[] = {6, 7, 2};
        rd_kafka_group_member_t members[3];
        size_t member_cnt = RD_ARRAYSIZE(members);
        size_t i          = 0;
        int subscriptions_count[] = {3, 3, 2};
        /* consumer3 subscribes only to t1 and t3. */
        char **subscriptions[] = {topics, topics, topics0};
        int with_owned         = 0;
        rd_kafka_topic_partition_list_t **owned;
        rd_kafka_metadata_t *metadata;

        if (parametrization !=
            RD_KAFKA_RANGE_ASSIGNOR_UT_BROKER_AND_CONSUMER_RACK) {
                RD_UT_PASS();
        }

        verifyNonRackAwareAssignment(
            rk, rkas, members, RD_ARRAYSIZE(members), RD_ARRAYSIZE(topics),
            topics, partitions, subscriptions_count, subscriptions,
            /* consumer1 */
            "t1", 5,
            "t2", 0, "t2", 2, "t2", 4, "t2", 6, NULL,
            /* consumer2 */
            "t1", 3, "t2", 1, "t2", 3, "t2", 5, "t3", 0, NULL,
            /* consumer3 */
            "t1", 0, "t1", 1, "t1", 2, "t1", 4, "t3", 1, NULL);

        // Verify assignment is rack-aligned for lower replication factor where
        // brokers have a subset of partitions
        for (with_owned = 0; with_owned <= 1; with_owned++) {
                setupRackAwareAssignment0(
                    rk, rkas, members, RD_ARRAYSIZE(members), 1, 3,
                    RD_ARRAYSIZE(topics), topics, partitions,
                    subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
                    !with_owned, &metadata);
                verifyMultipleAssignment(
                    members, RD_ARRAYSIZE(members),
                    /* consumer1 */
                    "t1", 3, "t2", 0, "t2", 2, "t2", 3, "t2", 6, NULL,
                    /* consumer2 */
                    "t1", 4, "t2", 1, "t2", 4, "t2", 5, "t3", 0, NULL,
                    /* consumer3 */
                    "t1", 0, "t1", 1, "t1", 2, "t1", 5, "t3", 1, NULL);
                verifyNumPartitionsWithRackMismatch(metadata, members,
                                                    RD_ARRAYSIZE(members), 4);
                ut_destroy_metadata(metadata);
        }
        for (i = 0; i < RD_ARRAYSIZE(members); i++)
                rd_kafka_group_member_clear(&members[i]);



        for (with_owned = 0; with_owned <= 1; with_owned++) {
                setupRackAwareAssignment0(
                    rk, rkas, members, RD_ARRAYSIZE(members), 2, 3,
                    RD_ARRAYSIZE(topics), topics, partitions,
                    subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
                    !with_owned, &metadata);
                verifyMultipleAssignment(
                    members, RD_ARRAYSIZE(members),
                    /* consumer1 */
                    "t1", 3, "t2", 0, "t2", 2, "t2", 5, "t2", 6, NULL,
                    /* consumer2 */
                    "t1", 0, "t2", 1, "t2", 3, "t2", 4, "t3", 0, NULL,
                    /* consumer3 */
                    "t1", 1, "t1", 2, "t1", 4, "t1", 5, "t3", 1, NULL);
                verifyNumPartitionsWithRackMismatch(metadata, members,
                                                    RD_ARRAYSIZE(members), 0);
                ut_destroy_metadata(metadata);
        }
        for (i = 0; i < RD_ARRAYSIZE(members); i++)
                rd_kafka_group_member_clear(&members[i]);

        /* One consumer on a rack with no partitions. We allocate with
         * misaligned rack to this consumer to maintain balance. */
        for (with_owned = 0; with_owned <= 1; with_owned++) {
                setupRackAwareAssignment0(
                    rk, rkas, members, RD_ARRAYSIZE(members), 3, 2,
                    RD_ARRAYSIZE(topics), topics, partitions,
                    subscriptions_count, subscriptions, RACKS_INITIAL, NULL,
                    !with_owned, &metadata);
                verifyMultipleAssignment(
                    members, RD_ARRAYSIZE(members),
                    /* consumer1 */
                    "t1", 5, "t2", 0, "t2", 2, "t2", 4, "t2", 6, NULL,
                    /* consumer2 */
                    "t1", 3, "t2", 1, "t2", 3, "t2", 5, "t3", 0, NULL,
                    /* consumer3 */
                    "t1", 0, "t1", 1, "t1", 2, "t1", 4, "t3", 1, NULL);
                verifyNumPartitionsWithRackMismatch(metadata, members,
                                                    RD_ARRAYSIZE(members), 5);
                ut_destroy_metadata(metadata);
        }

        for (i = 0; i < RD_ARRAYSIZE(members); i++)
                rd_kafka_group_member_clear(&members[i]);

        /* Verify that rack-awareness is improved if already owned partitions
         * are misaligned. */
        owned = ut_create_topic_partition_lists(
            3,
            /* consumer1 */
            "t1", 0, "t1", 1, "t1", 2, "t1", 3, "t1", 4, NULL,
            /* consumer2 */
            "t1", 5, "t2", 0, "t2", 1, "t2", 2, "t2", 3, NULL,
            /* consumer3 */
            "t2", 4, "t2", 5, "t2", 6, "t3", 0, "t3", 1, NULL);

        setupRackAwareAssignment0(rk, rkas, members, RD_ARRAYSIZE(members), 1,
                                  3, RD_ARRAYSIZE(topics), topics, partitions,
                                  subscriptions_count, subscriptions,
                                  RACKS_INITIAL, owned, rd_true, &metadata);
        verifyMultipleAssignment(
            members, RD_ARRAYSIZE(members),
            /* consumer1 */
            "t1", 3, "t2", 0, "t2", 2, "t2", 3, "t2", 6, NULL,
            /* consumer2 */
            "t1", 4, "t2", 1, "t2", 4, "t2", 5, "t3", 0, NULL,
            /* consumer3 */
            "t1", 0, "t1", 1, "t1", 2, "t1", 5, "t3", 1, NULL);
        verifyNumPartitionsWithRackMismatch(metadata, members,
                                            RD_ARRAYSIZE(members), 4);
        ut_destroy_metadata(metadata);

        for (i = 0; i < RD_ARRAYSIZE(members); i++)
                rd_kafka_group_member_clear(&members[i]);
        for (i = 0; i < member_cnt; i++)
                rd_kafka_topic_partition_list_destroy(owned[i]);
        rd_free(owned);

        /* One of the Java tests is skipped here, which tests if the rack-aware
         * logic assigns the same partitions as non-rack aware logic. This is
         * because we don't have a way to force rack-aware logic like the Java
         * assignor. */
        RD_UT_PASS();
}
4671
+
4672
/**
 * @brief Unit-test entry point for the cooperative-sticky assignor.
 *
 * Creates a consumer instance configured with the cooperative-sticky
 * strategy, then runs every test in the NULL-terminated \c tests table once
 * per rack-config parametrization.
 *
 * @returns the number of failed tests (0 on success).
 */
static int rd_kafka_sticky_assignor_unittest(void) {
        rd_kafka_conf_t *conf;
        rd_kafka_t *rk;
        int fails = 0;
        char errstr[256];
        rd_kafka_assignor_t *rkas;
        /* NULL-terminated table of test functions, each parameterized by
         * rack configuration. */
        static int (*tests[])(
            rd_kafka_t *, const rd_kafka_assignor_t *,
            rd_kafka_assignor_ut_rack_config_t parametrization) = {
            ut_testOneConsumerNoTopic,
            ut_testOneConsumerNonexistentTopic,
            ut_testOneConsumerOneTopic,
            ut_testOnlyAssignsPartitionsFromSubscribedTopics,
            ut_testOneConsumerMultipleTopics,
            ut_testTwoConsumersOneTopicOnePartition,
            ut_testTwoConsumersOneTopicTwoPartitions,
            ut_testMultipleConsumersMixedTopicSubscriptions,
            ut_testTwoConsumersTwoTopicsSixPartitions,
            ut_testAddRemoveConsumerOneTopic,
            ut_testPoorRoundRobinAssignmentScenario,
            ut_testAddRemoveTopicTwoConsumers,
            ut_testReassignmentAfterOneConsumerLeaves,
            ut_testReassignmentAfterOneConsumerAdded,
            ut_testSameSubscriptions,
            ut_testLargeAssignmentWithMultipleConsumersLeaving,
            ut_testNewSubscription,
            ut_testMoveExistingAssignments,
            ut_testMoveExistingAssignments_j,
            ut_testStickiness,
            ut_testStickiness_j,
            ut_testStickiness2,
            ut_testAssignmentUpdatedForDeletedTopic,
            ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted,
            ut_testConflictingPreviousAssignments,
            ut_testAllConsumersReachExpectedQuotaAndAreConsideredFilled,
            ut_testOwnedPartitionsAreInvalidatedForConsumerWithStaleGeneration,
            ut_testOwnedPartitionsAreInvalidatedForConsumerWithNoGeneration,
            ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration,
            ut_testPartitionsTransferringOwnershipIncludeThePartitionClaimedByMultipleConsumersInSameGeneration2,
            ut_testEnsurePartitionsAssignedToHighestGeneration,
            ut_testNoReassignmentOnCurrentMembers,
            ut_testOwnedPartitionsAreInvalidatedForConsumerWithMultipleGeneration,
            ut_testRackAwareAssignmentWithUniformSubscription,
            ut_testRackAwareAssignmentWithNonEqualSubscription,
            NULL,
        };
        size_t i;


        conf = rd_kafka_conf_new();
        if (rd_kafka_conf_set(conf, "group.id", "test", errstr,
                              sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "partition.assignment.strategy",
                              "cooperative-sticky", errstr, sizeof(errstr)))
                RD_UT_FAIL("sticky assignor conf failed: %s", errstr);

        /* Best-effort: enable debug contexts from TEST_DEBUG if set; return
         * value deliberately ignored. */
        rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL,
                          0);

        rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
        RD_UT_ASSERT(rk, "sticky assignor client instantiation failed: %s",
                     errstr);

        rkas = rd_kafka_assignor_find(rk, "cooperative-sticky");
        RD_UT_ASSERT(rkas, "sticky assignor not found");

        /* Fill ALL_RACKS with single-letter rack ids "a", "b", ...; the last
         * slot is left NULL (used by tests as "no rack"). */
        for (i = 0; i < RD_ARRAY_SIZE(ALL_RACKS) - 1; i++) {
                char c       = 'a' + i;
                ALL_RACKS[i] = rd_kafkap_str_new(&c, 1);
        }
        ALL_RACKS[i] = NULL;

        /* Run each test under every rack-config parametrization. */
        for (i = 0; tests[i]; i++) {
                rd_ts_t ts = rd_clock();
                int r      = 0;
                rd_kafka_assignor_ut_rack_config_t j;

                RD_UT_SAY("[ Test #%" PRIusz " ]", i);
                for (j = RD_KAFKA_RANGE_ASSIGNOR_UT_NO_BROKER_RACK;
                     j != RD_KAFKA_RANGE_ASSIGNOR_UT_CONFIG_CNT; j++) {
                        RD_UT_SAY("[ Test #%" PRIusz ", RackConfig = %d ]", i,
                                  j);
                        r += tests[i](rk, rkas, j);
                }
                RD_UT_SAY("[ Test #%" PRIusz " ran for %.3fms ]", i,
                          (double)(rd_clock() - ts) / 1000.0);

                RD_UT_ASSERT(!r, "^ failed");

                fails += r;
        }

        for (i = 0; i < RD_ARRAY_SIZE(ALL_RACKS) - 1; i++) {
                rd_kafkap_str_destroy(ALL_RACKS[i]);
        }

        rd_kafka_destroy(rk);

        return fails;
}
4772
+
4773
+
4774
/**
 * @brief Initialize and add sticky assignor.
 *
 * Registers the "cooperative-sticky" assignor (COOPERATIVE rebalance
 * protocol) with its assign, metadata, on-assignment, state-destroy and
 * unittest callbacks.
 */
rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk) {
        return rd_kafka_assignor_add(rk, "consumer", "cooperative-sticky",
                                     RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE,
                                     rd_kafka_sticky_assignor_assign_cb,
                                     rd_kafka_sticky_assignor_get_metadata,
                                     rd_kafka_sticky_assignor_on_assignment_cb,
                                     rd_kafka_sticky_assignor_state_destroy,
                                     rd_kafka_sticky_assignor_unittest, NULL);
}