@point3/node-rdkafka 3.6.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (707) hide show
  1. package/LICENSE.txt +20 -0
  2. package/README.md +636 -0
  3. package/binding.gyp +154 -0
  4. package/deps/librdkafka/.clang-format +136 -0
  5. package/deps/librdkafka/.clang-format-cpp +103 -0
  6. package/deps/librdkafka/.dir-locals.el +10 -0
  7. package/deps/librdkafka/.formatignore +33 -0
  8. package/deps/librdkafka/.gdbmacros +19 -0
  9. package/deps/librdkafka/.github/CODEOWNERS +1 -0
  10. package/deps/librdkafka/.github/ISSUE_TEMPLATE +34 -0
  11. package/deps/librdkafka/.semaphore/run-all-tests.yml +77 -0
  12. package/deps/librdkafka/.semaphore/semaphore-integration.yml +250 -0
  13. package/deps/librdkafka/.semaphore/semaphore.yml +378 -0
  14. package/deps/librdkafka/.semaphore/verify-linux-packages.yml +41 -0
  15. package/deps/librdkafka/CHANGELOG.md +2208 -0
  16. package/deps/librdkafka/CMakeLists.txt +291 -0
  17. package/deps/librdkafka/CODE_OF_CONDUCT.md +46 -0
  18. package/deps/librdkafka/CONFIGURATION.md +209 -0
  19. package/deps/librdkafka/CONTRIBUTING.md +431 -0
  20. package/deps/librdkafka/Doxyfile +2375 -0
  21. package/deps/librdkafka/INTRODUCTION.md +2481 -0
  22. package/deps/librdkafka/LICENSE +26 -0
  23. package/deps/librdkafka/LICENSE.cjson +22 -0
  24. package/deps/librdkafka/LICENSE.crc32c +28 -0
  25. package/deps/librdkafka/LICENSE.fnv1a +18 -0
  26. package/deps/librdkafka/LICENSE.hdrhistogram +27 -0
  27. package/deps/librdkafka/LICENSE.lz4 +26 -0
  28. package/deps/librdkafka/LICENSE.murmur2 +25 -0
  29. package/deps/librdkafka/LICENSE.nanopb +22 -0
  30. package/deps/librdkafka/LICENSE.opentelemetry +203 -0
  31. package/deps/librdkafka/LICENSE.pycrc +23 -0
  32. package/deps/librdkafka/LICENSE.queue +31 -0
  33. package/deps/librdkafka/LICENSE.regexp +5 -0
  34. package/deps/librdkafka/LICENSE.snappy +36 -0
  35. package/deps/librdkafka/LICENSE.tinycthread +26 -0
  36. package/deps/librdkafka/LICENSE.wingetopt +49 -0
  37. package/deps/librdkafka/LICENSES.txt +625 -0
  38. package/deps/librdkafka/Makefile +125 -0
  39. package/deps/librdkafka/README.md +199 -0
  40. package/deps/librdkafka/README.win32 +26 -0
  41. package/deps/librdkafka/STATISTICS.md +624 -0
  42. package/deps/librdkafka/configure +214 -0
  43. package/deps/librdkafka/configure.self +331 -0
  44. package/deps/librdkafka/debian/changelog +111 -0
  45. package/deps/librdkafka/debian/compat +1 -0
  46. package/deps/librdkafka/debian/control +71 -0
  47. package/deps/librdkafka/debian/copyright +99 -0
  48. package/deps/librdkafka/debian/gbp.conf +9 -0
  49. package/deps/librdkafka/debian/librdkafka++1.install +1 -0
  50. package/deps/librdkafka/debian/librdkafka-dev.examples +2 -0
  51. package/deps/librdkafka/debian/librdkafka-dev.install +9 -0
  52. package/deps/librdkafka/debian/librdkafka1.docs +5 -0
  53. package/deps/librdkafka/debian/librdkafka1.install +1 -0
  54. package/deps/librdkafka/debian/librdkafka1.symbols +135 -0
  55. package/deps/librdkafka/debian/rules +19 -0
  56. package/deps/librdkafka/debian/source/format +1 -0
  57. package/deps/librdkafka/debian/watch +2 -0
  58. package/deps/librdkafka/dev-conf.sh +123 -0
  59. package/deps/librdkafka/examples/CMakeLists.txt +79 -0
  60. package/deps/librdkafka/examples/Makefile +167 -0
  61. package/deps/librdkafka/examples/README.md +42 -0
  62. package/deps/librdkafka/examples/alter_consumer_group_offsets.c +338 -0
  63. package/deps/librdkafka/examples/consumer.c +271 -0
  64. package/deps/librdkafka/examples/delete_records.c +233 -0
  65. package/deps/librdkafka/examples/describe_cluster.c +322 -0
  66. package/deps/librdkafka/examples/describe_consumer_groups.c +455 -0
  67. package/deps/librdkafka/examples/describe_topics.c +427 -0
  68. package/deps/librdkafka/examples/elect_leaders.c +317 -0
  69. package/deps/librdkafka/examples/globals.json +11 -0
  70. package/deps/librdkafka/examples/idempotent_producer.c +344 -0
  71. package/deps/librdkafka/examples/incremental_alter_configs.c +347 -0
  72. package/deps/librdkafka/examples/kafkatest_verifiable_client.cpp +945 -0
  73. package/deps/librdkafka/examples/list_consumer_group_offsets.c +359 -0
  74. package/deps/librdkafka/examples/list_consumer_groups.c +365 -0
  75. package/deps/librdkafka/examples/list_offsets.c +327 -0
  76. package/deps/librdkafka/examples/misc.c +287 -0
  77. package/deps/librdkafka/examples/openssl_engine_example.cpp +248 -0
  78. package/deps/librdkafka/examples/producer.c +251 -0
  79. package/deps/librdkafka/examples/producer.cpp +228 -0
  80. package/deps/librdkafka/examples/rdkafka_complex_consumer_example.c +617 -0
  81. package/deps/librdkafka/examples/rdkafka_complex_consumer_example.cpp +467 -0
  82. package/deps/librdkafka/examples/rdkafka_consume_batch.cpp +264 -0
  83. package/deps/librdkafka/examples/rdkafka_example.c +853 -0
  84. package/deps/librdkafka/examples/rdkafka_example.cpp +679 -0
  85. package/deps/librdkafka/examples/rdkafka_performance.c +1781 -0
  86. package/deps/librdkafka/examples/transactions-older-broker.c +668 -0
  87. package/deps/librdkafka/examples/transactions.c +665 -0
  88. package/deps/librdkafka/examples/user_scram.c +491 -0
  89. package/deps/librdkafka/examples/win_ssl_cert_store.cpp +396 -0
  90. package/deps/librdkafka/lds-gen.py +73 -0
  91. package/deps/librdkafka/mainpage.doxy +40 -0
  92. package/deps/librdkafka/mklove/Makefile.base +329 -0
  93. package/deps/librdkafka/mklove/modules/configure.atomics +144 -0
  94. package/deps/librdkafka/mklove/modules/configure.base +2484 -0
  95. package/deps/librdkafka/mklove/modules/configure.builtin +70 -0
  96. package/deps/librdkafka/mklove/modules/configure.cc +186 -0
  97. package/deps/librdkafka/mklove/modules/configure.cxx +8 -0
  98. package/deps/librdkafka/mklove/modules/configure.fileversion +65 -0
  99. package/deps/librdkafka/mklove/modules/configure.gitversion +29 -0
  100. package/deps/librdkafka/mklove/modules/configure.good_cflags +18 -0
  101. package/deps/librdkafka/mklove/modules/configure.host +132 -0
  102. package/deps/librdkafka/mklove/modules/configure.lib +49 -0
  103. package/deps/librdkafka/mklove/modules/configure.libcurl +99 -0
  104. package/deps/librdkafka/mklove/modules/configure.libsasl2 +36 -0
  105. package/deps/librdkafka/mklove/modules/configure.libssl +147 -0
  106. package/deps/librdkafka/mklove/modules/configure.libzstd +58 -0
  107. package/deps/librdkafka/mklove/modules/configure.parseversion +95 -0
  108. package/deps/librdkafka/mklove/modules/configure.pic +16 -0
  109. package/deps/librdkafka/mklove/modules/configure.socket +20 -0
  110. package/deps/librdkafka/mklove/modules/configure.zlib +61 -0
  111. package/deps/librdkafka/mklove/modules/patches/README.md +8 -0
  112. package/deps/librdkafka/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch +11 -0
  113. package/deps/librdkafka/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch +56 -0
  114. package/deps/librdkafka/packaging/RELEASE.md +319 -0
  115. package/deps/librdkafka/packaging/alpine/build-alpine.sh +38 -0
  116. package/deps/librdkafka/packaging/archlinux/PKGBUILD +30 -0
  117. package/deps/librdkafka/packaging/cmake/Config.cmake.in +37 -0
  118. package/deps/librdkafka/packaging/cmake/Modules/FindLZ4.cmake +38 -0
  119. package/deps/librdkafka/packaging/cmake/Modules/FindZSTD.cmake +27 -0
  120. package/deps/librdkafka/packaging/cmake/Modules/LICENSE.FindZstd +178 -0
  121. package/deps/librdkafka/packaging/cmake/README.md +38 -0
  122. package/deps/librdkafka/packaging/cmake/config.h.in +52 -0
  123. package/deps/librdkafka/packaging/cmake/parseversion.cmake +60 -0
  124. package/deps/librdkafka/packaging/cmake/rdkafka.pc.in +12 -0
  125. package/deps/librdkafka/packaging/cmake/try_compile/atomic_32_test.c +8 -0
  126. package/deps/librdkafka/packaging/cmake/try_compile/atomic_64_test.c +8 -0
  127. package/deps/librdkafka/packaging/cmake/try_compile/c11threads_test.c +14 -0
  128. package/deps/librdkafka/packaging/cmake/try_compile/crc32c_hw_test.c +27 -0
  129. package/deps/librdkafka/packaging/cmake/try_compile/dlopen_test.c +11 -0
  130. package/deps/librdkafka/packaging/cmake/try_compile/libsasl2_test.c +7 -0
  131. package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_darwin_test.c +6 -0
  132. package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_freebsd_test.c +7 -0
  133. package/deps/librdkafka/packaging/cmake/try_compile/pthread_setname_gnu_test.c +5 -0
  134. package/deps/librdkafka/packaging/cmake/try_compile/rand_r_test.c +7 -0
  135. package/deps/librdkafka/packaging/cmake/try_compile/rdkafka_setup.cmake +122 -0
  136. package/deps/librdkafka/packaging/cmake/try_compile/regex_test.c +10 -0
  137. package/deps/librdkafka/packaging/cmake/try_compile/strndup_test.c +5 -0
  138. package/deps/librdkafka/packaging/cmake/try_compile/sync_32_test.c +8 -0
  139. package/deps/librdkafka/packaging/cmake/try_compile/sync_64_test.c +8 -0
  140. package/deps/librdkafka/packaging/cp/README.md +16 -0
  141. package/deps/librdkafka/packaging/cp/check_features.c +72 -0
  142. package/deps/librdkafka/packaging/cp/verify-deb.sh +33 -0
  143. package/deps/librdkafka/packaging/cp/verify-packages.sh +69 -0
  144. package/deps/librdkafka/packaging/cp/verify-rpm.sh +32 -0
  145. package/deps/librdkafka/packaging/debian/changelog +66 -0
  146. package/deps/librdkafka/packaging/debian/compat +1 -0
  147. package/deps/librdkafka/packaging/debian/control +49 -0
  148. package/deps/librdkafka/packaging/debian/copyright +84 -0
  149. package/deps/librdkafka/packaging/debian/docs +5 -0
  150. package/deps/librdkafka/packaging/debian/gbp.conf +9 -0
  151. package/deps/librdkafka/packaging/debian/librdkafka-dev.dirs +2 -0
  152. package/deps/librdkafka/packaging/debian/librdkafka-dev.examples +2 -0
  153. package/deps/librdkafka/packaging/debian/librdkafka-dev.install +6 -0
  154. package/deps/librdkafka/packaging/debian/librdkafka-dev.substvars +1 -0
  155. package/deps/librdkafka/packaging/debian/librdkafka.dsc +16 -0
  156. package/deps/librdkafka/packaging/debian/librdkafka1-dbg.substvars +1 -0
  157. package/deps/librdkafka/packaging/debian/librdkafka1.dirs +1 -0
  158. package/deps/librdkafka/packaging/debian/librdkafka1.install +2 -0
  159. package/deps/librdkafka/packaging/debian/librdkafka1.postinst.debhelper +5 -0
  160. package/deps/librdkafka/packaging/debian/librdkafka1.postrm.debhelper +5 -0
  161. package/deps/librdkafka/packaging/debian/librdkafka1.symbols +64 -0
  162. package/deps/librdkafka/packaging/debian/rules +19 -0
  163. package/deps/librdkafka/packaging/debian/source/format +1 -0
  164. package/deps/librdkafka/packaging/debian/watch +2 -0
  165. package/deps/librdkafka/packaging/get_version.py +21 -0
  166. package/deps/librdkafka/packaging/homebrew/README.md +15 -0
  167. package/deps/librdkafka/packaging/homebrew/brew-update-pr.sh +31 -0
  168. package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw-static.sh +52 -0
  169. package/deps/librdkafka/packaging/mingw-w64/configure-build-msys2-mingw.sh +21 -0
  170. package/deps/librdkafka/packaging/mingw-w64/export-variables.sh +13 -0
  171. package/deps/librdkafka/packaging/mingw-w64/run-tests.sh +6 -0
  172. package/deps/librdkafka/packaging/mingw-w64/semaphoreci-build.sh +38 -0
  173. package/deps/librdkafka/packaging/nuget/README.md +84 -0
  174. package/deps/librdkafka/packaging/nuget/artifact.py +177 -0
  175. package/deps/librdkafka/packaging/nuget/cleanup-s3.py +143 -0
  176. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip +0 -0
  177. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip +0 -0
  178. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip +0 -0
  179. package/deps/librdkafka/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip +0 -0
  180. package/deps/librdkafka/packaging/nuget/nuget.sh +21 -0
  181. package/deps/librdkafka/packaging/nuget/nugetpackage.py +278 -0
  182. package/deps/librdkafka/packaging/nuget/packaging.py +448 -0
  183. package/deps/librdkafka/packaging/nuget/push-to-nuget.sh +21 -0
  184. package/deps/librdkafka/packaging/nuget/release.py +167 -0
  185. package/deps/librdkafka/packaging/nuget/requirements.txt +3 -0
  186. package/deps/librdkafka/packaging/nuget/staticpackage.py +178 -0
  187. package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.nuspec +21 -0
  188. package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.props +18 -0
  189. package/deps/librdkafka/packaging/nuget/templates/librdkafka.redist.targets +19 -0
  190. package/deps/librdkafka/packaging/nuget/zfile/__init__.py +0 -0
  191. package/deps/librdkafka/packaging/nuget/zfile/zfile.py +98 -0
  192. package/deps/librdkafka/packaging/rpm/Makefile +92 -0
  193. package/deps/librdkafka/packaging/rpm/README.md +23 -0
  194. package/deps/librdkafka/packaging/rpm/el7-x86_64.cfg +40 -0
  195. package/deps/librdkafka/packaging/rpm/librdkafka.spec +118 -0
  196. package/deps/librdkafka/packaging/rpm/mock-on-docker.sh +96 -0
  197. package/deps/librdkafka/packaging/rpm/tests/Makefile +25 -0
  198. package/deps/librdkafka/packaging/rpm/tests/README.md +8 -0
  199. package/deps/librdkafka/packaging/rpm/tests/run-test.sh +42 -0
  200. package/deps/librdkafka/packaging/rpm/tests/test-on-docker.sh +56 -0
  201. package/deps/librdkafka/packaging/rpm/tests/test.c +77 -0
  202. package/deps/librdkafka/packaging/rpm/tests/test.cpp +34 -0
  203. package/deps/librdkafka/packaging/tools/Dockerfile +31 -0
  204. package/deps/librdkafka/packaging/tools/build-configurations-checks.sh +12 -0
  205. package/deps/librdkafka/packaging/tools/build-deb-package.sh +64 -0
  206. package/deps/librdkafka/packaging/tools/build-debian.sh +65 -0
  207. package/deps/librdkafka/packaging/tools/build-manylinux.sh +68 -0
  208. package/deps/librdkafka/packaging/tools/build-release-artifacts.sh +139 -0
  209. package/deps/librdkafka/packaging/tools/distro-build.sh +38 -0
  210. package/deps/librdkafka/packaging/tools/gh-release-checksums.py +39 -0
  211. package/deps/librdkafka/packaging/tools/rdutcoverage.sh +25 -0
  212. package/deps/librdkafka/packaging/tools/requirements.txt +2 -0
  213. package/deps/librdkafka/packaging/tools/run-in-docker.sh +28 -0
  214. package/deps/librdkafka/packaging/tools/run-integration-tests.sh +31 -0
  215. package/deps/librdkafka/packaging/tools/run-style-check.sh +4 -0
  216. package/deps/librdkafka/packaging/tools/style-format.sh +149 -0
  217. package/deps/librdkafka/packaging/tools/update_rpcs_max_versions.py +100 -0
  218. package/deps/librdkafka/service.yml +172 -0
  219. package/deps/librdkafka/src/CMakeLists.txt +374 -0
  220. package/deps/librdkafka/src/Makefile +103 -0
  221. package/deps/librdkafka/src/README.lz4.md +30 -0
  222. package/deps/librdkafka/src/cJSON.c +2834 -0
  223. package/deps/librdkafka/src/cJSON.h +398 -0
  224. package/deps/librdkafka/src/crc32c.c +430 -0
  225. package/deps/librdkafka/src/crc32c.h +38 -0
  226. package/deps/librdkafka/src/generate_proto.sh +66 -0
  227. package/deps/librdkafka/src/librdkafka_cgrp_synch.png +0 -0
  228. package/deps/librdkafka/src/lz4.c +2727 -0
  229. package/deps/librdkafka/src/lz4.h +842 -0
  230. package/deps/librdkafka/src/lz4frame.c +2078 -0
  231. package/deps/librdkafka/src/lz4frame.h +692 -0
  232. package/deps/librdkafka/src/lz4frame_static.h +47 -0
  233. package/deps/librdkafka/src/lz4hc.c +1631 -0
  234. package/deps/librdkafka/src/lz4hc.h +413 -0
  235. package/deps/librdkafka/src/nanopb/pb.h +917 -0
  236. package/deps/librdkafka/src/nanopb/pb_common.c +388 -0
  237. package/deps/librdkafka/src/nanopb/pb_common.h +49 -0
  238. package/deps/librdkafka/src/nanopb/pb_decode.c +1727 -0
  239. package/deps/librdkafka/src/nanopb/pb_decode.h +193 -0
  240. package/deps/librdkafka/src/nanopb/pb_encode.c +1000 -0
  241. package/deps/librdkafka/src/nanopb/pb_encode.h +185 -0
  242. package/deps/librdkafka/src/opentelemetry/common.pb.c +32 -0
  243. package/deps/librdkafka/src/opentelemetry/common.pb.h +170 -0
  244. package/deps/librdkafka/src/opentelemetry/metrics.options +2 -0
  245. package/deps/librdkafka/src/opentelemetry/metrics.pb.c +67 -0
  246. package/deps/librdkafka/src/opentelemetry/metrics.pb.h +966 -0
  247. package/deps/librdkafka/src/opentelemetry/resource.pb.c +12 -0
  248. package/deps/librdkafka/src/opentelemetry/resource.pb.h +58 -0
  249. package/deps/librdkafka/src/queue.h +850 -0
  250. package/deps/librdkafka/src/rd.h +584 -0
  251. package/deps/librdkafka/src/rdaddr.c +255 -0
  252. package/deps/librdkafka/src/rdaddr.h +202 -0
  253. package/deps/librdkafka/src/rdatomic.h +230 -0
  254. package/deps/librdkafka/src/rdavg.h +260 -0
  255. package/deps/librdkafka/src/rdavl.c +210 -0
  256. package/deps/librdkafka/src/rdavl.h +250 -0
  257. package/deps/librdkafka/src/rdbase64.c +200 -0
  258. package/deps/librdkafka/src/rdbase64.h +43 -0
  259. package/deps/librdkafka/src/rdbuf.c +1884 -0
  260. package/deps/librdkafka/src/rdbuf.h +375 -0
  261. package/deps/librdkafka/src/rdcrc32.c +114 -0
  262. package/deps/librdkafka/src/rdcrc32.h +170 -0
  263. package/deps/librdkafka/src/rddl.c +179 -0
  264. package/deps/librdkafka/src/rddl.h +43 -0
  265. package/deps/librdkafka/src/rdendian.h +175 -0
  266. package/deps/librdkafka/src/rdfloat.h +67 -0
  267. package/deps/librdkafka/src/rdfnv1a.c +113 -0
  268. package/deps/librdkafka/src/rdfnv1a.h +35 -0
  269. package/deps/librdkafka/src/rdgz.c +120 -0
  270. package/deps/librdkafka/src/rdgz.h +46 -0
  271. package/deps/librdkafka/src/rdhdrhistogram.c +721 -0
  272. package/deps/librdkafka/src/rdhdrhistogram.h +87 -0
  273. package/deps/librdkafka/src/rdhttp.c +830 -0
  274. package/deps/librdkafka/src/rdhttp.h +101 -0
  275. package/deps/librdkafka/src/rdinterval.h +177 -0
  276. package/deps/librdkafka/src/rdkafka.c +5505 -0
  277. package/deps/librdkafka/src/rdkafka.h +10686 -0
  278. package/deps/librdkafka/src/rdkafka_admin.c +9794 -0
  279. package/deps/librdkafka/src/rdkafka_admin.h +661 -0
  280. package/deps/librdkafka/src/rdkafka_assignment.c +1010 -0
  281. package/deps/librdkafka/src/rdkafka_assignment.h +73 -0
  282. package/deps/librdkafka/src/rdkafka_assignor.c +1786 -0
  283. package/deps/librdkafka/src/rdkafka_assignor.h +402 -0
  284. package/deps/librdkafka/src/rdkafka_aux.c +409 -0
  285. package/deps/librdkafka/src/rdkafka_aux.h +174 -0
  286. package/deps/librdkafka/src/rdkafka_background.c +221 -0
  287. package/deps/librdkafka/src/rdkafka_broker.c +6337 -0
  288. package/deps/librdkafka/src/rdkafka_broker.h +744 -0
  289. package/deps/librdkafka/src/rdkafka_buf.c +543 -0
  290. package/deps/librdkafka/src/rdkafka_buf.h +1525 -0
  291. package/deps/librdkafka/src/rdkafka_cert.c +576 -0
  292. package/deps/librdkafka/src/rdkafka_cert.h +62 -0
  293. package/deps/librdkafka/src/rdkafka_cgrp.c +7587 -0
  294. package/deps/librdkafka/src/rdkafka_cgrp.h +477 -0
  295. package/deps/librdkafka/src/rdkafka_conf.c +4880 -0
  296. package/deps/librdkafka/src/rdkafka_conf.h +732 -0
  297. package/deps/librdkafka/src/rdkafka_confval.h +97 -0
  298. package/deps/librdkafka/src/rdkafka_coord.c +623 -0
  299. package/deps/librdkafka/src/rdkafka_coord.h +132 -0
  300. package/deps/librdkafka/src/rdkafka_error.c +228 -0
  301. package/deps/librdkafka/src/rdkafka_error.h +80 -0
  302. package/deps/librdkafka/src/rdkafka_event.c +502 -0
  303. package/deps/librdkafka/src/rdkafka_event.h +126 -0
  304. package/deps/librdkafka/src/rdkafka_feature.c +898 -0
  305. package/deps/librdkafka/src/rdkafka_feature.h +104 -0
  306. package/deps/librdkafka/src/rdkafka_fetcher.c +1422 -0
  307. package/deps/librdkafka/src/rdkafka_fetcher.h +44 -0
  308. package/deps/librdkafka/src/rdkafka_header.c +220 -0
  309. package/deps/librdkafka/src/rdkafka_header.h +76 -0
  310. package/deps/librdkafka/src/rdkafka_idempotence.c +807 -0
  311. package/deps/librdkafka/src/rdkafka_idempotence.h +144 -0
  312. package/deps/librdkafka/src/rdkafka_int.h +1260 -0
  313. package/deps/librdkafka/src/rdkafka_interceptor.c +819 -0
  314. package/deps/librdkafka/src/rdkafka_interceptor.h +104 -0
  315. package/deps/librdkafka/src/rdkafka_lz4.c +450 -0
  316. package/deps/librdkafka/src/rdkafka_lz4.h +49 -0
  317. package/deps/librdkafka/src/rdkafka_metadata.c +2209 -0
  318. package/deps/librdkafka/src/rdkafka_metadata.h +345 -0
  319. package/deps/librdkafka/src/rdkafka_metadata_cache.c +1183 -0
  320. package/deps/librdkafka/src/rdkafka_mock.c +3661 -0
  321. package/deps/librdkafka/src/rdkafka_mock.h +610 -0
  322. package/deps/librdkafka/src/rdkafka_mock_cgrp.c +1876 -0
  323. package/deps/librdkafka/src/rdkafka_mock_handlers.c +3113 -0
  324. package/deps/librdkafka/src/rdkafka_mock_int.h +710 -0
  325. package/deps/librdkafka/src/rdkafka_msg.c +2589 -0
  326. package/deps/librdkafka/src/rdkafka_msg.h +614 -0
  327. package/deps/librdkafka/src/rdkafka_msgbatch.h +62 -0
  328. package/deps/librdkafka/src/rdkafka_msgset.h +98 -0
  329. package/deps/librdkafka/src/rdkafka_msgset_reader.c +1806 -0
  330. package/deps/librdkafka/src/rdkafka_msgset_writer.c +1474 -0
  331. package/deps/librdkafka/src/rdkafka_offset.c +1565 -0
  332. package/deps/librdkafka/src/rdkafka_offset.h +150 -0
  333. package/deps/librdkafka/src/rdkafka_op.c +997 -0
  334. package/deps/librdkafka/src/rdkafka_op.h +858 -0
  335. package/deps/librdkafka/src/rdkafka_partition.c +4896 -0
  336. package/deps/librdkafka/src/rdkafka_partition.h +1182 -0
  337. package/deps/librdkafka/src/rdkafka_pattern.c +228 -0
  338. package/deps/librdkafka/src/rdkafka_pattern.h +70 -0
  339. package/deps/librdkafka/src/rdkafka_plugin.c +213 -0
  340. package/deps/librdkafka/src/rdkafka_plugin.h +41 -0
  341. package/deps/librdkafka/src/rdkafka_proto.h +736 -0
  342. package/deps/librdkafka/src/rdkafka_protocol.h +128 -0
  343. package/deps/librdkafka/src/rdkafka_queue.c +1230 -0
  344. package/deps/librdkafka/src/rdkafka_queue.h +1220 -0
  345. package/deps/librdkafka/src/rdkafka_range_assignor.c +1748 -0
  346. package/deps/librdkafka/src/rdkafka_request.c +7089 -0
  347. package/deps/librdkafka/src/rdkafka_request.h +732 -0
  348. package/deps/librdkafka/src/rdkafka_roundrobin_assignor.c +123 -0
  349. package/deps/librdkafka/src/rdkafka_sasl.c +530 -0
  350. package/deps/librdkafka/src/rdkafka_sasl.h +63 -0
  351. package/deps/librdkafka/src/rdkafka_sasl_cyrus.c +722 -0
  352. package/deps/librdkafka/src/rdkafka_sasl_int.h +89 -0
  353. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.c +1833 -0
  354. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer.h +52 -0
  355. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.c +1666 -0
  356. package/deps/librdkafka/src/rdkafka_sasl_oauthbearer_oidc.h +47 -0
  357. package/deps/librdkafka/src/rdkafka_sasl_plain.c +142 -0
  358. package/deps/librdkafka/src/rdkafka_sasl_scram.c +858 -0
  359. package/deps/librdkafka/src/rdkafka_sasl_win32.c +550 -0
  360. package/deps/librdkafka/src/rdkafka_ssl.c +2129 -0
  361. package/deps/librdkafka/src/rdkafka_ssl.h +86 -0
  362. package/deps/librdkafka/src/rdkafka_sticky_assignor.c +4785 -0
  363. package/deps/librdkafka/src/rdkafka_subscription.c +278 -0
  364. package/deps/librdkafka/src/rdkafka_telemetry.c +760 -0
  365. package/deps/librdkafka/src/rdkafka_telemetry.h +52 -0
  366. package/deps/librdkafka/src/rdkafka_telemetry_decode.c +1053 -0
  367. package/deps/librdkafka/src/rdkafka_telemetry_decode.h +59 -0
  368. package/deps/librdkafka/src/rdkafka_telemetry_encode.c +997 -0
  369. package/deps/librdkafka/src/rdkafka_telemetry_encode.h +301 -0
  370. package/deps/librdkafka/src/rdkafka_timer.c +402 -0
  371. package/deps/librdkafka/src/rdkafka_timer.h +117 -0
  372. package/deps/librdkafka/src/rdkafka_topic.c +2161 -0
  373. package/deps/librdkafka/src/rdkafka_topic.h +334 -0
  374. package/deps/librdkafka/src/rdkafka_transport.c +1309 -0
  375. package/deps/librdkafka/src/rdkafka_transport.h +99 -0
  376. package/deps/librdkafka/src/rdkafka_transport_int.h +100 -0
  377. package/deps/librdkafka/src/rdkafka_txnmgr.c +3256 -0
  378. package/deps/librdkafka/src/rdkafka_txnmgr.h +171 -0
  379. package/deps/librdkafka/src/rdkafka_zstd.c +226 -0
  380. package/deps/librdkafka/src/rdkafka_zstd.h +57 -0
  381. package/deps/librdkafka/src/rdlist.c +576 -0
  382. package/deps/librdkafka/src/rdlist.h +434 -0
  383. package/deps/librdkafka/src/rdlog.c +89 -0
  384. package/deps/librdkafka/src/rdlog.h +41 -0
  385. package/deps/librdkafka/src/rdmap.c +508 -0
  386. package/deps/librdkafka/src/rdmap.h +492 -0
  387. package/deps/librdkafka/src/rdmurmur2.c +167 -0
  388. package/deps/librdkafka/src/rdmurmur2.h +35 -0
  389. package/deps/librdkafka/src/rdports.c +61 -0
  390. package/deps/librdkafka/src/rdports.h +38 -0
  391. package/deps/librdkafka/src/rdposix.h +250 -0
  392. package/deps/librdkafka/src/rdrand.c +80 -0
  393. package/deps/librdkafka/src/rdrand.h +43 -0
  394. package/deps/librdkafka/src/rdregex.c +156 -0
  395. package/deps/librdkafka/src/rdregex.h +43 -0
  396. package/deps/librdkafka/src/rdsignal.h +57 -0
  397. package/deps/librdkafka/src/rdstring.c +645 -0
  398. package/deps/librdkafka/src/rdstring.h +98 -0
  399. package/deps/librdkafka/src/rdsysqueue.h +404 -0
  400. package/deps/librdkafka/src/rdtime.h +356 -0
  401. package/deps/librdkafka/src/rdtypes.h +86 -0
  402. package/deps/librdkafka/src/rdunittest.c +549 -0
  403. package/deps/librdkafka/src/rdunittest.h +232 -0
  404. package/deps/librdkafka/src/rdvarint.c +134 -0
  405. package/deps/librdkafka/src/rdvarint.h +165 -0
  406. package/deps/librdkafka/src/rdwin32.h +382 -0
  407. package/deps/librdkafka/src/rdxxhash.c +1030 -0
  408. package/deps/librdkafka/src/rdxxhash.h +328 -0
  409. package/deps/librdkafka/src/regexp.c +1352 -0
  410. package/deps/librdkafka/src/regexp.h +41 -0
  411. package/deps/librdkafka/src/snappy.c +1866 -0
  412. package/deps/librdkafka/src/snappy.h +62 -0
  413. package/deps/librdkafka/src/snappy_compat.h +138 -0
  414. package/deps/librdkafka/src/statistics_schema.json +444 -0
  415. package/deps/librdkafka/src/tinycthread.c +932 -0
  416. package/deps/librdkafka/src/tinycthread.h +503 -0
  417. package/deps/librdkafka/src/tinycthread_extra.c +199 -0
  418. package/deps/librdkafka/src/tinycthread_extra.h +212 -0
  419. package/deps/librdkafka/src/win32_config.h +58 -0
  420. package/deps/librdkafka/src-cpp/CMakeLists.txt +90 -0
  421. package/deps/librdkafka/src-cpp/ConfImpl.cpp +84 -0
  422. package/deps/librdkafka/src-cpp/ConsumerImpl.cpp +244 -0
  423. package/deps/librdkafka/src-cpp/HandleImpl.cpp +436 -0
  424. package/deps/librdkafka/src-cpp/HeadersImpl.cpp +48 -0
  425. package/deps/librdkafka/src-cpp/KafkaConsumerImpl.cpp +296 -0
  426. package/deps/librdkafka/src-cpp/Makefile +55 -0
  427. package/deps/librdkafka/src-cpp/MessageImpl.cpp +38 -0
  428. package/deps/librdkafka/src-cpp/MetadataImpl.cpp +170 -0
  429. package/deps/librdkafka/src-cpp/ProducerImpl.cpp +197 -0
  430. package/deps/librdkafka/src-cpp/QueueImpl.cpp +70 -0
  431. package/deps/librdkafka/src-cpp/README.md +16 -0
  432. package/deps/librdkafka/src-cpp/RdKafka.cpp +59 -0
  433. package/deps/librdkafka/src-cpp/TopicImpl.cpp +124 -0
  434. package/deps/librdkafka/src-cpp/TopicPartitionImpl.cpp +57 -0
  435. package/deps/librdkafka/src-cpp/rdkafkacpp.h +3797 -0
  436. package/deps/librdkafka/src-cpp/rdkafkacpp_int.h +1641 -0
  437. package/deps/librdkafka/tests/0000-unittests.c +72 -0
  438. package/deps/librdkafka/tests/0001-multiobj.c +102 -0
  439. package/deps/librdkafka/tests/0002-unkpart.c +244 -0
  440. package/deps/librdkafka/tests/0003-msgmaxsize.c +173 -0
  441. package/deps/librdkafka/tests/0004-conf.c +934 -0
  442. package/deps/librdkafka/tests/0005-order.c +133 -0
  443. package/deps/librdkafka/tests/0006-symbols.c +163 -0
  444. package/deps/librdkafka/tests/0007-autotopic.c +136 -0
  445. package/deps/librdkafka/tests/0008-reqacks.c +179 -0
  446. package/deps/librdkafka/tests/0009-mock_cluster.c +97 -0
  447. package/deps/librdkafka/tests/0011-produce_batch.c +753 -0
  448. package/deps/librdkafka/tests/0012-produce_consume.c +537 -0
  449. package/deps/librdkafka/tests/0013-null-msgs.c +473 -0
  450. package/deps/librdkafka/tests/0014-reconsume-191.c +512 -0
  451. package/deps/librdkafka/tests/0015-offset_seeks.c +172 -0
  452. package/deps/librdkafka/tests/0016-client_swname.c +181 -0
  453. package/deps/librdkafka/tests/0017-compression.c +140 -0
  454. package/deps/librdkafka/tests/0018-cgrp_term.c +338 -0
  455. package/deps/librdkafka/tests/0019-list_groups.c +289 -0
  456. package/deps/librdkafka/tests/0020-destroy_hang.c +162 -0
  457. package/deps/librdkafka/tests/0021-rkt_destroy.c +72 -0
  458. package/deps/librdkafka/tests/0022-consume_batch.c +279 -0
  459. package/deps/librdkafka/tests/0025-timers.c +147 -0
  460. package/deps/librdkafka/tests/0026-consume_pause.c +547 -0
  461. package/deps/librdkafka/tests/0028-long_topicnames.c +79 -0
  462. package/deps/librdkafka/tests/0029-assign_offset.c +202 -0
  463. package/deps/librdkafka/tests/0030-offset_commit.c +589 -0
  464. package/deps/librdkafka/tests/0031-get_offsets.c +235 -0
  465. package/deps/librdkafka/tests/0033-regex_subscribe.c +536 -0
  466. package/deps/librdkafka/tests/0034-offset_reset.c +398 -0
  467. package/deps/librdkafka/tests/0035-api_version.c +73 -0
  468. package/deps/librdkafka/tests/0036-partial_fetch.c +87 -0
  469. package/deps/librdkafka/tests/0037-destroy_hang_local.c +85 -0
  470. package/deps/librdkafka/tests/0038-performance.c +121 -0
  471. package/deps/librdkafka/tests/0039-event.c +284 -0
  472. package/deps/librdkafka/tests/0040-io_event.c +257 -0
  473. package/deps/librdkafka/tests/0041-fetch_max_bytes.c +97 -0
  474. package/deps/librdkafka/tests/0042-many_topics.c +252 -0
  475. package/deps/librdkafka/tests/0043-no_connection.c +77 -0
  476. package/deps/librdkafka/tests/0044-partition_cnt.c +94 -0
  477. package/deps/librdkafka/tests/0045-subscribe_update.c +1010 -0
  478. package/deps/librdkafka/tests/0046-rkt_cache.c +65 -0
  479. package/deps/librdkafka/tests/0047-partial_buf_tmout.c +98 -0
  480. package/deps/librdkafka/tests/0048-partitioner.c +283 -0
  481. package/deps/librdkafka/tests/0049-consume_conn_close.c +162 -0
  482. package/deps/librdkafka/tests/0050-subscribe_adds.c +145 -0
  483. package/deps/librdkafka/tests/0051-assign_adds.c +126 -0
  484. package/deps/librdkafka/tests/0052-msg_timestamps.c +238 -0
  485. package/deps/librdkafka/tests/0053-stats_cb.cpp +527 -0
  486. package/deps/librdkafka/tests/0054-offset_time.cpp +236 -0
  487. package/deps/librdkafka/tests/0055-producer_latency.c +539 -0
  488. package/deps/librdkafka/tests/0056-balanced_group_mt.c +315 -0
  489. package/deps/librdkafka/tests/0057-invalid_topic.cpp +112 -0
  490. package/deps/librdkafka/tests/0058-log.cpp +123 -0
  491. package/deps/librdkafka/tests/0059-bsearch.cpp +241 -0
  492. package/deps/librdkafka/tests/0060-op_prio.cpp +163 -0
  493. package/deps/librdkafka/tests/0061-consumer_lag.cpp +295 -0
  494. package/deps/librdkafka/tests/0062-stats_event.c +126 -0
  495. package/deps/librdkafka/tests/0063-clusterid.cpp +180 -0
  496. package/deps/librdkafka/tests/0064-interceptors.c +481 -0
  497. package/deps/librdkafka/tests/0065-yield.cpp +140 -0
  498. package/deps/librdkafka/tests/0066-plugins.cpp +129 -0
  499. package/deps/librdkafka/tests/0067-empty_topic.cpp +151 -0
  500. package/deps/librdkafka/tests/0068-produce_timeout.c +136 -0
  501. package/deps/librdkafka/tests/0069-consumer_add_parts.c +119 -0
  502. package/deps/librdkafka/tests/0070-null_empty.cpp +197 -0
  503. package/deps/librdkafka/tests/0072-headers_ut.c +448 -0
  504. package/deps/librdkafka/tests/0073-headers.c +381 -0
  505. package/deps/librdkafka/tests/0074-producev.c +87 -0
  506. package/deps/librdkafka/tests/0075-retry.c +290 -0
  507. package/deps/librdkafka/tests/0076-produce_retry.c +452 -0
  508. package/deps/librdkafka/tests/0077-compaction.c +363 -0
  509. package/deps/librdkafka/tests/0078-c_from_cpp.cpp +96 -0
  510. package/deps/librdkafka/tests/0079-fork.c +93 -0
  511. package/deps/librdkafka/tests/0080-admin_ut.c +3095 -0
  512. package/deps/librdkafka/tests/0081-admin.c +5633 -0
  513. package/deps/librdkafka/tests/0082-fetch_max_bytes.cpp +137 -0
  514. package/deps/librdkafka/tests/0083-cb_event.c +233 -0
  515. package/deps/librdkafka/tests/0084-destroy_flags.c +208 -0
  516. package/deps/librdkafka/tests/0085-headers.cpp +392 -0
  517. package/deps/librdkafka/tests/0086-purge.c +368 -0
  518. package/deps/librdkafka/tests/0088-produce_metadata_timeout.c +162 -0
  519. package/deps/librdkafka/tests/0089-max_poll_interval.c +511 -0
  520. package/deps/librdkafka/tests/0090-idempotence.c +171 -0
  521. package/deps/librdkafka/tests/0091-max_poll_interval_timeout.c +295 -0
  522. package/deps/librdkafka/tests/0092-mixed_msgver.c +103 -0
  523. package/deps/librdkafka/tests/0093-holb.c +200 -0
  524. package/deps/librdkafka/tests/0094-idempotence_msg_timeout.c +231 -0
  525. package/deps/librdkafka/tests/0095-all_brokers_down.cpp +122 -0
  526. package/deps/librdkafka/tests/0097-ssl_verify.cpp +658 -0
  527. package/deps/librdkafka/tests/0098-consumer-txn.cpp +1218 -0
  528. package/deps/librdkafka/tests/0099-commit_metadata.c +194 -0
  529. package/deps/librdkafka/tests/0100-thread_interceptors.cpp +195 -0
  530. package/deps/librdkafka/tests/0101-fetch-from-follower.cpp +446 -0
  531. package/deps/librdkafka/tests/0102-static_group_rebalance.c +836 -0
  532. package/deps/librdkafka/tests/0103-transactions.c +1383 -0
  533. package/deps/librdkafka/tests/0104-fetch_from_follower_mock.c +625 -0
  534. package/deps/librdkafka/tests/0105-transactions_mock.c +3930 -0
  535. package/deps/librdkafka/tests/0106-cgrp_sess_timeout.c +318 -0
  536. package/deps/librdkafka/tests/0107-topic_recreate.c +259 -0
  537. package/deps/librdkafka/tests/0109-auto_create_topics.cpp +278 -0
  538. package/deps/librdkafka/tests/0110-batch_size.cpp +182 -0
  539. package/deps/librdkafka/tests/0111-delay_create_topics.cpp +127 -0
  540. package/deps/librdkafka/tests/0112-assign_unknown_part.c +87 -0
  541. package/deps/librdkafka/tests/0113-cooperative_rebalance.cpp +3473 -0
  542. package/deps/librdkafka/tests/0114-sticky_partitioning.cpp +176 -0
  543. package/deps/librdkafka/tests/0115-producer_auth.cpp +182 -0
  544. package/deps/librdkafka/tests/0116-kafkaconsumer_close.cpp +216 -0
  545. package/deps/librdkafka/tests/0117-mock_errors.c +331 -0
  546. package/deps/librdkafka/tests/0118-commit_rebalance.c +154 -0
  547. package/deps/librdkafka/tests/0119-consumer_auth.cpp +167 -0
  548. package/deps/librdkafka/tests/0120-asymmetric_subscription.c +185 -0
  549. package/deps/librdkafka/tests/0121-clusterid.c +115 -0
  550. package/deps/librdkafka/tests/0122-buffer_cleaning_after_rebalance.c +227 -0
  551. package/deps/librdkafka/tests/0123-connections_max_idle.c +98 -0
  552. package/deps/librdkafka/tests/0124-openssl_invalid_engine.c +69 -0
  553. package/deps/librdkafka/tests/0125-immediate_flush.c +144 -0
  554. package/deps/librdkafka/tests/0126-oauthbearer_oidc.c +528 -0
  555. package/deps/librdkafka/tests/0127-fetch_queue_backoff.cpp +165 -0
  556. package/deps/librdkafka/tests/0128-sasl_callback_queue.cpp +125 -0
  557. package/deps/librdkafka/tests/0129-fetch_aborted_msgs.c +79 -0
  558. package/deps/librdkafka/tests/0130-store_offsets.c +178 -0
  559. package/deps/librdkafka/tests/0131-connect_timeout.c +81 -0
  560. package/deps/librdkafka/tests/0132-strategy_ordering.c +179 -0
  561. package/deps/librdkafka/tests/0133-ssl_keys.c +150 -0
  562. package/deps/librdkafka/tests/0134-ssl_provider.c +92 -0
  563. package/deps/librdkafka/tests/0135-sasl_credentials.cpp +143 -0
  564. package/deps/librdkafka/tests/0136-resolve_cb.c +181 -0
  565. package/deps/librdkafka/tests/0137-barrier_batch_consume.c +619 -0
  566. package/deps/librdkafka/tests/0138-admin_mock.c +281 -0
  567. package/deps/librdkafka/tests/0139-offset_validation_mock.c +950 -0
  568. package/deps/librdkafka/tests/0140-commit_metadata.cpp +108 -0
  569. package/deps/librdkafka/tests/0142-reauthentication.c +515 -0
  570. package/deps/librdkafka/tests/0143-exponential_backoff_mock.c +552 -0
  571. package/deps/librdkafka/tests/0144-idempotence_mock.c +373 -0
  572. package/deps/librdkafka/tests/0145-pause_resume_mock.c +119 -0
  573. package/deps/librdkafka/tests/0146-metadata_mock.c +505 -0
  574. package/deps/librdkafka/tests/0147-consumer_group_consumer_mock.c +952 -0
  575. package/deps/librdkafka/tests/0148-offset_fetch_commit_error_mock.c +563 -0
  576. package/deps/librdkafka/tests/0149-broker-same-host-port.c +140 -0
  577. package/deps/librdkafka/tests/0150-telemetry_mock.c +651 -0
  578. package/deps/librdkafka/tests/0151-purge-brokers.c +566 -0
  579. package/deps/librdkafka/tests/0152-rebootstrap.c +59 -0
  580. package/deps/librdkafka/tests/0153-memberid.c +128 -0
  581. package/deps/librdkafka/tests/1000-unktopic.c +164 -0
  582. package/deps/librdkafka/tests/8000-idle.cpp +60 -0
  583. package/deps/librdkafka/tests/8001-fetch_from_follower_mock_manual.c +113 -0
  584. package/deps/librdkafka/tests/CMakeLists.txt +170 -0
  585. package/deps/librdkafka/tests/LibrdkafkaTestApp.py +291 -0
  586. package/deps/librdkafka/tests/Makefile +182 -0
  587. package/deps/librdkafka/tests/README.md +509 -0
  588. package/deps/librdkafka/tests/autotest.sh +33 -0
  589. package/deps/librdkafka/tests/backtrace.gdb +30 -0
  590. package/deps/librdkafka/tests/broker_version_tests.py +315 -0
  591. package/deps/librdkafka/tests/buildbox.sh +17 -0
  592. package/deps/librdkafka/tests/cleanup-checker-tests.sh +20 -0
  593. package/deps/librdkafka/tests/cluster_testing.py +191 -0
  594. package/deps/librdkafka/tests/delete-test-topics.sh +56 -0
  595. package/deps/librdkafka/tests/fixtures/oauthbearer/jwt_assertion_template.json +10 -0
  596. package/deps/librdkafka/tests/fixtures/ssl/Makefile +8 -0
  597. package/deps/librdkafka/tests/fixtures/ssl/README.md +13 -0
  598. package/deps/librdkafka/tests/fixtures/ssl/client.keystore.intermediate.p12 +0 -0
  599. package/deps/librdkafka/tests/fixtures/ssl/client.keystore.p12 +0 -0
  600. package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.intermediate.pem +72 -0
  601. package/deps/librdkafka/tests/fixtures/ssl/client2.certificate.pem +50 -0
  602. package/deps/librdkafka/tests/fixtures/ssl/client2.intermediate.key +46 -0
  603. package/deps/librdkafka/tests/fixtures/ssl/client2.key +46 -0
  604. package/deps/librdkafka/tests/fixtures/ssl/create_keys.sh +168 -0
  605. package/deps/librdkafka/tests/fuzzers/Makefile +12 -0
  606. package/deps/librdkafka/tests/fuzzers/README.md +31 -0
  607. package/deps/librdkafka/tests/fuzzers/fuzz_regex.c +74 -0
  608. package/deps/librdkafka/tests/fuzzers/helpers.h +90 -0
  609. package/deps/librdkafka/tests/gen-ssl-certs.sh +165 -0
  610. package/deps/librdkafka/tests/interactive_broker_version.py +170 -0
  611. package/deps/librdkafka/tests/interceptor_test/CMakeLists.txt +16 -0
  612. package/deps/librdkafka/tests/interceptor_test/Makefile +22 -0
  613. package/deps/librdkafka/tests/interceptor_test/interceptor_test.c +314 -0
  614. package/deps/librdkafka/tests/interceptor_test/interceptor_test.h +54 -0
  615. package/deps/librdkafka/tests/java/IncrementalRebalanceCli.java +97 -0
  616. package/deps/librdkafka/tests/java/Makefile +13 -0
  617. package/deps/librdkafka/tests/java/Murmur2Cli.java +46 -0
  618. package/deps/librdkafka/tests/java/README.md +14 -0
  619. package/deps/librdkafka/tests/java/TransactionProducerCli.java +162 -0
  620. package/deps/librdkafka/tests/java/run-class.sh +11 -0
  621. package/deps/librdkafka/tests/librdkafka.suppressions +483 -0
  622. package/deps/librdkafka/tests/lz4_manual_test.sh +59 -0
  623. package/deps/librdkafka/tests/multi-broker-version-test.sh +50 -0
  624. package/deps/librdkafka/tests/parse-refcnt.sh +43 -0
  625. package/deps/librdkafka/tests/performance_plot.py +115 -0
  626. package/deps/librdkafka/tests/plugin_test/Makefile +19 -0
  627. package/deps/librdkafka/tests/plugin_test/plugin_test.c +58 -0
  628. package/deps/librdkafka/tests/requirements.txt +2 -0
  629. package/deps/librdkafka/tests/run-all-tests.sh +79 -0
  630. package/deps/librdkafka/tests/run-consumer-tests.sh +16 -0
  631. package/deps/librdkafka/tests/run-producer-tests.sh +16 -0
  632. package/deps/librdkafka/tests/run-test-batches.py +157 -0
  633. package/deps/librdkafka/tests/run-test.sh +140 -0
  634. package/deps/librdkafka/tests/rusage.c +249 -0
  635. package/deps/librdkafka/tests/sasl_test.py +289 -0
  636. package/deps/librdkafka/tests/scenarios/README.md +6 -0
  637. package/deps/librdkafka/tests/scenarios/ak23.json +6 -0
  638. package/deps/librdkafka/tests/scenarios/default.json +5 -0
  639. package/deps/librdkafka/tests/scenarios/noautocreate.json +5 -0
  640. package/deps/librdkafka/tests/sockem.c +801 -0
  641. package/deps/librdkafka/tests/sockem.h +85 -0
  642. package/deps/librdkafka/tests/sockem_ctrl.c +145 -0
  643. package/deps/librdkafka/tests/sockem_ctrl.h +61 -0
  644. package/deps/librdkafka/tests/test.c +7778 -0
  645. package/deps/librdkafka/tests/test.conf.example +27 -0
  646. package/deps/librdkafka/tests/test.h +1028 -0
  647. package/deps/librdkafka/tests/testcpp.cpp +131 -0
  648. package/deps/librdkafka/tests/testcpp.h +388 -0
  649. package/deps/librdkafka/tests/testshared.h +416 -0
  650. package/deps/librdkafka/tests/tools/README.md +4 -0
  651. package/deps/librdkafka/tests/tools/stats/README.md +21 -0
  652. package/deps/librdkafka/tests/tools/stats/filter.jq +42 -0
  653. package/deps/librdkafka/tests/tools/stats/graph.py +150 -0
  654. package/deps/librdkafka/tests/tools/stats/requirements.txt +3 -0
  655. package/deps/librdkafka/tests/tools/stats/to_csv.py +124 -0
  656. package/deps/librdkafka/tests/trivup/trivup-0.14.0.tar.gz +0 -0
  657. package/deps/librdkafka/tests/until-fail.sh +87 -0
  658. package/deps/librdkafka/tests/xxxx-assign_partition.c +122 -0
  659. package/deps/librdkafka/tests/xxxx-metadata.cpp +159 -0
  660. package/deps/librdkafka/vcpkg.json +23 -0
  661. package/deps/librdkafka/win32/README.md +5 -0
  662. package/deps/librdkafka/win32/build-package.bat +3 -0
  663. package/deps/librdkafka/win32/build.bat +19 -0
  664. package/deps/librdkafka/win32/common.vcxproj +84 -0
  665. package/deps/librdkafka/win32/interceptor_test/interceptor_test.vcxproj +87 -0
  666. package/deps/librdkafka/win32/librdkafka.autopkg.template +54 -0
  667. package/deps/librdkafka/win32/librdkafka.master.testing.targets +13 -0
  668. package/deps/librdkafka/win32/librdkafka.sln +226 -0
  669. package/deps/librdkafka/win32/librdkafka.vcxproj +276 -0
  670. package/deps/librdkafka/win32/librdkafkacpp/librdkafkacpp.vcxproj +104 -0
  671. package/deps/librdkafka/win32/msbuild.ps1 +15 -0
  672. package/deps/librdkafka/win32/openssl_engine_example/openssl_engine_example.vcxproj +132 -0
  673. package/deps/librdkafka/win32/package-zip.ps1 +46 -0
  674. package/deps/librdkafka/win32/packages/repositories.config +4 -0
  675. package/deps/librdkafka/win32/push-package.bat +4 -0
  676. package/deps/librdkafka/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj +67 -0
  677. package/deps/librdkafka/win32/rdkafka_example/rdkafka_example.vcxproj +97 -0
  678. package/deps/librdkafka/win32/rdkafka_performance/rdkafka_performance.vcxproj +97 -0
  679. package/deps/librdkafka/win32/setup-msys2.ps1 +47 -0
  680. package/deps/librdkafka/win32/setup-vcpkg.ps1 +34 -0
  681. package/deps/librdkafka/win32/tests/test.conf.example +25 -0
  682. package/deps/librdkafka/win32/tests/tests.vcxproj +253 -0
  683. package/deps/librdkafka/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj +132 -0
  684. package/deps/librdkafka/win32/wingetopt.c +564 -0
  685. package/deps/librdkafka/win32/wingetopt.h +101 -0
  686. package/deps/librdkafka/win32/wintime.h +33 -0
  687. package/deps/librdkafka.gyp +62 -0
  688. package/lib/admin.js +233 -0
  689. package/lib/client.js +573 -0
  690. package/lib/error.js +500 -0
  691. package/lib/index.js +34 -0
  692. package/lib/kafka-consumer-stream.js +397 -0
  693. package/lib/kafka-consumer.js +698 -0
  694. package/lib/producer/high-level-producer.js +323 -0
  695. package/lib/producer-stream.js +307 -0
  696. package/lib/producer.js +375 -0
  697. package/lib/tools/ref-counter.js +52 -0
  698. package/lib/topic-partition.js +88 -0
  699. package/lib/topic.js +42 -0
  700. package/lib/util.js +29 -0
  701. package/package.json +61 -0
  702. package/prebuilds/darwin-arm64/@point3+node-rdkafka.node +0 -0
  703. package/prebuilds/linux-x64/@point3+node-rdkafka.node +0 -0
  704. package/util/configure.js +30 -0
  705. package/util/get-env.js +6 -0
  706. package/util/test-compile.js +11 -0
  707. package/util/test-producer-delivery.js +100 -0
@@ -0,0 +1,3930 @@
1
+ /*
2
+ * librdkafka - Apache Kafka C library
3
+ *
4
+ * Copyright (c) 2019-2022, Magnus Edenhill
5
+ * 2025, Confluent Inc.
6
+ * All rights reserved.
7
+ *
8
+ * Redistribution and use in source and binary forms, with or without
9
+ * modification, are permitted provided that the following conditions are met:
10
+ *
11
+ * 1. Redistributions of source code must retain the above copyright notice,
12
+ * this list of conditions and the following disclaimer.
13
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ * this list of conditions and the following disclaimer in the documentation
15
+ * and/or other materials provided with the distribution.
16
+ *
17
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27
+ * POSSIBILITY OF SUCH DAMAGE.
28
+ */
29
+
30
+ #include "test.h"
31
+
32
+ #include "rdkafka.h"
33
+
34
+ #include "../src/rdkafka_proto.h"
35
+ #include "../src/rdstring.h"
36
+ #include "../src/rdunittest.h"
37
+
38
+ #include <stdarg.h>
39
+
40
+
41
+ /**
42
+ * @name Producer transaction tests using the mock cluster
43
+ *
44
+ */
45
+
46
+
47
+ static int allowed_error;
48
+ static int allowed_error_2;
49
+
50
+ /**
51
+ * @brief Decide what error_cb's will cause the test to fail.
52
+ */
53
+ static int
54
+ error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
55
+ if (err == allowed_error || err == allowed_error_2 ||
56
+ /* If transport errors are allowed then it is likely
57
+ * that we'll also see ALL_BROKERS_DOWN. */
58
+ (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT &&
59
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
60
+ TEST_SAY("Ignoring allowed error: %s: %s\n",
61
+ rd_kafka_err2name(err), reason);
62
+ return 0;
63
+ }
64
+ return 1;
65
+ }
66
+
67
+
68
+ static rd_kafka_resp_err_t (*on_response_received_cb)(rd_kafka_t *rk,
69
+ int sockfd,
70
+ const char *brokername,
71
+ int32_t brokerid,
72
+ int16_t ApiKey,
73
+ int16_t ApiVersion,
74
+ int32_t CorrId,
75
+ size_t size,
76
+ int64_t rtt,
77
+ rd_kafka_resp_err_t err,
78
+ void *ic_opaque);
79
+
80
+ /**
81
+ * @brief Simple on_response_received interceptor that simply calls the
82
+ * sub-test's on_response_received_cb function, if set.
83
+ */
84
+ static rd_kafka_resp_err_t
85
+ on_response_received_trampoline(rd_kafka_t *rk,
86
+ int sockfd,
87
+ const char *brokername,
88
+ int32_t brokerid,
89
+ int16_t ApiKey,
90
+ int16_t ApiVersion,
91
+ int32_t CorrId,
92
+ size_t size,
93
+ int64_t rtt,
94
+ rd_kafka_resp_err_t err,
95
+ void *ic_opaque) {
96
+ TEST_ASSERT(on_response_received_cb != NULL, "");
97
+ return on_response_received_cb(rk, sockfd, brokername, brokerid, ApiKey,
98
+ ApiVersion, CorrId, size, rtt, err,
99
+ ic_opaque);
100
+ }
101
+
102
+
103
+ /**
104
+ * @brief on_new interceptor to add an on_response_received interceptor.
105
+ */
106
+ static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
107
+ const rd_kafka_conf_t *conf,
108
+ void *ic_opaque,
109
+ char *errstr,
110
+ size_t errstr_size) {
111
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
112
+
113
+ if (on_response_received_cb)
114
+ err = rd_kafka_interceptor_add_on_response_received(
115
+ rk, "on_response_received", on_response_received_trampoline,
116
+ ic_opaque);
117
+
118
+ return err;
119
+ }
120
+
121
+
122
+ /**
123
+ * @brief Create a transactional producer and a mock cluster.
124
+ *
125
+ * The var-arg list is a NULL-terminated list of
126
+ * (const char *key, const char *value) config properties.
127
+ *
128
+ * Special keys:
129
+ * "on_response_received", "" - enable the on_response_received_cb
130
+ * interceptor,
131
+ * which must be assigned prior to
132
+ * calling create_tnx_producer().
133
+ */
134
+ static RD_SENTINEL rd_kafka_t *
135
+ create_txn_producer(rd_kafka_mock_cluster_t **mclusterp,
136
+ const char *transactional_id,
137
+ int broker_cnt,
138
+ ...) {
139
+ rd_kafka_conf_t *conf;
140
+ rd_kafka_t *rk;
141
+ char numstr[8];
142
+ va_list ap;
143
+ const char *key;
144
+ rd_bool_t add_interceptors = rd_false;
145
+
146
+ rd_snprintf(numstr, sizeof(numstr), "%d", broker_cnt);
147
+
148
+ test_conf_init(&conf, NULL, 60);
149
+
150
+ test_conf_set(conf, "transactional.id", transactional_id);
151
+ /* When mock brokers are set to down state they're still binding
152
+ * the port, just not listening to it, which makes connection attempts
153
+ * stall until socket.connection.setup.timeout.ms expires.
154
+ * To speed up detection of brokers being down we reduce this timeout
155
+ * to just a couple of seconds. */
156
+ test_conf_set(conf, "socket.connection.setup.timeout.ms", "5000");
157
+ /* Speed up reconnects */
158
+ test_conf_set(conf, "reconnect.backoff.max.ms", "2000");
159
+ test_conf_set(conf, "test.mock.num.brokers", numstr);
160
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
161
+
162
+ test_curr->ignore_dr_err = rd_false;
163
+
164
+ va_start(ap, broker_cnt);
165
+ while ((key = va_arg(ap, const char *))) {
166
+ if (!strcmp(key, "on_response_received")) {
167
+ add_interceptors = rd_true;
168
+ (void)va_arg(ap, const char *);
169
+ } else {
170
+ test_conf_set(conf, key, va_arg(ap, const char *));
171
+ }
172
+ }
173
+ va_end(ap);
174
+
175
+ /* Add an on_.. interceptors */
176
+ if (add_interceptors)
177
+ rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
178
+ on_new_producer, NULL);
179
+
180
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
181
+
182
+ if (mclusterp) {
183
+ *mclusterp = rd_kafka_handle_mock_cluster(rk);
184
+ TEST_ASSERT(*mclusterp, "failed to create mock cluster");
185
+
186
+ /* Create some of the common consumer "input" topics
187
+ * that we must be able to commit to with
188
+ * send_offsets_to_transaction().
189
+ * The number depicts the number of partitions in the topic. */
190
+ TEST_CALL_ERR__(
191
+ rd_kafka_mock_topic_create(*mclusterp, "srctopic4", 4, 1));
192
+ TEST_CALL_ERR__(rd_kafka_mock_topic_create(
193
+ *mclusterp, "srctopic64", 64, 1));
194
+ }
195
+
196
+ return rk;
197
+ }
198
+
199
+
200
+ /**
201
+ * @brief Test recoverable errors using mock broker error injections
202
+ * and code coverage checks.
203
+ */
204
+ static void do_test_txn_recoverable_errors(void) {
205
+ rd_kafka_t *rk;
206
+ rd_kafka_mock_cluster_t *mcluster;
207
+ rd_kafka_topic_partition_list_t *offsets;
208
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
209
+ const char *groupid = "myGroupId";
210
+ const char *txnid = "myTxnId";
211
+
212
+ SUB_TEST_QUICK();
213
+
214
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
215
+ NULL);
216
+
217
+ /* Make sure transaction and group coordinators are different.
218
+ * This verifies that AddOffsetsToTxnRequest isn't sent to the
219
+ * transaction coordinator but the group coordinator. */
220
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
221
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 2);
222
+
223
+ /*
224
+ * Inject som InitProducerId errors that causes retries
225
+ */
226
+ rd_kafka_mock_push_request_errors(
227
+ mcluster, RD_KAFKAP_InitProducerId, 3,
228
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
229
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
230
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
231
+
232
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
233
+
234
+ (void)RD_UT_COVERAGE_CHECK(0); /* idemp_request_pid_failed(retry) */
235
+ (void)RD_UT_COVERAGE_CHECK(1); /* txn_idemp_state_change(READY) */
236
+
237
+ /*
238
+ * Start a transaction
239
+ */
240
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
241
+
242
+
243
+ /* Produce a message without error first */
244
+ TEST_CALL_ERR__(rd_kafka_producev(
245
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
246
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
247
+
248
+ rd_kafka_flush(rk, -1);
249
+
250
+ /*
251
+ * Produce a message, let it fail with a non-idempo/non-txn
252
+ * retryable error
253
+ */
254
+ rd_kafka_mock_push_request_errors(
255
+ mcluster, RD_KAFKAP_Produce, 1,
256
+ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS);
257
+
258
+ TEST_CALL_ERR__(rd_kafka_producev(
259
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
260
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
261
+
262
+ /* Make sure messages are produced */
263
+ rd_kafka_flush(rk, -1);
264
+
265
+ /*
266
+ * Send some arbitrary offsets, first with some failures, then
267
+ * succeed.
268
+ */
269
+ offsets = rd_kafka_topic_partition_list_new(4);
270
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
271
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 39)->offset =
272
+ 999999111;
273
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset =
274
+ 999;
275
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 19)->offset =
276
+ 123456789;
277
+
278
+ rd_kafka_mock_push_request_errors(
279
+ mcluster, RD_KAFKAP_AddPartitionsToTxn, 1,
280
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
281
+
282
+ rd_kafka_mock_push_request_errors(
283
+ mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
284
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
285
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
286
+
287
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
288
+
289
+ TEST_CALL_ERROR__(
290
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
291
+
292
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
293
+ rd_kafka_topic_partition_list_destroy(offsets);
294
+
295
+ /*
296
+ * Commit transaction, first with som failures, then succeed.
297
+ */
298
+ rd_kafka_mock_push_request_errors(
299
+ mcluster, RD_KAFKAP_EndTxn, 3,
300
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
301
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
302
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
303
+
304
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
305
+
306
+ /* All done */
307
+
308
+ rd_kafka_destroy(rk);
309
+
310
+ SUB_TEST_PASS();
311
+ }
312
+
313
+
314
+ /**
315
+ * @brief KIP-360: Test that fatal idempotence errors triggers abortable
316
+ * transaction errors and that the producer can recover.
317
+ */
318
+ static void do_test_txn_fatal_idempo_errors(void) {
319
+ rd_kafka_t *rk;
320
+ rd_kafka_mock_cluster_t *mcluster;
321
+ rd_kafka_error_t *error;
322
+ const char *txnid = "myTxnId";
323
+
324
+ SUB_TEST_QUICK();
325
+
326
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
327
+ NULL);
328
+
329
+ test_curr->ignore_dr_err = rd_true;
330
+ test_curr->is_fatal_cb = error_is_fatal_cb;
331
+ allowed_error = RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
332
+
333
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
334
+
335
+ /*
336
+ * Start a transaction
337
+ */
338
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
339
+
340
+
341
+ /* Produce a message without error first */
342
+ TEST_CALL_ERR__(rd_kafka_producev(
343
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
344
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
345
+
346
+ /* Produce a message, let it fail with a fatal idempo error. */
347
+ rd_kafka_mock_push_request_errors(
348
+ mcluster, RD_KAFKAP_Produce, 1,
349
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
350
+
351
+ TEST_CALL_ERR__(rd_kafka_producev(
352
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
353
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
354
+
355
+ /* Commit the transaction, should fail */
356
+ error = rd_kafka_commit_transaction(rk, -1);
357
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
358
+
359
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
360
+ rd_kafka_error_string(error));
361
+
362
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
363
+ "Did not expect fatal error");
364
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
365
+ "Expected abortable error");
366
+ rd_kafka_error_destroy(error);
367
+
368
+ /* Abort the transaction */
369
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
370
+
371
+ /* Run a new transaction without errors to verify that the
372
+ * producer can recover. */
373
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
374
+
375
+ TEST_CALL_ERR__(rd_kafka_producev(
376
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
377
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
378
+
379
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
380
+
381
+ /* All done */
382
+
383
+ rd_kafka_destroy(rk);
384
+
385
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
386
+
387
+ SUB_TEST_PASS();
388
+ }
389
+
390
+
391
+ /**
392
+ * @brief KIP-360: Test that fatal idempotence errors triggers abortable
393
+ * transaction errors, but let the broker-side bumping of the
394
+ * producer PID take longer than the remaining transaction timeout
395
+ * which should raise a retriable error from abort_transaction().
396
+ *
397
+ * @param with_sleep After the first abort sleep longer than it takes to
398
+ * re-init the pid so that the internal state automatically
399
+ * transitions.
400
+ */
401
+ static void do_test_txn_slow_reinit(rd_bool_t with_sleep) {
402
+ rd_kafka_t *rk;
403
+ rd_kafka_mock_cluster_t *mcluster;
404
+ rd_kafka_error_t *error;
405
+ int32_t txn_coord = 2;
406
+ const char *txnid = "myTxnId";
407
+ test_timing_t timing;
408
+
409
+ SUB_TEST("%s sleep", with_sleep ? "with" : "without");
410
+
411
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
412
+ NULL);
413
+
414
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
415
+ txn_coord);
416
+
417
+ test_curr->ignore_dr_err = rd_true;
418
+ test_curr->is_fatal_cb = NULL;
419
+
420
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
421
+
422
+ /*
423
+ * Start a transaction
424
+ */
425
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
426
+
427
+
428
+ /* Produce a message without error first */
429
+ TEST_CALL_ERR__(rd_kafka_producev(
430
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
431
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
432
+
433
+ test_flush(rk, -1);
434
+
435
+ /* Set transaction coordinator latency higher than
436
+ * the abort_transaction() call timeout so that the automatic
437
+ * re-initpid takes longer than abort_transaction(). */
438
+ rd_kafka_mock_broker_push_request_error_rtts(
439
+ mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1,
440
+ RD_KAFKA_RESP_ERR_NO_ERROR, 10000 /*10s*/);
441
+
442
+ /* Produce a message, let it fail with a fatal idempo error. */
443
+ rd_kafka_mock_push_request_errors(
444
+ mcluster, RD_KAFKAP_Produce, 1,
445
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
446
+
447
+ TEST_CALL_ERR__(rd_kafka_producev(
448
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
449
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
450
+
451
+
452
+ /* Commit the transaction, should fail */
453
+ TIMING_START(&timing, "commit_transaction(-1)");
454
+ error = rd_kafka_commit_transaction(rk, -1);
455
+ TIMING_STOP(&timing);
456
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
457
+
458
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
459
+ rd_kafka_error_string(error));
460
+
461
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
462
+ "Did not expect fatal error");
463
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
464
+ "Expected abortable error");
465
+ rd_kafka_error_destroy(error);
466
+
467
+ /* Abort the transaction, should fail with retriable (timeout) error */
468
+ TIMING_START(&timing, "abort_transaction(100)");
469
+ error = rd_kafka_abort_transaction(rk, 100);
470
+ TIMING_STOP(&timing);
471
+ TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
472
+
473
+ TEST_SAY("First abort_transaction() failed: %s\n",
474
+ rd_kafka_error_string(error));
475
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
476
+ "Did not expect fatal error");
477
+ TEST_ASSERT(rd_kafka_error_is_retriable(error),
478
+ "Expected retriable error");
479
+ rd_kafka_error_destroy(error);
480
+
481
+ if (with_sleep)
482
+ rd_sleep(12);
483
+
484
+ /* Retry abort, should now finish. */
485
+ TEST_SAY("Retrying abort\n");
486
+ TIMING_START(&timing, "abort_transaction(-1)");
487
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
488
+ TIMING_STOP(&timing);
489
+
490
+ /* Run a new transaction without errors to verify that the
491
+ * producer can recover. */
492
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
493
+
494
+ TEST_CALL_ERR__(rd_kafka_producev(
495
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
496
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
497
+
498
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
499
+
500
+ /* All done */
501
+
502
+ rd_kafka_destroy(rk);
503
+
504
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
505
+
506
+ SUB_TEST_PASS();
507
+ }
508
+
509
+
510
+
511
+ /**
512
+ * @brief KIP-360: Test that fatal idempotence errors triggers abortable
513
+ * transaction errors, but let the broker-side bumping of the
514
+ * producer PID fail with a fencing error.
515
+ * Should raise a fatal error.
516
+ *
517
+ * @param error_code Which error code InitProducerIdRequest should fail with.
518
+ * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
519
+ * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
520
+ */
521
static void do_test_txn_fenced_reinit(rd_kafka_resp_err_t error_code) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_error_t *error;
        int32_t txn_coord = 2; /* dedicated txn coordinator broker id */
        const char *txnid = "myTxnId";
        char errstr[512];
        rd_kafka_resp_err_t fatal_err;

        SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code));

        /* batch.num.messages=1 so each produce() becomes its own
         * ProduceRequest, making the injected per-request errors hit
         * exactly the intended message. */
        rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
                                 NULL);

        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
                                      txn_coord);

        /* The test intentionally provokes delivery errors and a fatal
         * _FENCED error; tell the harness not to fail on them. */
        test_curr->ignore_dr_err = rd_true;
        test_curr->is_fatal_cb = error_is_fatal_cb;
        allowed_error = RD_KAFKA_RESP_ERR__FENCED;

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));

        /*
         * Start a transaction
         */
        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));


        /* Produce a message without error first */
        TEST_CALL_ERR__(rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));

        test_flush(rk, -1);

        /* Fail the PID reinit: the epoch-bump InitProducerIdRequest that
         * follows the idempotence error below will get this error. */
        rd_kafka_mock_broker_push_request_error_rtts(
            mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0);

        /* Produce a message, let it fail with a fatal idempo error. */
        rd_kafka_mock_push_request_errors(
            mcluster, RD_KAFKAP_Produce, 1,
            RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);

        TEST_CALL_ERR__(rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));

        test_flush(rk, -1);

        /* Abort the transaction, should fail with a fatal error since
         * the broker-side PID bump was rejected with a fencing error. */
        error = rd_kafka_abort_transaction(rk, -1);
        TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");

        TEST_SAY("abort_transaction() failed: %s\n",
                 rd_kafka_error_string(error));
        TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error");
        rd_kafka_error_destroy(error);

        /* Verify the fatal error was also raised instance-wide. */
        fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
        TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised");
        TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr);

        /* All done */

        rd_kafka_destroy(rk);

        allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;

        SUB_TEST_PASS();
}
593
+
594
+
595
+ /**
596
+ * @brief Test EndTxn errors.
597
+ */
598
+ static void do_test_txn_endtxn_errors(void) {
599
+ rd_kafka_t *rk = NULL;
600
+ rd_kafka_mock_cluster_t *mcluster = NULL;
601
+ rd_kafka_resp_err_t err;
602
+ struct {
603
+ size_t error_cnt;
604
+ rd_kafka_resp_err_t errors[4];
605
+ rd_kafka_resp_err_t exp_err;
606
+ rd_bool_t exp_retriable;
607
+ rd_bool_t exp_abortable;
608
+ rd_bool_t exp_fatal;
609
+ rd_bool_t exp_successful_abort;
610
+ } scenario[] = {
611
+ /* This list of errors is from the EndTxnResponse handler in
612
+ * AK clients/.../TransactionManager.java */
613
+ {
614
+ /* #0 */
615
+ 2,
616
+ {RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
617
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE},
618
+ /* Should auto-recover */
619
+ RD_KAFKA_RESP_ERR_NO_ERROR,
620
+ },
621
+ {
622
+ /* #1 */
623
+ 2,
624
+ {RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
625
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR},
626
+ /* Should auto-recover */
627
+ RD_KAFKA_RESP_ERR_NO_ERROR,
628
+ },
629
+ {
630
+ /* #2 */
631
+ 1,
632
+ {RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS},
633
+ /* Should auto-recover */
634
+ RD_KAFKA_RESP_ERR_NO_ERROR,
635
+ },
636
+ {
637
+ /* #3 */
638
+ 3,
639
+ {RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
640
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
641
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS},
642
+ /* Should auto-recover */
643
+ RD_KAFKA_RESP_ERR_NO_ERROR,
644
+ },
645
+ {
646
+ /* #4: the abort is auto-recovering thru epoch bump */
647
+ 1,
648
+ {RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID},
649
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
650
+ rd_false /* !retriable */,
651
+ rd_true /* abortable */,
652
+ rd_false /* !fatal */,
653
+ rd_true /* successful abort */
654
+ },
655
+ {
656
+ /* #5: the abort is auto-recovering thru epoch bump */
657
+ 1,
658
+ {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING},
659
+ RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
660
+ rd_false /* !retriable */,
661
+ rd_true /* abortable */,
662
+ rd_false /* !fatal */,
663
+ rd_true /* successful abort */
664
+ },
665
+ {
666
+ /* #6 */
667
+ 1,
668
+ {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH},
669
+ /* This error is normalized */
670
+ RD_KAFKA_RESP_ERR__FENCED,
671
+ rd_false /* !retriable */,
672
+ rd_false /* !abortable */,
673
+ rd_true /* fatal */
674
+ },
675
+ {
676
+ /* #7 */
677
+ 1,
678
+ {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
679
+ /* This error is normalized */
680
+ RD_KAFKA_RESP_ERR__FENCED,
681
+ rd_false /* !retriable */,
682
+ rd_false /* !abortable */,
683
+ rd_true /* fatal */
684
+ },
685
+ {
686
+ /* #8 */
687
+ 1,
688
+ {RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED},
689
+ RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
690
+ rd_false /* !retriable */,
691
+ rd_false /* !abortable */,
692
+ rd_true /* fatal */
693
+ },
694
+ {
695
+ /* #9 */
696
+ 1,
697
+ {RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED},
698
+ RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
699
+ rd_false /* !retriable */,
700
+ rd_true /* abortable */,
701
+ rd_false /* !fatal */
702
+ },
703
+ {
704
+ /* #10 */
705
+ /* Any other error should raise a fatal error */
706
+ 1,
707
+ {RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE},
708
+ RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
709
+ rd_false /* !retriable */,
710
+ rd_true /* abortable */,
711
+ rd_false /* !fatal */,
712
+ },
713
+ {
714
+ /* #11 */
715
+ 1,
716
+ {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
717
+ /* This error is normalized */
718
+ RD_KAFKA_RESP_ERR__FENCED,
719
+ rd_false /* !retriable */,
720
+ rd_false /* !abortable */,
721
+ rd_true /* fatal */
722
+ },
723
+ {0},
724
+ };
725
+ int i;
726
+
727
+ SUB_TEST_QUICK();
728
+
729
+ for (i = 0; scenario[i].error_cnt > 0; i++) {
730
+ int j;
731
+ /* For each scenario, test:
732
+ * commit_transaction()
733
+ * flush() + commit_transaction()
734
+ * abort_transaction()
735
+ * flush() + abort_transaction()
736
+ */
737
+ for (j = 0; j < (2 + 2); j++) {
738
+ rd_bool_t commit = j < 2;
739
+ rd_bool_t with_flush = j & 1;
740
+ rd_bool_t exp_successful_abort =
741
+ !commit && scenario[i].exp_successful_abort;
742
+ const char *commit_str =
743
+ commit ? (with_flush ? "commit&flush" : "commit")
744
+ : (with_flush ? "abort&flush" : "abort");
745
+ rd_kafka_topic_partition_list_t *offsets;
746
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
747
+ rd_kafka_error_t *error;
748
+ test_timing_t t_call;
749
+
750
+ TEST_SAY("Testing scenario #%d %s with %" PRIusz
751
+ " injected erorrs, expecting %s\n",
752
+ i, commit_str, scenario[i].error_cnt,
753
+ exp_successful_abort
754
+ ? "successful abort"
755
+ : rd_kafka_err2name(scenario[i].exp_err));
756
+
757
+ if (!rk) {
758
+ const char *txnid = "myTxnId";
759
+ rk = create_txn_producer(&mcluster, txnid, 3,
760
+ NULL);
761
+ TEST_CALL_ERROR__(
762
+ rd_kafka_init_transactions(rk, 5000));
763
+ }
764
+
765
+ /*
766
+ * Start transaction
767
+ */
768
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
769
+
770
+ /* Transaction aborts will cause DR errors:
771
+ * ignore them. */
772
+ test_curr->ignore_dr_err = !commit;
773
+
774
+ /*
775
+ * Produce a message.
776
+ */
777
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
778
+ RD_KAFKA_V_VALUE("hi", 2),
779
+ RD_KAFKA_V_END);
780
+ TEST_ASSERT(!err, "produce failed: %s",
781
+ rd_kafka_err2str(err));
782
+
783
+ if (with_flush)
784
+ test_flush(rk, -1);
785
+
786
+ /*
787
+ * Send some arbitrary offsets.
788
+ */
789
+ offsets = rd_kafka_topic_partition_list_new(4);
790
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4",
791
+ 3)
792
+ ->offset = 12;
793
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64",
794
+ 60)
795
+ ->offset = 99999;
796
+
797
+ cgmetadata =
798
+ rd_kafka_consumer_group_metadata_new("mygroupid");
799
+
800
+ TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
801
+ rk, offsets, cgmetadata, -1));
802
+
803
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
804
+ rd_kafka_topic_partition_list_destroy(offsets);
805
+
806
+ /*
807
+ * Commit transaction, first with som failures,
808
+ * then succeed.
809
+ */
810
+ rd_kafka_mock_push_request_errors_array(
811
+ mcluster, RD_KAFKAP_EndTxn, scenario[i].error_cnt,
812
+ scenario[i].errors);
813
+
814
+ TIMING_START(&t_call, "%s", commit_str);
815
+ if (commit)
816
+ error = rd_kafka_commit_transaction(
817
+ rk, tmout_multip(5000));
818
+ else
819
+ error = rd_kafka_abort_transaction(
820
+ rk, tmout_multip(5000));
821
+ TIMING_STOP(&t_call);
822
+
823
+ if (error)
824
+ TEST_SAY(
825
+ "Scenario #%d %s failed: %s: %s "
826
+ "(retriable=%s, req_abort=%s, "
827
+ "fatal=%s)\n",
828
+ i, commit_str, rd_kafka_error_name(error),
829
+ rd_kafka_error_string(error),
830
+ RD_STR_ToF(
831
+ rd_kafka_error_is_retriable(error)),
832
+ RD_STR_ToF(
833
+ rd_kafka_error_txn_requires_abort(
834
+ error)),
835
+ RD_STR_ToF(rd_kafka_error_is_fatal(error)));
836
+ else
837
+ TEST_SAY("Scenario #%d %s succeeded\n", i,
838
+ commit_str);
839
+
840
+ if (!scenario[i].exp_err || exp_successful_abort) {
841
+ TEST_ASSERT(!error,
842
+ "Expected #%d %s to succeed, "
843
+ "got %s",
844
+ i, commit_str,
845
+ rd_kafka_error_string(error));
846
+ continue;
847
+ }
848
+
849
+
850
+ TEST_ASSERT(error != NULL, "Expected #%d %s to fail", i,
851
+ commit_str);
852
+ TEST_ASSERT(scenario[i].exp_err ==
853
+ rd_kafka_error_code(error),
854
+ "Scenario #%d: expected %s, not %s", i,
855
+ rd_kafka_err2name(scenario[i].exp_err),
856
+ rd_kafka_error_name(error));
857
+ TEST_ASSERT(
858
+ scenario[i].exp_retriable ==
859
+ (rd_bool_t)rd_kafka_error_is_retriable(error),
860
+ "Scenario #%d: retriable mismatch", i);
861
+ TEST_ASSERT(
862
+ scenario[i].exp_abortable ==
863
+ (rd_bool_t)rd_kafka_error_txn_requires_abort(
864
+ error),
865
+ "Scenario #%d: abortable mismatch", i);
866
+ TEST_ASSERT(
867
+ scenario[i].exp_fatal ==
868
+ (rd_bool_t)rd_kafka_error_is_fatal(error),
869
+ "Scenario #%d: fatal mismatch", i);
870
+
871
+ /* Handle errors according to the error flags */
872
+ if (rd_kafka_error_is_fatal(error)) {
873
+ TEST_SAY("Fatal error, destroying producer\n");
874
+ rd_kafka_error_destroy(error);
875
+ rd_kafka_destroy(rk);
876
+ rk = NULL; /* Will be re-created on the next
877
+ * loop iteration. */
878
+
879
+ } else if (rd_kafka_error_txn_requires_abort(error)) {
880
+ rd_kafka_error_destroy(error);
881
+ TEST_SAY(
882
+ "Abortable error, "
883
+ "aborting transaction\n");
884
+ TEST_CALL_ERROR__(
885
+ rd_kafka_abort_transaction(rk, -1));
886
+
887
+ } else if (rd_kafka_error_is_retriable(error)) {
888
+ rd_kafka_error_destroy(error);
889
+ TEST_SAY("Retriable error, retrying %s once\n",
890
+ commit_str);
891
+ if (commit)
892
+ TEST_CALL_ERROR__(
893
+ rd_kafka_commit_transaction(rk,
894
+ 5000));
895
+ else
896
+ TEST_CALL_ERROR__(
897
+ rd_kafka_abort_transaction(rk,
898
+ 5000));
899
+ } else {
900
+ TEST_FAIL(
901
+ "Scenario #%d %s: "
902
+ "Permanent error without enough "
903
+ "hints to proceed: %s\n",
904
+ i, commit_str,
905
+ rd_kafka_error_string(error));
906
+ }
907
+ }
908
+ }
909
+
910
+ /* All done */
911
+ if (rk)
912
+ rd_kafka_destroy(rk);
913
+
914
+ SUB_TEST_PASS();
915
+ }
916
+
917
+
918
+ /**
919
+ * @brief Test that the commit/abort works properly with infinite timeout.
920
+ */
921
static void do_test_txn_endtxn_infinite(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster = NULL;
        const char *txnid = "myTxnId";
        int i;

        SUB_TEST_QUICK();

        rk = create_txn_producer(&mcluster, txnid, 3, NULL);

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        /* Iteration 0 tests commit, iteration 1 tests abort. */
        for (i = 0; i < 2; i++) {
                rd_bool_t commit = i == 0;
                const char *commit_str = commit ? "commit" : "abort";
                rd_kafka_error_t *error;
                test_timing_t t_call;

                /* Messages will fail as the transaction fails,
                 * ignore the DR error */
                test_curr->ignore_dr_err = rd_true;

                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

                TEST_CALL_ERR__(rd_kafka_producev(
                    rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
                    RD_KAFKA_V_END));

                /*
                 * Commit/abort transaction, first with some retriable
                 * failures, then success: with an infinite (-1) timeout
                 * the call must keep retrying until it succeeds.
                 */
                rd_kafka_mock_push_request_errors(
                    mcluster, RD_KAFKAP_EndTxn, 10,
                    RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR);

                rd_sleep(1);

                TIMING_START(&t_call, "%s_transaction()", commit_str);
                if (commit)
                        error = rd_kafka_commit_transaction(rk, -1);
                else
                        error = rd_kafka_abort_transaction(rk, -1);
                TIMING_STOP(&t_call);

                TEST_SAY("%s returned %s\n", commit_str,
                         error ? rd_kafka_error_string(error) : "success");

                /* Infinite timeout: all injected errors are retriable,
                 * so the call must eventually succeed. */
                TEST_ASSERT(!error, "Expected %s to succeed, got %s",
                            commit_str, rd_kafka_error_string(error));
        }

        /* All done */

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
988
+
989
+
990
+
991
+ /**
992
+ * @brief Test that the commit/abort user timeout is honoured.
993
+ */
994
static void do_test_txn_endtxn_timeout(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster = NULL;
        const char *txnid = "myTxnId";
        int i;

        SUB_TEST_QUICK();

        rk = create_txn_producer(&mcluster, txnid, 3, NULL);

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        /* Iteration 0 tests commit, iteration 1 tests abort. */
        for (i = 0; i < 2; i++) {
                rd_bool_t commit = i == 0;
                const char *commit_str = commit ? "commit" : "abort";
                rd_kafka_error_t *error;
                test_timing_t t_call;

                /* Messages will fail as the transaction fails,
                 * ignore the DR error */
                test_curr->ignore_dr_err = rd_true;

                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

                TEST_CALL_ERR__(rd_kafka_producev(
                    rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
                    RD_KAFKA_V_END));

                /*
                 * Commit/abort transaction, first with some retriable
                 * failures whose retries exceed the user timeout.
                 */
                rd_kafka_mock_push_request_errors(
                    mcluster, RD_KAFKAP_EndTxn, 10,
                    RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
                    RD_KAFKA_RESP_ERR_NOT_COORDINATOR);

                rd_sleep(1);

                /* 100ms is too short to retry through all injected
                 * errors, so the call must time out. */
                TIMING_START(&t_call, "%s_transaction()", commit_str);
                if (commit)
                        error = rd_kafka_commit_transaction(rk, 100);
                else
                        error = rd_kafka_abort_transaction(rk, 100);
                TIMING_STOP(&t_call);

                TEST_SAY_ERROR(error, "%s returned: ", commit_str);
                TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
                TEST_ASSERT(
                    rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
                    "Expected %s to fail with timeout, not %s: %s", commit_str,
                    rd_kafka_error_name(error), rd_kafka_error_string(error));
                TEST_ASSERT(rd_kafka_error_is_retriable(error),
                            "%s failure should raise a retriable error",
                            commit_str);
                rd_kafka_error_destroy(error);

                /* Now call it again with an infinite timeout, should work. */
                TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
                if (commit)
                        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
                else
                        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
                TIMING_STOP(&t_call);
        }

        /* All done */

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1074
+
1075
+
1076
+
1077
+ /**
1078
+ * @brief Test commit/abort inflight timeout behaviour, which should result
1079
+ * in a retriable error.
1080
+ */
1081
static void do_test_txn_endtxn_timeout_inflight(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster = NULL;
        const char *txnid = "myTxnId";
        int32_t coord_id = 1; /* single-broker cluster: broker 1 is coord */
        int i;

        SUB_TEST();

        /* Local request timeouts are expected here; don't fail on them. */
        allowed_error = RD_KAFKA_RESP_ERR__TIMED_OUT;
        test_curr->is_fatal_cb = error_is_fatal_cb;

        rk = create_txn_producer(&mcluster, txnid, 1, "transaction.timeout.ms",
                                 "5000", NULL);

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));

        /* Iteration 0 tests commit, iteration 1 tests abort. */
        for (i = 0; i < 2; i++) {
                rd_bool_t commit = i == 0;
                const char *commit_str = commit ? "commit" : "abort";
                rd_kafka_error_t *error;
                test_timing_t t_call;

                /* Messages will fail as the transaction fails,
                 * ignore the DR error */
                test_curr->ignore_dr_err = rd_true;

                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

                TEST_CALL_ERR__(rd_kafka_producev(
                    rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
                    RD_KAFKA_V_END));

                /* Let EndTxn & EndTxn retry timeout: delay both responses
                 * by 10s so they are still in-flight when the 4s user
                 * timeout below expires. */
                rd_kafka_mock_broker_push_request_error_rtts(
                    mcluster, coord_id, RD_KAFKAP_EndTxn, 2,
                    RD_KAFKA_RESP_ERR_NO_ERROR, 10000,
                    RD_KAFKA_RESP_ERR_NO_ERROR, 10000);

                rd_sleep(1);

                TIMING_START(&t_call, "%s_transaction()", commit_str);
                if (commit)
                        error = rd_kafka_commit_transaction(rk, 4000);
                else
                        error = rd_kafka_abort_transaction(rk, 4000);
                TIMING_STOP(&t_call);

                /* An in-flight-timeout must surface as a retriable
                 * _TIMED_OUT error so the application can retry. */
                TEST_SAY_ERROR(error, "%s returned: ", commit_str);
                TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
                TEST_ASSERT(
                    rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
                    "Expected %s to fail with timeout, not %s: %s", commit_str,
                    rd_kafka_error_name(error), rd_kafka_error_string(error));
                TEST_ASSERT(rd_kafka_error_is_retriable(error),
                            "%s failure should raise a retriable error",
                            commit_str);
                rd_kafka_error_destroy(error);

                /* Now call it again with an infinite timeout, should work. */
                TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
                if (commit)
                        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
                else
                        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
                TIMING_STOP(&t_call);
        }

        /* All done */

        rd_kafka_destroy(rk);

        allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
        test_curr->is_fatal_cb = NULL;

        SUB_TEST_PASS();
}
1158
+
1159
+
1160
+
1161
+ /**
1162
+ * @brief Test that EndTxn is properly sent for aborted transactions
1163
+ * even if AddOffsetsToTxnRequest was retried.
1164
+ * This is a check for a txn_req_cnt bug.
1165
+ */
1166
+ static void do_test_txn_req_cnt(void) {
1167
+ rd_kafka_t *rk;
1168
+ rd_kafka_mock_cluster_t *mcluster;
1169
+ rd_kafka_topic_partition_list_t *offsets;
1170
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
1171
+ const char *txnid = "myTxnId";
1172
+
1173
+ SUB_TEST_QUICK();
1174
+
1175
+ rk = create_txn_producer(&mcluster, txnid, 3, NULL);
1176
+
1177
+ /* Messages will fail on abort(), ignore the DR error */
1178
+ test_curr->ignore_dr_err = rd_true;
1179
+
1180
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
1181
+
1182
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
1183
+
1184
+ /*
1185
+ * Send some arbitrary offsets, first with some failures, then
1186
+ * succeed.
1187
+ */
1188
+ offsets = rd_kafka_topic_partition_list_new(2);
1189
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
1190
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 40)->offset =
1191
+ 999999111;
1192
+
1193
+ rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_AddOffsetsToTxn,
1194
+ 2,
1195
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
1196
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
1197
+
1198
+ rd_kafka_mock_push_request_errors(
1199
+ mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
1200
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
1201
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
1202
+
1203
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
1204
+
1205
+ TEST_CALL_ERROR__(
1206
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
1207
+
1208
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
1209
+ rd_kafka_topic_partition_list_destroy(offsets);
1210
+
1211
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
1212
+
1213
+ /* All done */
1214
+
1215
+ rd_kafka_destroy(rk);
1216
+
1217
+ SUB_TEST_PASS();
1218
+ }
1219
+
1220
+
1221
+ /**
1222
+ * @brief Test abortable errors using mock broker error injections
1223
+ * and code coverage checks.
1224
+ */
1225
static void do_test_txn_requires_abort_errors(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_error_t *error;
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmetadata;
        int r;

        SUB_TEST_QUICK();

        rk = create_txn_producer(&mcluster, "txnid", 3, NULL);

        /* Failed messages are expected; don't fail the test on DR errors. */
        test_curr->ignore_dr_err = rd_true;

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        /*
         * 1. Fail on produce
         */
        TEST_SAY("1. Fail on produce\n");

        rd_kafka_mock_push_request_errors(
            mcluster, RD_KAFKAP_Produce, 1,
            RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);

        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));

        /* Wait for messages to fail */
        test_flush(rk, 5000);

        /* Any other transactional API should now raise an error */
        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;

        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        error =
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);

        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);
        /* The produce failure must mark the transaction abortable. */
        TEST_ASSERT(error, "expected error");
        TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
                    "expected abortable error, not %s",
                    rd_kafka_error_string(error));
        TEST_SAY("Error %s: %s\n", rd_kafka_error_name(error),
                 rd_kafka_error_string(error));
        rd_kafka_error_destroy(error);

        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));

        /*
         * 2. Restart transaction and fail on AddPartitionsToTxn
         */
        TEST_SAY("2. Fail on AddPartitionsToTxn\n");

        /* First refresh proper Metadata to clear the topic's auth error,
         * otherwise the produce() below will fail immediately. */
        r = test_get_partition_count(rk, "mytopic", 5000);
        TEST_ASSERT(r > 0, "Expected topic %s to exist", "mytopic");

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        rd_kafka_mock_push_request_errors(
            mcluster, RD_KAFKAP_AddPartitionsToTxn, 1,
            RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);

        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));

        error = rd_kafka_commit_transaction(rk, 5000);
        TEST_ASSERT(error, "commit_transaction should have failed");
        TEST_SAY("commit_transaction() error %s: %s\n",
                 rd_kafka_error_name(error), rd_kafka_error_string(error));
        rd_kafka_error_destroy(error);

        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));

        /*
         * 3. Restart transaction and fail on AddOffsetsToTxn
         */
        TEST_SAY("3. Fail on AddOffsetsToTxn\n");

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));

        rd_kafka_mock_push_request_errors(
            mcluster, RD_KAFKAP_AddOffsetsToTxn, 1,
            RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED);

        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        error =
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
        TEST_ASSERT(error, "Expected send_offsets..() to fail");
        TEST_ASSERT(rd_kafka_error_code(error) ==
                        RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
                    "expected send_offsets_to_transaction() to fail with "
                    "group auth error: not %s",
                    rd_kafka_error_name(error));
        rd_kafka_error_destroy(error);

        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);


        error = rd_kafka_commit_transaction(rk, 5000);
        TEST_ASSERT(error, "commit_transaction should have failed");
        rd_kafka_error_destroy(error);

        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));

        /* All done */

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1354
+
1355
+
1356
+ /**
1357
+ * @brief Test error handling and recover for when broker goes down during
1358
+ * an ongoing transaction.
1359
+ */
1360
static void do_test_txn_broker_down_in_txn(rd_bool_t down_coord) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        int32_t coord_id, leader_id, down_id;
        const char *down_what;
        rd_kafka_resp_err_t err;
        const char *topic = "test";
        const char *transactional_id = "txnid";
        int msgcnt = 1000;
        int remains = 0; /* outstanding (not yet delivered) messages */

        /* Assign coordinator and leader to two different brokers */
        coord_id = 1;
        leader_id = 2;
        if (down_coord) {
                down_id = coord_id;
                down_what = "coordinator";
        } else {
                down_id = leader_id;
                down_what = "leader";
        }

        SUB_TEST_QUICK("Test %s down", down_what);

        rk = create_txn_producer(&mcluster, transactional_id, 3, NULL);

        /* Broker down is not a test-failing error */
        allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
        test_curr->is_fatal_cb = error_is_fatal_cb;

        err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
        TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));

        rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
                                      coord_id);
        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);

        /* Start transactioning */
        TEST_SAY("Starting transaction\n");
        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        /* Produce the first half of the messages while all brokers are up. */
        test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
                                  msgcnt / 2, NULL, 0, &remains);

        TEST_SAY("Bringing down %s %" PRId32 "\n", down_what, down_id);
        rd_kafka_mock_broker_set_down(mcluster, down_id);

        rd_kafka_flush(rk, 3000);

        /* Produce remaining messages */
        test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA,
                                  msgcnt / 2, msgcnt / 2, NULL, 0, &remains);

        rd_sleep(2);

        TEST_SAY("Bringing up %s %" PRId32 "\n", down_what, down_id);
        rd_kafka_mock_broker_set_up(mcluster, down_id);

        /* Commit must recover and deliver everything once the broker
         * is back up. */
        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));

        TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);

        rd_kafka_destroy(rk);

        test_curr->is_fatal_cb = NULL;

        SUB_TEST_PASS();
}
1430
+
1431
+
1432
+
1433
+ /**
1434
+ * @brief Advance the coord_id to the next broker.
1435
+ */
1436
+ static void set_next_coord(rd_kafka_mock_cluster_t *mcluster,
1437
+ const char *transactional_id,
1438
+ int broker_cnt,
1439
+ int32_t *coord_idp) {
1440
+ int32_t new_coord_id;
1441
+
1442
+ new_coord_id = 1 + ((*coord_idp) % (broker_cnt));
1443
+ TEST_SAY("Changing transaction coordinator from %" PRId32 " to %" PRId32
1444
+ "\n",
1445
+ *coord_idp, new_coord_id);
1446
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
1447
+ new_coord_id);
1448
+
1449
+ *coord_idp = new_coord_id;
1450
+ }
1451
+
1452
+ /**
1453
+ * @brief Switch coordinator during a transaction.
1454
+ *
1455
+ */
1456
static void do_test_txn_switch_coordinator(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        int32_t coord_id;
        const char *topic = "test";
        const char *transactional_id = "txnid";
        const int broker_cnt = 5;
        const int iterations = 20;
        int i;

        test_timeout_set(iterations * 10);

        SUB_TEST("Test switching coordinators");

        rk = create_txn_producer(&mcluster, transactional_id, broker_cnt, NULL);

        coord_id = 1;
        rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
                                      coord_id);

        /* Start transactioning */
        TEST_SAY("Starting transaction\n");
        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        /* Each iteration runs one transaction while switching the
         * coordinator at varying points: before the transaction, mid-way
         * through producing, and/or just before commit/abort. */
        for (i = 0; i < iterations; i++) {
                const int msgcnt = 100;
                int remains = 0;

                set_next_coord(mcluster, transactional_id, broker_cnt,
                               &coord_id);

                TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

                test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
                                   msgcnt / 2, NULL, 0);

                if (!(i % 3))
                        set_next_coord(mcluster, transactional_id, broker_cnt,
                                       &coord_id);

                /* Produce remaining messages */
                test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA,
                                          msgcnt / 2, msgcnt / 2, NULL, 0,
                                          &remains);

                if ((i & 1) || !(i % 8))
                        set_next_coord(mcluster, transactional_id, broker_cnt,
                                       &coord_id);


                /* Alternate between committing and aborting; aborts
                 * fail the outstanding messages so ignore DR errors. */
                if (!(i % 5)) {
                        test_curr->ignore_dr_err = rd_false;
                        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));

                } else {
                        test_curr->ignore_dr_err = rd_true;
                        TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
                }
        }


        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1521
+
1522
+
1523
+ /**
1524
+ * @brief Switch coordinator during a transaction when AddOffsetsToTxn
1525
+ * are sent. #3571.
1526
+ */
1527
static void do_test_txn_switch_coordinator_refresh(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        const char *topic = "test";
        const char *transactional_id = "txnid";
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmetadata;

        SUB_TEST("Test switching coordinators (refresh)");

        rk = create_txn_producer(&mcluster, transactional_id, 3, NULL);

        rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
                                      1);

        /* Start transactioning */
        TEST_SAY("Starting transaction\n");
        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        /* Switch the coordinator so that AddOffsetsToTxnRequest
         * will respond with NOT_COORDINATOR. */
        TEST_SAY("Switching to coordinator 2\n");
        rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
                                      2);

        /*
         * Send some arbitrary offsets.
         */
        offsets = rd_kafka_topic_partition_list_new(4);
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
        rd_kafka_topic_partition_list_add(offsets, "srctopic64", 29)->offset =
            99999;

        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        /* Must succeed: the client is expected to refresh the
         * coordinator and retry transparently. */
        TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
            rk, offsets, cgmetadata, 20 * 1000));

        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);


        /* Produce some messages */
        test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10, NULL, 0);

        /* And commit the transaction */
        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1581
+
1582
+
1583
/**
 * @brief Test fatal error handling when transactions are not supported
 *        by the broker.
 *
 * InitProducerId is disabled on the mock cluster (max ApiVersion -1),
 * so init_transactions() must fail with __UNSUPPORTED_FEATURE, which is
 * a fatal error for a transactional producer: any later produce attempt
 * must then be rejected with __FATAL.
 */
static void do_test_txns_not_supported(void) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_error_t *error;
        rd_kafka_resp_err_t err;

        SUB_TEST_QUICK();

        test_conf_init(&conf, NULL, 10);

        test_conf_set(conf, "transactional.id", "myxnid");
        /* Placeholder bootstrap list; real brokers are added below once
         * the mock cluster exists. */
        test_conf_set(conf, "bootstrap.servers", ",");
        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        /* Create mock cluster */
        mcluster = rd_kafka_mock_cluster_new(rk, 3);

        /* Disable InitProducerId */
        rd_kafka_mock_set_apiversion(mcluster, 22 /*InitProducerId*/, -1, -1);


        rd_kafka_brokers_add(rk, rd_kafka_mock_cluster_bootstraps(mcluster));



        error = rd_kafka_init_transactions(rk, 5 * 1000);
        TEST_SAY("init_transactions() returned %s: %s\n",
                 error ? rd_kafka_error_name(error) : "success",
                 error ? rd_kafka_error_string(error) : "success");

        TEST_ASSERT(error, "Expected init_transactions() to fail");
        TEST_ASSERT(rd_kafka_error_code(error) ==
                        RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
                    "Expected init_transactions() to fail with %s, not %s: %s",
                    rd_kafka_err2name(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE),
                    rd_kafka_error_name(error), rd_kafka_error_string(error));
        rd_kafka_error_destroy(error);

        /* The fatal error must make any subsequent produce fail. */
        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"),
                                RD_KAFKA_V_KEY("test", 4), RD_KAFKA_V_END);
        TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL,
                    "Expected producev() to fail with %s, not %s",
                    rd_kafka_err2name(RD_KAFKA_RESP_ERR__FATAL),
                    rd_kafka_err2name(err));

        /* Mock cluster was created manually here (not via
         * create_txn_producer()), so destroy it explicitly before rk. */
        rd_kafka_mock_cluster_destroy(mcluster);

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1641
+
1642
+
1643
/**
 * @brief CONCURRENT_TRANSACTION on AddOffsets.. should be retried.
 *
 * Six consecutive CONCURRENT_TRANSACTIONS errors are injected on
 * AddOffsetsToTxn; since send_offsets_to_transaction() is called with an
 * infinite timeout (-1) the client must keep retrying past all injected
 * errors and eventually succeed.
 */
static void do_test_txns_send_offsets_concurrent_is_retried(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmetadata;

        SUB_TEST_QUICK();

        rk = create_txn_producer(&mcluster, "txnid", 3, NULL);

        test_curr->ignore_dr_err = rd_true;

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));

        /* Wait for messages to be delivered */
        test_flush(rk, 5000);


        /*
         * Have AddOffsetsToTxn fail but eventually succeed due to
         * infinite retries.
         */
        rd_kafka_mock_push_request_errors(
            mcluster, RD_KAFKAP_AddOffsetsToTxn,
            1 + 5, /* first request + some retries */
            RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
            RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
            RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
            RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
            RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
            RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);

        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;

        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        /* Timeout -1 == retry indefinitely; must outlast the injected
         * error sequence. */
        TEST_CALL_ERROR__(
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));

        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);

        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));

        /* All done */

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1704
+
1705
+
1706
/**
 * @brief Verify that send_offsets_to_transaction() with no eligible offsets
 *        is handled properly - the call should succeed immediately and be
 *        repeatable.
 *
 * An empty partition list means there is nothing to commit, so no
 * AddOffsetsToTxn/TxnOffsetCommit round-trips should be needed; the call
 * must return success both the first and the second time.
 */
static void do_test_txns_send_offsets_non_eligible(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmetadata;

        SUB_TEST_QUICK();

        rk = create_txn_producer(&mcluster, "txnid", 3, NULL);

        test_curr->ignore_dr_err = rd_true;

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));

        /* Wait for messages to be delivered */
        test_flush(rk, 5000);

        /* Empty offsets list */
        offsets = rd_kafka_topic_partition_list_new(0);

        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        TEST_CALL_ERROR__(
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));

        /* Now call it again, should also succeed. */
        TEST_CALL_ERROR__(
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));

        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);

        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));

        /* All done */

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1758
+
1759
+
1760
/**
 * @brief Verify that request timeouts don't cause crash (#2913).
 *
 * All broker RTTs are raised above socket.timeout.ms so that
 * send_offsets_to_transaction() times out; the error must be a retriable
 * __TIMED_OUT (not a crash or abortable error), and after clearing the
 * RTT delay the same call must succeed.
 */
static void do_test_txns_no_timeout_crash(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_error_t *error;
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmetadata;

        SUB_TEST_QUICK();

        /* socket.timeout.ms (1s) < broker rtt (2s, set below) forces
         * request timeouts. */
        rk =
            create_txn_producer(&mcluster, "txnid", 3, "socket.timeout.ms",
                                "1000", "transaction.timeout.ms", "5000", NULL);

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
                                RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
        TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));

        test_flush(rk, -1);

        /* Delay all broker connections */
        if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 2000)) ||
            (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 2000)) ||
            (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 2000)))
                TEST_FAIL("Failed to set broker RTT: %s",
                          rd_kafka_err2str(err));

        /* send_offsets..() should now time out */
        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        error =
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
        TEST_ASSERT(error, "Expected send_offsets..() to fail");
        TEST_SAY("send_offsets..() failed with %serror: %s\n",
                 rd_kafka_error_is_retriable(error) ? "retriable " : "",
                 rd_kafka_error_string(error));
        TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
                    "expected send_offsets_to_transaction() to fail with "
                    "timeout, not %s",
                    rd_kafka_error_name(error));
        TEST_ASSERT(rd_kafka_error_is_retriable(error),
                    "expected send_offsets_to_transaction() to fail with "
                    "a retriable error");
        rd_kafka_error_destroy(error);

        /* Reset delay and try again */
        if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 0)) ||
            (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 0)) ||
            (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 0)))
                TEST_FAIL("Failed to reset broker RTT: %s",
                          rd_kafka_err2str(err));

        TEST_SAY("Retrying send_offsets..()\n");
        error =
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
        TEST_ASSERT(!error, "Expected send_offsets..() to succeed, got: %s",
                    rd_kafka_error_string(error));

        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);

        /* All done */
        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1835
+
1836
+
1837
/**
 * @brief Test auth failure handling.
 *
 * Injects the given \p ErrorCode on the first request of \p ApiKey and
 * verifies that init_transactions() propagates exactly that error code,
 * marked fatal and not retriable.
 *
 * @param ApiKey    Protocol request to fail (e.g. InitProducerId,
 *                  FindCoordinator).
 * @param ErrorCode Authorization-type error to inject.
 */
static void do_test_txn_auth_failure(int16_t ApiKey,
                                     rd_kafka_resp_err_t ErrorCode) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_error_t *error;

        SUB_TEST_QUICK("ApiKey=%s ErrorCode=%s", rd_kafka_ApiKey2str(ApiKey),
                       rd_kafka_err2name(ErrorCode));

        rk = create_txn_producer(&mcluster, "txnid", 3, NULL);

        rd_kafka_mock_push_request_errors(mcluster, ApiKey, 1, ErrorCode);

        error = rd_kafka_init_transactions(rk, 5000);
        TEST_ASSERT(error, "Expected init_transactions() to fail");

        TEST_SAY("init_transactions() failed: %s: %s\n",
                 rd_kafka_err2name(rd_kafka_error_code(error)),
                 rd_kafka_error_string(error));
        TEST_ASSERT(rd_kafka_error_code(error) == ErrorCode,
                    "Expected error %s, not %s", rd_kafka_err2name(ErrorCode),
                    rd_kafka_err2name(rd_kafka_error_code(error)));
        TEST_ASSERT(rd_kafka_error_is_fatal(error),
                    "Expected error to be fatal");
        TEST_ASSERT(!rd_kafka_error_is_retriable(error),
                    "Expected error to not be retriable");
        rd_kafka_error_destroy(error);

        /* All done */

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
1874
+
1875
+
1876
/**
 * @brief Issue #3041: Commit fails due to message flush() taking too long,
 *        eventually resulting in an unabortable error and failure to
 *        re-init the transactional producer.
 *
 * Two passes over the same transaction body (via the \c retry label):
 * the first pass deliberately breaks delivery (invalid partition leaders)
 * and disconnects the coordinator so commit_transaction() fails; the
 * second pass restores leaders and must succeed after
 * abort_transaction() recovered the producer.
 */
static void do_test_txn_flush_timeout(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmetadata;
        rd_kafka_error_t *error;
        const char *txnid     = "myTxnId";
        const char *topic     = "myTopic";
        const int32_t coord_id = 2;
        int msgcounter        = 0;
        rd_bool_t is_retry    = rd_false;

        SUB_TEST_QUICK();

        rk = create_txn_producer(&mcluster, txnid, 3, "message.timeout.ms",
                                 "10000", "transaction.timeout.ms", "10000",
                                 /* Speed up coordinator reconnect */
                                 "reconnect.backoff.max.ms", "1000", NULL);


        /* Broker down is not a test-failing error */
        test_curr->is_fatal_cb = error_is_fatal_cb;
        allowed_error          = RD_KAFKA_RESP_ERR__TRANSPORT;

        rd_kafka_mock_topic_create(mcluster, topic, 2, 3);

        /* Set coordinator so we can disconnect it later */
        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, coord_id);

        /*
         * Init transactions
         */
        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

retry:
        if (!is_retry) {
                /* First attempt should fail. */

                test_curr->ignore_dr_err = rd_true;
                test_curr->exp_dr_err    = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;

                /* Assign invalid partition leaders for some partitions so
                 * that messages will not be delivered. */
                rd_kafka_mock_partition_set_leader(mcluster, topic, 0, -1);
                rd_kafka_mock_partition_set_leader(mcluster, topic, 1, -1);

        } else {
                /* The retry should succeed */
                test_curr->ignore_dr_err = rd_false;
                test_curr->exp_dr_err    = is_retry
                                               ? RD_KAFKA_RESP_ERR_NO_ERROR
                                               : RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;

                rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
                rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
        }


        /*
         * Start a transaction
         */
        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        /*
         * Produce some messages to specific partitions and random.
         */
        test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 100, NULL, 10,
                                  &msgcounter);
        test_produce_msgs2_nowait(rk, topic, 1, 0, 0, 100, NULL, 10,
                                  &msgcounter);
        test_produce_msgs2_nowait(rk, topic, RD_KAFKA_PARTITION_UA, 0, 0, 100,
                                  NULL, 10, &msgcounter);


        /*
         * Send some arbitrary offsets.
         */
        offsets = rd_kafka_topic_partition_list_new(4);
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
        rd_kafka_topic_partition_list_add(offsets, "srctopic64", 49)->offset =
            999999111;
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset =
            999;
        rd_kafka_topic_partition_list_add(offsets, "srctopic64", 34)->offset =
            123456789;

        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        TEST_CALL_ERROR__(
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));

        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);

        /* Give in-flight messages time to (fail to) deliver. */
        rd_sleep(2);

        if (!is_retry) {
                /* Now disconnect the coordinator. */
                TEST_SAY("Disconnecting transaction coordinator %" PRId32 "\n",
                         coord_id);
                rd_kafka_mock_broker_set_down(mcluster, coord_id);
        }

        /*
         * Start committing.
         */
        error = rd_kafka_commit_transaction(rk, -1);

        if (!is_retry) {
                TEST_ASSERT(error != NULL, "Expected commit to fail");
                TEST_SAY("commit_transaction() failed (expectedly): %s\n",
                         rd_kafka_error_string(error));
                rd_kafka_error_destroy(error);

        } else {
                TEST_ASSERT(!error, "Expected commit to succeed, not: %s",
                            rd_kafka_error_string(error));
        }

        if (!is_retry) {
                /*
                 * Bring the coordinator back up.
                 */
                rd_kafka_mock_broker_set_up(mcluster, coord_id);
                rd_sleep(2);

                /*
                 * Abort, and try again, this time without error.
                 */
                TEST_SAY("Aborting and retrying\n");
                is_retry = rd_true;

                TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 60000));
                goto retry;
        }

        /* All done */

        rd_kafka_destroy(rk);

        SUB_TEST_PASS();
}
2023
+
2024
+
2025
+ /**
2026
+ * @brief ESC-4424: rko is reused in response handler after destroy in coord_req
2027
+ * sender due to bad state.
2028
+ *
2029
+ * This is somewhat of a race condition so we need to perform a couple of
2030
+ * iterations before it hits, usually 2 or 3, so we try at least 15 times.
2031
+ */
2032
+ static void do_test_txn_coord_req_destroy(void) {
2033
+ rd_kafka_t *rk;
2034
+ rd_kafka_mock_cluster_t *mcluster;
2035
+ int i;
2036
+ int errcnt = 0;
2037
+
2038
+ SUB_TEST();
2039
+
2040
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
2041
+
2042
+ test_curr->ignore_dr_err = rd_true;
2043
+
2044
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
2045
+
2046
+ for (i = 0; i < 15; i++) {
2047
+ rd_kafka_error_t *error;
2048
+ rd_kafka_resp_err_t err;
2049
+ rd_kafka_topic_partition_list_t *offsets;
2050
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
2051
+
2052
+ test_timeout_set(10);
2053
+
2054
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2055
+
2056
+ /*
2057
+ * Inject errors to trigger retries
2058
+ */
2059
+ rd_kafka_mock_push_request_errors(
2060
+ mcluster, RD_KAFKAP_AddPartitionsToTxn,
2061
+ 2, /* first request + number of internal retries */
2062
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
2063
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
2064
+
2065
+ rd_kafka_mock_push_request_errors(
2066
+ mcluster, RD_KAFKAP_AddOffsetsToTxn,
2067
+ 3, /* first request + number of internal retries */
2068
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
2069
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
2070
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
2071
+
2072
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
2073
+ RD_KAFKA_V_VALUE("hi", 2),
2074
+ RD_KAFKA_V_END);
2075
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
2076
+
2077
+ rd_kafka_mock_push_request_errors(
2078
+ mcluster, RD_KAFKAP_Produce, 4,
2079
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
2080
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
2081
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
2082
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
2083
+ /* FIXME: When KIP-360 is supported, add this error:
2084
+ * RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER */
2085
+
2086
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
2087
+ RD_KAFKA_V_VALUE("hi", 2),
2088
+ RD_KAFKA_V_END);
2089
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
2090
+
2091
+
2092
+ /*
2093
+ * Send offsets to transaction
2094
+ */
2095
+
2096
+ offsets = rd_kafka_topic_partition_list_new(1);
2097
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)
2098
+ ->offset = 12;
2099
+
2100
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
2101
+
2102
+ error = rd_kafka_send_offsets_to_transaction(rk, offsets,
2103
+ cgmetadata, -1);
2104
+
2105
+ TEST_SAY("send_offsets_to_transaction() #%d: %s\n", i,
2106
+ rd_kafka_error_string(error));
2107
+
2108
+ /* As we can't control the exact timing and sequence
2109
+ * of requests this sometimes fails and sometimes succeeds,
2110
+ * but we run the test enough times to trigger at least
2111
+ * one failure. */
2112
+ if (error) {
2113
+ TEST_SAY(
2114
+ "send_offsets_to_transaction() #%d "
2115
+ "failed (expectedly): %s\n",
2116
+ i, rd_kafka_error_string(error));
2117
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
2118
+ "Expected abortable error for #%d", i);
2119
+ rd_kafka_error_destroy(error);
2120
+ errcnt++;
2121
+ }
2122
+
2123
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
2124
+ rd_kafka_topic_partition_list_destroy(offsets);
2125
+
2126
+ /* Allow time for internal retries */
2127
+ rd_sleep(2);
2128
+
2129
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
2130
+ }
2131
+
2132
+ TEST_SAY("Got %d errors\n", errcnt);
2133
+ TEST_ASSERT(errcnt > 0,
2134
+ "Expected at least one send_offets_to_transaction() "
2135
+ "failure");
2136
+
2137
+ /* All done */
2138
+
2139
+ rd_kafka_destroy(rk);
2140
+ }
2141
+
2142
+
2143
/* Counter shared with do_test_txn_coord_req_multi_find(): low bits count
 * AddOffsetsToTxn responses seen; a +10000 offset flags that the one-shot
 * broker down/up sequence has already been performed. */
static rd_atomic32_t multi_find_req_cnt;

/**
 * @brief on_response_received interceptor for
 *        do_test_txn_coord_req_multi_find().
 *
 * On the first AddOffsetsToTxn response it bounces broker 2, on the second
 * it bounces broker 3, each down/up cycle triggering coord_req_fsm() so that
 * multiple FindCoordinator requests reference the same coord_req_t (the
 * ESC-4444 scenario).  Becomes a no-op once the done flag (+10000) is set.
 */
static rd_kafka_resp_err_t
multi_find_on_response_received_cb(rd_kafka_t *rk,
                                   int sockfd,
                                   const char *brokername,
                                   int32_t brokerid,
                                   int16_t ApiKey,
                                   int16_t ApiVersion,
                                   int32_t CorrId,
                                   size_t size,
                                   int64_t rtt,
                                   rd_kafka_resp_err_t err,
                                   void *ic_opaque) {
        rd_kafka_mock_cluster_t *mcluster = rd_kafka_handle_mock_cluster(rk);
        rd_bool_t done = rd_atomic32_get(&multi_find_req_cnt) > 10000;

        if (ApiKey != RD_KAFKAP_AddOffsetsToTxn || done)
                return RD_KAFKA_RESP_ERR_NO_ERROR;

        TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
                 ", ApiKey %hd, CorrId %d, rtt %.2fms, %s: %s\n",
                 rd_kafka_name(rk), brokername, brokerid, ApiKey, CorrId,
                 rtt != -1 ? (float)rtt / 1000.0 : 0.0,
                 done ? "already done" : "not done yet",
                 rd_kafka_err2name(err));


        if (rd_atomic32_add(&multi_find_req_cnt, 1) == 1) {
                /* Trigger a broker down/up event, which in turns
                 * triggers the coord_req_fsm(). */
                rd_kafka_mock_broker_set_down(mcluster, 2);
                rd_kafka_mock_broker_set_up(mcluster, 2);
                return RD_KAFKA_RESP_ERR_NO_ERROR;
        }

        /* Trigger a broker down/up event, which in turns
         * triggers the coord_req_fsm(). */
        rd_kafka_mock_broker_set_down(mcluster, 3);
        rd_kafka_mock_broker_set_up(mcluster, 3);

        /* Clear the downed broker's latency so that it reconnects
         * quickly, otherwise the ApiVersionRequest will be delayed and
         * this will in turn delay the -> UP transition that we need to
         * trigger the coord_reqs. */
        rd_kafka_mock_broker_set_rtt(mcluster, 3, 0);

        /* Only do this down/up once */
        rd_atomic32_add(&multi_find_req_cnt, 10000);

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
2195
+
2196
+
2197
+ /**
2198
+ * @brief ESC-4444: multiple FindCoordinatorRequests are sent referencing
2199
+ * the same coord_req_t, but the first one received will destroy
2200
+ * the coord_req_t object and make the subsequent FindCoordingResponses
2201
+ * reference a freed object.
2202
+ *
2203
+ * What we want to achieve is this sequence:
2204
+ * 1. AddOffsetsToTxnRequest + Response which..
2205
+ * 2. Triggers TxnOffsetCommitRequest, but the coordinator is not known, so..
2206
+ * 3. Triggers a FindCoordinatorRequest
2207
+ * 4. FindCoordinatorResponse from 3 is received ..
2208
+ * 5. A TxnOffsetCommitRequest is sent from coord_req_fsm().
2209
+ * 6. Another broker changing state to Up triggers coord reqs again, which..
2210
+ * 7. Triggers a second TxnOffsetCommitRequest from coord_req_fsm().
2211
+ * 7. FindCoordinatorResponse from 5 is received, references the destroyed rko
2212
+ * and crashes.
2213
+ */
2214
+ static void do_test_txn_coord_req_multi_find(void) {
2215
+ rd_kafka_t *rk;
2216
+ rd_kafka_mock_cluster_t *mcluster;
2217
+ rd_kafka_error_t *error;
2218
+ rd_kafka_resp_err_t err;
2219
+ rd_kafka_topic_partition_list_t *offsets;
2220
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
2221
+ const char *txnid = "txnid", *groupid = "mygroupid", *topic = "mytopic";
2222
+ int i;
2223
+
2224
+ SUB_TEST();
2225
+
2226
+ rd_atomic32_init(&multi_find_req_cnt, 0);
2227
+
2228
+ on_response_received_cb = multi_find_on_response_received_cb;
2229
+ rk = create_txn_producer(&mcluster, txnid, 3,
2230
+ /* Need connections to all brokers so we
2231
+ * can trigger coord_req_fsm events
2232
+ * by toggling connections. */
2233
+ "enable.sparse.connections", "false",
2234
+ /* Set up on_response_received interceptor */
2235
+ "on_response_received", "", NULL);
2236
+
2237
+ /* Let broker 1 be both txn and group coordinator
2238
+ * so that the group coordinator connection is up when it is time
2239
+ * send the TxnOffsetCommitRequest. */
2240
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
2241
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
2242
+
2243
+ /* Set broker 1, 2, and 3 as leaders for a partition each and
2244
+ * later produce to both partitions so we know there's a connection
2245
+ * to all brokers. */
2246
+ rd_kafka_mock_topic_create(mcluster, topic, 3, 1);
2247
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
2248
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 2);
2249
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 3);
2250
+
2251
+ /* Broker down is not a test-failing error */
2252
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
2253
+ test_curr->is_fatal_cb = error_is_fatal_cb;
2254
+
2255
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
2256
+
2257
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2258
+
2259
+ for (i = 0; i < 3; i++) {
2260
+ err = rd_kafka_producev(
2261
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(i),
2262
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
2263
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
2264
+ }
2265
+
2266
+ test_flush(rk, 5000);
2267
+
2268
+ /*
2269
+ * send_offsets_to_transaction() will query for the group coordinator,
2270
+ * we need to make those requests slow so that multiple requests are
2271
+ * sent.
2272
+ */
2273
+ for (i = 1; i <= 3; i++)
2274
+ rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 4000);
2275
+
2276
+ /*
2277
+ * Send offsets to transaction
2278
+ */
2279
+
2280
+ offsets = rd_kafka_topic_partition_list_new(1);
2281
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
2282
+
2283
+ cgmetadata = rd_kafka_consumer_group_metadata_new(groupid);
2284
+
2285
+ error =
2286
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
2287
+
2288
+ TEST_SAY("send_offsets_to_transaction() %s\n",
2289
+ rd_kafka_error_string(error));
2290
+ TEST_ASSERT(!error, "send_offsets_to_transaction() failed: %s",
2291
+ rd_kafka_error_string(error));
2292
+
2293
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
2294
+ rd_kafka_topic_partition_list_destroy(offsets);
2295
+
2296
+ /* Clear delay */
2297
+ for (i = 1; i <= 3; i++)
2298
+ rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 0);
2299
+
2300
+ rd_sleep(5);
2301
+
2302
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
2303
+
2304
+ /* All done */
2305
+
2306
+ TEST_ASSERT(rd_atomic32_get(&multi_find_req_cnt) > 10000,
2307
+ "on_request_sent interceptor did not trigger properly");
2308
+
2309
+ rd_kafka_destroy(rk);
2310
+
2311
+ on_response_received_cb = NULL;
2312
+
2313
+ SUB_TEST_PASS();
2314
+ }
2315
+
2316
+
2317
/**
 * @brief ESC-4410: adding producer partitions gradually will trigger multiple
 *        AddPartitionsToTxn requests. Due to a bug the third partition to be
 *        registered would hang in PEND_TXN state.
 *
 * Trigger this behaviour by having two outstanding AddPartitionsToTxn requests
 * at the same time, followed by a need for a third:
 *
 *  1. Set coordinator broker rtt high (to give us time to produce).
 *  2. Produce to partition 0, will trigger first AddPartitionsToTxn.
 *  3. Produce to partition 1, will trigger second AddPartitionsToTxn.
 *  4. Wait for second AddPartitionsToTxn response.
 *  5. Produce to partition 2, should trigger AddPartitionsToTxn, but bug
 *     causes it to be stale in pending state.
 */

/* Number of AddPartitionsToTxn responses observed by the interceptor
 * below; polled by do_test_txn_addparts_req_multi() to pace produce()s. */
static rd_atomic32_t multi_addparts_resp_cnt;

/**
 * @brief on_response_received interceptor counting AddPartitionsToTxn
 *        responses for do_test_txn_addparts_req_multi().
 */
static rd_kafka_resp_err_t
multi_addparts_response_received_cb(rd_kafka_t *rk,
                                    int sockfd,
                                    const char *brokername,
                                    int32_t brokerid,
                                    int16_t ApiKey,
                                    int16_t ApiVersion,
                                    int32_t CorrId,
                                    size_t size,
                                    int64_t rtt,
                                    rd_kafka_resp_err_t err,
                                    void *ic_opaque) {

        if (ApiKey == RD_KAFKAP_AddPartitionsToTxn) {
                TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
                         ", ApiKey %hd, CorrId %d, rtt %.2fms, count %" PRId32
                         ": %s\n",
                         rd_kafka_name(rk), brokername, brokerid, ApiKey,
                         CorrId, rtt != -1 ? (float)rtt / 1000.0 : 0.0,
                         rd_atomic32_get(&multi_addparts_resp_cnt),
                         rd_kafka_err2name(err));

                rd_atomic32_add(&multi_addparts_resp_cnt, 1);
        }

        return RD_KAFKA_RESP_ERR_NO_ERROR;
}
2361
+
2362
+
2363
/**
 * @brief ESC-4410 regression test: paced produces to three partitions must
 *        not leave the third partition stuck in PEND_TXN state.
 *        See the scenario description above multi_addparts_resp_cnt.
 */
static void do_test_txn_addparts_req_multi(void) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        const char *txnid = "txnid", *topic = "mytopic";
        int32_t txn_coord = 2;

        SUB_TEST();

        rd_atomic32_init(&multi_addparts_resp_cnt, 0);

        on_response_received_cb = multi_addparts_response_received_cb;
        rk = create_txn_producer(&mcluster, txnid, 3, "linger.ms", "0",
                                 "message.timeout.ms", "9000",
                                 /* Set up on_response_received interceptor */
                                 "on_response_received", "", NULL);

        /* Let broker 2 (txn_coord) be txn coordinator.
         * (Comment fixed: it previously said broker 1.) */
        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
                                      txn_coord);

        rd_kafka_mock_topic_create(mcluster, topic, 3, 1);

        /* Set partition leaders to non-txn-coord broker so they wont
         * be affected by rtt delay */
        rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
        rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
        rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 1);



        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));

        /*
         * Run one transaction first to let the client familiarize with
         * the topic, this avoids metadata lookups, etc, when the real
         * test is run.
         */
        TEST_SAY("Running seed transaction\n");
        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
        TEST_CALL_ERR__(rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
                                          RD_KAFKA_V_VALUE("seed", 4),
                                          RD_KAFKA_V_END));
        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));


        /*
         * Now perform test transaction with rtt delays
         */
        TEST_SAY("Running test transaction\n");

        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        /* Reset counter */
        rd_atomic32_set(&multi_addparts_resp_cnt, 0);

        /* Add latency to txn coordinator so we can pace our produce() calls */
        rd_kafka_mock_broker_set_rtt(mcluster, txn_coord, 1000);

        /* Produce to partition 0 */
        TEST_CALL_ERR__(rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));

        rd_usleep(500 * 1000, NULL);

        /* Produce to partition 1 */
        TEST_CALL_ERR__(rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(1),
            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));

        TEST_SAY("Waiting for two AddPartitionsToTxnResponse\n");
        while (rd_atomic32_get(&multi_addparts_resp_cnt) < 2)
                rd_usleep(10 * 1000, NULL);

        TEST_SAY("%" PRId32 " AddPartitionsToTxnResponses seen\n",
                 rd_atomic32_get(&multi_addparts_resp_cnt));

        /* Produce to partition 2, this message will hang in
         * queue if the bug is not fixed. */
        TEST_CALL_ERR__(rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(2),
            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));

        /* Allow some extra time for things to settle before committing
         * transaction. */
        rd_usleep(1000 * 1000, NULL);

        TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 10 * 1000));

        /* All done */
        rd_kafka_destroy(rk);

        on_response_received_cb = NULL;

        SUB_TEST_PASS();
}
2459
+
2460
+
2461
+
2462
+ /**
2463
+ * @brief Test handling of OffsetFetchRequest returning UNSTABLE_OFFSET_COMMIT.
2464
+ *
2465
+ * There are two things to test;
2466
+ * - OffsetFetch triggered by committed() (and similar code paths)
2467
+ * - OffsetFetch triggered by assign()
2468
+ */
2469
+ static void do_test_unstable_offset_commit(void) {
2470
+ rd_kafka_t *rk, *c;
2471
+ rd_kafka_conf_t *c_conf;
2472
+ rd_kafka_mock_cluster_t *mcluster;
2473
+ rd_kafka_topic_partition_list_t *offsets;
2474
+ const char *topic = "srctopic4";
2475
+ const int msgcnt = 100;
2476
+ const int64_t offset_to_commit = msgcnt / 2;
2477
+ int i;
2478
+ int remains = 0;
2479
+
2480
+ SUB_TEST_QUICK();
2481
+
2482
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
2483
+
2484
+ test_conf_init(&c_conf, NULL, 0);
2485
+ test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
2486
+ test_conf_set(c_conf, "bootstrap.servers",
2487
+ rd_kafka_mock_cluster_bootstraps(mcluster));
2488
+ test_conf_set(c_conf, "enable.partition.eof", "true");
2489
+ test_conf_set(c_conf, "auto.offset.reset", "error");
2490
+ c = test_create_consumer("mygroup", NULL, c_conf, NULL);
2491
+
2492
+ rd_kafka_mock_topic_create(mcluster, topic, 2, 3);
2493
+
2494
+ /* Produce some messages to the topic so that the consumer has
2495
+ * something to read. */
2496
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
2497
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2498
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt, NULL, 0,
2499
+ &remains);
2500
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
2501
+
2502
+
2503
+ /* Commit offset */
2504
+ offsets = rd_kafka_topic_partition_list_new(1);
2505
+ rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
2506
+ offset_to_commit;
2507
+ TEST_CALL_ERR__(rd_kafka_commit(c, offsets, 0 /*sync*/));
2508
+ rd_kafka_topic_partition_list_destroy(offsets);
2509
+
2510
+ /* Retrieve offsets by calling committed().
2511
+ *
2512
+ * Have OffsetFetch fail and retry, on the first iteration
2513
+ * the API timeout is higher than the amount of time the retries will
2514
+ * take and thus succeed, and on the second iteration the timeout
2515
+ * will be lower and thus fail. */
2516
+ for (i = 0; i < 2; i++) {
2517
+ rd_kafka_resp_err_t err;
2518
+ rd_kafka_resp_err_t exp_err =
2519
+ i == 0 ? RD_KAFKA_RESP_ERR_NO_ERROR
2520
+ : RD_KAFKA_RESP_ERR__TIMED_OUT;
2521
+ int timeout_ms = exp_err ? 200 : 5 * 1000;
2522
+
2523
+ rd_kafka_mock_push_request_errors(
2524
+ mcluster, RD_KAFKAP_OffsetFetch,
2525
+ 1 + 5, /* first request + some retries */
2526
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2527
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2528
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2529
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2530
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2531
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
2532
+
2533
+ offsets = rd_kafka_topic_partition_list_new(1);
2534
+ rd_kafka_topic_partition_list_add(offsets, topic, 0);
2535
+
2536
+ err = rd_kafka_committed(c, offsets, timeout_ms);
2537
+
2538
+ TEST_SAY("#%d: committed() returned %s (expected %s)\n", i,
2539
+ rd_kafka_err2name(err), rd_kafka_err2name(exp_err));
2540
+
2541
+ TEST_ASSERT(err == exp_err,
2542
+ "#%d: Expected committed() to return %s, not %s", i,
2543
+ rd_kafka_err2name(exp_err), rd_kafka_err2name(err));
2544
+ TEST_ASSERT(offsets->cnt == 1,
2545
+ "Expected 1 committed offset, not %d",
2546
+ offsets->cnt);
2547
+ if (!exp_err)
2548
+ TEST_ASSERT(offsets->elems[0].offset ==
2549
+ offset_to_commit,
2550
+ "Expected committed offset %" PRId64
2551
+ ", "
2552
+ "not %" PRId64,
2553
+ offset_to_commit, offsets->elems[0].offset);
2554
+ else
2555
+ TEST_ASSERT(offsets->elems[0].offset < 0,
2556
+ "Expected no committed offset, "
2557
+ "not %" PRId64,
2558
+ offsets->elems[0].offset);
2559
+
2560
+ rd_kafka_topic_partition_list_destroy(offsets);
2561
+ }
2562
+
2563
+ TEST_SAY("Phase 2: OffsetFetch lookup through assignment\n");
2564
+ offsets = rd_kafka_topic_partition_list_new(1);
2565
+ rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
2566
+ RD_KAFKA_OFFSET_STORED;
2567
+
2568
+ rd_kafka_mock_push_request_errors(
2569
+ mcluster, RD_KAFKAP_OffsetFetch,
2570
+ 1 + 5, /* first request + some retries */
2571
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2572
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2573
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2574
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2575
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
2576
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
2577
+
2578
+ test_consumer_incremental_assign("assign", c, offsets);
2579
+ rd_kafka_topic_partition_list_destroy(offsets);
2580
+
2581
+ test_consumer_poll_exact("consume", c, 0, 1 /*eof*/, 0, msgcnt / 2,
2582
+ rd_true /*exact counts*/, NULL);
2583
+
2584
+ /* All done */
2585
+ rd_kafka_destroy(c);
2586
+ rd_kafka_destroy(rk);
2587
+
2588
+ SUB_TEST_PASS();
2589
+ }
2590
+
2591
+
2592
+ /**
2593
+ * @brief If a message times out locally before being attempted to send
2594
+ * and commit_transaction() is called, the transaction must not succeed.
2595
+ * https://github.com/confluentinc/confluent-kafka-dotnet/issues/1568
2596
+ */
2597
+ static void do_test_commit_after_msg_timeout(void) {
2598
+ rd_kafka_t *rk;
2599
+ rd_kafka_mock_cluster_t *mcluster;
2600
+ int32_t coord_id, leader_id;
2601
+ rd_kafka_resp_err_t err;
2602
+ rd_kafka_error_t *error;
2603
+ const char *topic = "test";
2604
+ const char *transactional_id = "txnid";
2605
+ int remains = 0;
2606
+
2607
+ SUB_TEST_QUICK();
2608
+
2609
+ /* Assign coordinator and leader to two different brokers */
2610
+ coord_id = 1;
2611
+ leader_id = 2;
2612
+
2613
+ rk = create_txn_producer(&mcluster, transactional_id, 3,
2614
+ "message.timeout.ms", "5000",
2615
+ "transaction.timeout.ms", "10000", NULL);
2616
+
2617
+ /* Broker down is not a test-failing error */
2618
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
2619
+ test_curr->is_fatal_cb = error_is_fatal_cb;
2620
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
2621
+
2622
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
2623
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
2624
+
2625
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
2626
+ coord_id);
2627
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);
2628
+
2629
+ /* Start transactioning */
2630
+ TEST_SAY("Starting transaction\n");
2631
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
2632
+
2633
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2634
+
2635
+ TEST_SAY("Bringing down %" PRId32 "\n", leader_id);
2636
+ rd_kafka_mock_broker_set_down(mcluster, leader_id);
2637
+ rd_kafka_mock_broker_set_down(mcluster, coord_id);
2638
+
2639
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
2640
+
2641
+ error = rd_kafka_commit_transaction(rk, -1);
2642
+ TEST_ASSERT(error != NULL, "expected commit_transaciton() to fail");
2643
+ TEST_SAY_ERROR(error, "commit_transaction() failed (as expected): ");
2644
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
2645
+ "Expected txn_requires_abort error");
2646
+ rd_kafka_error_destroy(error);
2647
+
2648
+ /* Bring the brokers up so the abort can complete */
2649
+ rd_kafka_mock_broker_set_up(mcluster, coord_id);
2650
+ rd_kafka_mock_broker_set_up(mcluster, leader_id);
2651
+
2652
+ TEST_SAY("Aborting transaction\n");
2653
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
2654
+
2655
+ TEST_ASSERT(remains == 0, "%d message(s) were not flushed\n", remains);
2656
+
2657
+ TEST_SAY("Attempting second transaction, which should succeed\n");
2658
+ test_curr->is_fatal_cb = error_is_fatal_cb;
2659
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
2660
+
2661
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2662
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
2663
+
2664
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
2665
+
2666
+ TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
2667
+
2668
+ rd_kafka_destroy(rk);
2669
+
2670
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
2671
+ test_curr->is_fatal_cb = NULL;
2672
+
2673
+ SUB_TEST_PASS();
2674
+ }
2675
+
2676
+
2677
+ /**
2678
+ * @brief #3575: Verify that OUT_OF_ORDER_SEQ does not trigger an epoch bump
2679
+ * during an ongoing transaction.
2680
+ * The transaction should instead enter the abortable state.
2681
+ */
2682
+ static void do_test_out_of_order_seq(void) {
2683
+ rd_kafka_t *rk;
2684
+ rd_kafka_mock_cluster_t *mcluster;
2685
+ rd_kafka_error_t *error;
2686
+ int32_t txn_coord = 1, leader = 2;
2687
+ const char *txnid = "myTxnId";
2688
+ test_timing_t timing;
2689
+ rd_kafka_resp_err_t err;
2690
+
2691
+ SUB_TEST_QUICK();
2692
+
2693
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
2694
+ NULL);
2695
+
2696
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
2697
+ txn_coord);
2698
+
2699
+ rd_kafka_mock_partition_set_leader(mcluster, "mytopic", 0, leader);
2700
+
2701
+ test_curr->ignore_dr_err = rd_true;
2702
+ test_curr->is_fatal_cb = NULL;
2703
+
2704
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
2705
+
2706
+ /*
2707
+ * Start a transaction
2708
+ */
2709
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2710
+
2711
+
2712
+
2713
+ /* Produce one seeding message first to get the leader up and running */
2714
+ TEST_CALL_ERR__(rd_kafka_producev(
2715
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
2716
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
2717
+ test_flush(rk, -1);
2718
+
2719
+ /* Let partition leader have a latency of 2 seconds
2720
+ * so that we can have multiple messages in-flight. */
2721
+ rd_kafka_mock_broker_set_rtt(mcluster, leader, 2 * 1000);
2722
+
2723
+ /* Produce a message, let it fail with with different errors,
2724
+ * ending with OUT_OF_ORDER which previously triggered an
2725
+ * Epoch bump. */
2726
+ rd_kafka_mock_push_request_errors(
2727
+ mcluster, RD_KAFKAP_Produce, 3,
2728
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
2729
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
2730
+ RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER);
2731
+
2732
+ /* Produce three messages that will be delayed
2733
+ * and have errors injected.*/
2734
+ TEST_CALL_ERR__(rd_kafka_producev(
2735
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
2736
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
2737
+ TEST_CALL_ERR__(rd_kafka_producev(
2738
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
2739
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
2740
+ TEST_CALL_ERR__(rd_kafka_producev(
2741
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
2742
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
2743
+
2744
+ /* Now sleep a short while so that the messages are processed
2745
+ * by the broker and errors are returned. */
2746
+ TEST_SAY("Sleeping..\n");
2747
+ rd_sleep(5);
2748
+
2749
+ rd_kafka_mock_broker_set_rtt(mcluster, leader, 0);
2750
+
2751
+ /* Produce a fifth message, should fail with ERR__STATE since
2752
+ * the transaction should have entered the abortable state. */
2753
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
2754
+ RD_KAFKA_V_PARTITION(0),
2755
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
2756
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__STATE,
2757
+ "Expected produce() to fail with ERR__STATE, not %s",
2758
+ rd_kafka_err2name(err));
2759
+ TEST_SAY("produce() failed as expected: %s\n", rd_kafka_err2str(err));
2760
+
2761
+ /* Commit the transaction, should fail with abortable error. */
2762
+ TIMING_START(&timing, "commit_transaction(-1)");
2763
+ error = rd_kafka_commit_transaction(rk, -1);
2764
+ TIMING_STOP(&timing);
2765
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
2766
+
2767
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
2768
+ rd_kafka_error_string(error));
2769
+
2770
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
2771
+ "Did not expect fatal error");
2772
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
2773
+ "Expected abortable error");
2774
+ rd_kafka_error_destroy(error);
2775
+
2776
+ /* Abort the transaction */
2777
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
2778
+
2779
+ /* Run a new transaction without errors to verify that the
2780
+ * producer can recover. */
2781
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2782
+
2783
+ TEST_CALL_ERR__(rd_kafka_producev(
2784
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
2785
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
2786
+
2787
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
2788
+
2789
+ rd_kafka_destroy(rk);
2790
+
2791
+ SUB_TEST_PASS();
2792
+ }
2793
+
2794
+
2795
+ /**
2796
+ * @brief Verify lossless delivery if topic disappears from Metadata for awhile.
2797
+ *
2798
+ * If a topic is removed from metadata inbetween transactions, the producer
2799
+ * will remove its partition state for the topic's partitions.
2800
+ * If later the same topic comes back (same topic instance, not a new creation)
2801
+ * then the producer must restore the previously used msgid/BaseSequence
2802
+ * in case the same Epoch is still used, or messages will be silently lost
2803
+ * as they would seem like legit duplicates to the broker.
2804
+ *
2805
+ * Reproduction:
2806
+ * 1. produce msgs to topic, commit transaction.
2807
+ * 2. remove topic from metadata
2808
+ * 3. make sure client updates its metadata, which removes the partition
2809
+ * objects.
2810
+ * 4. restore the topic in metadata
2811
+ * 5. produce new msgs to topic, commit transaction.
2812
+ * 6. consume topic. All messages should be accounted for.
2813
+ */
2814
+ static void do_test_topic_disappears_for_awhile(void) {
2815
+ rd_kafka_t *rk, *c;
2816
+ rd_kafka_conf_t *c_conf;
2817
+ rd_kafka_mock_cluster_t *mcluster;
2818
+ const char *topic = "mytopic";
2819
+ const char *txnid = "myTxnId";
2820
+ test_timing_t timing;
2821
+ int i;
2822
+ int msgcnt = 0;
2823
+ const int partition_cnt = 10;
2824
+
2825
+ SUB_TEST_QUICK();
2826
+
2827
+ rk = create_txn_producer(
2828
+ &mcluster, txnid, 1, "batch.num.messages", "3", "linger.ms", "100",
2829
+ "topic.metadata.refresh.interval.ms", "2000", NULL);
2830
+
2831
+ rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
2832
+
2833
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
2834
+
2835
+ for (i = 0; i < 2; i++) {
2836
+ int cnt = 3 * 2 * partition_cnt;
2837
+ rd_bool_t remove_topic = (i % 2) == 0;
2838
+ /*
2839
+ * Start a transaction
2840
+ */
2841
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2842
+
2843
+
2844
+ while (cnt-- >= 0) {
2845
+ TEST_CALL_ERR__(rd_kafka_producev(
2846
+ rk, RD_KAFKA_V_TOPIC(topic),
2847
+ RD_KAFKA_V_PARTITION(cnt % partition_cnt),
2848
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
2849
+ msgcnt++;
2850
+ }
2851
+
2852
+ /* Commit the transaction */
2853
+ TIMING_START(&timing, "commit_transaction(-1)");
2854
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
2855
+ TIMING_STOP(&timing);
2856
+
2857
+
2858
+
2859
+ if (remove_topic) {
2860
+ /* Make it seem the topic is removed, refresh metadata,
2861
+ * and then make the topic available again. */
2862
+ const rd_kafka_metadata_t *md;
2863
+
2864
+ TEST_SAY("Marking topic as non-existent\n");
2865
+
2866
+ rd_kafka_mock_topic_set_error(
2867
+ mcluster, topic,
2868
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
2869
+
2870
+ TEST_CALL_ERR__(rd_kafka_metadata(rk, 0, NULL, &md,
2871
+ tmout_multip(5000)));
2872
+
2873
+ rd_kafka_metadata_destroy(md);
2874
+
2875
+ rd_sleep(2);
2876
+
2877
+ TEST_SAY("Bringing topic back to life\n");
2878
+ rd_kafka_mock_topic_set_error(
2879
+ mcluster, topic, RD_KAFKA_RESP_ERR_NO_ERROR);
2880
+ }
2881
+ }
2882
+
2883
+ TEST_SAY("Verifying messages by consumtion\n");
2884
+ test_conf_init(&c_conf, NULL, 0);
2885
+ test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
2886
+ test_conf_set(c_conf, "bootstrap.servers",
2887
+ rd_kafka_mock_cluster_bootstraps(mcluster));
2888
+ test_conf_set(c_conf, "enable.partition.eof", "true");
2889
+ test_conf_set(c_conf, "auto.offset.reset", "earliest");
2890
+ c = test_create_consumer("mygroup", NULL, c_conf, NULL);
2891
+
2892
+ test_consumer_subscribe(c, topic);
2893
+ test_consumer_poll_exact("consume", c, 0, partition_cnt, 0, msgcnt,
2894
+ rd_true /*exact*/, NULL);
2895
+ rd_kafka_destroy(c);
2896
+
2897
+
2898
+ rd_kafka_destroy(rk);
2899
+
2900
+ SUB_TEST_PASS();
2901
+ }
2902
+
2903
+
2904
+ /**
2905
+ * @brief Test that group coordinator requests can handle an
2906
+ * untimely disconnect.
2907
+ *
2908
+ * The transaction manager makes use of librdkafka coord_req to commit
2909
+ * transaction offsets to the group coordinator.
2910
+ * If the connection to the given group coordinator is not up the
2911
+ * coord_req code will request a connection once, but if this connection fails
2912
+ * there will be no new attempts and the coord_req will idle until either
2913
+ * destroyed or the connection is retried for other reasons.
2914
+ * This in turn stalls the send_offsets_to_transaction() call until the
2915
+ * transaction times out.
2916
+ *
2917
+ * There are two variants to this test based on switch_coord:
2918
+ * - True - Switches the coordinator during the downtime.
2919
+ * The client should detect this and send the request to the
2920
+ * new coordinator.
2921
+ * - False - The coordinator remains on the down broker. Client will reconnect
2922
+ * when down broker comes up again.
2923
+ */
2924
+ struct some_state {
2925
+ rd_kafka_mock_cluster_t *mcluster;
2926
+ rd_bool_t switch_coord;
2927
+ int32_t broker_id;
2928
+ const char *grpid;
2929
+ };
2930
+
2931
+ static int delayed_up_cb(void *arg) {
2932
+ struct some_state *state = arg;
2933
+ rd_sleep(3);
2934
+ if (state->switch_coord) {
2935
+ TEST_SAY("Switching group coordinator to %" PRId32 "\n",
2936
+ state->broker_id);
2937
+ rd_kafka_mock_coordinator_set(state->mcluster, "group",
2938
+ state->grpid, state->broker_id);
2939
+ } else {
2940
+ TEST_SAY("Bringing up group coordinator %" PRId32 "..\n",
2941
+ state->broker_id);
2942
+ rd_kafka_mock_broker_set_up(state->mcluster, state->broker_id);
2943
+ }
2944
+ return 0;
2945
+ }
2946
+
2947
+ static void do_test_disconnected_group_coord(rd_bool_t switch_coord) {
2948
+ const char *topic = "mytopic";
2949
+ const char *txnid = "myTxnId";
2950
+ const char *grpid = "myGrpId";
2951
+ const int partition_cnt = 1;
2952
+ rd_kafka_t *rk;
2953
+ rd_kafka_mock_cluster_t *mcluster;
2954
+ rd_kafka_topic_partition_list_t *offsets;
2955
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
2956
+ struct some_state state = RD_ZERO_INIT;
2957
+ test_timing_t timing;
2958
+ thrd_t thrd;
2959
+ int ret;
2960
+
2961
+ SUB_TEST_QUICK("switch_coord=%s", RD_STR_ToF(switch_coord));
2962
+
2963
+ test_curr->is_fatal_cb = error_is_fatal_cb;
2964
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
2965
+
2966
+ rk = create_txn_producer(&mcluster, txnid, 3, NULL);
2967
+
2968
+ rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
2969
+
2970
+ /* Broker 1: txn coordinator
2971
+ * Broker 2: group coordinator
2972
+ * Broker 3: partition leader & backup coord if switch_coord=true */
2973
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
2974
+ rd_kafka_mock_coordinator_set(mcluster, "group", grpid, 2);
2975
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 3);
2976
+
2977
+ /* Bring down group coordinator so there are no undesired
2978
+ * connections to it. */
2979
+ rd_kafka_mock_broker_set_down(mcluster, 2);
2980
+
2981
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
2982
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
2983
+ TEST_CALL_ERR__(rd_kafka_producev(
2984
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
2985
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
2986
+ test_flush(rk, -1);
2987
+
2988
+ rd_sleep(1);
2989
+
2990
+ /* Run a background thread that after 3s, which should be enough
2991
+ * to perform the first failed connection attempt, makes the
2992
+ * group coordinator available again. */
2993
+ state.switch_coord = switch_coord;
2994
+ state.mcluster = mcluster;
2995
+ state.grpid = grpid;
2996
+ state.broker_id = switch_coord ? 3 : 2;
2997
+ if (thrd_create(&thrd, delayed_up_cb, &state) != thrd_success)
2998
+ TEST_FAIL("Failed to create thread");
2999
+
3000
+ TEST_SAY("Calling send_offsets_to_transaction()\n");
3001
+ offsets = rd_kafka_topic_partition_list_new(1);
3002
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 1;
3003
+ cgmetadata = rd_kafka_consumer_group_metadata_new(grpid);
3004
+
3005
+ TIMING_START(&timing, "send_offsets_to_transaction(-1)");
3006
+ TEST_CALL_ERROR__(
3007
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
3008
+ TIMING_STOP(&timing);
3009
+ TIMING_ASSERT(&timing, 0, 10 * 1000 /*10s*/);
3010
+
3011
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
3012
+ rd_kafka_topic_partition_list_destroy(offsets);
3013
+ thrd_join(thrd, &ret);
3014
+
3015
+ /* Commit the transaction */
3016
+ TIMING_START(&timing, "commit_transaction(-1)");
3017
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
3018
+ TIMING_STOP(&timing);
3019
+
3020
+ rd_kafka_destroy(rk);
3021
+
3022
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
3023
+ test_curr->is_fatal_cb = NULL;
3024
+
3025
+ SUB_TEST_PASS();
3026
+ }
3027
+
3028
+
3029
+ /**
3030
+ * @brief Test that a NULL coordinator is not fatal when
3031
+ * the transactional producer reconnects to the txn coordinator
3032
+ * and the first thing it does is a FindCoordinatorRequest that
3033
+ * fails with COORDINATOR_NOT_AVAILABLE, setting coordinator to NULL.
3034
+ */
3035
+ static void do_test_txn_coordinator_null_not_fatal(void) {
3036
+ rd_kafka_t *rk;
3037
+ rd_kafka_mock_cluster_t *mcluster;
3038
+ rd_kafka_error_t *error;
3039
+ rd_kafka_resp_err_t err;
3040
+ int32_t coord_id = 1;
3041
+ const char *topic = "test";
3042
+ const char *transactional_id = "txnid";
3043
+ int msgcnt = 1;
3044
+ int remains = 0;
3045
+
3046
+ SUB_TEST_QUICK();
3047
+
3048
+ /* Broker down is not a test-failing error */
3049
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
3050
+ allowed_error_2 = RD_KAFKA_RESP_ERR__TIMED_OUT;
3051
+ test_curr->is_fatal_cb = error_is_fatal_cb;
3052
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
3053
+
3054
+ /* One second is the minimum transaction timeout */
3055
+ rk = create_txn_producer(&mcluster, transactional_id, 1,
3056
+ "transaction.timeout.ms", "1000", NULL);
3057
+
3058
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
3059
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
3060
+
3061
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
3062
+ coord_id);
3063
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
3064
+
3065
+ /* Start transactioning */
3066
+ TEST_SAY("Starting transaction\n");
3067
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
3068
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
3069
+
3070
+ /* Makes the produce request timeout. */
3071
+ rd_kafka_mock_broker_push_request_error_rtts(
3072
+ mcluster, coord_id, RD_KAFKAP_Produce, 1,
3073
+ RD_KAFKA_RESP_ERR_NO_ERROR, 3000);
3074
+
3075
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
3076
+ msgcnt, NULL, 0, &remains);
3077
+
3078
+ /* This value is linked to transaction.timeout.ms, needs enough time
3079
+ * so the message times out and a DrainBump sequence is started. */
3080
+ rd_kafka_flush(rk, 1000);
3081
+
3082
+ /* To trigger the error the COORDINATOR_NOT_AVAILABLE response
3083
+ * must come AFTER idempotent state has changed to WaitTransport
3084
+ * but BEFORE it changes to WaitPID. To make it more likely
3085
+ * rd_kafka_txn_coord_timer_start timeout can be changed to 5 ms
3086
+ * in rd_kafka_txn_coord_query, when unable to query for
3087
+ * transaction coordinator.
3088
+ */
3089
+ rd_kafka_mock_broker_push_request_error_rtts(
3090
+ mcluster, coord_id, RD_KAFKAP_FindCoordinator, 1,
3091
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, 10);
3092
+
3093
+ /* Coordinator down starts the FindCoordinatorRequest loop. */
3094
+ TEST_SAY("Bringing down coordinator %" PRId32 "\n", coord_id);
3095
+ rd_kafka_mock_broker_set_down(mcluster, coord_id);
3096
+
3097
+ /* Coordinator down for some time. */
3098
+ rd_usleep(100 * 1000, NULL);
3099
+
3100
+ /* When it comes up, the error is triggered, if the preconditions
3101
+ * happen. */
3102
+ TEST_SAY("Bringing up coordinator %" PRId32 "\n", coord_id);
3103
+ rd_kafka_mock_broker_set_up(mcluster, coord_id);
3104
+
3105
+ /* Make sure DRs are received */
3106
+ rd_kafka_flush(rk, 1000);
3107
+
3108
+ error = rd_kafka_commit_transaction(rk, -1);
3109
+
3110
+ TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
3111
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
3112
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
3113
+ rd_kafka_error_string(error));
3114
+ rd_kafka_error_destroy(error);
3115
+
3116
+ /* Needs to wait some time before closing to make sure it doesn't go
3117
+ * into TERMINATING state before error is triggered. */
3118
+ rd_usleep(1000 * 1000, NULL);
3119
+ rd_kafka_destroy(rk);
3120
+
3121
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
3122
+ allowed_error_2 = RD_KAFKA_RESP_ERR_NO_ERROR;
3123
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
3124
+ test_curr->is_fatal_cb = NULL;
3125
+
3126
+ SUB_TEST_PASS();
3127
+ }
3128
+
3129
+
3130
+
3131
+ /**
3132
+ * @brief Simple test to make sure the init_transactions() timeout is honoured
3133
+ * and also not infinite.
3134
+ */
3135
+ static void do_test_txn_resumable_init(void) {
3136
+ rd_kafka_t *rk;
3137
+ const char *transactional_id = "txnid";
3138
+ rd_kafka_error_t *error;
3139
+ test_timing_t duration;
3140
+
3141
+ SUB_TEST();
3142
+
3143
+ rd_kafka_conf_t *conf;
3144
+
3145
+ test_conf_init(&conf, NULL, 20);
3146
+ test_conf_set(conf, "bootstrap.servers", "");
3147
+ test_conf_set(conf, "transactional.id", transactional_id);
3148
+ test_conf_set(conf, "transaction.timeout.ms", "4000");
3149
+
3150
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
3151
+
3152
+ /* First make sure a lower timeout is honoured. */
3153
+ TIMING_START(&duration, "init_transactions(1000)");
3154
+ error = rd_kafka_init_transactions(rk, 1000);
3155
+ TIMING_STOP(&duration);
3156
+
3157
+ if (error)
3158
+ TEST_SAY("First init_transactions failed (as expected): %s\n",
3159
+ rd_kafka_error_string(error));
3160
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
3161
+ "Expected _TIMED_OUT, not %s",
3162
+ error ? rd_kafka_error_string(error) : "success");
3163
+ rd_kafka_error_destroy(error);
3164
+
3165
+ TIMING_ASSERT(&duration, 900, 1500);
3166
+
3167
+ TEST_SAY(
3168
+ "Performing second init_transactions() call now with an "
3169
+ "infinite timeout: "
3170
+ "should time out in 2 x transaction.timeout.ms\n");
3171
+
3172
+ TIMING_START(&duration, "init_transactions(infinite)");
3173
+ error = rd_kafka_init_transactions(rk, -1);
3174
+ TIMING_STOP(&duration);
3175
+
3176
+ if (error)
3177
+ TEST_SAY("Second init_transactions failed (as expected): %s\n",
3178
+ rd_kafka_error_string(error));
3179
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
3180
+ "Expected _TIMED_OUT, not %s",
3181
+ error ? rd_kafka_error_string(error) : "success");
3182
+ rd_kafka_error_destroy(error);
3183
+
3184
+ TIMING_ASSERT(&duration, 2 * 4000 - 500, 2 * 4000 + 500);
3185
+
3186
+ rd_kafka_destroy(rk);
3187
+
3188
+ SUB_TEST_PASS();
3189
+ }
3190
+
3191
+
3192
+ /**
3193
+ * @brief Retries a transaction call until it succeeds or returns a
3194
+ * non-retriable error - which will cause the test to fail.
3195
+ *
3196
+ * @param intermed_calls Is a block of code that will be called after each
3197
+ * retriable failure of \p call.
3198
+ */
3199
+ #define RETRY_TXN_CALL__(call, intermed_calls) \
3200
+ do { \
3201
+ rd_kafka_error_t *_error = call; \
3202
+ if (!_error) \
3203
+ break; \
3204
+ TEST_SAY_ERROR(_error, "%s: ", "" #call); \
3205
+ TEST_ASSERT(rd_kafka_error_is_retriable(_error), \
3206
+ "Expected retriable error"); \
3207
+ TEST_SAY("%s failed, retrying in 1 second\n", "" #call); \
3208
+ rd_kafka_error_destroy(_error); \
3209
+ intermed_calls; \
3210
+ rd_sleep(1); \
3211
+ } while (1)
3212
+
3213
+ /**
3214
+ * @brief Call \p call and expect it to fail with \p exp_err_code.
3215
+ */
3216
+ #define TXN_CALL_EXPECT_ERROR__(call, exp_err_code) \
3217
+ do { \
3218
+ rd_kafka_error_t *_error = call; \
3219
+ TEST_ASSERT(_error != NULL, \
3220
+ "%s: Expected %s error, got success", "" #call, \
3221
+ rd_kafka_err2name(exp_err_code)); \
3222
+ TEST_SAY_ERROR(_error, "%s: ", "" #call); \
3223
+ TEST_ASSERT(rd_kafka_error_code(_error) == exp_err_code, \
3224
+ "%s: Expected %s error, got %s", "" #call, \
3225
+ rd_kafka_err2name(exp_err_code), \
3226
+ rd_kafka_error_name(_error)); \
3227
+ rd_kafka_error_destroy(_error); \
3228
+ } while (0)
3229
+
3230
+
3231
+ /**
3232
+ * @brief Simple test to make sure short API timeouts can be safely resumed
3233
+ * by calling the same API again.
3234
+ *
3235
+ * @param do_commit Commit transaction if true, else abort transaction.
3236
+ */
3237
+ static void do_test_txn_resumable_calls_timeout(rd_bool_t do_commit) {
3238
+ rd_kafka_t *rk;
3239
+ rd_kafka_mock_cluster_t *mcluster;
3240
+ rd_kafka_resp_err_t err;
3241
+ rd_kafka_topic_partition_list_t *offsets;
3242
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
3243
+ int32_t coord_id = 1;
3244
+ const char *topic = "test";
3245
+ const char *transactional_id = "txnid";
3246
+ int msgcnt = 1;
3247
+ int remains = 0;
3248
+
3249
+ SUB_TEST("%s_transaction", do_commit ? "commit" : "abort");
3250
+
3251
+ rk = create_txn_producer(&mcluster, transactional_id, 1, NULL);
3252
+
3253
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
3254
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
3255
+
3256
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
3257
+ coord_id);
3258
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
3259
+
3260
+ TEST_SAY("Starting transaction\n");
3261
+ TEST_SAY("Delaying first two InitProducerIdRequests by 500ms\n");
3262
+ rd_kafka_mock_broker_push_request_error_rtts(
3263
+ mcluster, coord_id, RD_KAFKAP_InitProducerId, 2,
3264
+ RD_KAFKA_RESP_ERR_NO_ERROR, 500, RD_KAFKA_RESP_ERR_NO_ERROR, 500);
3265
+
3266
+ RETRY_TXN_CALL__(
3267
+ rd_kafka_init_transactions(rk, 100),
3268
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
3269
+ RD_KAFKA_RESP_ERR__CONFLICT));
3270
+
3271
+ RETRY_TXN_CALL__(rd_kafka_begin_transaction(rk), /*none*/);
3272
+
3273
+
3274
+ TEST_SAY("Delaying ProduceRequests by 3000ms\n");
3275
+ rd_kafka_mock_broker_push_request_error_rtts(
3276
+ mcluster, coord_id, RD_KAFKAP_Produce, 1,
3277
+ RD_KAFKA_RESP_ERR_NO_ERROR, 3000);
3278
+
3279
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
3280
+ msgcnt, NULL, 0, &remains);
3281
+
3282
+
3283
+ TEST_SAY("Delaying SendOffsetsToTransaction by 400ms\n");
3284
+ rd_kafka_mock_broker_push_request_error_rtts(
3285
+ mcluster, coord_id, RD_KAFKAP_AddOffsetsToTxn, 1,
3286
+ RD_KAFKA_RESP_ERR_NO_ERROR, 400);
3287
+ offsets = rd_kafka_topic_partition_list_new(1);
3288
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12;
3289
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
3290
+
3291
+ /* This is not a resumable call on timeout */
3292
+ TEST_CALL_ERROR__(
3293
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
3294
+
3295
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
3296
+ rd_kafka_topic_partition_list_destroy(offsets);
3297
+
3298
+
3299
+ TEST_SAY("Delaying EndTxnRequests by 1200ms\n");
3300
+ rd_kafka_mock_broker_push_request_error_rtts(
3301
+ mcluster, coord_id, RD_KAFKAP_EndTxn, 1, RD_KAFKA_RESP_ERR_NO_ERROR,
3302
+ 1200);
3303
+
3304
+ /* Committing/aborting the transaction will also be delayed by the
3305
+ * previous accumulated remaining delays. */
3306
+
3307
+ if (do_commit) {
3308
+ TEST_SAY("Committing transaction\n");
3309
+
3310
+ RETRY_TXN_CALL__(
3311
+ rd_kafka_commit_transaction(rk, 100),
3312
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
3313
+ RD_KAFKA_RESP_ERR__CONFLICT));
3314
+ } else {
3315
+ TEST_SAY("Aborting transaction\n");
3316
+
3317
+ RETRY_TXN_CALL__(
3318
+ rd_kafka_abort_transaction(rk, 100),
3319
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, -1),
3320
+ RD_KAFKA_RESP_ERR__CONFLICT));
3321
+ }
3322
+
3323
+ rd_kafka_destroy(rk);
3324
+
3325
+ SUB_TEST_PASS();
3326
+ }
3327
+
3328
+
3329
+ /**
3330
+ * @brief Verify that resuming timed out calls that after the timeout, but
3331
+ * before the resuming call, would error out.
3332
+ */
3333
+ static void do_test_txn_resumable_calls_timeout_error(rd_bool_t do_commit) {
3334
+ rd_kafka_t *rk;
3335
+ rd_kafka_mock_cluster_t *mcluster;
3336
+ rd_kafka_resp_err_t err;
3337
+ int32_t coord_id = 1;
3338
+ const char *topic = "test";
3339
+ const char *transactional_id = "txnid";
3340
+ int msgcnt = 1;
3341
+ int remains = 0;
3342
+ rd_kafka_error_t *error;
3343
+
3344
+ SUB_TEST_QUICK("%s_transaction", do_commit ? "commit" : "abort");
3345
+
3346
+ rk = create_txn_producer(&mcluster, transactional_id, 1, NULL);
3347
+
3348
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
3349
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
3350
+
3351
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
3352
+ coord_id);
3353
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
3354
+
3355
+ TEST_SAY("Starting transaction\n");
3356
+
3357
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
3358
+
3359
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
3360
+
3361
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
3362
+ msgcnt, NULL, 0, &remains);
3363
+
3364
+
3365
+ TEST_SAY("Fail EndTxn fatally after 2000ms\n");
3366
+ rd_kafka_mock_broker_push_request_error_rtts(
3367
+ mcluster, coord_id, RD_KAFKAP_EndTxn, 1,
3368
+ RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, 2000);
3369
+
3370
+ if (do_commit) {
3371
+ TEST_SAY("Committing transaction\n");
3372
+
3373
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500),
3374
+ RD_KAFKA_RESP_ERR__TIMED_OUT);
3375
+
3376
+ /* Sleep so that the background EndTxn fails locally and sets
3377
+ * an error result. */
3378
+ rd_sleep(3);
3379
+
3380
+ error = rd_kafka_commit_transaction(rk, -1);
3381
+
3382
+ } else {
3383
+ TEST_SAY("Aborting transaction\n");
3384
+
3385
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500),
3386
+ RD_KAFKA_RESP_ERR__TIMED_OUT);
3387
+
3388
+ /* Sleep so that the background EndTxn fails locally and sets
3389
+ * an error result. */
3390
+ rd_sleep(3);
3391
+
3392
+ error = rd_kafka_commit_transaction(rk, -1);
3393
+ }
3394
+
3395
+ TEST_ASSERT(error != NULL && rd_kafka_error_is_fatal(error),
3396
+ "Expected fatal error, not %s",
3397
+ rd_kafka_error_string(error));
3398
+ TEST_ASSERT(rd_kafka_error_code(error) ==
3399
+ RD_KAFKA_RESP_ERR_INVALID_TXN_STATE,
3400
+ "Expected error INVALID_TXN_STATE, got %s",
3401
+ rd_kafka_error_name(error));
3402
+ rd_kafka_error_destroy(error);
3403
+
3404
+ rd_kafka_destroy(rk);
3405
+
3406
+ SUB_TEST_PASS();
3407
+ }
3408
+
3409
+
3410
+ /**
3411
+ * @brief Concurrent transaction API calls are not permitted.
3412
+ * This test makes sure they're properly enforced.
3413
+ *
3414
+ * For each transactional API, call it with a 5s timeout, and during that time
3415
+ * from another thread call transactional APIs, one by one, and verify that
3416
+ * we get an ERR__CONFLICT error back in the second thread.
3417
+ *
3418
+ * We use a mutex for synchronization, the main thread will hold the lock
3419
+ * when not calling an API but release it just prior to calling.
3420
+ * The other thread will acquire the lock, sleep, and hold the lock while
3421
+ * calling the concurrent API that should fail immediately, releasing the lock
3422
+ * when done.
3423
+ *
3424
+ */
3425
+
3426
/** Shared state between the main test thread and the concurrent-caller
 *  thread of do_test_txn_concurrent_operations(). All fields except
 *  \c lock itself are protected by \c lock. */
struct _txn_concurrent_state {
        const char *api;   /* Name of the transactional API the main thread
                            * is currently blocking in, or NULL when the
                            * test is complete and the worker should exit. */
        mtx_t lock;        /* Held by the main thread except while it is
                            * inside a transactional API call. */
        rd_kafka_t *rk;    /* Shared transactional producer instance. */
        struct test *test; /* Main thread's test context; copied into the
                            * worker's TLS so TEST_..() macros work there. */
};
3432
+
3433
/**
 * @brief Thread main for the concurrent-caller thread.
 *
 * Each time the main thread enters a (purposely slow) transactional API call
 * it publishes the call's name in state->api and releases state->lock.
 * This thread then triggers every transactional API, one by one, and verifies
 * that each conflicting call fails immediately with a retriable error:
 * ERR__PREV_IN_PROGRESS for the same API as the main thread's,
 * ERR__CONFLICT for any other API.
 * A NULL state->api signals that the test is done and the thread exits.
 *
 * @returns 0 (thrd exit code).
 */
static int txn_concurrent_thread_main(void *arg) {
        struct _txn_concurrent_state *state = arg;
        static const char *apis[]           = {
            "init_transactions", "begin_transaction",
            "send_offsets_to_transaction", "commit_transaction",
            "abort_transaction", NULL};
        rd_kafka_t *rk       = state->rk;
        const char *main_api = NULL;
        int i;

        /* Update TLS variable so TEST_..() macros work */
        test_curr = state->test;

        while (1) {
                const char *api         = NULL;
                const int timeout_ms    = 10000;
                rd_kafka_error_t *error = NULL;
                rd_kafka_resp_err_t exp_err;
                test_timing_t duration;

                /* Wait for other thread's txn call to start, then sleep a bit
                 * to increase the chance of that call has really begun. */
                mtx_lock(&state->lock);

                if (state->api && state->api == main_api) {
                        /* Main thread is still blocking on the last API call */
                        TEST_SAY("Waiting for main thread to finish %s()\n",
                                 main_api);
                        mtx_unlock(&state->lock);
                        rd_sleep(1);
                        continue;
                } else if (!(main_api = state->api)) {
                        /* api == NULL: main thread is done, terminate. */
                        mtx_unlock(&state->lock);
                        break;
                }

                /* Give the main thread's API call time to really begin.
                 * Note: the lock is intentionally held across the loop below
                 * so the main thread can't start a new call meanwhile. */
                rd_sleep(1);

                for (i = 0; (api = apis[i]) != NULL; i++) {
                        TEST_SAY(
                            "Triggering concurrent %s() call while "
                            "main is in %s() call\n",
                            api, main_api);
                        TIMING_START(&duration, "%s", api);

                        /* Dispatch on API name; each call is expected to
                         * fail immediately, see assertions below. */
                        if (!strcmp(api, "init_transactions"))
                                error =
                                    rd_kafka_init_transactions(rk, timeout_ms);
                        else if (!strcmp(api, "begin_transaction"))
                                error = rd_kafka_begin_transaction(rk);
                        else if (!strcmp(api, "send_offsets_to_transaction")) {
                                rd_kafka_topic_partition_list_t *offsets =
                                    rd_kafka_topic_partition_list_new(1);
                                rd_kafka_consumer_group_metadata_t *cgmetadata =
                                    rd_kafka_consumer_group_metadata_new(
                                        "mygroupid");
                                rd_kafka_topic_partition_list_add(
                                    offsets, "srctopic4", 0)
                                    ->offset = 12;

                                error = rd_kafka_send_offsets_to_transaction(
                                    rk, offsets, cgmetadata, -1);
                                rd_kafka_consumer_group_metadata_destroy(
                                    cgmetadata);
                                rd_kafka_topic_partition_list_destroy(offsets);
                        } else if (!strcmp(api, "commit_transaction"))
                                error =
                                    rd_kafka_commit_transaction(rk, timeout_ms);
                        else if (!strcmp(api, "abort_transaction"))
                                error =
                                    rd_kafka_abort_transaction(rk, timeout_ms);
                        else
                                TEST_FAIL("Unknown API: %s", api);

                        TIMING_STOP(&duration);

                        TEST_SAY_ERROR(error, "Conflicting %s() call: ", api);
                        TEST_ASSERT(error,
                                    "Expected conflicting %s() call to fail",
                                    api);

                        /* Same API as the main thread's reports
                         * PREV_IN_PROGRESS, any other API reports CONFLICT. */
                        exp_err = !strcmp(api, main_api)
                                      ? RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS
                                      : RD_KAFKA_RESP_ERR__CONFLICT;

                        TEST_ASSERT(rd_kafka_error_code(error) == exp_err,

                                    "Conflicting %s(): Expected %s, not %s",
                                    api, rd_kafka_err2str(exp_err),
                                    rd_kafka_error_name(error));
                        TEST_ASSERT(
                            rd_kafka_error_is_retriable(error),
                            "Conflicting %s(): Expected retriable error", api);
                        rd_kafka_error_destroy(error);
                        /* These calls should fail immediately */
                        TIMING_ASSERT(&duration, 0, 100);
                }

                mtx_unlock(&state->lock);
        }

        return 0;
}
3536
+
3537
/**
 * @brief Main thread of the concurrent-operations test (see the block
 *        comment above struct _txn_concurrent_state): performs a full
 *        transaction against a slow (high-RTT) mock broker while a
 *        background thread verifies that concurrent transactional API
 *        calls fail with ERR__CONFLICT / ERR__PREV_IN_PROGRESS.
 *
 * @param do_commit Finish the transaction with commit if true, else abort.
 */
static void do_test_txn_concurrent_operations(rd_bool_t do_commit) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        int32_t coord_id = 1;
        rd_kafka_resp_err_t err;
        const char *topic            = "test";
        const char *transactional_id = "txnid";
        int remains                  = 0;
        thrd_t thrd;
        struct _txn_concurrent_state state = RD_ZERO_INIT;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmetadata;

        SUB_TEST("%s", do_commit ? "commit" : "abort");

        /* The high broker RTT makes every step slow: allow plenty of time. */
        test_timeout_set(90);

        /* We need to override the value of socket.connection.setup.timeout.ms
         * to be at least 2*RTT of the mock broker. This is because the first
         * ApiVersion request will fail, since we make the request with v3, and
         * the mock broker's MaxVersion is 2, so the request is retried with v0.
         * We use the value 3*RTT to add some buffer.
         */
        rk = create_txn_producer(&mcluster, transactional_id, 1,
                                 "socket.connection.setup.timeout.ms", "15000",
                                 NULL);

        /* Set broker RTT to 3.5s so that the background thread has ample
         * time to call its conflicting APIs.
         * This value must be less than socket.connection.setup.timeout.ms/2. */
        rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 3500);

        err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
        TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));

        /* Set up shared state between us and the concurrent thread */
        mtx_init(&state.lock, mtx_plain);
        state.test = test_curr;
        state.rk   = rk;

        /* We release the lock only while calling the TXN API */
        mtx_lock(&state.lock);

        /* Spin up concurrent thread */
        if (thrd_create(&thrd, txn_concurrent_thread_main, (void *)&state) !=
            thrd_success)
                TEST_FAIL("Failed to create thread");

/* _start_call() publishes the API name to the worker thread and releases
 * the lock for the duration of the blocking call; _end_call() re-acquires it.
 * NOTE(review): the macros are not #undef'd after use; harmless here since
 * this is the last use in the file, but worth confirming. */
#define _start_call(callname)                                                  \
        do {                                                                   \
                state.api = callname;                                          \
                mtx_unlock(&state.lock);                                       \
        } while (0)
#define _end_call() mtx_lock(&state.lock)

        _start_call("init_transactions");
        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
        _end_call();

        /* This call doesn't block, so can't really be tested concurrently. */
        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));

        test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10,
                                  NULL, 0, &remains);

        _start_call("send_offsets_to_transaction");
        offsets = rd_kafka_topic_partition_list_new(1);
        rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12;
        cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");

        TEST_CALL_ERROR__(
            rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
        rd_kafka_consumer_group_metadata_destroy(cgmetadata);
        rd_kafka_topic_partition_list_destroy(offsets);
        _end_call();

        if (do_commit) {
                _start_call("commit_transaction");
                TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
                _end_call();
        } else {
                _start_call("abort_transaction");
                TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
                _end_call();
        }

        /* Signal completion to background thread */
        state.api = NULL;

        mtx_unlock(&state.lock);

        thrd_join(thrd, NULL);

        rd_kafka_destroy(rk);

        mtx_destroy(&state.lock);

        SUB_TEST_PASS();
}
3636
+
3637
+
3638
/**
 * @brief KIP-360: Test that fatal idempotence errors triggers abortable
 *        transaction errors, but let the broker-side abort of the
 *        transaction fail with a fencing error.
 *        Should raise a fatal error.
 *
 *        Afterwards the mock broker error stacks are inspected to verify
 *        that exactly one EndTxn was sent (its queued error was consumed)
 *        and that no InitProducerId retry was attempted (its queued error
 *        remains).
 *
 * @param error_code Which error code EndTxn should fail with.
 *                   Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
 *                   or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
 */
static void do_test_txn_fenced_abort(rd_kafka_resp_err_t error_code) {
        rd_kafka_t *rk;
        rd_kafka_mock_cluster_t *mcluster;
        rd_kafka_error_t *error;
        int32_t txn_coord = 2;
        const char *txnid = "myTxnId";
        char errstr[512];
        rd_kafka_resp_err_t fatal_err;
        size_t errors_cnt;

        SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code));

        rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
                                 NULL);

        rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
                                      txn_coord);

        /* Delivery failures and the FENCED error are expected; don't let
         * them fail the test prematurely. */
        test_curr->ignore_dr_err = rd_true;
        test_curr->is_fatal_cb   = error_is_fatal_cb;
        allowed_error            = RD_KAFKA_RESP_ERR__FENCED;

        TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));

        /*
         * Start a transaction
         */
        TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));


        /* Produce a message without error first */
        TEST_CALL_ERR__(rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));

        test_flush(rk, -1);

        /* Fail abort transaction */
        rd_kafka_mock_broker_push_request_error_rtts(
            mcluster, txn_coord, RD_KAFKAP_EndTxn, 1, error_code, 0);

        /* Fail the PID reinit */
        rd_kafka_mock_broker_push_request_error_rtts(
            mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0);

        /* Produce a message, let it fail with a fatal idempo error. */
        rd_kafka_mock_push_request_errors(
            mcluster, RD_KAFKAP_Produce, 1,
            RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);

        TEST_CALL_ERR__(rd_kafka_producev(
            rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
            RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));

        test_flush(rk, -1);

        /* Abort the transaction, should fail with a fatal error */
        error = rd_kafka_abort_transaction(rk, -1);
        TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");

        TEST_SAY_ERROR(error, "abort_transaction() failed: ");
        TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error");
        rd_kafka_error_destroy(error);

        fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
        TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised");
        TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr);

        /* Verify that the producer sent the expected number of EndTxn requests
         * by inspecting the mock broker error stack,
         * which should now be empty. */
        if (rd_kafka_mock_broker_error_stack_cnt(
                mcluster, txn_coord, RD_KAFKAP_EndTxn, &errors_cnt)) {
                TEST_FAIL(
                    "Broker error count should succeed for API %s"
                    " on broker %" PRId32,
                    rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), txn_coord);
        }
        /* Checks all the RD_KAFKAP_EndTxn responses have been consumed */
        TEST_ASSERT(errors_cnt == 0,
                    "Expected error count 0 for API %s, found %zu",
                    rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), errors_cnt);

        if (rd_kafka_mock_broker_error_stack_cnt(
                mcluster, txn_coord, RD_KAFKAP_InitProducerId, &errors_cnt)) {
                TEST_FAIL(
                    "Broker error count should succeed for API %s"
                    " on broker %" PRId32,
                    rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), txn_coord);
        }
        /* Checks none of the RD_KAFKAP_InitProducerId responses have been
         * consumed
         */
        TEST_ASSERT(errors_cnt == 1,
                    "Expected error count 1 for API %s, found %zu",
                    rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), errors_cnt);

        /* All done */
        rd_kafka_destroy(rk);

        /* Restore global state for subsequent tests. */
        allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;

        SUB_TEST_PASS();
}
3752
+
3753
+
3754
+ /**
3755
+ * @brief Test that the TxnOffsetCommit op doesn't retry without waiting
3756
+ * if the coordinator is found but not available, causing too frequent retries.
3757
+ */
3758
+ static void
3759
+ do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_bool_t times_out) {
3760
+ rd_kafka_t *rk;
3761
+ rd_kafka_mock_cluster_t *mcluster;
3762
+ rd_kafka_resp_err_t err;
3763
+ rd_kafka_topic_partition_list_t *offsets;
3764
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
3765
+ rd_kafka_error_t *error;
3766
+ int timeout;
3767
+
3768
+ SUB_TEST_QUICK("times_out=%s", RD_STR_ToF(times_out));
3769
+
3770
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
3771
+
3772
+ test_curr->ignore_dr_err = rd_true;
3773
+
3774
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
3775
+
3776
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
3777
+
3778
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
3779
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
3780
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
3781
+
3782
+ /* Wait for messages to be delivered */
3783
+ test_flush(rk, 5000);
3784
+
3785
+ /*
3786
+ * Fail TxnOffsetCommit with COORDINATOR_NOT_AVAILABLE
3787
+ * repeatedly.
3788
+ */
3789
+ rd_kafka_mock_push_request_errors(
3790
+ mcluster, RD_KAFKAP_TxnOffsetCommit, 4,
3791
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
3792
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
3793
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
3794
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE);
3795
+
3796
+ offsets = rd_kafka_topic_partition_list_new(1);
3797
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 1;
3798
+
3799
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
3800
+
3801
+ /* The retry delay is 500ms, with 4 retries it should take at least
3802
+ * 2000ms for this call to succeed. */
3803
+ timeout = times_out ? 500 : 4000;
3804
+ error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata,
3805
+ timeout);
3806
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
3807
+ rd_kafka_topic_partition_list_destroy(offsets);
3808
+
3809
+ if (times_out) {
3810
+ TEST_ASSERT(rd_kafka_error_code(error) ==
3811
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
3812
+ "expected %s, got: %s",
3813
+ rd_kafka_err2name(
3814
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE),
3815
+ rd_kafka_err2str(rd_kafka_error_code(error)));
3816
+ } else {
3817
+ TEST_ASSERT(rd_kafka_error_code(error) ==
3818
+ RD_KAFKA_RESP_ERR_NO_ERROR,
3819
+ "expected \"Success\", found: %s",
3820
+ rd_kafka_err2str(rd_kafka_error_code(error)));
3821
+ }
3822
+ rd_kafka_error_destroy(error);
3823
+
3824
+ /* All done */
3825
+ rd_kafka_destroy(rk);
3826
+
3827
+ SUB_TEST_PASS();
3828
+ }
3829
+
3830
+
3831
/**
 * @brief Test entry point: runs all transactional producer tests against the
 *        mock cluster. A small subset runs in quick mode; the rest only in
 *        full test runs.
 */
int main_0105_transactions_mock(int argc, char **argv) {
        TEST_SKIP_MOCK_CLUSTER(0);

        do_test_txn_recoverable_errors();

        do_test_txn_fatal_idempo_errors();

        /* Run fencing tests with both the older and newer EndTxn error code */
        do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH);
        do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_PRODUCER_FENCED);

        do_test_txn_req_cnt();

        do_test_txn_requires_abort_errors();

        do_test_txn_slow_reinit(rd_false);
        do_test_txn_slow_reinit(rd_true);

        /* Just do a subset of tests in quick mode */
        if (test_quick)
                return 0;

        do_test_txn_endtxn_errors();

        do_test_txn_endtxn_infinite();

        do_test_txn_endtxn_timeout();

        do_test_txn_endtxn_timeout_inflight();

        /* Bring down the coordinator */
        do_test_txn_broker_down_in_txn(rd_true);

        /* Bring down partition leader */
        do_test_txn_broker_down_in_txn(rd_false);

        do_test_txns_not_supported();

        do_test_txns_send_offsets_concurrent_is_retried();

        do_test_txns_send_offsets_non_eligible();

        do_test_txn_coord_req_destroy();

        do_test_txn_coord_req_multi_find();

        do_test_txn_addparts_req_multi();

        do_test_txns_no_timeout_crash();

        /* Authorization failures on different request types */
        do_test_txn_auth_failure(
            RD_KAFKAP_InitProducerId,
            RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED);

        do_test_txn_auth_failure(
            RD_KAFKAP_FindCoordinator,
            RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED);

        do_test_txn_flush_timeout();

        do_test_unstable_offset_commit();

        do_test_commit_after_msg_timeout();

        do_test_txn_switch_coordinator();

        do_test_txn_switch_coordinator_refresh();

        do_test_out_of_order_seq();

        do_test_topic_disappears_for_awhile();

        /* Group coordinator disconnects, without/with coordinator query */
        do_test_disconnected_group_coord(rd_false);

        do_test_disconnected_group_coord(rd_true);

        do_test_txn_coordinator_null_not_fatal();

        /* Resumable (timed out) transactional calls, commit and abort paths */
        do_test_txn_resumable_calls_timeout(rd_true);

        do_test_txn_resumable_calls_timeout(rd_false);

        do_test_txn_resumable_calls_timeout_error(rd_true);

        do_test_txn_resumable_calls_timeout_error(rd_false);
        do_test_txn_resumable_init();

        do_test_txn_concurrent_operations(rd_true /*commit*/);

        do_test_txn_concurrent_operations(rd_false /*abort*/);

        do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH);

        do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_PRODUCER_FENCED);

        do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_true);

        do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_false);

        return 0;
}