kreuzberg 4.0.0.rc1 → 4.0.0.rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (342)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -8
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -534
  5. data/.rubocop.yml +538 -0
  6. data/Gemfile +8 -9
  7. data/Gemfile.lock +9 -109
  8. data/README.md +426 -421
  9. data/Rakefile +25 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -340
  12. data/ext/kreuzberg_rb/extconf.rb +45 -35
  13. data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
  14. data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
  15. data/ext/kreuzberg_rb/native/README.md +425 -425
  16. data/ext/kreuzberg_rb/native/build.rs +15 -17
  17. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  18. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  19. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  20. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  21. data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
  22. data/extconf.rb +28 -28
  23. data/kreuzberg.gemspec +148 -105
  24. data/lib/kreuzberg/api_proxy.rb +142 -142
  25. data/lib/kreuzberg/cache_api.rb +46 -45
  26. data/lib/kreuzberg/cli.rb +55 -55
  27. data/lib/kreuzberg/cli_proxy.rb +127 -127
  28. data/lib/kreuzberg/config.rb +691 -684
  29. data/lib/kreuzberg/error_context.rb +32 -0
  30. data/lib/kreuzberg/errors.rb +118 -50
  31. data/lib/kreuzberg/extraction_api.rb +85 -84
  32. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  33. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  34. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  35. data/lib/kreuzberg/result.rb +216 -216
  36. data/lib/kreuzberg/setup_lib_path.rb +80 -79
  37. data/lib/kreuzberg/validator_protocol.rb +89 -89
  38. data/lib/kreuzberg/version.rb +5 -5
  39. data/lib/kreuzberg.rb +103 -82
  40. data/sig/kreuzberg/internal.rbs +184 -184
  41. data/sig/kreuzberg.rbs +520 -468
  42. data/spec/binding/cache_spec.rb +227 -227
  43. data/spec/binding/cli_proxy_spec.rb +85 -87
  44. data/spec/binding/cli_spec.rb +55 -54
  45. data/spec/binding/config_spec.rb +345 -345
  46. data/spec/binding/config_validation_spec.rb +283 -283
  47. data/spec/binding/error_handling_spec.rb +213 -213
  48. data/spec/binding/errors_spec.rb +66 -66
  49. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  50. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  51. data/spec/binding/plugins/validator_spec.rb +274 -274
  52. data/spec/fixtures/config.toml +39 -39
  53. data/spec/fixtures/config.yaml +41 -42
  54. data/spec/fixtures/invalid_config.toml +4 -4
  55. data/spec/smoke/package_spec.rb +178 -178
  56. data/spec/spec_helper.rb +42 -42
  57. data/vendor/kreuzberg/Cargo.toml +204 -134
  58. data/vendor/kreuzberg/README.md +175 -175
  59. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
  60. data/vendor/kreuzberg/build.rs +474 -460
  61. data/vendor/kreuzberg/src/api/error.rs +81 -81
  62. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  63. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  64. data/vendor/kreuzberg/src/api/server.rs +353 -353
  65. data/vendor/kreuzberg/src/api/types.rs +170 -170
  66. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
  67. data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
  68. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
  69. data/vendor/kreuzberg/src/core/config.rs +1032 -1032
  70. data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
  71. data/vendor/kreuzberg/src/core/io.rs +329 -327
  72. data/vendor/kreuzberg/src/core/mime.rs +605 -615
  73. data/vendor/kreuzberg/src/core/mod.rs +45 -42
  74. data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
  75. data/vendor/kreuzberg/src/embeddings.rs +432 -323
  76. data/vendor/kreuzberg/src/error.rs +431 -431
  77. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  78. data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
  79. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  80. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  81. data/vendor/kreuzberg/src/extraction/html.rs +553 -553
  82. data/vendor/kreuzberg/src/extraction/image.rs +368 -368
  83. data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
  84. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
  85. data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
  86. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  87. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  88. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  89. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
  90. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
  91. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
  92. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  93. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  94. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  95. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  96. data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
  97. data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
  98. data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
  99. data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
  100. data/vendor/kreuzberg/src/extractors/email.rs +143 -129
  101. data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
  103. data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
  104. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
  105. data/vendor/kreuzberg/src/extractors/html.rs +393 -410
  106. data/vendor/kreuzberg/src/extractors/image.rs +198 -195
  107. data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
  108. data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
  109. data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
  110. data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
  111. data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
  112. data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
  113. data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
  114. data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
  115. data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
  116. data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
  117. data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
  118. data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
  119. data/vendor/kreuzberg/src/extractors/security.rs +484 -0
  120. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
  121. data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
  122. data/vendor/kreuzberg/src/extractors/text.rs +260 -242
  123. data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
  124. data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
  125. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  126. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  127. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  128. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  129. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  130. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  131. data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
  132. data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
  133. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  134. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  135. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
  136. data/vendor/kreuzberg/src/lib.rs +105 -102
  137. data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
  138. data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
  139. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  140. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  141. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  142. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  143. data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
  144. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  145. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  146. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
  147. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  148. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  149. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  150. data/vendor/kreuzberg/src/panic_context.rs +154 -0
  151. data/vendor/kreuzberg/src/pdf/error.rs +122 -122
  152. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  153. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
  154. data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
  155. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  156. data/vendor/kreuzberg/src/pdf/table.rs +393 -420
  157. data/vendor/kreuzberg/src/pdf/text.rs +158 -161
  158. data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
  159. data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
  160. data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
  161. data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
  162. data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
  163. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  164. data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
  165. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  166. data/vendor/kreuzberg/src/text/mod.rs +19 -19
  167. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  168. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  169. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  170. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  171. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  172. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  173. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  174. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  175. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  176. data/vendor/kreuzberg/src/types.rs +903 -873
  177. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  178. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  179. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  180. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  181. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  182. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  183. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  184. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  185. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  186. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  187. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  188. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  189. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  190. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  191. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  192. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  193. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  194. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  195. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  196. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  198. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  199. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  200. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  201. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  202. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  203. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  204. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  205. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  206. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  207. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  208. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  209. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  210. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  211. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  212. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  213. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  214. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  215. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  216. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  217. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  218. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  219. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  220. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  221. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  222. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  223. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  224. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  225. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  226. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  227. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  228. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  229. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  230. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  231. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  232. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  233. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  234. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  235. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  236. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  237. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  238. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  239. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  240. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  241. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  242. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  243. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  244. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
  245. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  246. data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
  247. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
  248. data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
  249. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
  250. data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
  251. data/vendor/kreuzberg/tests/config_features.rs +598 -580
  252. data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
  253. data/vendor/kreuzberg/tests/core_integration.rs +510 -493
  254. data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
  255. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
  256. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
  257. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
  258. data/vendor/kreuzberg/tests/email_integration.rs +325 -325
  259. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
  260. data/vendor/kreuzberg/tests/error_handling.rs +393 -393
  261. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
  262. data/vendor/kreuzberg/tests/format_integration.rs +159 -159
  263. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  264. data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
  265. data/vendor/kreuzberg/tests/image_integration.rs +253 -253
  266. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
  267. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
  268. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
  269. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  270. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  271. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
  272. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
  273. data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
  274. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
  275. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
  276. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
  277. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  278. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
  279. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
  280. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
  281. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
  282. data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
  283. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
  284. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
  285. data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
  286. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  287. data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
  288. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
  289. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
  290. data/vendor/kreuzberg/tests/security_validation.rs +415 -404
  291. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  292. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
  293. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
  294. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
  295. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  296. data/vendor/rb-sys/.cargo-ok +1 -0
  297. data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
  298. data/vendor/rb-sys/Cargo.lock +393 -0
  299. data/vendor/rb-sys/Cargo.toml +70 -0
  300. data/vendor/rb-sys/Cargo.toml.orig +57 -0
  301. data/vendor/rb-sys/LICENSE-APACHE +190 -0
  302. data/vendor/rb-sys/LICENSE-MIT +21 -0
  303. data/vendor/rb-sys/bin/release.sh +21 -0
  304. data/vendor/rb-sys/build/features.rs +108 -0
  305. data/vendor/rb-sys/build/main.rs +246 -0
  306. data/vendor/rb-sys/build/stable_api_config.rs +153 -0
  307. data/vendor/rb-sys/build/version.rs +48 -0
  308. data/vendor/rb-sys/readme.md +36 -0
  309. data/vendor/rb-sys/src/bindings.rs +21 -0
  310. data/vendor/rb-sys/src/hidden.rs +11 -0
  311. data/vendor/rb-sys/src/lib.rs +34 -0
  312. data/vendor/rb-sys/src/macros.rs +371 -0
  313. data/vendor/rb-sys/src/memory.rs +53 -0
  314. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
  315. data/vendor/rb-sys/src/special_consts.rs +31 -0
  316. data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
  317. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
  318. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
  319. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
  320. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
  321. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
  322. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
  323. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
  324. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
  325. data/vendor/rb-sys/src/stable_api.rs +261 -0
  326. data/vendor/rb-sys/src/symbol.rs +31 -0
  327. data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
  328. data/vendor/rb-sys/src/utils.rs +89 -0
  329. data/vendor/rb-sys/src/value_type.rs +7 -0
  330. metadata +90 -95
  331. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  332. data/spec/examples.txt +0 -104
  333. data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
  334. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
  335. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
  336. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
  337. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
  338. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
  339. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
  340. data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
  341. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
  342. data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
data/vendor/kreuzberg/tests/batch_orchestration.rs
@@ -1,542 +1,556 @@
- //! Batch processing orchestration tests.
- //!
- //! Validates efficient parallel processing at multiple levels:
- //! - Multiple documents in parallel
- //! - Multiple pages within PDFs
- //! - OCR across pages
- //! - File I/O optimization
- //! - Resource utilization (CPU cores)
-
- use kreuzberg::core::config::ExtractionConfig;
- use kreuzberg::core::extractor::{batch_extract_bytes, batch_extract_file};
- use std::time::{Duration, Instant};
-
- #[cfg(feature = "ocr")]
- use kreuzberg::core::config::OcrConfig;
-
- #[cfg(feature = "ocr")]
- use kreuzberg::core::extractor::extract_file_sync;
-
- mod helpers;
-
- /// Test that batch extraction processes documents in parallel.
- ///
- /// Validates:
- /// - Multiple documents process concurrently
- /// - Parallel processing is faster than sequential
- /// - Results maintain correct order
- #[tokio::test]
- async fn test_batch_documents_parallel_execution() {
-     use helpers::get_test_file_path;
-     use std::path::PathBuf;
-
-     let config = ExtractionConfig::default();
-
-     let test_files = vec![
-         "text/contract.txt",
-         "json/sample_document.json",
-         "xml/simple_note.xml",
-         "text/readme.md",
-     ];
-
-     let mut paths: Vec<PathBuf> = Vec::new();
-     for _ in 0..5 {
-         for file in &test_files {
-             paths.push(get_test_file_path(file));
-         }
-     }
-
-     let parallel_start = Instant::now();
-     let results = batch_extract_file(paths.clone(), &config).await;
-     let parallel_duration = parallel_start.elapsed();
-
-     assert!(results.is_ok(), "Batch extraction should succeed");
-     let results = results.unwrap();
-     assert_eq!(results.len(), 20, "Should process all 20 files");
-
-     for result in &results {
-         assert!(
-             !result.content.is_empty() || result.metadata.error.is_some(),
-             "Each result should have content or error"
-         );
-     }
-
-     assert!(
-         parallel_duration < Duration::from_secs(5),
-         "Batch processing 20 files should take <5s, took: {:?}",
-         parallel_duration
-     );
- }
-
- /// Test concurrency limiting in batch processing.
- ///
- /// Validates that batch extraction respects max_concurrent_extractions config.
- #[tokio::test]
- async fn test_batch_documents_concurrency_limiting() {
-     use helpers::get_test_file_path;
-
-     let config = ExtractionConfig {
-         max_concurrent_extractions: Some(2),
-         ..Default::default()
-     };
-
-     let paths = vec![
-         get_test_file_path("text/contract.txt"),
-         get_test_file_path("json/sample_document.json"),
-         get_test_file_path("xml/simple_note.xml"),
-         get_test_file_path("text/readme.md"),
-     ];
-
-     let results = batch_extract_file(paths, &config).await;
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 4);
- }
-
- /// Test batch extraction with CPU-bound limit (default: num_cpus * 2).
- #[tokio::test]
- async fn test_batch_documents_default_concurrency() {
-     use helpers::get_test_file_path;
-
-     let config = ExtractionConfig::default();
-
-     let mut paths = Vec::new();
-     for _ in 0..13 {
-         paths.push(get_test_file_path("text/contract.txt"));
-         paths.push(get_test_file_path("json/sample_document.json"));
-         paths.push(get_test_file_path("xml/simple_note.xml"));
-         paths.push(get_test_file_path("text/readme.md"));
-     }
-     let paths = paths.into_iter().take(50).collect::<Vec<_>>();
-
-     let start = Instant::now();
-     let results = batch_extract_file(paths, &config).await;
-     let duration = start.elapsed();
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 50);
-
-     println!("Processed 50 files in {:?}", duration);
-     assert!(
-         duration < Duration::from_secs(10),
-         "50 files should process in <10s with parallelism, took: {:?}",
-         duration
-     );
- }
-
- /// Test that batch processing maintains result order.
- #[cfg(feature = "xml")]
- #[tokio::test]
- async fn test_batch_documents_preserves_order() {
-     use helpers::get_test_file_path;
-
-     let config = ExtractionConfig::default();
-
-     let paths = vec![
-         get_test_file_path("text/contract.txt"),
-         get_test_file_path("json/sample_document.json"),
-         get_test_file_path("xml/simple_note.xml"),
-     ];
-
-     let results = batch_extract_file(paths, &config).await.unwrap();
-
-     assert_eq!(results.len(), 3, "Should have 3 results");
-
-     assert!(!results[0].content.is_empty(), "First result should have content");
-     assert!(!results[1].content.is_empty(), "Second result should have content");
-     assert!(!results[2].content.is_empty(), "Third result should have content");
-
-     assert!(
-         results[0].content.contains("contract"),
-         "First result should be from contract.txt, got: '{}'",
-         results[0].content
-     );
-     assert!(
-         results[1].content.contains("Sample") || results[1].content.contains("author"),
-         "Second result should be from JSON document, got: '{}'",
-         results[1].content
-     );
-     assert!(
-         results[2].content.contains("Tove") || results[2].content.contains("note"),
-         "Third result should be from XML note, got: '{}'",
-         results[2].content
-     );
- }
-
- /// Test that multi-page PDF extraction is efficient.
- ///
- /// Validates:
- /// - Multiple pages are processed
- /// - OCR is applied to all pages if needed
- /// - Content from all pages is combined
- #[cfg(feature = "pdf")]
- #[tokio::test]
- async fn test_multipage_pdf_extraction() {
-     use helpers::{get_test_file_path, skip_if_missing};
-
-     if skip_if_missing("pdfs/multi_page.pdf") {
-         tracing::debug!("Skipping multi-page PDF test: test file not available");
-         return;
-     }
-
-     let config = ExtractionConfig::default();
-     let pdf_path = get_test_file_path("pdfs/multi_page.pdf");
-
-     let start = Instant::now();
-     let result = kreuzberg::core::extractor::extract_file(&pdf_path, None, &config).await;
-     let duration = start.elapsed();
-
-     assert!(result.is_ok(), "Multi-page PDF extraction should succeed");
-     let extraction = result.unwrap();
-
-     assert!(!extraction.content.is_empty(), "Should extract text from all pages");
-     println!("Extracted multi-page PDF in {:?}", duration);
- }
-
- /// Test concurrent PDF extractions (multiple PDFs at once).
- #[cfg(feature = "pdf")]
- #[tokio::test]
- async fn test_concurrent_pdf_extractions() {
-     use helpers::{get_test_file_path, skip_if_missing};
-
-     if skip_if_missing("pdfs/simple.pdf") {
-         tracing::debug!("Skipping concurrent PDF test: test file not available");
-         return;
-     }
-
-     let config = ExtractionConfig::default();
-
-     let mut paths = Vec::new();
-     for _ in 0..10 {
-         paths.push(get_test_file_path("pdfs/simple.pdf"));
-     }
-
-     let start = Instant::now();
-     let results = batch_extract_file(paths, &config).await;
-     let duration = start.elapsed();
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 10);
-
-     println!("Processed 10 PDFs in {:?}", duration);
- }
-
- /// Test OCR on multi-page scanned document.
- ///
- /// Validates:
- /// - All pages are OCR'd
- /// - Results are combined correctly
- /// - Processing is efficient
- #[cfg(feature = "ocr")]
- #[test]
- fn test_ocr_multipage_efficiency() {
-     use helpers::{get_test_file_path, skip_if_missing};
-
-     if skip_if_missing("images/ocr_image.jpg") {
-         tracing::debug!("Skipping OCR multi-page test: test file not available");
-         return;
-     }
-
-     let config = ExtractionConfig {
-         ocr: Some(OcrConfig {
-             backend: "tesseract".to_string(),
-             language: "eng".to_string(),
-             tesseract_config: None,
-         }),
-         force_ocr: false,
-         use_cache: true,
-         ..Default::default()
-     };
-
-     let file_path = get_test_file_path("images/ocr_image.jpg");
-
-     let start = Instant::now();
-     let result1 = extract_file_sync(&file_path, None, &config);
-     let first_duration = start.elapsed();
-
-     assert!(result1.is_ok(), "First OCR should succeed");
-
-     let start = Instant::now();
-     let result2 = extract_file_sync(&file_path, None, &config);
-     let second_duration = start.elapsed();
-
-     assert!(result2.is_ok(), "Second OCR should succeed");
-
-     println!(
-         "OCR timing: first={:?}, cached={:?}, speedup={:.1}x",
-         first_duration,
-         second_duration,
-         first_duration.as_secs_f64() / second_duration.as_secs_f64().max(0.001)
-     );
-
-     assert!(
-         second_duration < first_duration / 2,
-         "Cached OCR should be at least 2x faster. First: {:?}, Second: {:?}",
-         first_duration,
-         second_duration
-     );
- }
-
- /// Test parallel processing of byte arrays.
- ///
- /// Validates that batch_extract_bytes processes data in parallel.
- #[tokio::test]
- async fn test_batch_bytes_parallel_processing() {
-     let config = ExtractionConfig::default();
-
-     let contents: Vec<(Vec<u8>, &str)> = (0..30)
-         .map(|i| {
-             let content = format!("Test content number {}", i);
-             (content.into_bytes(), "text/plain")
-         })
-         .collect();
-
-     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
-
-     let start = Instant::now();
-     let results = batch_extract_bytes(contents_ref, &config).await;
-     let duration = start.elapsed();
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 30);
-
-     for (i, result) in results.iter().enumerate() {
-         assert_eq!(result.content, format!("Test content number {}", i));
-     }
-
-     println!("Batch processed 30 byte arrays in {:?}", duration);
- }
-
- /// Test error handling in batch bytes processing.
- #[tokio::test]
- async fn test_batch_bytes_mixed_valid_invalid() {
-     let config = ExtractionConfig::default();
-
-     let contents = vec![
-         (b"valid content 1".as_slice(), "text/plain"),
-         (b"invalid content".as_slice(), "invalid/mime"),
-         (b"valid content 2".as_slice(), "text/plain"),
-         (b"more invalid".as_slice(), "bad/type"),
-         (b"valid content 3".as_slice(), "text/plain"),
-     ];
-
-     let results = batch_extract_bytes(contents, &config).await;
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 5);
-
-     assert_eq!(results[0].content, "valid content 1");
-     assert_eq!(results[2].content, "valid content 2");
-     assert_eq!(results[4].content, "valid content 3");
-
-     assert!(results[1].metadata.error.is_some());
-     assert!(results[3].metadata.error.is_some());
- }
-
- /// Test that batch processing utilizes multiple CPU cores.
- ///
- /// Validates that parallel extraction actually runs in parallel,
- /// not just sequentially with fancy task management.
- #[tokio::test]
- async fn test_batch_utilizes_multiple_cores() {
-     let config = ExtractionConfig {
-         max_concurrent_extractions: Some(num_cpus::get()),
-         ..Default::default()
-     };
-
-     let mut contents = Vec::new();
-     for i in 0..20 {
-         let json = format!(
-             r#"{{"id": {}, "data": "{}", "nested": {{"value": "{}"}}}}"#,
-             i,
-             "x".repeat(100),
-             "y".repeat(100)
-         );
-         contents.push((json.into_bytes(), "application/json"));
-     }
-
-     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
-
-     let start = Instant::now();
-     let results = batch_extract_bytes(contents_ref, &config).await;
-     let duration = start.elapsed();
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 20);
-
-     println!(
-         "Processed 20 JSON documents in {:?} with {} cores",
-         duration,
-         num_cpus::get()
-     );
-
-     assert!(
-         duration < Duration::from_secs(2),
-         "Batch processing should leverage parallelism, took: {:?}",
-         duration
-     );
- }
-
- /// Test batch processing under memory pressure.
- ///
- /// Validates that semaphore prevents resource exhaustion.
- #[tokio::test]
- async fn test_batch_memory_pressure_handling() {
-     let config = ExtractionConfig {
-         max_concurrent_extractions: Some(4),
-         ..Default::default()
-     };
-
-     let mut contents = Vec::new();
-     for i in 0..50 {
-         let json = format!(r#"{{"id": {}, "large_data": "{}"}}"#, i, "x".repeat(10000));
-         contents.push((json.into_bytes(), "application/json"));
-     }
-
-     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
-
-     let start = Instant::now();
-     let results = batch_extract_bytes(contents_ref, &config).await;
-     let duration = start.elapsed();
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 50);
-
-     println!("Processed 50 large documents with concurrency limit in {:?}", duration);
-
-     for result in &results {
-         assert!(!result.content.is_empty());
-     }
- }
-
- /// Test that batch processing scales with CPU count.
- #[tokio::test]
- async fn test_batch_scales_with_cpu_count() {
-     let cpu_count = num_cpus::get();
-
-     let contents: Vec<(Vec<u8>, &str)> = (0..30)
-         .map(|i| (format!("Content {}", i).into_bytes(), "text/plain"))
-         .collect();
-
-     let config_1 = ExtractionConfig {
-         max_concurrent_extractions: Some(1),
-         ..Default::default()
-     };
-
-     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
-
-     let start = Instant::now();
-     let _ = batch_extract_bytes(contents_ref.clone(), &config_1).await.unwrap();
-     let duration_1 = start.elapsed();
-
-     let config_full = ExtractionConfig {
-         max_concurrent_extractions: Some(cpu_count),
-         ..Default::default()
-     };
-
-     let start = Instant::now();
-     let _ = batch_extract_bytes(contents_ref, &config_full).await.unwrap();
-     let duration_full = start.elapsed();
-
-     println!(
-         "Concurrency=1: {:?}, Concurrency={}: {:?}, Speedup: {:.2}x",
-         duration_1,
-         cpu_count,
-         duration_full,
-         duration_1.as_secs_f64() / duration_full.as_secs_f64()
-     );
-
-     if cpu_count > 1 {
-         let slowdown_ratio = duration_full.as_secs_f64() / duration_1.as_secs_f64();
-         assert!(
-             slowdown_ratio <= 5.0,
-             "Parallel execution should not be excessively slower (got {:.2}x slowdown)",
-             slowdown_ratio
-         );
-     }
- }
-
- /// End-to-end test: batch process mixed document types.
- #[cfg(feature = "xml")]
- #[tokio::test]
- async fn test_batch_mixed_document_types() {
-     use helpers::get_test_file_path;
-
-     let config = ExtractionConfig::default();
-
-     let paths = vec![
-         get_test_file_path("text/contract.txt"),
-         get_test_file_path("json/sample_document.json"),
-         get_test_file_path("xml/simple_note.xml"),
-         get_test_file_path("text/readme.md"),
-     ];
-
-     let results = batch_extract_file(paths, &config).await;
-
-     assert!(results.is_ok());
-     let results = results.unwrap();
-     assert_eq!(results.len(), 4);
-
-     for (i, result) in results.iter().enumerate() {
-         assert!(
-             !result.content.is_empty(),
-             "Document {} should have extracted content",
-             i
-         );
-     }
-
-     assert!(
-         results[0].content.contains("contract"),
-         "First result should be from contract.txt, got: '{}'",
-         results[0].content
-     );
-     assert!(
-         results[1].content.contains("Sample") || results[1].content.contains("author"),
-         "Second result should be from JSON document, got: '{}'",
-         results[1].content
-     );
-     assert!(
-         results[2].content.contains("Tove") || results[2].content.contains("note"),
-         "Third result should be from XML, got: '{}'",
-         results[2].content
-     );
-     assert!(
-         !results[3].content.is_empty(),
-         "Fourth result should be from markdown, got: '{}'",
-         results[3].content
-     );
- }
-
- /// Test batch processing maintains high accuracy under load.
- #[tokio::test]
- async fn test_batch_accuracy_under_load() {
-     let config = ExtractionConfig::default();
-
-     let mut contents = Vec::new();
-     for i in 0..100 {
-         let content = format!("Document number {} with unique content", i);
-         contents.push((content.into_bytes(), "text/plain"));
-     }
-
-     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
-
-     let results = batch_extract_bytes(contents_ref, &config).await.unwrap();
-
-     assert_eq!(results.len(), 100);
-
-     for (i, result) in results.iter().enumerate() {
-         let expected = format!("Document number {} with unique content", i);
-         assert_eq!(
-             result.content, expected,
-             "Document {} content mismatch - possible cross-contamination",
-             i
-         );
-     }
- }
+ //! Batch processing orchestration tests.
+ //!
+ //! Validates efficient parallel processing at multiple levels:
+ //! - Multiple documents in parallel
+ //! - Multiple pages within PDFs
+ //! - OCR across pages
+ //! - File I/O optimization
+ //! - Resource utilization (CPU cores)
+
+ use kreuzberg::core::config::ExtractionConfig;
+ use kreuzberg::core::extractor::{batch_extract_bytes, batch_extract_file};
+ use std::time::{Duration, Instant};
+
+ #[cfg(feature = "ocr")]
+ use kreuzberg::core::config::OcrConfig;
+
+ #[cfg(feature = "ocr")]
+ use kreuzberg::core::extractor::extract_file_sync;
+
+ mod helpers;
+
+ fn trim_trailing_newlines(value: &str) -> &str {
+     value.trim_end_matches(['\n', '\r'])
+ }
+
+ fn assert_text_content(actual: &str, expected: &str) {
+     assert_eq!(
+         trim_trailing_newlines(actual),
+         expected,
+         "Content mismatch after trimming trailing newlines"
+     );
+ }
+
+ /// Test that batch extraction processes documents in parallel.
+ ///
+ /// Validates:
+ /// - Multiple documents process concurrently
+ /// - Parallel processing is faster than sequential
+ /// - Results maintain correct order
+ #[tokio::test]
+ async fn test_batch_documents_parallel_execution() {
+     use helpers::get_test_file_path;
+     use std::path::PathBuf;
+
+     let config = ExtractionConfig::default();
+
+     let test_files = vec![
+         "text/contract.txt",
+         "json/sample_document.json",
+         "xml/simple_note.xml",
+         "text/readme.md",
+     ];
+
+     let mut paths: Vec<PathBuf> = Vec::new();
+     for _ in 0..5 {
+         for file in &test_files {
+             paths.push(get_test_file_path(file));
+         }
+     }
+
+     let parallel_start = Instant::now();
+     let results = batch_extract_file(paths.clone(), &config).await;
+     let parallel_duration = parallel_start.elapsed();
+
+     assert!(results.is_ok(), "Batch extraction should succeed");
+     let results = results.unwrap();
+     assert_eq!(results.len(), 20, "Should process all 20 files");
+
+     for result in &results {
+         assert!(
+             !result.content.is_empty() || result.metadata.error.is_some(),
+             "Each result should have content or error"
+         );
+     }
+
+     assert!(
+         parallel_duration < Duration::from_secs(5),
+         "Batch processing 20 files should take <5s, took: {:?}",
+         parallel_duration
+     );
+ }
+
+ /// Test concurrency limiting in batch processing.
+ ///
+ /// Validates that batch extraction respects max_concurrent_extractions config.
+ #[tokio::test]
+ async fn test_batch_documents_concurrency_limiting() {
+     use helpers::get_test_file_path;
+
+     let config = ExtractionConfig {
+         max_concurrent_extractions: Some(2),
+         ..Default::default()
+     };
+
+     let paths = vec![
+         get_test_file_path("text/contract.txt"),
+         get_test_file_path("json/sample_document.json"),
+         get_test_file_path("xml/simple_note.xml"),
+         get_test_file_path("text/readme.md"),
+     ];
+
+     let results = batch_extract_file(paths, &config).await;
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 4);
+ }
+
+ /// Test batch extraction with CPU-bound limit (default: num_cpus * 2).
+ #[tokio::test]
+ async fn test_batch_documents_default_concurrency() {
+     use helpers::get_test_file_path;
+
+     let config = ExtractionConfig::default();
+
+     let mut paths = Vec::new();
+     for _ in 0..13 {
+         paths.push(get_test_file_path("text/contract.txt"));
+         paths.push(get_test_file_path("json/sample_document.json"));
+         paths.push(get_test_file_path("xml/simple_note.xml"));
+         paths.push(get_test_file_path("text/readme.md"));
+     }
+     let paths = paths.into_iter().take(50).collect::<Vec<_>>();
+
+     let start = Instant::now();
+     let results = batch_extract_file(paths, &config).await;
+     let duration = start.elapsed();
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 50);
+
+     println!("Processed 50 files in {:?}", duration);
+     assert!(
+         duration < Duration::from_secs(10),
+         "50 files should process in <10s with parallelism, took: {:?}",
+         duration
+     );
+ }
+
+ /// Test that batch processing maintains result order.
+ #[cfg(feature = "xml")]
+ #[tokio::test]
+ async fn test_batch_documents_preserves_order() {
+     use helpers::get_test_file_path;
+
+     let config = ExtractionConfig::default();
+
+     let paths = vec![
+         get_test_file_path("text/contract.txt"),
+         get_test_file_path("json/sample_document.json"),
+         get_test_file_path("xml/simple_note.xml"),
+     ];
+
+     let results = batch_extract_file(paths, &config).await.unwrap();
+
+     assert_eq!(results.len(), 3, "Should have 3 results");
+
+     assert!(!results[0].content.is_empty(), "First result should have content");
+     assert!(!results[1].content.is_empty(), "Second result should have content");
+     assert!(!results[2].content.is_empty(), "Third result should have content");
+
+     assert!(
+         results[0].content.contains("contract"),
+         "First result should be from contract.txt, got: '{}'",
+         results[0].content
+     );
+     assert!(
+         results[1].content.contains("Sample") || results[1].content.contains("author"),
+         "Second result should be from JSON document, got: '{}'",
+         results[1].content
+     );
+     assert!(
+         results[2].content.contains("Tove") || results[2].content.contains("note"),
+         "Third result should be from XML note, got: '{}'",
+         results[2].content
+     );
+ }
+
+ /// Test that multi-page PDF extraction is efficient.
+ ///
+ /// Validates:
+ /// - Multiple pages are processed
+ /// - OCR is applied to all pages if needed
+ /// - Content from all pages is combined
+ #[cfg(feature = "pdf")]
+ #[tokio::test]
+ async fn test_multipage_pdf_extraction() {
+     use helpers::{get_test_file_path, skip_if_missing};
+
+     if skip_if_missing("pdfs/multi_page.pdf") {
+         tracing::debug!("Skipping multi-page PDF test: test file not available");
+         return;
+     }
+
+     let config = ExtractionConfig::default();
+     let pdf_path = get_test_file_path("pdfs/multi_page.pdf");
+
+     let start = Instant::now();
+     let result = kreuzberg::core::extractor::extract_file(&pdf_path, None, &config).await;
+     let duration = start.elapsed();
+
+     assert!(result.is_ok(), "Multi-page PDF extraction should succeed");
+     let extraction = result.unwrap();
+
+     assert!(!extraction.content.is_empty(), "Should extract text from all pages");
+     println!("Extracted multi-page PDF in {:?}", duration);
+ }
+
+ /// Test concurrent PDF extractions (multiple PDFs at once).
+ #[cfg(feature = "pdf")]
+ #[tokio::test]
+ async fn test_concurrent_pdf_extractions() {
+     use helpers::{get_test_file_path, skip_if_missing};
+
+     if skip_if_missing("pdfs/simple.pdf") {
+         tracing::debug!("Skipping concurrent PDF test: test file not available");
+         return;
+     }
+
+     let config = ExtractionConfig::default();
+
+     let mut paths = Vec::new();
+     for _ in 0..10 {
+         paths.push(get_test_file_path("pdfs/simple.pdf"));
+     }
+
+     let start = Instant::now();
+     let results = batch_extract_file(paths, &config).await;
+     let duration = start.elapsed();
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 10);
+
+     println!("Processed 10 PDFs in {:?}", duration);
+ }
+
+ /// Test OCR on multi-page scanned document.
+ ///
+ /// Validates:
+ /// - All pages are OCR'd
+ /// - Results are combined correctly
+ /// - Processing is efficient
+ #[cfg(feature = "ocr")]
+ #[test]
+ fn test_ocr_multipage_efficiency() {
+     use helpers::{get_test_file_path, skip_if_missing};
+
+     if skip_if_missing("images/ocr_image.jpg") {
+         tracing::debug!("Skipping OCR multi-page test: test file not available");
+         return;
+     }
+
+     let config = ExtractionConfig {
+         ocr: Some(OcrConfig {
+             backend: "tesseract".to_string(),
+             language: "eng".to_string(),
+             tesseract_config: None,
+         }),
+         force_ocr: false,
+         use_cache: true,
+         ..Default::default()
+     };
+
+     let file_path = get_test_file_path("images/ocr_image.jpg");
+
+     let start = Instant::now();
+     let result1 = extract_file_sync(&file_path, None, &config);
+     let first_duration = start.elapsed();
+
+     assert!(result1.is_ok(), "First OCR should succeed");
+
+     let start = Instant::now();
+     let result2 = extract_file_sync(&file_path, None, &config);
+     let second_duration = start.elapsed();
+
+     assert!(result2.is_ok(), "Second OCR should succeed");
+
+     println!(
+         "OCR timing: first={:?}, cached={:?}, speedup={:.1}x",
+         first_duration,
+         second_duration,
+         first_duration.as_secs_f64() / second_duration.as_secs_f64().max(0.001)
+     );
+
+     assert!(
+         second_duration < first_duration / 2,
+         "Cached OCR should be at least 2x faster. First: {:?}, Second: {:?}",
+         first_duration,
+         second_duration
+     );
+ }
+
+ /// Test parallel processing of byte arrays.
+ ///
+ /// Validates that batch_extract_bytes processes data in parallel.
+ #[tokio::test]
+ async fn test_batch_bytes_parallel_processing() {
+     let config = ExtractionConfig::default();
+
+     let contents: Vec<(Vec<u8>, &str)> = (0..30)
+         .map(|i| {
+             let content = format!("Test content number {}", i);
+             (content.into_bytes(), "text/plain")
+         })
+         .collect();
+
+     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
+
+     let start = Instant::now();
+     let results = batch_extract_bytes(contents_ref, &config).await;
+     let duration = start.elapsed();
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 30);
+
+     for (i, result) in results.iter().enumerate() {
+         let expected = format!("Test content number {}", i);
+         assert_text_content(&result.content, &expected);
+     }
+
+     println!("Batch processed 30 byte arrays in {:?}", duration);
+ }
+
+ /// Test error handling in batch bytes processing.
+ #[tokio::test]
+ async fn test_batch_bytes_mixed_valid_invalid() {
+     let config = ExtractionConfig::default();
+
+     let contents = vec![
+         (b"valid content 1".as_slice(), "text/plain"),
+         (b"invalid content".as_slice(), "invalid/mime"),
+         (b"valid content 2".as_slice(), "text/plain"),
+         (b"more invalid".as_slice(), "bad/type"),
+         (b"valid content 3".as_slice(), "text/plain"),
+     ];
+
+     let results = batch_extract_bytes(contents, &config).await;
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 5);
+
+     assert_text_content(&results[0].content, "valid content 1");
+     assert_text_content(&results[2].content, "valid content 2");
+     assert_text_content(&results[4].content, "valid content 3");
+
+     assert!(results[1].metadata.error.is_some());
+     assert!(results[3].metadata.error.is_some());
+ }
+
+ /// Test that batch processing utilizes multiple CPU cores.
+ ///
+ /// Validates that parallel extraction actually runs in parallel,
+ /// not just sequentially with fancy task management.
+ #[tokio::test]
+ async fn test_batch_utilizes_multiple_cores() {
+     let config = ExtractionConfig {
+         max_concurrent_extractions: Some(num_cpus::get()),
+         ..Default::default()
+     };
+
+     let mut contents = Vec::new();
+     for i in 0..20 {
+         let json = format!(
+             r#"{{"id": {}, "data": "{}", "nested": {{"value": "{}"}}}}"#,
+             i,
+             "x".repeat(100),
+             "y".repeat(100)
+         );
+         contents.push((json.into_bytes(), "application/json"));
+     }
+
+     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
+
+     let start = Instant::now();
+     let results = batch_extract_bytes(contents_ref, &config).await;
+     let duration = start.elapsed();
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 20);
+
+     println!(
+         "Processed 20 JSON documents in {:?} with {} cores",
+         duration,
+         num_cpus::get()
+     );
+
+     assert!(
+         duration < Duration::from_secs(2),
+         "Batch processing should leverage parallelism, took: {:?}",
+         duration
+     );
+ }
+
+ /// Test batch processing under memory pressure.
+ ///
+ /// Validates that semaphore prevents resource exhaustion.
+ #[tokio::test]
+ async fn test_batch_memory_pressure_handling() {
+     let config = ExtractionConfig {
+         max_concurrent_extractions: Some(4),
+         ..Default::default()
+     };
+
+     let mut contents = Vec::new();
+     for i in 0..50 {
+         let json = format!(r#"{{"id": {}, "large_data": "{}"}}"#, i, "x".repeat(10000));
+         contents.push((json.into_bytes(), "application/json"));
+     }
+
+     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
+
+     let start = Instant::now();
+     let results = batch_extract_bytes(contents_ref, &config).await;
+     let duration = start.elapsed();
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 50);
+
+     println!("Processed 50 large documents with concurrency limit in {:?}", duration);
+
+     for result in &results {
+         assert!(!result.content.is_empty());
+     }
+ }
+
+ /// Test that batch processing scales with CPU count.
+ #[tokio::test]
+ async fn test_batch_scales_with_cpu_count() {
+     let cpu_count = num_cpus::get();
+
+     let contents: Vec<(Vec<u8>, &str)> = (0..30)
+         .map(|i| (format!("Content {}", i).into_bytes(), "text/plain"))
+         .collect();
+
+     let config_1 = ExtractionConfig {
+         max_concurrent_extractions: Some(1),
+         ..Default::default()
+     };
+
+     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
+
+     let start = Instant::now();
+     let _ = batch_extract_bytes(contents_ref.clone(), &config_1).await.unwrap();
+     let duration_1 = start.elapsed();
+
+     let config_full = ExtractionConfig {
+         max_concurrent_extractions: Some(cpu_count),
+         ..Default::default()
+     };
+
+     let start = Instant::now();
+     let _ = batch_extract_bytes(contents_ref, &config_full).await.unwrap();
+     let duration_full = start.elapsed();
+
+     println!(
+         "Concurrency=1: {:?}, Concurrency={}: {:?}, Speedup: {:.2}x",
+         duration_1,
+         cpu_count,
+         duration_full,
+         duration_1.as_secs_f64() / duration_full.as_secs_f64()
+     );
+
+     if cpu_count > 1 {
+         let slowdown_ratio = duration_full.as_secs_f64() / duration_1.as_secs_f64();
+         assert!(
+             slowdown_ratio <= 5.0,
+             "Parallel execution should not be excessively slower (got {:.2}x slowdown)",
+             slowdown_ratio
+         );
+     }
+ }
+
+ /// End-to-end test: batch process mixed document types.
+ #[cfg(feature = "xml")]
+ #[tokio::test]
+ async fn test_batch_mixed_document_types() {
+     use helpers::get_test_file_path;
+
+     let config = ExtractionConfig::default();
+
+     let paths = vec![
+         get_test_file_path("text/contract.txt"),
+         get_test_file_path("json/sample_document.json"),
+         get_test_file_path("xml/simple_note.xml"),
+         get_test_file_path("text/readme.md"),
+     ];
+
+     let results = batch_extract_file(paths, &config).await;
+
+     assert!(results.is_ok());
+     let results = results.unwrap();
+     assert_eq!(results.len(), 4);
+
+     for (i, result) in results.iter().enumerate() {
+         assert!(
+             !result.content.is_empty(),
+             "Document {} should have extracted content",
+             i
+         );
+     }
+
+     assert!(
+         results[0].content.contains("contract"),
+         "First result should be from contract.txt, got: '{}'",
+         results[0].content
+     );
+     assert!(
+         results[1].content.contains("Sample") || results[1].content.contains("author"),
+         "Second result should be from JSON document, got: '{}'",
+         results[1].content
+     );
+     assert!(
+         results[2].content.contains("Tove") || results[2].content.contains("note"),
+         "Third result should be from XML, got: '{}'",
+         results[2].content
+     );
+     assert!(
+         !results[3].content.is_empty(),
+         "Fourth result should be from markdown, got: '{}'",
+         results[3].content
+     );
+ }
+
+ /// Test batch processing maintains high accuracy under load.
+ #[tokio::test]
+ async fn test_batch_accuracy_under_load() {
+     let config = ExtractionConfig::default();
+
+     let mut contents = Vec::new();
+     for i in 0..100 {
+         let content = format!("Document number {} with unique content", i);
+         contents.push((content.into_bytes(), "text/plain"));
+     }
+
+     let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
+
+     let results = batch_extract_bytes(contents_ref, &config).await.unwrap();
+
+     assert_eq!(results.len(), 100);
+
+     for (i, result) in results.iter().enumerate() {
+         let expected = format!("Document number {} with unique content", i);
+         assert_eq!(
+             trim_trailing_newlines(&result.content),
+             expected,
+             "Document {} content mismatch - possible cross-contamination",
+             i
+         );
+     }
+ }