kreuzberg 4.0.0.rc2 → 4.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (446) hide show
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +543 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +194 -6
  8. data/README.md +391 -426
  9. data/Rakefile +34 -25
  10. data/Steepfile +51 -47
  11. data/examples/async_patterns.rb +283 -341
  12. data/ext/kreuzberg_rb/extconf.rb +65 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +23 -0
  14. data/ext/kreuzberg_rb/native/Cargo.lock +7619 -6535
  15. data/ext/kreuzberg_rb/native/Cargo.toml +75 -44
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +15 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3802 -2998
  23. data/extconf.rb +60 -28
  24. data/kreuzberg.gemspec +199 -148
  25. data/lib/kreuzberg/api_proxy.rb +126 -142
  26. data/lib/kreuzberg/cache_api.rb +67 -46
  27. data/lib/kreuzberg/cli.rb +47 -55
  28. data/lib/kreuzberg/cli_proxy.rb +117 -127
  29. data/lib/kreuzberg/config.rb +936 -691
  30. data/lib/kreuzberg/error_context.rb +136 -32
  31. data/lib/kreuzberg/errors.rb +116 -118
  32. data/lib/kreuzberg/extraction_api.rb +313 -85
  33. data/lib/kreuzberg/mcp_proxy.rb +177 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +40 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +15 -86
  36. data/lib/kreuzberg/result.rb +334 -216
  37. data/lib/kreuzberg/setup_lib_path.rb +99 -80
  38. data/lib/kreuzberg/types.rb +170 -0
  39. data/lib/kreuzberg/validator_protocol.rb +16 -89
  40. data/lib/kreuzberg/version.rb +5 -5
  41. data/lib/kreuzberg.rb +96 -103
  42. data/lib/libpdfium.so +0 -0
  43. data/sig/kreuzberg/internal.rbs +184 -184
  44. data/sig/kreuzberg.rbs +561 -520
  45. data/spec/binding/async_operations_spec.rb +473 -0
  46. data/spec/binding/batch_operations_spec.rb +595 -0
  47. data/spec/binding/batch_spec.rb +359 -0
  48. data/spec/binding/cache_spec.rb +227 -227
  49. data/spec/binding/cli_proxy_spec.rb +85 -85
  50. data/spec/binding/cli_spec.rb +55 -55
  51. data/spec/binding/config_result_spec.rb +377 -0
  52. data/spec/binding/config_spec.rb +419 -345
  53. data/spec/binding/config_validation_spec.rb +377 -283
  54. data/spec/binding/embeddings_spec.rb +816 -0
  55. data/spec/binding/error_handling_spec.rb +399 -213
  56. data/spec/binding/error_recovery_spec.rb +488 -0
  57. data/spec/binding/errors_spec.rb +66 -66
  58. data/spec/binding/font_config_spec.rb +220 -0
  59. data/spec/binding/images_spec.rb +738 -0
  60. data/spec/binding/keywords_extraction_spec.rb +600 -0
  61. data/spec/binding/metadata_types_spec.rb +1228 -0
  62. data/spec/binding/pages_extraction_spec.rb +471 -0
  63. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  64. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  65. data/spec/binding/plugins/validator_spec.rb +273 -274
  66. data/spec/binding/tables_spec.rb +641 -0
  67. data/spec/fixtures/config.toml +38 -39
  68. data/spec/fixtures/config.yaml +41 -41
  69. data/spec/fixtures/invalid_config.toml +3 -4
  70. data/spec/smoke/package_spec.rb +177 -178
  71. data/spec/spec_helper.rb +40 -42
  72. data/spec/unit/config/chunking_config_spec.rb +213 -0
  73. data/spec/unit/config/embedding_config_spec.rb +343 -0
  74. data/spec/unit/config/extraction_config_spec.rb +438 -0
  75. data/spec/unit/config/font_config_spec.rb +285 -0
  76. data/spec/unit/config/hierarchy_config_spec.rb +314 -0
  77. data/spec/unit/config/image_extraction_config_spec.rb +209 -0
  78. data/spec/unit/config/image_preprocessing_config_spec.rb +249 -0
  79. data/spec/unit/config/keyword_config_spec.rb +229 -0
  80. data/spec/unit/config/language_detection_config_spec.rb +258 -0
  81. data/spec/unit/config/ocr_config_spec.rb +171 -0
  82. data/spec/unit/config/page_config_spec.rb +221 -0
  83. data/spec/unit/config/pdf_config_spec.rb +267 -0
  84. data/spec/unit/config/postprocessor_config_spec.rb +290 -0
  85. data/spec/unit/config/tesseract_config_spec.rb +181 -0
  86. data/spec/unit/config/token_reduction_config_spec.rb +251 -0
  87. data/test/metadata_types_test.rb +959 -0
  88. data/vendor/Cargo.toml +61 -0
  89. data/vendor/kreuzberg/Cargo.toml +259 -204
  90. data/vendor/kreuzberg/README.md +263 -175
  91. data/vendor/kreuzberg/build.rs +782 -474
  92. data/vendor/kreuzberg/examples/bench_fixes.rs +71 -0
  93. data/vendor/kreuzberg/examples/test_pdfium_fork.rs +62 -0
  94. data/vendor/kreuzberg/src/api/error.rs +81 -81
  95. data/vendor/kreuzberg/src/api/handlers.rs +320 -199
  96. data/vendor/kreuzberg/src/api/mod.rs +94 -79
  97. data/vendor/kreuzberg/src/api/server.rs +518 -353
  98. data/vendor/kreuzberg/src/api/types.rs +206 -170
  99. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  100. data/vendor/kreuzberg/src/chunking/mod.rs +2303 -677
  101. data/vendor/kreuzberg/src/chunking/processor.rs +219 -0
  102. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  103. data/vendor/kreuzberg/src/core/batch_optimizations.rs +385 -0
  104. data/vendor/kreuzberg/src/core/config.rs +1914 -1032
  105. data/vendor/kreuzberg/src/core/config_validation.rs +949 -0
  106. data/vendor/kreuzberg/src/core/extractor.rs +1200 -1024
  107. data/vendor/kreuzberg/src/core/formats.rs +235 -0
  108. data/vendor/kreuzberg/src/core/io.rs +329 -329
  109. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  110. data/vendor/kreuzberg/src/core/mod.rs +61 -45
  111. data/vendor/kreuzberg/src/core/pipeline.rs +1223 -984
  112. data/vendor/kreuzberg/src/core/server_config.rs +1220 -0
  113. data/vendor/kreuzberg/src/embeddings.rs +471 -432
  114. data/vendor/kreuzberg/src/error.rs +431 -431
  115. data/vendor/kreuzberg/src/extraction/archive.rs +959 -954
  116. data/vendor/kreuzberg/src/extraction/capacity.rs +263 -0
  117. data/vendor/kreuzberg/src/extraction/docx.rs +404 -40
  118. data/vendor/kreuzberg/src/extraction/email.rs +855 -854
  119. data/vendor/kreuzberg/src/extraction/excel.rs +697 -688
  120. data/vendor/kreuzberg/src/extraction/html.rs +1830 -553
  121. data/vendor/kreuzberg/src/extraction/image.rs +492 -368
  122. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -563
  123. data/vendor/kreuzberg/src/extraction/markdown.rs +216 -213
  124. data/vendor/kreuzberg/src/extraction/mod.rs +93 -81
  125. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  126. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  127. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  128. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  129. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -287
  130. data/vendor/kreuzberg/src/extraction/pptx.rs +3102 -3000
  131. data/vendor/kreuzberg/src/extraction/structured.rs +491 -490
  132. data/vendor/kreuzberg/src/extraction/table.rs +329 -328
  133. data/vendor/kreuzberg/src/extraction/text.rs +277 -269
  134. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  135. data/vendor/kreuzberg/src/extractors/archive.rs +447 -446
  136. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -469
  137. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -502
  138. data/vendor/kreuzberg/src/extractors/docx.rs +400 -367
  139. data/vendor/kreuzberg/src/extractors/email.rs +157 -143
  140. data/vendor/kreuzberg/src/extractors/epub.rs +696 -707
  141. data/vendor/kreuzberg/src/extractors/excel.rs +385 -343
  142. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -491
  143. data/vendor/kreuzberg/src/extractors/html.rs +419 -393
  144. data/vendor/kreuzberg/src/extractors/image.rs +219 -198
  145. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1051
  146. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -367
  147. data/vendor/kreuzberg/src/extractors/latex.rs +653 -652
  148. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -700
  149. data/vendor/kreuzberg/src/extractors/mod.rs +429 -365
  150. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  151. data/vendor/kreuzberg/src/extractors/opml.rs +635 -634
  152. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -528
  153. data/vendor/kreuzberg/src/extractors/pdf.rs +761 -493
  154. data/vendor/kreuzberg/src/extractors/pptx.rs +279 -248
  155. data/vendor/kreuzberg/src/extractors/rst.rs +577 -576
  156. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -810
  157. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  158. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  159. data/vendor/kreuzberg/src/extractors/structured.rs +142 -140
  160. data/vendor/kreuzberg/src/extractors/text.rs +265 -260
  161. data/vendor/kreuzberg/src/extractors/typst.rs +651 -650
  162. data/vendor/kreuzberg/src/extractors/xml.rs +147 -135
  163. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  164. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  165. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  166. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  167. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  168. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  169. data/vendor/kreuzberg/src/keywords/processor.rs +275 -267
  170. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  171. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  172. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  173. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -942
  174. data/vendor/kreuzberg/src/language_detection/processor.rs +218 -0
  175. data/vendor/kreuzberg/src/lib.rs +114 -105
  176. data/vendor/kreuzberg/src/mcp/mod.rs +35 -32
  177. data/vendor/kreuzberg/src/mcp/server.rs +2090 -1968
  178. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  179. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  180. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  181. data/vendor/kreuzberg/src/ocr/language_registry.rs +520 -0
  182. data/vendor/kreuzberg/src/ocr/mod.rs +60 -58
  183. data/vendor/kreuzberg/src/ocr/processor.rs +858 -863
  184. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  185. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  186. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +456 -450
  187. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  188. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  189. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  190. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  191. data/vendor/kreuzberg/src/pdf/bindings.rs +306 -0
  192. data/vendor/kreuzberg/src/pdf/bundled.rs +408 -0
  193. data/vendor/kreuzberg/src/pdf/error.rs +214 -122
  194. data/vendor/kreuzberg/src/pdf/fonts.rs +358 -0
  195. data/vendor/kreuzberg/src/pdf/hierarchy.rs +903 -0
  196. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  197. data/vendor/kreuzberg/src/pdf/metadata.rs +509 -346
  198. data/vendor/kreuzberg/src/pdf/mod.rs +81 -50
  199. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  200. data/vendor/kreuzberg/src/pdf/table.rs +417 -393
  201. data/vendor/kreuzberg/src/pdf/text.rs +553 -158
  202. data/vendor/kreuzberg/src/plugins/extractor.rs +1042 -1013
  203. data/vendor/kreuzberg/src/plugins/mod.rs +212 -209
  204. data/vendor/kreuzberg/src/plugins/ocr.rs +637 -620
  205. data/vendor/kreuzberg/src/plugins/processor.rs +650 -642
  206. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1337
  207. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  208. data/vendor/kreuzberg/src/plugins/validator.rs +967 -956
  209. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  210. data/vendor/kreuzberg/src/text/mod.rs +27 -19
  211. data/vendor/kreuzberg/src/text/quality.rs +710 -697
  212. data/vendor/kreuzberg/src/text/quality_processor.rs +231 -0
  213. data/vendor/kreuzberg/src/text/string_utils.rs +229 -217
  214. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  215. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  216. data/vendor/kreuzberg/src/text/token_reduction/core.rs +832 -796
  217. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +923 -902
  218. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  219. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  220. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +148 -147
  221. data/vendor/kreuzberg/src/text/utf8_validation.rs +193 -0
  222. data/vendor/kreuzberg/src/types.rs +1713 -903
  223. data/vendor/kreuzberg/src/utils/mod.rs +31 -17
  224. data/vendor/kreuzberg/src/utils/pool.rs +503 -0
  225. data/vendor/kreuzberg/src/utils/pool_sizing.rs +364 -0
  226. data/vendor/kreuzberg/src/utils/quality.rs +968 -959
  227. data/vendor/kreuzberg/src/utils/string_pool.rs +761 -0
  228. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  229. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  230. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  231. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  232. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  233. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  234. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  235. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  236. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  237. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  238. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  239. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  240. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  241. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  242. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  243. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  244. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  245. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  246. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  247. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  248. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  249. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  250. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  251. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  252. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  253. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  254. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  255. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  256. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  257. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  258. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  259. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  260. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  261. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  262. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  263. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  264. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  265. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  266. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  267. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  268. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  269. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  270. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  271. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  272. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  273. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  274. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  275. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  276. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  277. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  278. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  279. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  280. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  281. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  282. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  283. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  284. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  285. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  286. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  287. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  288. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  289. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  290. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  291. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  292. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  293. data/vendor/kreuzberg/tests/api_embed.rs +360 -0
  294. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  295. data/vendor/kreuzberg/tests/api_large_pdf_extraction.rs +471 -0
  296. data/vendor/kreuzberg/tests/api_large_pdf_extraction_diagnostics.rs +289 -0
  297. data/vendor/kreuzberg/tests/api_tests.rs +1472 -966
  298. data/vendor/kreuzberg/tests/archive_integration.rs +545 -543
  299. data/vendor/kreuzberg/tests/batch_orchestration.rs +587 -556
  300. data/vendor/kreuzberg/tests/batch_pooling_benchmark.rs +154 -0
  301. data/vendor/kreuzberg/tests/batch_processing.rs +328 -316
  302. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  303. data/vendor/kreuzberg/tests/concurrency_stress.rs +541 -525
  304. data/vendor/kreuzberg/tests/config_features.rs +612 -598
  305. data/vendor/kreuzberg/tests/config_integration_test.rs +753 -0
  306. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -415
  307. data/vendor/kreuzberg/tests/core_integration.rs +519 -510
  308. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  309. data/vendor/kreuzberg/tests/data/hierarchy_ground_truth.json +294 -0
  310. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -498
  311. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  312. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  313. data/vendor/kreuzberg/tests/email_integration.rs +327 -325
  314. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  315. data/vendor/kreuzberg/tests/error_handling.rs +402 -393
  316. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  317. data/vendor/kreuzberg/tests/format_integration.rs +165 -159
  318. data/vendor/kreuzberg/tests/helpers/mod.rs +202 -142
  319. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  320. data/vendor/kreuzberg/tests/image_integration.rs +255 -253
  321. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  322. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  323. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  324. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  325. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  326. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  327. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  328. data/vendor/kreuzberg/tests/mime_detection.rs +429 -428
  329. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -510
  330. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -676
  331. data/vendor/kreuzberg/tests/ocr_language_registry.rs +191 -0
  332. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -627
  333. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  334. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -695
  335. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  336. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  337. data/vendor/kreuzberg/tests/page_markers.rs +297 -0
  338. data/vendor/kreuzberg/tests/pdf_hierarchy_detection.rs +301 -0
  339. data/vendor/kreuzberg/tests/pdf_hierarchy_quality.rs +589 -0
  340. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -43
  341. data/vendor/kreuzberg/tests/pdf_ocr_triggering.rs +301 -0
  342. data/vendor/kreuzberg/tests/pdf_text_merging.rs +475 -0
  343. data/vendor/kreuzberg/tests/pdfium_linking.rs +340 -0
  344. data/vendor/kreuzberg/tests/pipeline_integration.rs +1446 -1411
  345. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -771
  346. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +577 -560
  347. data/vendor/kreuzberg/tests/plugin_system.rs +927 -921
  348. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  349. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -586
  350. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -692
  351. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -776
  352. data/vendor/kreuzberg/tests/security_validation.rs +416 -415
  353. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  354. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -609
  355. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1259
  356. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -647
  357. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  358. data/vendor/kreuzberg-ffi/Cargo.toml +67 -0
  359. data/vendor/kreuzberg-ffi/README.md +851 -0
  360. data/vendor/kreuzberg-ffi/benches/result_view_benchmark.rs +227 -0
  361. data/vendor/kreuzberg-ffi/build.rs +168 -0
  362. data/vendor/kreuzberg-ffi/cbindgen.toml +37 -0
  363. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -0
  364. data/vendor/kreuzberg-ffi/kreuzberg.h +3012 -0
  365. data/vendor/kreuzberg-ffi/src/batch_streaming.rs +588 -0
  366. data/vendor/kreuzberg-ffi/src/config.rs +1341 -0
  367. data/vendor/kreuzberg-ffi/src/error.rs +901 -0
  368. data/vendor/kreuzberg-ffi/src/extraction.rs +555 -0
  369. data/vendor/kreuzberg-ffi/src/helpers.rs +879 -0
  370. data/vendor/kreuzberg-ffi/src/lib.rs +977 -0
  371. data/vendor/kreuzberg-ffi/src/memory.rs +493 -0
  372. data/vendor/kreuzberg-ffi/src/mime.rs +329 -0
  373. data/vendor/kreuzberg-ffi/src/panic_shield.rs +265 -0
  374. data/vendor/kreuzberg-ffi/src/plugins/document_extractor.rs +442 -0
  375. data/vendor/kreuzberg-ffi/src/plugins/mod.rs +14 -0
  376. data/vendor/kreuzberg-ffi/src/plugins/ocr_backend.rs +628 -0
  377. data/vendor/kreuzberg-ffi/src/plugins/post_processor.rs +438 -0
  378. data/vendor/kreuzberg-ffi/src/plugins/validator.rs +329 -0
  379. data/vendor/kreuzberg-ffi/src/result.rs +510 -0
  380. data/vendor/kreuzberg-ffi/src/result_pool.rs +639 -0
  381. data/vendor/kreuzberg-ffi/src/result_view.rs +773 -0
  382. data/vendor/kreuzberg-ffi/src/string_intern.rs +568 -0
  383. data/vendor/kreuzberg-ffi/src/types.rs +363 -0
  384. data/vendor/kreuzberg-ffi/src/util.rs +210 -0
  385. data/vendor/kreuzberg-ffi/src/validation.rs +848 -0
  386. data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -0
  387. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -0
  388. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -0
  389. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -0
  390. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -0
  391. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -0
  392. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -0
  393. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -0
  394. data/vendor/kreuzberg-tesseract/Cargo.toml +57 -0
  395. data/vendor/{rb-sys/LICENSE-MIT → kreuzberg-tesseract/LICENSE} +22 -21
  396. data/vendor/kreuzberg-tesseract/README.md +399 -0
  397. data/vendor/kreuzberg-tesseract/build.rs +1127 -0
  398. data/vendor/kreuzberg-tesseract/patches/README.md +71 -0
  399. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -0
  400. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -0
  401. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -0
  402. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -0
  403. data/vendor/kreuzberg-tesseract/src/error.rs +81 -0
  404. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -0
  405. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -0
  406. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -0
  407. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -0
  408. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -0
  409. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -0
  410. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -0
  411. metadata +196 -45
  412. data/vendor/kreuzberg/benches/otel_overhead.rs +0 -48
  413. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +0 -738
  414. data/vendor/rb-sys/.cargo-ok +0 -1
  415. data/vendor/rb-sys/.cargo_vcs_info.json +0 -6
  416. data/vendor/rb-sys/Cargo.lock +0 -393
  417. data/vendor/rb-sys/Cargo.toml +0 -70
  418. data/vendor/rb-sys/Cargo.toml.orig +0 -57
  419. data/vendor/rb-sys/LICENSE-APACHE +0 -190
  420. data/vendor/rb-sys/bin/release.sh +0 -21
  421. data/vendor/rb-sys/build/features.rs +0 -108
  422. data/vendor/rb-sys/build/main.rs +0 -246
  423. data/vendor/rb-sys/build/stable_api_config.rs +0 -153
  424. data/vendor/rb-sys/build/version.rs +0 -48
  425. data/vendor/rb-sys/readme.md +0 -36
  426. data/vendor/rb-sys/src/bindings.rs +0 -21
  427. data/vendor/rb-sys/src/hidden.rs +0 -11
  428. data/vendor/rb-sys/src/lib.rs +0 -34
  429. data/vendor/rb-sys/src/macros.rs +0 -371
  430. data/vendor/rb-sys/src/memory.rs +0 -53
  431. data/vendor/rb-sys/src/ruby_abi_version.rs +0 -38
  432. data/vendor/rb-sys/src/special_consts.rs +0 -31
  433. data/vendor/rb-sys/src/stable_api/compiled.c +0 -179
  434. data/vendor/rb-sys/src/stable_api/compiled.rs +0 -257
  435. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +0 -316
  436. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +0 -316
  437. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +0 -324
  438. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +0 -317
  439. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +0 -315
  440. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +0 -326
  441. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +0 -327
  442. data/vendor/rb-sys/src/stable_api.rs +0 -261
  443. data/vendor/rb-sys/src/symbol.rs +0 -31
  444. data/vendor/rb-sys/src/tracking_allocator.rs +0 -332
  445. data/vendor/rb-sys/src/utils.rs +0 -89
  446. data/vendor/rb-sys/src/value_type.rs +0 -7
@@ -1,598 +1,612 @@
1
- //! Configuration features integration tests.
2
- //!
3
- //! Tests for chunking, language detection, caching, token reduction, and quality processing.
4
- //! Validates that configuration options work correctly end-to-end.
5
-
6
- use kreuzberg::core::config::{ChunkingConfig, ExtractionConfig, LanguageDetectionConfig, TokenReductionConfig};
7
- use kreuzberg::core::extractor::extract_bytes;
8
-
9
- mod helpers;
10
-
11
- /// Test chunking enabled - text split into chunks.
12
- #[tokio::test]
13
- async fn test_chunking_enabled() {
14
- let config = ExtractionConfig {
15
- chunking: Some(ChunkingConfig {
16
- max_chars: 50,
17
- max_overlap: 10,
18
- embedding: None,
19
- preset: None,
20
- }),
21
- ..Default::default()
22
- };
23
-
24
- let text = "This is a long text that should be split into multiple chunks. ".repeat(10);
25
- let text_bytes = text.as_bytes();
26
-
27
- let result = extract_bytes(text_bytes, "text/plain", &config)
28
- .await
29
- .expect("Should extract successfully");
30
-
31
- assert!(result.chunks.is_some(), "Chunks should be present");
32
- let chunks = result.chunks.unwrap();
33
- assert!(chunks.len() > 1, "Should have multiple chunks");
34
-
35
- assert!(result.metadata.additional.contains_key("chunk_count"));
36
- let chunk_count = result.metadata.additional.get("chunk_count").unwrap();
37
- assert_eq!(
38
- chunks.len(),
39
- chunk_count.as_u64().unwrap() as usize,
40
- "Chunks length should match chunk_count metadata"
41
- );
42
-
43
- for chunk in &chunks {
44
- assert!(!chunk.content.is_empty(), "Chunk should not be empty");
45
- assert!(
46
- chunk.content.len() <= 50 + 10,
47
- "Chunk length {} exceeds max_chars + overlap",
48
- chunk.content.len()
49
- );
50
- }
51
- }
52
-
53
- /// Test chunking with overlap - overlap preserved between chunks.
54
- #[tokio::test]
55
- async fn test_chunking_with_overlap() {
56
- let config = ExtractionConfig {
57
- chunking: Some(ChunkingConfig {
58
- max_chars: 100,
59
- max_overlap: 20,
60
- embedding: None,
61
- preset: None,
62
- }),
63
- ..Default::default()
64
- };
65
-
66
- let text = "a".repeat(250);
67
- let text_bytes = text.as_bytes();
68
-
69
- let result = extract_bytes(text_bytes, "text/plain", &config)
70
- .await
71
- .expect("Should extract successfully");
72
-
73
- assert!(result.chunks.is_some(), "Chunks should be present");
74
- let chunks = result.chunks.unwrap();
75
- assert!(chunks.len() >= 2, "Should have at least 2 chunks");
76
-
77
- assert!(result.metadata.additional.contains_key("chunk_count"));
78
-
79
- if chunks.len() >= 2 {
80
- let chunk1 = &chunks[0];
81
- let chunk2 = &chunks[1];
82
-
83
- let chunk1_end = &chunk1.content[chunk1.content.len().saturating_sub(20)..];
84
- assert!(
85
- chunk2.content.starts_with(chunk1_end)
86
- || chunk1_end.starts_with(&chunk2.content[..chunk1_end.len().min(chunk2.content.len())]),
87
- "Chunks should have overlap"
88
- );
89
- }
90
- }
91
-
92
- /// Test chunking with custom sizes - custom chunk size and overlap.
93
- #[tokio::test]
94
- async fn test_chunking_custom_sizes() {
95
- let config = ExtractionConfig {
96
- chunking: Some(ChunkingConfig {
97
- max_chars: 200,
98
- max_overlap: 50,
99
- embedding: None,
100
- preset: None,
101
- }),
102
- ..Default::default()
103
- };
104
-
105
- let text = "Custom chunk test. ".repeat(50);
106
- let text_bytes = text.as_bytes();
107
-
108
- let result = extract_bytes(text_bytes, "text/plain", &config)
109
- .await
110
- .expect("Should extract successfully");
111
-
112
- assert!(result.chunks.is_some(), "Chunks should be present");
113
- let chunks = result.chunks.unwrap();
114
- assert!(!chunks.is_empty(), "Should have at least 1 chunk");
115
-
116
- assert!(result.metadata.additional.contains_key("chunk_count"));
117
-
118
- for chunk in &chunks {
119
- assert!(
120
- chunk.content.len() <= 200 + 50,
121
- "Chunk length {} exceeds custom max_chars + overlap",
122
- chunk.content.len()
123
- );
124
- }
125
- }
126
-
127
- /// Test chunking disabled - no chunking when disabled.
128
- #[tokio::test]
129
- async fn test_chunking_disabled() {
130
- let config = ExtractionConfig {
131
- chunking: None,
132
- ..Default::default()
133
- };
134
-
135
- let text = "This is a long text that should NOT be split into chunks. ".repeat(10);
136
- let text_bytes = text.as_bytes();
137
-
138
- let result = extract_bytes(text_bytes, "text/plain", &config)
139
- .await
140
- .expect("Should extract successfully");
141
-
142
- assert!(result.chunks.is_none(), "Should not have chunks when chunking disabled");
143
- assert!(
144
- !result.metadata.additional.contains_key("chunk_count"),
145
- "Should not have chunk_count when chunking disabled"
146
- );
147
-
148
- assert!(!result.content.is_empty(), "Content should be extracted");
149
- assert!(result.content.contains("long text"), "Should contain original text");
150
- }
151
-
152
- /// Test language detection for single language document.
153
- #[tokio::test]
154
- async fn test_language_detection_single() {
155
- let config = ExtractionConfig {
156
- language_detection: Some(LanguageDetectionConfig {
157
- enabled: true,
158
- min_confidence: 0.8,
159
- detect_multiple: false,
160
- }),
161
- ..Default::default()
162
- };
163
-
164
- let text = "Hello world! This is English text. It should be detected as English language.";
165
- let text_bytes = text.as_bytes();
166
-
167
- let result = extract_bytes(text_bytes, "text/plain", &config)
168
- .await
169
- .expect("Should extract successfully");
170
-
171
- assert!(result.detected_languages.is_some(), "Should detect language");
172
- let languages = result.detected_languages.unwrap();
173
- assert!(!languages.is_empty(), "Should detect at least one language");
174
- assert_eq!(languages[0], "eng", "Should detect English");
175
- }
176
-
177
- /// Test language detection for multi-language document.
178
- #[cfg_attr(coverage, ignore = "coverage instrumentation affects multi-language heuristics")]
179
- #[tokio::test]
180
- async fn test_language_detection_multiple() {
181
- let config = ExtractionConfig {
182
- language_detection: Some(LanguageDetectionConfig {
183
- enabled: true,
184
- min_confidence: 0.7,
185
- detect_multiple: true,
186
- }),
187
- ..Default::default()
188
- };
189
-
190
- let text = "Hello world! This is English. ".repeat(10) + "Hola mundo! Este es español. ".repeat(10).as_str();
191
- let text_bytes = text.as_bytes();
192
-
193
- let result = extract_bytes(text_bytes, "text/plain", &config)
194
- .await
195
- .expect("Should extract successfully");
196
-
197
- assert!(result.detected_languages.is_some(), "Should detect languages");
198
- let languages = result.detected_languages.unwrap();
199
- assert!(!languages.is_empty(), "Should detect at least one language");
200
- }
201
-
202
- /// Test language detection with confidence threshold.
203
- #[tokio::test]
204
- async fn test_language_detection_confidence() {
205
- let config = ExtractionConfig {
206
- language_detection: Some(LanguageDetectionConfig {
207
- enabled: true,
208
- min_confidence: 0.9,
209
- detect_multiple: false,
210
- }),
211
- ..Default::default()
212
- };
213
-
214
- let text = "This is clear English text that should have high confidence.";
215
- let text_bytes = text.as_bytes();
216
-
217
- let result = extract_bytes(text_bytes, "text/plain", &config)
218
- .await
219
- .expect("Should extract successfully");
220
-
221
- if let Some(languages) = result.detected_languages {
222
- assert!(!languages.is_empty());
223
- }
224
- }
225
-
226
- /// Test language detection disabled.
227
- #[tokio::test]
228
- async fn test_language_detection_disabled() {
229
- let config = ExtractionConfig {
230
- language_detection: Some(LanguageDetectionConfig {
231
- enabled: false,
232
- min_confidence: 0.8,
233
- detect_multiple: false,
234
- }),
235
- ..Default::default()
236
- };
237
-
238
- let text = "Hello world! This is English text.";
239
- let text_bytes = text.as_bytes();
240
-
241
- let result = extract_bytes(text_bytes, "text/plain", &config)
242
- .await
243
- .expect("Should extract successfully");
244
-
245
- assert!(
246
- result.detected_languages.is_none(),
247
- "Should not detect language when disabled"
248
- );
249
- }
250
-
251
- /// Test cache hit behavior - second extraction from cache.
252
- #[tokio::test]
253
- async fn test_cache_hit_behavior() {
254
- let config = ExtractionConfig {
255
- use_cache: true,
256
- ..Default::default()
257
- };
258
-
259
- let text = "Test text for caching behavior.";
260
- let text_bytes = text.as_bytes();
261
-
262
- let result1 = extract_bytes(text_bytes, "text/plain", &config)
263
- .await
264
- .expect("First extraction should succeed");
265
-
266
- let result2 = extract_bytes(text_bytes, "text/plain", &config)
267
- .await
268
- .expect("Second extraction should succeed");
269
-
270
- assert_eq!(result1.content, result2.content);
271
- }
272
-
273
- /// Test cache miss and invalidation.
274
- #[tokio::test]
275
- async fn test_cache_miss_invalidation() {
276
- let config = ExtractionConfig {
277
- use_cache: true,
278
- ..Default::default()
279
- };
280
-
281
- let text1 = "First text for cache test.";
282
- let text2 = "Second different text.";
283
-
284
- let result1 = extract_bytes(text1.as_bytes(), "text/plain", &config)
285
- .await
286
- .expect("First extraction should succeed");
287
-
288
- let result2 = extract_bytes(text2.as_bytes(), "text/plain", &config)
289
- .await
290
- .expect("Second extraction should succeed");
291
-
292
- assert_ne!(result1.content, result2.content);
293
- }
294
-
295
- /// Test custom cache directory (Note: OCR cache uses hardcoded directory).
296
- #[tokio::test]
297
- async fn test_custom_cache_directory() {
298
- let config = ExtractionConfig {
299
- use_cache: true,
300
- ..Default::default()
301
- };
302
-
303
- let text = "Test text for cache directory test.";
304
- let text_bytes = text.as_bytes();
305
-
306
- let result = extract_bytes(text_bytes, "text/plain", &config)
307
- .await
308
- .expect("Should extract successfully");
309
-
310
- assert!(!result.content.is_empty());
311
- }
312
-
313
- /// Test cache disabled - bypass cache.
314
- #[tokio::test]
315
- async fn test_cache_disabled() {
316
- let config = ExtractionConfig {
317
- use_cache: false,
318
- ..Default::default()
319
- };
320
-
321
- let text = "Test text without caching.";
322
- let text_bytes = text.as_bytes();
323
-
324
- let result1 = extract_bytes(text_bytes, "text/plain", &config)
325
- .await
326
- .expect("First extraction should succeed");
327
-
328
- let result2 = extract_bytes(text_bytes, "text/plain", &config)
329
- .await
330
- .expect("Second extraction should succeed");
331
-
332
- assert_eq!(result1.content, result2.content);
333
- }
334
-
335
- /// Test token reduction in aggressive mode.
336
- #[tokio::test]
337
- async fn test_token_reduction_aggressive() {
338
- let config = ExtractionConfig {
339
- token_reduction: Some(TokenReductionConfig {
340
- mode: "aggressive".to_string(),
341
- preserve_important_words: true,
342
- }),
343
- ..Default::default()
344
- };
345
-
346
- let text = "This is a very long sentence with many unnecessary words that could be reduced. ".repeat(5);
347
- let text_bytes = text.as_bytes();
348
-
349
- let result = extract_bytes(text_bytes, "text/plain", &config)
350
- .await
351
- .expect("Should extract successfully");
352
-
353
- assert!(!result.content.is_empty());
354
- }
355
-
356
- /// Test token reduction in conservative mode.
357
- #[tokio::test]
358
- async fn test_token_reduction_conservative() {
359
- let config = ExtractionConfig {
360
- token_reduction: Some(TokenReductionConfig {
361
- mode: "light".to_string(),
362
- preserve_important_words: true,
363
- }),
364
- ..Default::default()
365
- };
366
-
367
- let text = "Conservative token reduction test with moderate text length.";
368
- let text_bytes = text.as_bytes();
369
-
370
- let result = extract_bytes(text_bytes, "text/plain", &config)
371
- .await
372
- .expect("Should extract successfully");
373
-
374
- assert!(!result.content.is_empty());
375
- }
376
-
377
- /// Test token reduction disabled.
378
- #[tokio::test]
379
- async fn test_token_reduction_disabled() {
380
- let config = ExtractionConfig {
381
- token_reduction: Some(TokenReductionConfig {
382
- mode: "off".to_string(),
383
- preserve_important_words: false,
384
- }),
385
- ..Default::default()
386
- };
387
-
388
- let text = "Text without token reduction applied.";
389
- let text_bytes = text.as_bytes();
390
-
391
- let result = extract_bytes(text_bytes, "text/plain", &config)
392
- .await
393
- .expect("Should extract successfully");
394
-
395
- assert!(result.content.contains("without token reduction"));
396
- }
397
-
398
- /// Test quality processing enabled - quality scoring applied.
399
- #[tokio::test]
400
- async fn test_quality_processing_enabled() {
401
- let config = ExtractionConfig {
402
- enable_quality_processing: true,
403
- ..Default::default()
404
- };
405
-
406
- let text = "This is well-structured text. It has multiple sentences. And proper punctuation.";
407
- let text_bytes = text.as_bytes();
408
-
409
- let result = extract_bytes(text_bytes, "text/plain", &config)
410
- .await
411
- .expect("Should extract successfully");
412
-
413
- if let Some(score) = result.metadata.additional.get("quality_score") {
414
- let score_value = score.as_f64().unwrap();
415
- assert!((0.0..=1.0).contains(&score_value));
416
- }
417
-
418
- assert!(!result.content.is_empty());
419
- }
420
-
421
- /// Test quality processing calculates score for different text quality.
422
- #[tokio::test]
423
- async fn test_quality_threshold_filtering() {
424
- let config = ExtractionConfig {
425
- enable_quality_processing: true,
426
- ..Default::default()
427
- };
428
-
429
- let high_quality = "This is a well-structured document. It has proper sentences. And good formatting.";
430
- let result_high = extract_bytes(high_quality.as_bytes(), "text/plain", &config)
431
- .await
432
- .expect("Should extract successfully");
433
-
434
- let low_quality = "a b c d ....... word123mixed . . ";
435
- let result_low = extract_bytes(low_quality.as_bytes(), "text/plain", &config)
436
- .await
437
- .expect("Should extract successfully");
438
-
439
- assert!(
440
- result_high.metadata.additional.contains_key("quality_score"),
441
- "High quality should have score"
442
- );
443
- assert!(
444
- result_low.metadata.additional.contains_key("quality_score"),
445
- "Low quality should have score"
446
- );
447
-
448
- let score_high = result_high
449
- .metadata
450
- .additional
451
- .get("quality_score")
452
- .unwrap()
453
- .as_f64()
454
- .unwrap();
455
- let score_low = result_low
456
- .metadata
457
- .additional
458
- .get("quality_score")
459
- .unwrap()
460
- .as_f64()
461
- .unwrap();
462
-
463
- assert!((0.0..=1.0).contains(&score_high));
464
- assert!((0.0..=1.0).contains(&score_low));
465
- }
466
-
467
- /// Test quality processing disabled.
468
- #[tokio::test]
469
- async fn test_quality_processing_disabled() {
470
- let config = ExtractionConfig {
471
- enable_quality_processing: false,
472
- ..Default::default()
473
- };
474
-
475
- let text = "Text without quality processing.";
476
- let text_bytes = text.as_bytes();
477
-
478
- let result = extract_bytes(text_bytes, "text/plain", &config)
479
- .await
480
- .expect("Should extract successfully");
481
-
482
- assert!(!result.metadata.additional.contains_key("quality_score"));
483
- assert!(!result.content.is_empty());
484
- }
485
-
486
- /// Test chunking with embeddings using balanced preset.
487
- ///
488
- /// This test requires ONNX Runtime to be installed as a system dependency.
489
- /// On macOS with Homebrew: `brew install onnxruntime`
490
- /// On Linux: Install via your package manager or download from https://github.com/microsoft/onnxruntime/releases
491
- /// On Windows: Download from https://github.com/microsoft/onnxruntime/releases
492
- #[tokio::test]
493
- #[cfg(feature = "embeddings")]
494
- #[cfg_attr(target_os = "macos", ignore = "ONNX models not cached on macOS")]
495
- #[cfg_attr(target_os = "windows", ignore = "ONNX models not cached on Windows")]
496
- async fn test_chunking_with_embeddings() {
497
- use kreuzberg::core::config::EmbeddingConfig;
498
-
499
- let config = ExtractionConfig {
500
- chunking: Some(ChunkingConfig {
501
- max_chars: 100,
502
- max_overlap: 20,
503
- embedding: Some(EmbeddingConfig::default()),
504
- preset: None,
505
- }),
506
- ..Default::default()
507
- };
508
-
509
- let text = "This is a test document for embedding generation. ".repeat(10);
510
- let text_bytes = text.as_bytes();
511
-
512
- let result = extract_bytes(text_bytes, "text/plain", &config)
513
- .await
514
- .expect("Should extract successfully");
515
-
516
- assert!(result.chunks.is_some(), "Chunks should be present");
517
- let chunks = result.chunks.unwrap();
518
- assert!(chunks.len() > 1, "Should have multiple chunks");
519
-
520
- println!("Metadata: {:?}", result.metadata.additional);
521
-
522
- if let Some(error) = result.metadata.additional.get("embedding_error") {
523
- panic!("Embedding generation failed: {}", error);
524
- }
525
-
526
- assert!(
527
- result.metadata.additional.contains_key("embeddings_generated"),
528
- "Should have embeddings_generated metadata"
529
- );
530
- assert_eq!(
531
- result.metadata.additional.get("embeddings_generated").unwrap(),
532
- &serde_json::Value::Bool(true)
533
- );
534
-
535
- for chunk in &chunks {
536
- assert!(chunk.embedding.is_some(), "Each chunk should have an embedding");
537
- let embedding = chunk.embedding.as_ref().unwrap();
538
- assert_eq!(
539
- embedding.len(),
540
- 768,
541
- "Embedding should have 768 dimensions for balanced preset"
542
- );
543
-
544
- let magnitude: f32 = embedding.iter().map(|x| x * x).sum::<f32>().sqrt();
545
- assert!(
546
- (magnitude - 1.0).abs() < 0.01,
547
- "Embedding should be normalized (magnitude ~= 1.0)"
548
- );
549
- }
550
- }
551
-
552
- /// Test chunking with fast embedding preset.
553
- ///
554
- /// This test requires ONNX Runtime to be installed as a system dependency.
555
- /// On macOS with Homebrew: `brew install onnxruntime`
556
- /// On Linux: Install via your package manager or download from https://github.com/microsoft/onnxruntime/releases
557
- /// On Windows: Download from https://github.com/microsoft/onnxruntime/releases
558
- #[tokio::test]
559
- #[cfg(feature = "embeddings")]
560
- #[cfg_attr(target_os = "macos", ignore = "ONNX models not cached on macOS")]
561
- #[cfg_attr(target_os = "windows", ignore = "ONNX models not cached on Windows")]
562
- async fn test_chunking_with_fast_embeddings() {
563
- use kreuzberg::core::config::{EmbeddingConfig, EmbeddingModelType};
564
-
565
- let config = ExtractionConfig {
566
- chunking: Some(ChunkingConfig {
567
- max_chars: 100,
568
- max_overlap: 20,
569
- embedding: Some(EmbeddingConfig {
570
- model: EmbeddingModelType::Preset {
571
- name: "fast".to_string(),
572
- },
573
- ..Default::default()
574
- }),
575
- preset: None,
576
- }),
577
- ..Default::default()
578
- };
579
-
580
- let text = "Fast embedding test. ".repeat(10);
581
- let text_bytes = text.as_bytes();
582
-
583
- let result = extract_bytes(text_bytes, "text/plain", &config)
584
- .await
585
- .expect("Should extract successfully");
586
-
587
- let chunks = result.chunks.expect("Should have chunks");
588
- assert!(!chunks.is_empty(), "Should have at least one chunk");
589
-
590
- if let Some(error) = result.metadata.additional.get("embedding_error") {
591
- panic!("Embedding generation failed: {}", error);
592
- }
593
-
594
- for chunk in &chunks {
595
- let embedding = chunk.embedding.as_ref().expect("Should have embedding");
596
- assert_eq!(embedding.len(), 384, "Fast preset should produce 384-dim embeddings");
597
- }
598
- }
1
+ //! Configuration features integration tests.
2
+ //!
3
+ //! Tests for chunking, language detection, caching, token reduction, and quality processing.
4
+ //! Validates that configuration options work correctly end-to-end.
5
+
6
+ #[cfg(feature = "chunking")]
7
+ use kreuzberg::core::config::ChunkingConfig;
8
+ use kreuzberg::core::config::ExtractionConfig;
9
+ #[cfg(feature = "language-detection")]
10
+ use kreuzberg::core::config::LanguageDetectionConfig;
11
+ use kreuzberg::core::config::TokenReductionConfig;
12
+ use kreuzberg::core::extractor::extract_bytes;
13
+
14
+ mod helpers;
15
+
16
+ /// Test chunking enabled - text split into chunks.
17
+ #[tokio::test]
18
+ #[cfg(feature = "chunking")]
19
+ async fn test_chunking_enabled() {
20
+ let config = ExtractionConfig {
21
+ chunking: Some(ChunkingConfig {
22
+ max_chars: 50,
23
+ max_overlap: 10,
24
+ embedding: None,
25
+ preset: None,
26
+ }),
27
+ ..Default::default()
28
+ };
29
+
30
+ let text = "This is a long text that should be split into multiple chunks. ".repeat(10);
31
+ let text_bytes = text.as_bytes();
32
+
33
+ let result = extract_bytes(text_bytes, "text/plain", &config)
34
+ .await
35
+ .expect("Should extract successfully");
36
+
37
+ assert!(result.chunks.is_some(), "Chunks should be present");
38
+ let chunks = result.chunks.unwrap();
39
+ assert!(chunks.len() > 1, "Should have multiple chunks");
40
+
41
+ assert!(result.metadata.additional.contains_key("chunk_count"));
42
+ let chunk_count = result.metadata.additional.get("chunk_count").unwrap();
43
+ assert_eq!(
44
+ chunks.len(),
45
+ chunk_count.as_u64().unwrap() as usize,
46
+ "Chunks length should match chunk_count metadata"
47
+ );
48
+
49
+ for chunk in &chunks {
50
+ assert!(!chunk.content.is_empty(), "Chunk should not be empty");
51
+ assert!(
52
+ chunk.content.len() <= 50 + 10,
53
+ "Chunk length {} exceeds max_chars + overlap",
54
+ chunk.content.len()
55
+ );
56
+ }
57
+ }
58
+
59
+ /// Test chunking with overlap - overlap preserved between chunks.
60
+ #[tokio::test]
61
+ #[cfg(feature = "chunking")]
62
+ async fn test_chunking_with_overlap() {
63
+ let config = ExtractionConfig {
64
+ chunking: Some(ChunkingConfig {
65
+ max_chars: 100,
66
+ max_overlap: 20,
67
+ embedding: None,
68
+ preset: None,
69
+ }),
70
+ ..Default::default()
71
+ };
72
+
73
+ let text = "a".repeat(250);
74
+ let text_bytes = text.as_bytes();
75
+
76
+ let result = extract_bytes(text_bytes, "text/plain", &config)
77
+ .await
78
+ .expect("Should extract successfully");
79
+
80
+ assert!(result.chunks.is_some(), "Chunks should be present");
81
+ let chunks = result.chunks.unwrap();
82
+ assert!(chunks.len() >= 2, "Should have at least 2 chunks");
83
+
84
+ assert!(result.metadata.additional.contains_key("chunk_count"));
85
+
86
+ if chunks.len() >= 2 {
87
+ let chunk1 = &chunks[0];
88
+ let chunk2 = &chunks[1];
89
+
90
+ let chunk1_end = &chunk1.content[chunk1.content.len().saturating_sub(20)..];
91
+ assert!(
92
+ chunk2.content.starts_with(chunk1_end)
93
+ || chunk1_end.starts_with(&chunk2.content[..chunk1_end.len().min(chunk2.content.len())]),
94
+ "Chunks should have overlap"
95
+ );
96
+ }
97
+ }
98
+
99
+ /// Test chunking with custom sizes - custom chunk size and overlap.
100
+ #[tokio::test]
101
+ #[cfg(feature = "chunking")]
102
+ async fn test_chunking_custom_sizes() {
103
+ let config = ExtractionConfig {
104
+ chunking: Some(ChunkingConfig {
105
+ max_chars: 200,
106
+ max_overlap: 50,
107
+ embedding: None,
108
+ preset: None,
109
+ }),
110
+ ..Default::default()
111
+ };
112
+
113
+ let text = "Custom chunk test. ".repeat(50);
114
+ let text_bytes = text.as_bytes();
115
+
116
+ let result = extract_bytes(text_bytes, "text/plain", &config)
117
+ .await
118
+ .expect("Should extract successfully");
119
+
120
+ assert!(result.chunks.is_some(), "Chunks should be present");
121
+ let chunks = result.chunks.unwrap();
122
+ assert!(!chunks.is_empty(), "Should have at least 1 chunk");
123
+
124
+ assert!(result.metadata.additional.contains_key("chunk_count"));
125
+
126
+ for chunk in &chunks {
127
+ assert!(
128
+ chunk.content.len() <= 200 + 50,
129
+ "Chunk length {} exceeds custom max_chars + overlap",
130
+ chunk.content.len()
131
+ );
132
+ }
133
+ }
134
+
135
+ /// Test chunking disabled - no chunking when disabled.
136
+ #[tokio::test]
137
+ async fn test_chunking_disabled() {
138
+ let config = ExtractionConfig {
139
+ chunking: None,
140
+ ..Default::default()
141
+ };
142
+
143
+ let text = "This is a long text that should NOT be split into chunks. ".repeat(10);
144
+ let text_bytes = text.as_bytes();
145
+
146
+ let result = extract_bytes(text_bytes, "text/plain", &config)
147
+ .await
148
+ .expect("Should extract successfully");
149
+
150
+ assert!(result.chunks.is_none(), "Should not have chunks when chunking disabled");
151
+ assert!(
152
+ !result.metadata.additional.contains_key("chunk_count"),
153
+ "Should not have chunk_count when chunking disabled"
154
+ );
155
+
156
+ assert!(!result.content.is_empty(), "Content should be extracted");
157
+ assert!(result.content.contains("long text"), "Should contain original text");
158
+ }
159
+
160
+ /// Test language detection for single language document.
161
+ #[tokio::test]
162
+ #[cfg(feature = "language-detection")]
163
+ async fn test_language_detection_single() {
164
+ let config = ExtractionConfig {
165
+ language_detection: Some(LanguageDetectionConfig {
166
+ enabled: true,
167
+ min_confidence: 0.8,
168
+ detect_multiple: false,
169
+ }),
170
+ ..Default::default()
171
+ };
172
+
173
+ let text = "Hello world! This is English text. It should be detected as English language.";
174
+ let text_bytes = text.as_bytes();
175
+
176
+ let result = extract_bytes(text_bytes, "text/plain", &config)
177
+ .await
178
+ .expect("Should extract successfully");
179
+
180
+ assert!(result.detected_languages.is_some(), "Should detect language");
181
+ let languages = result.detected_languages.unwrap();
182
+ assert!(!languages.is_empty(), "Should detect at least one language");
183
+ assert_eq!(languages[0], "eng", "Should detect English");
184
+ }
185
+
186
+ /// Test language detection for multi-language document.
187
+ #[cfg_attr(coverage, ignore = "coverage instrumentation affects multi-language heuristics")]
188
+ #[tokio::test]
189
+ #[cfg(feature = "language-detection")]
190
+ async fn test_language_detection_multiple() {
191
+ let config = ExtractionConfig {
192
+ language_detection: Some(LanguageDetectionConfig {
193
+ enabled: true,
194
+ min_confidence: 0.7,
195
+ detect_multiple: true,
196
+ }),
197
+ ..Default::default()
198
+ };
199
+
200
+ let text = "Hello world! This is English. ".repeat(10) + "Hola mundo! Este es español. ".repeat(10).as_str();
201
+ let text_bytes = text.as_bytes();
202
+
203
+ let result = extract_bytes(text_bytes, "text/plain", &config)
204
+ .await
205
+ .expect("Should extract successfully");
206
+
207
+ assert!(result.detected_languages.is_some(), "Should detect languages");
208
+ let languages = result.detected_languages.unwrap();
209
+ assert!(!languages.is_empty(), "Should detect at least one language");
210
+ }
211
+
212
+ /// Test language detection with confidence threshold.
213
+ #[tokio::test]
214
+ #[cfg(feature = "language-detection")]
215
+ async fn test_language_detection_confidence() {
216
+ let config = ExtractionConfig {
217
+ language_detection: Some(LanguageDetectionConfig {
218
+ enabled: true,
219
+ min_confidence: 0.9,
220
+ detect_multiple: false,
221
+ }),
222
+ ..Default::default()
223
+ };
224
+
225
+ let text = "This is clear English text that should have high confidence.";
226
+ let text_bytes = text.as_bytes();
227
+
228
+ let result = extract_bytes(text_bytes, "text/plain", &config)
229
+ .await
230
+ .expect("Should extract successfully");
231
+
232
+ if let Some(languages) = result.detected_languages {
233
+ assert!(!languages.is_empty());
234
+ }
235
+ }
236
+
237
+ /// Test language detection disabled.
238
+ #[tokio::test]
239
+ #[cfg(feature = "language-detection")]
240
+ async fn test_language_detection_disabled() {
241
+ let config = ExtractionConfig {
242
+ language_detection: Some(LanguageDetectionConfig {
243
+ enabled: false,
244
+ min_confidence: 0.8,
245
+ detect_multiple: false,
246
+ }),
247
+ ..Default::default()
248
+ };
249
+
250
+ let text = "Hello world! This is English text.";
251
+ let text_bytes = text.as_bytes();
252
+
253
+ let result = extract_bytes(text_bytes, "text/plain", &config)
254
+ .await
255
+ .expect("Should extract successfully");
256
+
257
+ assert!(
258
+ result.detected_languages.is_none(),
259
+ "Should not detect language when disabled"
260
+ );
261
+ }
262
+
263
+ /// Test cache hit behavior - second extraction from cache.
264
+ #[tokio::test]
265
+ async fn test_cache_hit_behavior() {
266
+ let config = ExtractionConfig {
267
+ use_cache: true,
268
+ ..Default::default()
269
+ };
270
+
271
+ let text = "Test text for caching behavior.";
272
+ let text_bytes = text.as_bytes();
273
+
274
+ let result1 = extract_bytes(text_bytes, "text/plain", &config)
275
+ .await
276
+ .expect("First extraction should succeed");
277
+
278
+ let result2 = extract_bytes(text_bytes, "text/plain", &config)
279
+ .await
280
+ .expect("Second extraction should succeed");
281
+
282
+ assert_eq!(result1.content, result2.content);
283
+ }
284
+
285
+ /// Test cache miss and invalidation.
286
+ #[tokio::test]
287
+ async fn test_cache_miss_invalidation() {
288
+ let config = ExtractionConfig {
289
+ use_cache: true,
290
+ ..Default::default()
291
+ };
292
+
293
+ let text1 = "First text for cache test.";
294
+ let text2 = "Second different text.";
295
+
296
+ let result1 = extract_bytes(text1.as_bytes(), "text/plain", &config)
297
+ .await
298
+ .expect("First extraction should succeed");
299
+
300
+ let result2 = extract_bytes(text2.as_bytes(), "text/plain", &config)
301
+ .await
302
+ .expect("Second extraction should succeed");
303
+
304
+ assert_ne!(result1.content, result2.content);
305
+ }
306
+
307
+ /// Test custom cache directory (Note: OCR cache uses hardcoded directory).
308
+ #[tokio::test]
309
+ async fn test_custom_cache_directory() {
310
+ let config = ExtractionConfig {
311
+ use_cache: true,
312
+ ..Default::default()
313
+ };
314
+
315
+ let text = "Test text for cache directory test.";
316
+ let text_bytes = text.as_bytes();
317
+
318
+ let result = extract_bytes(text_bytes, "text/plain", &config)
319
+ .await
320
+ .expect("Should extract successfully");
321
+
322
+ assert!(!result.content.is_empty());
323
+ }
324
+
325
+ /// Test cache disabled - bypass cache.
326
+ #[tokio::test]
327
+ async fn test_cache_disabled() {
328
+ let config = ExtractionConfig {
329
+ use_cache: false,
330
+ ..Default::default()
331
+ };
332
+
333
+ let text = "Test text without caching.";
334
+ let text_bytes = text.as_bytes();
335
+
336
+ let result1 = extract_bytes(text_bytes, "text/plain", &config)
337
+ .await
338
+ .expect("First extraction should succeed");
339
+
340
+ let result2 = extract_bytes(text_bytes, "text/plain", &config)
341
+ .await
342
+ .expect("Second extraction should succeed");
343
+
344
+ assert_eq!(result1.content, result2.content);
345
+ }
346
+
347
+ /// Test token reduction in aggressive mode.
348
+ #[tokio::test]
349
+ async fn test_token_reduction_aggressive() {
350
+ let config = ExtractionConfig {
351
+ token_reduction: Some(TokenReductionConfig {
352
+ mode: "aggressive".to_string(),
353
+ preserve_important_words: true,
354
+ }),
355
+ ..Default::default()
356
+ };
357
+
358
+ let text = "This is a very long sentence with many unnecessary words that could be reduced. ".repeat(5);
359
+ let text_bytes = text.as_bytes();
360
+
361
+ let result = extract_bytes(text_bytes, "text/plain", &config)
362
+ .await
363
+ .expect("Should extract successfully");
364
+
365
+ assert!(!result.content.is_empty());
366
+ }
367
+
368
+ /// Test token reduction in conservative mode.
369
+ #[tokio::test]
370
+ async fn test_token_reduction_conservative() {
371
+ let config = ExtractionConfig {
372
+ token_reduction: Some(TokenReductionConfig {
373
+ mode: "light".to_string(),
374
+ preserve_important_words: true,
375
+ }),
376
+ ..Default::default()
377
+ };
378
+
379
+ let text = "Conservative token reduction test with moderate text length.";
380
+ let text_bytes = text.as_bytes();
381
+
382
+ let result = extract_bytes(text_bytes, "text/plain", &config)
383
+ .await
384
+ .expect("Should extract successfully");
385
+
386
+ assert!(!result.content.is_empty());
387
+ }
388
+
389
+ /// Test token reduction disabled.
390
+ #[tokio::test]
391
+ async fn test_token_reduction_disabled() {
392
+ let config = ExtractionConfig {
393
+ token_reduction: Some(TokenReductionConfig {
394
+ mode: "off".to_string(),
395
+ preserve_important_words: false,
396
+ }),
397
+ ..Default::default()
398
+ };
399
+
400
+ let text = "Text without token reduction applied.";
401
+ let text_bytes = text.as_bytes();
402
+
403
+ let result = extract_bytes(text_bytes, "text/plain", &config)
404
+ .await
405
+ .expect("Should extract successfully");
406
+
407
+ assert!(result.content.contains("without token reduction"));
408
+ }
409
+
410
+ /// Test quality processing enabled - quality scoring applied.
411
+ #[tokio::test]
412
+ #[cfg(feature = "quality")]
413
+ async fn test_quality_processing_enabled() {
414
+ let config = ExtractionConfig {
415
+ enable_quality_processing: true,
416
+ ..Default::default()
417
+ };
418
+
419
+ let text = "This is well-structured text. It has multiple sentences. And proper punctuation.";
420
+ let text_bytes = text.as_bytes();
421
+
422
+ let result = extract_bytes(text_bytes, "text/plain", &config)
423
+ .await
424
+ .expect("Should extract successfully");
425
+
426
+ if let Some(score) = result.metadata.additional.get("quality_score") {
427
+ let score_value = score.as_f64().unwrap();
428
+ assert!((0.0..=1.0).contains(&score_value));
429
+ }
430
+
431
+ assert!(!result.content.is_empty());
432
+ }
433
+
434
+ /// Test quality processing calculates score for different text quality.
435
+ #[tokio::test]
436
+ #[cfg(feature = "quality")]
437
+ async fn test_quality_threshold_filtering() {
438
+ let config = ExtractionConfig {
439
+ enable_quality_processing: true,
440
+ ..Default::default()
441
+ };
442
+
443
+ let high_quality = "This is a well-structured document. It has proper sentences. And good formatting.";
444
+ let result_high = extract_bytes(high_quality.as_bytes(), "text/plain", &config)
445
+ .await
446
+ .expect("Should extract successfully");
447
+
448
+ let low_quality = "a b c d ....... word123mixed . . ";
449
+ let result_low = extract_bytes(low_quality.as_bytes(), "text/plain", &config)
450
+ .await
451
+ .expect("Should extract successfully");
452
+
453
+ assert!(
454
+ result_high.metadata.additional.contains_key("quality_score"),
455
+ "High quality should have score"
456
+ );
457
+ assert!(
458
+ result_low.metadata.additional.contains_key("quality_score"),
459
+ "Low quality should have score"
460
+ );
461
+
462
+ let score_high = result_high
463
+ .metadata
464
+ .additional
465
+ .get("quality_score")
466
+ .unwrap()
467
+ .as_f64()
468
+ .unwrap();
469
+ let score_low = result_low
470
+ .metadata
471
+ .additional
472
+ .get("quality_score")
473
+ .unwrap()
474
+ .as_f64()
475
+ .unwrap();
476
+
477
+ assert!((0.0..=1.0).contains(&score_high));
478
+ assert!((0.0..=1.0).contains(&score_low));
479
+ }
480
+
481
+ /// Test quality processing disabled.
482
+ #[tokio::test]
483
+ async fn test_quality_processing_disabled() {
484
+ let config = ExtractionConfig {
485
+ enable_quality_processing: false,
486
+ ..Default::default()
487
+ };
488
+
489
+ let text = "Text without quality processing.";
490
+ let text_bytes = text.as_bytes();
491
+
492
+ let result = extract_bytes(text_bytes, "text/plain", &config)
493
+ .await
494
+ .expect("Should extract successfully");
495
+
496
+ assert!(!result.metadata.additional.contains_key("quality_score"));
497
+ assert!(!result.content.is_empty());
498
+ }
499
+
500
/// Test chunking with embeddings using balanced preset.
///
/// This test requires ONNX Runtime to be installed as a system dependency.
/// On macOS with Homebrew: `brew install onnxruntime`
/// On Linux: Install via your package manager or download from https://github.com/microsoft/onnxruntime/releases
/// On Windows: Download from https://github.com/microsoft/onnxruntime/releases
#[tokio::test]
#[cfg(feature = "embeddings")]
#[cfg_attr(target_os = "macos", ignore = "ONNX models not cached on macOS")]
#[cfg_attr(target_os = "windows", ignore = "ONNX models not cached on Windows")]
async fn test_chunking_with_embeddings() {
    use kreuzberg::core::config::EmbeddingConfig;

    // Small max_chars forces multiple chunks so per-chunk embeddings are exercised.
    let config = ExtractionConfig {
        chunking: Some(ChunkingConfig {
            max_chars: 100,
            max_overlap: 20,
            embedding: Some(EmbeddingConfig::default()),
            preset: None,
        }),
        ..Default::default()
    };

    let text = "This is a test document for embedding generation. ".repeat(10);

    let result = extract_bytes(text.as_bytes(), "text/plain", &config)
        .await
        .expect("Should extract successfully");

    // Idiom: a single expect() replaces the assert!(is_some) + unwrap() pair.
    let chunks = result.chunks.expect("Chunks should be present");
    assert!(chunks.len() > 1, "Should have multiple chunks");

    println!("Metadata: {:?}", result.metadata.additional);

    // Fail loudly with the backend's message if embedding generation errored.
    if let Some(error) = result.metadata.additional.get("embedding_error") {
        panic!("Embedding generation failed: {}", error);
    }

    // Single lookup replaces the contains_key + get().unwrap() double lookup;
    // this also asserts both presence and the expected boolean value at once.
    assert_eq!(
        result.metadata.additional.get("embeddings_generated"),
        Some(&serde_json::Value::Bool(true)),
        "Should have embeddings_generated metadata"
    );

    for chunk in &chunks {
        let embedding = chunk
            .embedding
            .as_ref()
            .expect("Each chunk should have an embedding");
        assert_eq!(
            embedding.len(),
            768,
            "Embedding should have 768 dimensions for balanced preset"
        );

        // The model output is expected to be L2-normalized, so the vector
        // magnitude should be ~1.0 within a small tolerance.
        let magnitude: f32 = embedding.iter().map(|x| x * x).sum::<f32>().sqrt();
        assert!(
            (magnitude - 1.0).abs() < 0.01,
            "Embedding should be normalized (magnitude ~= 1.0)"
        );
    }
}
565
+
566
/// Test chunking with fast embedding preset.
///
/// This test requires ONNX Runtime to be installed as a system dependency.
/// On macOS with Homebrew: `brew install onnxruntime`
/// On Linux: Install via your package manager or download from https://github.com/microsoft/onnxruntime/releases
/// On Windows: Download from https://github.com/microsoft/onnxruntime/releases
#[tokio::test]
#[cfg(feature = "embeddings")]
#[cfg_attr(target_os = "macos", ignore = "ONNX models not cached on macOS")]
#[cfg_attr(target_os = "windows", ignore = "ONNX models not cached on Windows")]
async fn test_chunking_with_fast_embeddings() {
    use kreuzberg::core::config::{EmbeddingConfig, EmbeddingModelType};

    // Build the "fast" preset embedding settings up front, then wire them
    // into a chunking configuration with a small chunk size.
    let embedding_cfg = EmbeddingConfig {
        model: EmbeddingModelType::Preset {
            name: "fast".to_string(),
        },
        ..Default::default()
    };
    let cfg = ExtractionConfig {
        chunking: Some(ChunkingConfig {
            max_chars: 100,
            max_overlap: 20,
            embedding: Some(embedding_cfg),
            preset: None,
        }),
        ..Default::default()
    };

    let sample = "Fast embedding test. ".repeat(10);

    let extracted = extract_bytes(sample.as_bytes(), "text/plain", &cfg)
        .await
        .expect("Should extract successfully");

    let chunks = extracted.chunks.expect("Should have chunks");
    assert!(!chunks.is_empty(), "Should have at least one chunk");

    // Abort with the reported message if the embedding backend failed.
    let failure = extracted.metadata.additional.get("embedding_error");
    if let Some(error) = failure {
        panic!("Embedding generation failed: {}", error);
    }

    // Every chunk must carry a 384-dimensional vector for the fast preset.
    for chunk in chunks.iter() {
        let vector = chunk.embedding.as_ref().expect("Should have embedding");
        assert_eq!(vector.len(), 384, "Fast preset should produce 384-dim embeddings");
    }
}