kreuzberg 4.0.0.rc2 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (446) hide show
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +543 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +194 -6
  8. data/README.md +396 -426
  9. data/Rakefile +34 -25
  10. data/Steepfile +51 -47
  11. data/examples/async_patterns.rb +283 -341
  12. data/ext/kreuzberg_rb/extconf.rb +65 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +23 -0
  14. data/ext/kreuzberg_rb/native/Cargo.lock +7619 -6535
  15. data/ext/kreuzberg_rb/native/Cargo.toml +75 -44
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +15 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3802 -2998
  23. data/extconf.rb +60 -28
  24. data/kreuzberg.gemspec +199 -148
  25. data/lib/kreuzberg/api_proxy.rb +126 -142
  26. data/lib/kreuzberg/cache_api.rb +67 -46
  27. data/lib/kreuzberg/cli.rb +47 -55
  28. data/lib/kreuzberg/cli_proxy.rb +117 -127
  29. data/lib/kreuzberg/config.rb +936 -691
  30. data/lib/kreuzberg/error_context.rb +136 -32
  31. data/lib/kreuzberg/errors.rb +116 -118
  32. data/lib/kreuzberg/extraction_api.rb +313 -85
  33. data/lib/kreuzberg/mcp_proxy.rb +177 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +40 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +15 -86
  36. data/lib/kreuzberg/result.rb +334 -216
  37. data/lib/kreuzberg/setup_lib_path.rb +99 -80
  38. data/lib/kreuzberg/types.rb +170 -0
  39. data/lib/kreuzberg/validator_protocol.rb +16 -89
  40. data/lib/kreuzberg/version.rb +5 -5
  41. data/lib/kreuzberg.rb +96 -103
  42. data/lib/libpdfium.so +0 -0
  43. data/sig/kreuzberg/internal.rbs +184 -184
  44. data/sig/kreuzberg.rbs +561 -520
  45. data/spec/binding/async_operations_spec.rb +473 -0
  46. data/spec/binding/batch_operations_spec.rb +595 -0
  47. data/spec/binding/batch_spec.rb +359 -0
  48. data/spec/binding/cache_spec.rb +227 -227
  49. data/spec/binding/cli_proxy_spec.rb +85 -85
  50. data/spec/binding/cli_spec.rb +55 -55
  51. data/spec/binding/config_result_spec.rb +377 -0
  52. data/spec/binding/config_spec.rb +419 -345
  53. data/spec/binding/config_validation_spec.rb +377 -283
  54. data/spec/binding/embeddings_spec.rb +816 -0
  55. data/spec/binding/error_handling_spec.rb +399 -213
  56. data/spec/binding/error_recovery_spec.rb +488 -0
  57. data/spec/binding/errors_spec.rb +66 -66
  58. data/spec/binding/font_config_spec.rb +220 -0
  59. data/spec/binding/images_spec.rb +738 -0
  60. data/spec/binding/keywords_extraction_spec.rb +600 -0
  61. data/spec/binding/metadata_types_spec.rb +1228 -0
  62. data/spec/binding/pages_extraction_spec.rb +471 -0
  63. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  64. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  65. data/spec/binding/plugins/validator_spec.rb +273 -274
  66. data/spec/binding/tables_spec.rb +641 -0
  67. data/spec/fixtures/config.toml +38 -39
  68. data/spec/fixtures/config.yaml +41 -41
  69. data/spec/fixtures/invalid_config.toml +3 -4
  70. data/spec/smoke/package_spec.rb +177 -178
  71. data/spec/spec_helper.rb +40 -42
  72. data/spec/unit/config/chunking_config_spec.rb +213 -0
  73. data/spec/unit/config/embedding_config_spec.rb +343 -0
  74. data/spec/unit/config/extraction_config_spec.rb +438 -0
  75. data/spec/unit/config/font_config_spec.rb +285 -0
  76. data/spec/unit/config/hierarchy_config_spec.rb +314 -0
  77. data/spec/unit/config/image_extraction_config_spec.rb +209 -0
  78. data/spec/unit/config/image_preprocessing_config_spec.rb +249 -0
  79. data/spec/unit/config/keyword_config_spec.rb +229 -0
  80. data/spec/unit/config/language_detection_config_spec.rb +258 -0
  81. data/spec/unit/config/ocr_config_spec.rb +171 -0
  82. data/spec/unit/config/page_config_spec.rb +221 -0
  83. data/spec/unit/config/pdf_config_spec.rb +267 -0
  84. data/spec/unit/config/postprocessor_config_spec.rb +290 -0
  85. data/spec/unit/config/tesseract_config_spec.rb +181 -0
  86. data/spec/unit/config/token_reduction_config_spec.rb +251 -0
  87. data/test/metadata_types_test.rb +959 -0
  88. data/vendor/Cargo.toml +61 -0
  89. data/vendor/kreuzberg/Cargo.toml +259 -204
  90. data/vendor/kreuzberg/README.md +263 -175
  91. data/vendor/kreuzberg/build.rs +782 -474
  92. data/vendor/kreuzberg/examples/bench_fixes.rs +71 -0
  93. data/vendor/kreuzberg/examples/test_pdfium_fork.rs +62 -0
  94. data/vendor/kreuzberg/src/api/error.rs +81 -81
  95. data/vendor/kreuzberg/src/api/handlers.rs +320 -199
  96. data/vendor/kreuzberg/src/api/mod.rs +94 -79
  97. data/vendor/kreuzberg/src/api/server.rs +518 -353
  98. data/vendor/kreuzberg/src/api/types.rs +206 -170
  99. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  100. data/vendor/kreuzberg/src/chunking/mod.rs +2303 -677
  101. data/vendor/kreuzberg/src/chunking/processor.rs +219 -0
  102. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  103. data/vendor/kreuzberg/src/core/batch_optimizations.rs +385 -0
  104. data/vendor/kreuzberg/src/core/config.rs +1914 -1032
  105. data/vendor/kreuzberg/src/core/config_validation.rs +949 -0
  106. data/vendor/kreuzberg/src/core/extractor.rs +1200 -1024
  107. data/vendor/kreuzberg/src/core/formats.rs +235 -0
  108. data/vendor/kreuzberg/src/core/io.rs +329 -329
  109. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  110. data/vendor/kreuzberg/src/core/mod.rs +61 -45
  111. data/vendor/kreuzberg/src/core/pipeline.rs +1223 -984
  112. data/vendor/kreuzberg/src/core/server_config.rs +1220 -0
  113. data/vendor/kreuzberg/src/embeddings.rs +471 -432
  114. data/vendor/kreuzberg/src/error.rs +431 -431
  115. data/vendor/kreuzberg/src/extraction/archive.rs +959 -954
  116. data/vendor/kreuzberg/src/extraction/capacity.rs +263 -0
  117. data/vendor/kreuzberg/src/extraction/docx.rs +404 -40
  118. data/vendor/kreuzberg/src/extraction/email.rs +855 -854
  119. data/vendor/kreuzberg/src/extraction/excel.rs +697 -688
  120. data/vendor/kreuzberg/src/extraction/html.rs +1830 -553
  121. data/vendor/kreuzberg/src/extraction/image.rs +492 -368
  122. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -563
  123. data/vendor/kreuzberg/src/extraction/markdown.rs +216 -213
  124. data/vendor/kreuzberg/src/extraction/mod.rs +93 -81
  125. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  126. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  127. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  128. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  129. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -287
  130. data/vendor/kreuzberg/src/extraction/pptx.rs +3102 -3000
  131. data/vendor/kreuzberg/src/extraction/structured.rs +491 -490
  132. data/vendor/kreuzberg/src/extraction/table.rs +329 -328
  133. data/vendor/kreuzberg/src/extraction/text.rs +277 -269
  134. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  135. data/vendor/kreuzberg/src/extractors/archive.rs +447 -446
  136. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -469
  137. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -502
  138. data/vendor/kreuzberg/src/extractors/docx.rs +400 -367
  139. data/vendor/kreuzberg/src/extractors/email.rs +157 -143
  140. data/vendor/kreuzberg/src/extractors/epub.rs +696 -707
  141. data/vendor/kreuzberg/src/extractors/excel.rs +385 -343
  142. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -491
  143. data/vendor/kreuzberg/src/extractors/html.rs +419 -393
  144. data/vendor/kreuzberg/src/extractors/image.rs +219 -198
  145. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1051
  146. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -367
  147. data/vendor/kreuzberg/src/extractors/latex.rs +653 -652
  148. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -700
  149. data/vendor/kreuzberg/src/extractors/mod.rs +429 -365
  150. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  151. data/vendor/kreuzberg/src/extractors/opml.rs +635 -634
  152. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -528
  153. data/vendor/kreuzberg/src/extractors/pdf.rs +761 -493
  154. data/vendor/kreuzberg/src/extractors/pptx.rs +279 -248
  155. data/vendor/kreuzberg/src/extractors/rst.rs +577 -576
  156. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -810
  157. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  158. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  159. data/vendor/kreuzberg/src/extractors/structured.rs +142 -140
  160. data/vendor/kreuzberg/src/extractors/text.rs +265 -260
  161. data/vendor/kreuzberg/src/extractors/typst.rs +651 -650
  162. data/vendor/kreuzberg/src/extractors/xml.rs +147 -135
  163. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  164. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  165. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  166. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  167. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  168. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  169. data/vendor/kreuzberg/src/keywords/processor.rs +275 -267
  170. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  171. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  172. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  173. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -942
  174. data/vendor/kreuzberg/src/language_detection/processor.rs +218 -0
  175. data/vendor/kreuzberg/src/lib.rs +114 -105
  176. data/vendor/kreuzberg/src/mcp/mod.rs +35 -32
  177. data/vendor/kreuzberg/src/mcp/server.rs +2090 -1968
  178. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  179. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  180. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  181. data/vendor/kreuzberg/src/ocr/language_registry.rs +520 -0
  182. data/vendor/kreuzberg/src/ocr/mod.rs +60 -58
  183. data/vendor/kreuzberg/src/ocr/processor.rs +858 -863
  184. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  185. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  186. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +456 -450
  187. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  188. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  189. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  190. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  191. data/vendor/kreuzberg/src/pdf/bindings.rs +306 -0
  192. data/vendor/kreuzberg/src/pdf/bundled.rs +408 -0
  193. data/vendor/kreuzberg/src/pdf/error.rs +214 -122
  194. data/vendor/kreuzberg/src/pdf/fonts.rs +358 -0
  195. data/vendor/kreuzberg/src/pdf/hierarchy.rs +903 -0
  196. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  197. data/vendor/kreuzberg/src/pdf/metadata.rs +509 -346
  198. data/vendor/kreuzberg/src/pdf/mod.rs +81 -50
  199. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  200. data/vendor/kreuzberg/src/pdf/table.rs +417 -393
  201. data/vendor/kreuzberg/src/pdf/text.rs +553 -158
  202. data/vendor/kreuzberg/src/plugins/extractor.rs +1042 -1013
  203. data/vendor/kreuzberg/src/plugins/mod.rs +212 -209
  204. data/vendor/kreuzberg/src/plugins/ocr.rs +637 -620
  205. data/vendor/kreuzberg/src/plugins/processor.rs +650 -642
  206. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1337
  207. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  208. data/vendor/kreuzberg/src/plugins/validator.rs +967 -956
  209. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  210. data/vendor/kreuzberg/src/text/mod.rs +27 -19
  211. data/vendor/kreuzberg/src/text/quality.rs +710 -697
  212. data/vendor/kreuzberg/src/text/quality_processor.rs +231 -0
  213. data/vendor/kreuzberg/src/text/string_utils.rs +229 -217
  214. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  215. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  216. data/vendor/kreuzberg/src/text/token_reduction/core.rs +832 -796
  217. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +923 -902
  218. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  219. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  220. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +148 -147
  221. data/vendor/kreuzberg/src/text/utf8_validation.rs +193 -0
  222. data/vendor/kreuzberg/src/types.rs +1713 -903
  223. data/vendor/kreuzberg/src/utils/mod.rs +31 -17
  224. data/vendor/kreuzberg/src/utils/pool.rs +503 -0
  225. data/vendor/kreuzberg/src/utils/pool_sizing.rs +364 -0
  226. data/vendor/kreuzberg/src/utils/quality.rs +968 -959
  227. data/vendor/kreuzberg/src/utils/string_pool.rs +761 -0
  228. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  229. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  230. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  231. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  232. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  233. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  234. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  235. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  236. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  237. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  238. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  239. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  240. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  241. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  242. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  243. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  244. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  245. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  246. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  247. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  248. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  249. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  250. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  251. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  252. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  253. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  254. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  255. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  256. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  257. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  258. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  259. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  260. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  261. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  262. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  263. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  264. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  265. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  266. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  267. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  268. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  269. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  270. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  271. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  272. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  273. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  274. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  275. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  276. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  277. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  278. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  279. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  280. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  281. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  282. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  283. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  284. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  285. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  286. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  287. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  288. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  289. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  290. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  291. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  292. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  293. data/vendor/kreuzberg/tests/api_embed.rs +360 -0
  294. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  295. data/vendor/kreuzberg/tests/api_large_pdf_extraction.rs +471 -0
  296. data/vendor/kreuzberg/tests/api_large_pdf_extraction_diagnostics.rs +289 -0
  297. data/vendor/kreuzberg/tests/api_tests.rs +1472 -966
  298. data/vendor/kreuzberg/tests/archive_integration.rs +545 -543
  299. data/vendor/kreuzberg/tests/batch_orchestration.rs +587 -556
  300. data/vendor/kreuzberg/tests/batch_pooling_benchmark.rs +154 -0
  301. data/vendor/kreuzberg/tests/batch_processing.rs +328 -316
  302. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  303. data/vendor/kreuzberg/tests/concurrency_stress.rs +541 -525
  304. data/vendor/kreuzberg/tests/config_features.rs +612 -598
  305. data/vendor/kreuzberg/tests/config_integration_test.rs +753 -0
  306. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -415
  307. data/vendor/kreuzberg/tests/core_integration.rs +519 -510
  308. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  309. data/vendor/kreuzberg/tests/data/hierarchy_ground_truth.json +294 -0
  310. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -498
  311. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  312. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  313. data/vendor/kreuzberg/tests/email_integration.rs +327 -325
  314. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  315. data/vendor/kreuzberg/tests/error_handling.rs +402 -393
  316. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  317. data/vendor/kreuzberg/tests/format_integration.rs +165 -159
  318. data/vendor/kreuzberg/tests/helpers/mod.rs +202 -142
  319. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  320. data/vendor/kreuzberg/tests/image_integration.rs +255 -253
  321. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  322. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  323. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  324. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  325. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  326. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  327. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  328. data/vendor/kreuzberg/tests/mime_detection.rs +429 -428
  329. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -510
  330. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -676
  331. data/vendor/kreuzberg/tests/ocr_language_registry.rs +191 -0
  332. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -627
  333. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  334. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -695
  335. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  336. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  337. data/vendor/kreuzberg/tests/page_markers.rs +297 -0
  338. data/vendor/kreuzberg/tests/pdf_hierarchy_detection.rs +301 -0
  339. data/vendor/kreuzberg/tests/pdf_hierarchy_quality.rs +589 -0
  340. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -43
  341. data/vendor/kreuzberg/tests/pdf_ocr_triggering.rs +301 -0
  342. data/vendor/kreuzberg/tests/pdf_text_merging.rs +475 -0
  343. data/vendor/kreuzberg/tests/pdfium_linking.rs +340 -0
  344. data/vendor/kreuzberg/tests/pipeline_integration.rs +1446 -1411
  345. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -771
  346. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +577 -560
  347. data/vendor/kreuzberg/tests/plugin_system.rs +927 -921
  348. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  349. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -586
  350. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -692
  351. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -776
  352. data/vendor/kreuzberg/tests/security_validation.rs +416 -415
  353. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  354. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -609
  355. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1259
  356. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -647
  357. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  358. data/vendor/kreuzberg-ffi/Cargo.toml +67 -0
  359. data/vendor/kreuzberg-ffi/README.md +851 -0
  360. data/vendor/kreuzberg-ffi/benches/result_view_benchmark.rs +227 -0
  361. data/vendor/kreuzberg-ffi/build.rs +168 -0
  362. data/vendor/kreuzberg-ffi/cbindgen.toml +37 -0
  363. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -0
  364. data/vendor/kreuzberg-ffi/kreuzberg.h +3012 -0
  365. data/vendor/kreuzberg-ffi/src/batch_streaming.rs +588 -0
  366. data/vendor/kreuzberg-ffi/src/config.rs +1341 -0
  367. data/vendor/kreuzberg-ffi/src/error.rs +901 -0
  368. data/vendor/kreuzberg-ffi/src/extraction.rs +555 -0
  369. data/vendor/kreuzberg-ffi/src/helpers.rs +879 -0
  370. data/vendor/kreuzberg-ffi/src/lib.rs +977 -0
  371. data/vendor/kreuzberg-ffi/src/memory.rs +493 -0
  372. data/vendor/kreuzberg-ffi/src/mime.rs +329 -0
  373. data/vendor/kreuzberg-ffi/src/panic_shield.rs +265 -0
  374. data/vendor/kreuzberg-ffi/src/plugins/document_extractor.rs +442 -0
  375. data/vendor/kreuzberg-ffi/src/plugins/mod.rs +14 -0
  376. data/vendor/kreuzberg-ffi/src/plugins/ocr_backend.rs +628 -0
  377. data/vendor/kreuzberg-ffi/src/plugins/post_processor.rs +438 -0
  378. data/vendor/kreuzberg-ffi/src/plugins/validator.rs +329 -0
  379. data/vendor/kreuzberg-ffi/src/result.rs +510 -0
  380. data/vendor/kreuzberg-ffi/src/result_pool.rs +639 -0
  381. data/vendor/kreuzberg-ffi/src/result_view.rs +773 -0
  382. data/vendor/kreuzberg-ffi/src/string_intern.rs +568 -0
  383. data/vendor/kreuzberg-ffi/src/types.rs +363 -0
  384. data/vendor/kreuzberg-ffi/src/util.rs +210 -0
  385. data/vendor/kreuzberg-ffi/src/validation.rs +848 -0
  386. data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -0
  387. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -0
  388. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -0
  389. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -0
  390. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -0
  391. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -0
  392. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -0
  393. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -0
  394. data/vendor/kreuzberg-tesseract/Cargo.toml +57 -0
  395. data/vendor/{rb-sys/LICENSE-MIT → kreuzberg-tesseract/LICENSE} +22 -21
  396. data/vendor/kreuzberg-tesseract/README.md +399 -0
  397. data/vendor/kreuzberg-tesseract/build.rs +1127 -0
  398. data/vendor/kreuzberg-tesseract/patches/README.md +71 -0
  399. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -0
  400. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -0
  401. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -0
  402. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -0
  403. data/vendor/kreuzberg-tesseract/src/error.rs +81 -0
  404. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -0
  405. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -0
  406. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -0
  407. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -0
  408. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -0
  409. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -0
  410. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -0
  411. metadata +196 -45
  412. data/vendor/kreuzberg/benches/otel_overhead.rs +0 -48
  413. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +0 -738
  414. data/vendor/rb-sys/.cargo-ok +0 -1
  415. data/vendor/rb-sys/.cargo_vcs_info.json +0 -6
  416. data/vendor/rb-sys/Cargo.lock +0 -393
  417. data/vendor/rb-sys/Cargo.toml +0 -70
  418. data/vendor/rb-sys/Cargo.toml.orig +0 -57
  419. data/vendor/rb-sys/LICENSE-APACHE +0 -190
  420. data/vendor/rb-sys/bin/release.sh +0 -21
  421. data/vendor/rb-sys/build/features.rs +0 -108
  422. data/vendor/rb-sys/build/main.rs +0 -246
  423. data/vendor/rb-sys/build/stable_api_config.rs +0 -153
  424. data/vendor/rb-sys/build/version.rs +0 -48
  425. data/vendor/rb-sys/readme.md +0 -36
  426. data/vendor/rb-sys/src/bindings.rs +0 -21
  427. data/vendor/rb-sys/src/hidden.rs +0 -11
  428. data/vendor/rb-sys/src/lib.rs +0 -34
  429. data/vendor/rb-sys/src/macros.rs +0 -371
  430. data/vendor/rb-sys/src/memory.rs +0 -53
  431. data/vendor/rb-sys/src/ruby_abi_version.rs +0 -38
  432. data/vendor/rb-sys/src/special_consts.rs +0 -31
  433. data/vendor/rb-sys/src/stable_api/compiled.c +0 -179
  434. data/vendor/rb-sys/src/stable_api/compiled.rs +0 -257
  435. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +0 -316
  436. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +0 -316
  437. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +0 -324
  438. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +0 -317
  439. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +0 -315
  440. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +0 -326
  441. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +0 -327
  442. data/vendor/rb-sys/src/stable_api.rs +0 -261
  443. data/vendor/rb-sys/src/symbol.rs +0 -31
  444. data/vendor/rb-sys/src/tracking_allocator.rs +0 -332
  445. data/vendor/rb-sys/src/utils.rs +0 -89
  446. data/vendor/rb-sys/src/value_type.rs +0 -7
@@ -1,677 +1,2303 @@
1
- //! Text chunking utilities.
2
- //!
3
- //! This module provides text chunking functionality using the `text-splitter` library.
4
- //! It splits long text into smaller chunks while preserving semantic boundaries.
5
- //!
6
- //! # Features
7
- //!
8
- //! - **Smart splitting**: Respects word and sentence boundaries
9
- //! - **Markdown-aware**: Preserves Markdown structure (headings, code blocks, lists)
10
- //! - **Configurable overlap**: Overlap chunks to maintain context
11
- //! - **Unicode support**: Handles CJK characters and emojis correctly
12
- //! - **Batch processing**: Process multiple texts efficiently
13
- //!
14
- //! # Chunker Types
15
- //!
16
- //! - **Text**: Generic text splitter, splits on whitespace and punctuation
17
- //! - **Markdown**: Markdown-aware splitter, preserves formatting and structure
18
- //!
19
- //! # Example
20
- //!
21
- //! ```rust
22
- //! use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
23
- //!
24
- //! # fn example() -> kreuzberg::Result<()> {
25
- //! let config = ChunkingConfig {
26
- //! max_characters: 500,
27
- //! overlap: 50,
28
- //! trim: true,
29
- //! chunker_type: ChunkerType::Text,
30
- //! };
31
- //!
32
- //! let long_text = "This is a very long document...".repeat(100);
33
- //! let result = chunk_text(&long_text, &config)?;
34
- //!
35
- //! println!("Split into {} chunks", result.chunk_count);
36
- //! for (i, chunk) in result.chunks.iter().enumerate() {
37
- //! println!("Chunk {}: {} chars", i + 1, chunk.content.len());
38
- //! }
39
- //! # Ok(())
40
- //! # }
41
- //! ```
42
- //!
43
- //! # Use Cases
44
- //!
45
- //! - Splitting documents for LLM context windows
46
- //! - Creating overlapping chunks for semantic search
47
- //! - Processing large documents in batches
48
- //! - Maintaining context across chunk boundaries
49
- use crate::error::{KreuzbergError, Result};
50
- use crate::types::{Chunk, ChunkMetadata};
51
- use serde::{Deserialize, Serialize};
52
- use text_splitter::{Characters, ChunkCapacity, ChunkConfig, MarkdownSplitter, TextSplitter};
53
-
54
- #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
55
- pub enum ChunkerType {
56
- Text,
57
- Markdown,
58
- }
59
-
60
- #[derive(Debug, Clone, Serialize, Deserialize)]
61
- pub struct ChunkingResult {
62
- pub chunks: Vec<Chunk>,
63
- pub chunk_count: usize,
64
- }
65
-
66
- pub struct ChunkingConfig {
67
- pub max_characters: usize,
68
- pub overlap: usize,
69
- pub trim: bool,
70
- pub chunker_type: ChunkerType,
71
- }
72
-
73
- impl Default for ChunkingConfig {
74
- fn default() -> Self {
75
- Self {
76
- max_characters: 2000,
77
- overlap: 100,
78
- trim: true,
79
- chunker_type: ChunkerType::Text,
80
- }
81
- }
82
- }
83
-
84
- fn build_chunk_config(max_characters: usize, overlap: usize, trim: bool) -> Result<ChunkConfig<Characters>> {
85
- ChunkConfig::new(ChunkCapacity::new(max_characters))
86
- .with_overlap(overlap)
87
- .map(|config| config.with_trim(trim))
88
- .map_err(|e| KreuzbergError::validation(format!("Invalid chunking configuration: {}", e)))
89
- }
90
-
91
- pub fn chunk_text(text: &str, config: &ChunkingConfig) -> Result<ChunkingResult> {
92
- if text.is_empty() {
93
- return Ok(ChunkingResult {
94
- chunks: vec![],
95
- chunk_count: 0,
96
- });
97
- }
98
-
99
- let chunk_config = build_chunk_config(config.max_characters, config.overlap, config.trim)?;
100
-
101
- let text_chunks: Vec<&str> = match config.chunker_type {
102
- ChunkerType::Text => {
103
- let splitter = TextSplitter::new(chunk_config);
104
- splitter.chunks(text).collect()
105
- }
106
- ChunkerType::Markdown => {
107
- let splitter = MarkdownSplitter::new(chunk_config);
108
- splitter.chunks(text).collect()
109
- }
110
- };
111
-
112
- let total_chunks = text_chunks.len();
113
- let mut char_offset = 0;
114
-
115
- let chunks: Vec<Chunk> = text_chunks
116
- .into_iter()
117
- .enumerate()
118
- .map(|(index, chunk_text)| {
119
- let char_start = char_offset;
120
- let chunk_length = chunk_text.chars().count();
121
- let char_end = char_start + chunk_length;
122
-
123
- let overlap_chars = if index < total_chunks - 1 {
124
- config.overlap.min(chunk_length)
125
- } else {
126
- 0
127
- };
128
- char_offset = char_end - overlap_chars;
129
-
130
- Chunk {
131
- content: chunk_text.to_string(),
132
- embedding: None,
133
- metadata: ChunkMetadata {
134
- char_start,
135
- char_end,
136
- token_count: None,
137
- chunk_index: index,
138
- total_chunks,
139
- },
140
- }
141
- })
142
- .collect();
143
-
144
- let chunk_count = chunks.len();
145
-
146
- Ok(ChunkingResult { chunks, chunk_count })
147
- }
148
-
149
- pub fn chunk_text_with_type(
150
- text: &str,
151
- max_characters: usize,
152
- overlap: usize,
153
- trim: bool,
154
- chunker_type: ChunkerType,
155
- ) -> Result<ChunkingResult> {
156
- let config = ChunkingConfig {
157
- max_characters,
158
- overlap,
159
- trim,
160
- chunker_type,
161
- };
162
- chunk_text(text, &config)
163
- }
164
-
165
- pub fn chunk_texts_batch(texts: &[&str], config: &ChunkingConfig) -> Result<Vec<ChunkingResult>> {
166
- texts.iter().map(|text| chunk_text(text, config)).collect()
167
- }
168
-
169
- #[cfg(test)]
170
- mod tests {
171
- use super::*;
172
-
173
- #[test]
174
- fn test_chunk_empty_text() {
175
- let config = ChunkingConfig::default();
176
- let result = chunk_text("", &config).unwrap();
177
- assert_eq!(result.chunks.len(), 0);
178
- assert_eq!(result.chunk_count, 0);
179
- }
180
-
181
- #[test]
182
- fn test_chunk_short_text_single_chunk() {
183
- let config = ChunkingConfig {
184
- max_characters: 100,
185
- overlap: 10,
186
- trim: true,
187
- chunker_type: ChunkerType::Text,
188
- };
189
- let text = "This is a short text.";
190
- let result = chunk_text(text, &config).unwrap();
191
- assert_eq!(result.chunks.len(), 1);
192
- assert_eq!(result.chunk_count, 1);
193
- assert_eq!(result.chunks[0].content, text);
194
- }
195
-
196
- #[test]
197
- fn test_chunk_long_text_multiple_chunks() {
198
- let config = ChunkingConfig {
199
- max_characters: 20,
200
- overlap: 5,
201
- trim: true,
202
- chunker_type: ChunkerType::Text,
203
- };
204
- let text = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
205
- let result = chunk_text(text, &config).unwrap();
206
- assert!(result.chunk_count >= 2);
207
- assert_eq!(result.chunks.len(), result.chunk_count);
208
- assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 20));
209
- }
210
-
211
- #[test]
212
- fn test_chunk_text_with_overlap() {
213
- let config = ChunkingConfig {
214
- max_characters: 20,
215
- overlap: 5,
216
- trim: true,
217
- chunker_type: ChunkerType::Text,
218
- };
219
- let text = "abcdefghijklmnopqrstuvwxyz0123456789";
220
- let result = chunk_text(text, &config).unwrap();
221
- assert!(result.chunk_count >= 2);
222
-
223
- if result.chunks.len() >= 2 {
224
- let first_chunk_end = &result.chunks[0].content[result.chunks[0].content.len().saturating_sub(5)..];
225
- assert!(
226
- result.chunks[1].content.starts_with(first_chunk_end),
227
- "Expected overlap '{}' at start of second chunk '{}'",
228
- first_chunk_end,
229
- result.chunks[1].content
230
- );
231
- }
232
- }
233
-
234
- #[test]
235
- fn test_chunk_markdown_preserves_structure() {
236
- let config = ChunkingConfig {
237
- max_characters: 50,
238
- overlap: 10,
239
- trim: true,
240
- chunker_type: ChunkerType::Markdown,
241
- };
242
- let markdown = "# Title\n\nParagraph one.\n\n## Section\n\nParagraph two.";
243
- let result = chunk_text(markdown, &config).unwrap();
244
- assert!(result.chunk_count >= 1);
245
- assert!(result.chunks.iter().any(|chunk| chunk.content.contains("# Title")));
246
- }
247
-
248
- #[test]
249
- fn test_chunk_markdown_with_code_blocks() {
250
- let config = ChunkingConfig {
251
- max_characters: 100,
252
- overlap: 10,
253
- trim: true,
254
- chunker_type: ChunkerType::Markdown,
255
- };
256
- let markdown = "# Code Example\n\n```python\nprint('hello')\n```\n\nSome text after code.";
257
- let result = chunk_text(markdown, &config).unwrap();
258
- assert!(result.chunk_count >= 1);
259
- assert!(result.chunks.iter().any(|chunk| chunk.content.contains("```")));
260
- }
261
-
262
- #[test]
263
- fn test_chunk_markdown_with_links() {
264
- let config = ChunkingConfig {
265
- max_characters: 80,
266
- overlap: 10,
267
- trim: true,
268
- chunker_type: ChunkerType::Markdown,
269
- };
270
- let markdown = "Check out [this link](https://example.com) for more info.";
271
- let result = chunk_text(markdown, &config).unwrap();
272
- assert_eq!(result.chunk_count, 1);
273
- assert!(result.chunks[0].content.contains("[this link]"));
274
- }
275
-
276
- #[test]
277
- fn test_chunk_text_with_trim() {
278
- let config = ChunkingConfig {
279
- max_characters: 30,
280
- overlap: 5,
281
- trim: true,
282
- chunker_type: ChunkerType::Text,
283
- };
284
- let text = " Leading and trailing spaces should be trimmed ";
285
- let result = chunk_text(text, &config).unwrap();
286
- assert!(result.chunk_count >= 1);
287
- assert!(result.chunks.iter().all(|chunk| !chunk.content.starts_with(' ')));
288
- }
289
-
290
- #[test]
291
- fn test_chunk_text_without_trim() {
292
- let config = ChunkingConfig {
293
- max_characters: 30,
294
- overlap: 5,
295
- trim: false,
296
- chunker_type: ChunkerType::Text,
297
- };
298
- let text = " Text with spaces ";
299
- let result = chunk_text(text, &config).unwrap();
300
- assert_eq!(result.chunk_count, 1);
301
- assert!(result.chunks[0].content.starts_with(' ') || result.chunks[0].content.len() < text.len());
302
- }
303
-
304
- #[test]
305
- fn test_chunk_with_invalid_overlap() {
306
- let config = ChunkingConfig {
307
- max_characters: 10,
308
- overlap: 20,
309
- trim: true,
310
- chunker_type: ChunkerType::Text,
311
- };
312
- let result = chunk_text("Some text", &config);
313
- assert!(result.is_err());
314
- let err = result.unwrap_err();
315
- assert!(matches!(err, KreuzbergError::Validation { .. }));
316
- }
317
-
318
- #[test]
319
- fn test_chunk_text_with_type_text() {
320
- let result = chunk_text_with_type("Simple text", 50, 10, true, ChunkerType::Text).unwrap();
321
- assert_eq!(result.chunk_count, 1);
322
- assert_eq!(result.chunks[0].content, "Simple text");
323
- }
324
-
325
- #[test]
326
- fn test_chunk_text_with_type_markdown() {
327
- let markdown = "# Header\n\nContent here.";
328
- let result = chunk_text_with_type(markdown, 50, 10, true, ChunkerType::Markdown).unwrap();
329
- assert_eq!(result.chunk_count, 1);
330
- assert!(result.chunks[0].content.contains("# Header"));
331
- }
332
-
333
- #[test]
334
- fn test_chunk_texts_batch_empty() {
335
- let config = ChunkingConfig::default();
336
- let texts: Vec<&str> = vec![];
337
- let results = chunk_texts_batch(&texts, &config).unwrap();
338
- assert_eq!(results.len(), 0);
339
- }
340
-
341
- #[test]
342
- fn test_chunk_texts_batch_multiple() {
343
- let config = ChunkingConfig {
344
- max_characters: 30,
345
- overlap: 5,
346
- trim: true,
347
- chunker_type: ChunkerType::Text,
348
- };
349
- let texts = vec!["First text", "Second text", "Third text"];
350
- let results = chunk_texts_batch(&texts, &config).unwrap();
351
- assert_eq!(results.len(), 3);
352
- assert!(results.iter().all(|r| r.chunk_count >= 1));
353
- }
354
-
355
- #[test]
356
- fn test_chunk_texts_batch_mixed_lengths() {
357
- let config = ChunkingConfig {
358
- max_characters: 20,
359
- overlap: 5,
360
- trim: true,
361
- chunker_type: ChunkerType::Text,
362
- };
363
- let texts = vec![
364
- "Short",
365
- "This is a longer text that should be split into multiple chunks",
366
- "",
367
- ];
368
- let results = chunk_texts_batch(&texts, &config).unwrap();
369
- assert_eq!(results.len(), 3);
370
- assert_eq!(results[0].chunk_count, 1);
371
- assert!(results[1].chunk_count > 1);
372
- assert_eq!(results[2].chunk_count, 0);
373
- }
374
-
375
- #[test]
376
- fn test_chunk_texts_batch_error_propagation() {
377
- let config = ChunkingConfig {
378
- max_characters: 10,
379
- overlap: 20,
380
- trim: true,
381
- chunker_type: ChunkerType::Text,
382
- };
383
- let texts = vec!["Text one", "Text two"];
384
- let result = chunk_texts_batch(&texts, &config);
385
- assert!(result.is_err());
386
- }
387
-
388
- #[test]
389
- fn test_chunking_config_default() {
390
- let config = ChunkingConfig::default();
391
- assert_eq!(config.max_characters, 2000);
392
- assert_eq!(config.overlap, 100);
393
- assert!(config.trim);
394
- assert_eq!(config.chunker_type, ChunkerType::Text);
395
- }
396
-
397
- #[test]
398
- fn test_chunk_very_long_text() {
399
- let config = ChunkingConfig {
400
- max_characters: 100,
401
- overlap: 20,
402
- trim: true,
403
- chunker_type: ChunkerType::Text,
404
- };
405
- let text = "a".repeat(1000);
406
- let result = chunk_text(&text, &config).unwrap();
407
- assert!(result.chunk_count >= 10);
408
- assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 100));
409
- }
410
-
411
- #[test]
412
- fn test_chunk_text_with_newlines() {
413
- let config = ChunkingConfig {
414
- max_characters: 30,
415
- overlap: 5,
416
- trim: true,
417
- chunker_type: ChunkerType::Text,
418
- };
419
- let text = "Line one\nLine two\nLine three\nLine four\nLine five";
420
- let result = chunk_text(text, &config).unwrap();
421
- assert!(result.chunk_count >= 1);
422
- }
423
-
424
- #[test]
425
- fn test_chunk_markdown_with_lists() {
426
- let config = ChunkingConfig {
427
- max_characters: 100,
428
- overlap: 10,
429
- trim: true,
430
- chunker_type: ChunkerType::Markdown,
431
- };
432
- let markdown = "# List Example\n\n- Item 1\n- Item 2\n- Item 3\n\nMore text.";
433
- let result = chunk_text(markdown, &config).unwrap();
434
- assert!(result.chunk_count >= 1);
435
- assert!(result.chunks.iter().any(|chunk| chunk.content.contains("- Item")));
436
- }
437
-
438
- #[test]
439
- fn test_chunk_markdown_with_tables() {
440
- let config = ChunkingConfig {
441
- max_characters: 150,
442
- overlap: 10,
443
- trim: true,
444
- chunker_type: ChunkerType::Markdown,
445
- };
446
- let markdown = "# Table\n\n| Col1 | Col2 |\n|------|------|\n| A | B |\n| C | D |";
447
- let result = chunk_text(markdown, &config).unwrap();
448
- assert!(result.chunk_count >= 1);
449
- assert!(result.chunks.iter().any(|chunk| chunk.content.contains("|")));
450
- }
451
-
452
- #[test]
453
- fn test_chunk_special_characters() {
454
- let config = ChunkingConfig {
455
- max_characters: 50,
456
- overlap: 5,
457
- trim: true,
458
- chunker_type: ChunkerType::Text,
459
- };
460
- let text = "Special chars: @#$%^&*()[]{}|\\<>?/~`";
461
- let result = chunk_text(text, &config).unwrap();
462
- assert_eq!(result.chunk_count, 1);
463
- assert!(result.chunks[0].content.contains("@#$%"));
464
- }
465
-
466
- #[test]
467
- fn test_chunk_unicode_characters() {
468
- let config = ChunkingConfig {
469
- max_characters: 50,
470
- overlap: 5,
471
- trim: true,
472
- chunker_type: ChunkerType::Text,
473
- };
474
- let text = "Unicode: 你好世界 🌍 café résumé";
475
- let result = chunk_text(text, &config).unwrap();
476
- assert_eq!(result.chunk_count, 1);
477
- assert!(result.chunks[0].content.contains("你好"));
478
- assert!(result.chunks[0].content.contains("🌍"));
479
- }
480
-
481
- #[test]
482
- fn test_chunk_cjk_text() {
483
- let config = ChunkingConfig {
484
- max_characters: 30,
485
- overlap: 5,
486
- trim: true,
487
- chunker_type: ChunkerType::Text,
488
- };
489
- let text = "日本語のテキストです。これは長い文章で、複数のチャンクに分割されるべきです。";
490
- let result = chunk_text(text, &config).unwrap();
491
- assert!(result.chunk_count >= 1);
492
- }
493
-
494
- #[test]
495
- fn test_chunk_mixed_languages() {
496
- let config = ChunkingConfig {
497
- max_characters: 40,
498
- overlap: 5,
499
- trim: true,
500
- chunker_type: ChunkerType::Text,
501
- };
502
- let text = "English text mixed with 中文文本 and some français";
503
- let result = chunk_text(text, &config).unwrap();
504
- assert!(result.chunk_count >= 1);
505
- }
506
-
507
- #[test]
508
- fn test_chunk_offset_calculation_with_overlap() {
509
- let config = ChunkingConfig {
510
- max_characters: 20,
511
- overlap: 5,
512
- trim: false,
513
- chunker_type: ChunkerType::Text,
514
- };
515
- let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
516
- let result = chunk_text(text, &config).unwrap();
517
-
518
- assert!(result.chunks.len() >= 2, "Expected at least 2 chunks");
519
-
520
- for i in 0..result.chunks.len() {
521
- let chunk = &result.chunks[i];
522
- let metadata = &chunk.metadata;
523
-
524
- assert_eq!(
525
- metadata.char_end - metadata.char_start,
526
- chunk.content.chars().count(),
527
- "Chunk {} offset range doesn't match content length",
528
- i
529
- );
530
-
531
- assert_eq!(metadata.chunk_index, i);
532
- assert_eq!(metadata.total_chunks, result.chunks.len());
533
- }
534
-
535
- for i in 0..result.chunks.len() - 1 {
536
- let current_chunk = &result.chunks[i];
537
- let next_chunk = &result.chunks[i + 1];
538
-
539
- assert!(
540
- next_chunk.metadata.char_start < current_chunk.metadata.char_end,
541
- "Chunk {} and {} don't overlap: next starts at {} but current ends at {}",
542
- i,
543
- i + 1,
544
- next_chunk.metadata.char_start,
545
- current_chunk.metadata.char_end
546
- );
547
-
548
- let overlap_size = current_chunk.metadata.char_end - next_chunk.metadata.char_start;
549
- assert!(
550
- overlap_size <= config.overlap + 10,
551
- "Overlap between chunks {} and {} is too large: {}",
552
- i,
553
- i + 1,
554
- overlap_size
555
- );
556
- }
557
- }
558
-
559
- #[test]
560
- fn test_chunk_offset_calculation_without_overlap() {
561
- let config = ChunkingConfig {
562
- max_characters: 20,
563
- overlap: 0,
564
- trim: false,
565
- chunker_type: ChunkerType::Text,
566
- };
567
- let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
568
- let result = chunk_text(text, &config).unwrap();
569
-
570
- for i in 0..result.chunks.len() - 1 {
571
- let current_chunk = &result.chunks[i];
572
- let next_chunk = &result.chunks[i + 1];
573
-
574
- assert!(
575
- next_chunk.metadata.char_start >= current_chunk.metadata.char_end,
576
- "Chunk {} and {} overlap when they shouldn't: next starts at {} but current ends at {}",
577
- i,
578
- i + 1,
579
- next_chunk.metadata.char_start,
580
- current_chunk.metadata.char_end
581
- );
582
- }
583
- }
584
-
585
- #[test]
586
- fn test_chunk_offset_covers_full_text() {
587
- let config = ChunkingConfig {
588
- max_characters: 15,
589
- overlap: 3,
590
- trim: false,
591
- chunker_type: ChunkerType::Text,
592
- };
593
- let text = "0123456789 ABCDEFGHIJ KLMNOPQRST UVWXYZ";
594
- let result = chunk_text(text, &config).unwrap();
595
-
596
- assert!(result.chunks.len() >= 2, "Expected multiple chunks");
597
-
598
- assert_eq!(
599
- result.chunks[0].metadata.char_start, 0,
600
- "First chunk should start at position 0"
601
- );
602
-
603
- for i in 0..result.chunks.len() - 1 {
604
- let current_chunk = &result.chunks[i];
605
- let next_chunk = &result.chunks[i + 1];
606
-
607
- assert!(
608
- next_chunk.metadata.char_start <= current_chunk.metadata.char_end,
609
- "Gap detected between chunk {} (ends at {}) and chunk {} (starts at {})",
610
- i,
611
- current_chunk.metadata.char_end,
612
- i + 1,
613
- next_chunk.metadata.char_start
614
- );
615
- }
616
- }
617
-
618
- #[test]
619
- fn test_chunk_offset_with_various_overlap_sizes() {
620
- for overlap in [0, 5, 10, 20] {
621
- let config = ChunkingConfig {
622
- max_characters: 30,
623
- overlap,
624
- trim: false,
625
- chunker_type: ChunkerType::Text,
626
- };
627
- let text = "Word ".repeat(30);
628
- let result = chunk_text(&text, &config).unwrap();
629
-
630
- for chunk in &result.chunks {
631
- assert!(
632
- chunk.metadata.char_end > chunk.metadata.char_start,
633
- "Invalid offset range for overlap {}: start={}, end={}",
634
- overlap,
635
- chunk.metadata.char_start,
636
- chunk.metadata.char_end
637
- );
638
- }
639
-
640
- for chunk in &result.chunks {
641
- assert!(
642
- chunk.metadata.char_start < text.chars().count(),
643
- "char_start with overlap {} is out of bounds: {}",
644
- overlap,
645
- chunk.metadata.char_start
646
- );
647
- }
648
- }
649
- }
650
-
651
- #[test]
652
- fn test_chunk_last_chunk_offset() {
653
- let config = ChunkingConfig {
654
- max_characters: 20,
655
- overlap: 5,
656
- trim: false,
657
- chunker_type: ChunkerType::Text,
658
- };
659
- let text = "AAAAA BBBBB CCCCC DDDDD EEEEE";
660
- let result = chunk_text(text, &config).unwrap();
661
-
662
- assert!(result.chunks.len() >= 2, "Need multiple chunks for this test");
663
-
664
- let last_chunk = result.chunks.last().unwrap();
665
- let second_to_last = &result.chunks[result.chunks.len() - 2];
666
-
667
- assert!(
668
- last_chunk.metadata.char_start < second_to_last.metadata.char_end,
669
- "Last chunk should overlap with previous chunk"
670
- );
671
-
672
- let expected_end = text.chars().count();
673
- let last_chunk_covers_end =
674
- last_chunk.content.trim_end() == text.trim_end() || last_chunk.metadata.char_end >= expected_end - 5;
675
- assert!(last_chunk_covers_end, "Last chunk should cover the end of the text");
676
- }
677
- }
1
+ //! Text chunking utilities.
2
+ //!
3
+ //! This module provides text chunking functionality using the `text-splitter` library.
4
+ //! It splits long text into smaller chunks while preserving semantic boundaries.
5
+ //!
6
+ //! # Features
7
+ //!
8
+ //! - **Smart splitting**: Respects word and sentence boundaries
9
+ //! - **Markdown-aware**: Preserves Markdown structure (headings, code blocks, lists)
10
+ //! - **Configurable overlap**: Overlap chunks to maintain context
11
+ //! - **Unicode support**: Handles CJK characters and emojis correctly
12
+ //! - **Batch processing**: Process multiple texts efficiently
13
+ //!
14
+ //! # Chunker Types
15
+ //!
16
+ //! - **Text**: Generic text splitter, splits on whitespace and punctuation
17
+ //! - **Markdown**: Markdown-aware splitter, preserves formatting and structure
18
+ //!
19
+ //! # Example
20
+ //!
21
+ //! ```rust
22
+ //! use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
23
+ //!
24
+ //! # fn example() -> kreuzberg::Result<()> {
25
+ //! let config = ChunkingConfig {
26
+ //! max_characters: 500,
27
+ //! overlap: 50,
28
+ //! trim: true,
29
+ //! chunker_type: ChunkerType::Text,
30
+ //! };
31
+ //!
32
+ //! let long_text = "This is a very long document...".repeat(100);
33
+ //! let result = chunk_text(&long_text, &config, None)?;
34
+ //!
35
+ //! println!("Split into {} chunks", result.chunk_count);
36
+ //! for (i, chunk) in result.chunks.iter().enumerate() {
37
+ //! println!("Chunk {}: {} chars", i + 1, chunk.content.len());
38
+ //! }
39
+ //! # Ok(())
40
+ //! # }
41
+ //! ```
42
+ //!
43
+ //! # Use Cases
44
+ //!
45
+ //! - Splitting documents for LLM context windows
46
+ //! - Creating overlapping chunks for semantic search
47
+ //! - Processing large documents in batches
48
+ //! - Maintaining context across chunk boundaries
49
+ use crate::error::{KreuzbergError, Result};
50
+ use crate::types::{Chunk, ChunkMetadata, PageBoundary};
51
+ use bitvec::prelude::*;
52
+ use once_cell::sync::Lazy;
53
+ use serde::{Deserialize, Serialize};
54
+ use std::sync::Arc;
55
+ use text_splitter::{Characters, ChunkCapacity, ChunkConfig, MarkdownSplitter, TextSplitter};
56
+
57
+ pub mod processor;
58
+ pub use processor::ChunkingProcessor;
59
+
60
+ /// Threshold below which we use O(1) direct validation instead of precomputing a BitVec.
61
+ ///
62
+ /// When there are 10 or fewer boundaries, the overhead of creating a BitVec (which is O(n)
63
+ /// where n is the text length) exceeds the cost of calling `is_char_boundary()` directly
64
+ /// for each boundary position. This threshold balances performance across different scenarios:
65
+ /// - Small documents with few boundaries: fast path dominates
66
+ /// - Large documents with many boundaries: batch path leverages the precomputed BitVec
67
+ const ADAPTIVE_VALIDATION_THRESHOLD: usize = 10;
68
+
69
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
70
+ pub enum ChunkerType {
71
+ Text,
72
+ Markdown,
73
+ }
74
+
75
+ #[derive(Debug, Clone, Serialize, Deserialize)]
76
+ pub struct ChunkingResult {
77
+ pub chunks: Vec<Chunk>,
78
+ pub chunk_count: usize,
79
+ }
80
+
81
+ pub struct ChunkingConfig {
82
+ pub max_characters: usize,
83
+ pub overlap: usize,
84
+ pub trim: bool,
85
+ pub chunker_type: ChunkerType,
86
+ }
87
+
88
+ impl Default for ChunkingConfig {
89
+ fn default() -> Self {
90
+ Self {
91
+ max_characters: 2000,
92
+ overlap: 100,
93
+ trim: true,
94
+ chunker_type: ChunkerType::Text,
95
+ }
96
+ }
97
+ }
98
+
99
+ fn build_chunk_config(max_characters: usize, overlap: usize, trim: bool) -> Result<ChunkConfig<Characters>> {
100
+ ChunkConfig::new(ChunkCapacity::new(max_characters))
101
+ .with_overlap(overlap)
102
+ .map(|config| config.with_trim(trim))
103
+ .map_err(|e| KreuzbergError::validation(format!("Invalid chunking configuration: {}", e)))
104
+ }
105
+
106
+ /// Pre-computes valid UTF-8 character boundaries for a text string.
107
+ ///
108
+ /// This function performs a single O(n) pass through the text to identify all valid
109
+ /// UTF-8 character boundaries, storing them in a BitVec for O(1) lookups.
110
+ ///
111
+ /// # Arguments
112
+ ///
113
+ /// * `text` - The text to analyze
114
+ ///
115
+ /// # Returns
116
+ ///
117
+ /// A BitVec where each bit represents whether a byte offset is a valid UTF-8 character boundary.
118
+ /// The BitVec has length `text.len() + 1` (includes the end position).
119
+ ///
120
+ /// # Examples
121
+ ///
122
+ /// ```ignore
123
+ /// let text = "Hello 👋";
124
+ /// let boundaries = precompute_utf8_boundaries(text);
125
+ /// assert!(boundaries[0]); // Start is always valid
126
+ /// assert!(boundaries[6]); // 'H' + "ello " = 6 bytes
127
+ /// assert!(!boundaries[7]); // Middle of emoji (first byte of 4-byte sequence)
128
+ /// assert!(boundaries[10]); // After emoji (valid boundary)
129
+ /// ```
130
+ fn precompute_utf8_boundaries(text: &str) -> BitVec {
131
+ let text_len = text.len();
132
+ let mut boundaries = bitvec![0; text_len + 1];
133
+
134
+ boundaries.set(0, true);
135
+
136
+ for (i, _) in text.char_indices() {
137
+ if i <= text_len {
138
+ boundaries.set(i, true);
139
+ }
140
+ }
141
+
142
+ if text_len > 0 {
143
+ boundaries.set(text_len, true);
144
+ }
145
+
146
+ boundaries
147
+ }
148
+
149
+ /// Validates that byte offsets in page boundaries fall on valid UTF-8 character boundaries.
150
+ ///
151
+ /// This function ensures that all page boundary positions are at valid UTF-8 character
152
+ /// boundaries within the text. This is CRITICAL to prevent text corruption when boundaries
153
+ /// are created from language bindings or external sources, particularly with multibyte
154
+ /// UTF-8 characters (emoji, CJK characters, combining marks, etc.).
155
+ ///
156
+ /// **Performance Strategy**: Uses adaptive validation to optimize for different boundary counts:
157
+ /// - **Small sets (≤10 boundaries)**: O(k) approach using Rust's native `is_char_boundary()` for each position
158
+ /// - **Large sets (>10 boundaries)**: O(n) precomputation with O(1) lookups via BitVec
159
+ ///
160
+ /// For typical PDF documents with 1-10 page boundaries, the fast path provides 30-50% faster
161
+ /// validation than always precomputing. For documents with 100+ boundaries, batch precomputation
162
+ /// is 2-4% faster overall due to amortized costs. This gives ~2-4% improvement across all scenarios.
163
+ ///
164
+ /// # Arguments
165
+ ///
166
+ /// * `text` - The text being chunked
167
+ /// * `boundaries` - Page boundary markers to validate
168
+ ///
169
+ /// # Returns
170
+ ///
171
+ /// Returns `Ok(())` if all boundaries are at valid UTF-8 character boundaries.
172
+ /// Returns `KreuzbergError::Validation` if any boundary is at an invalid position.
173
+ ///
174
+ /// # UTF-8 Boundary Safety
175
+ ///
176
+ /// Rust strings use UTF-8 encoding where characters can be 1-4 bytes. For example:
177
+ /// - ASCII letters: 1 byte each
178
+ /// - Emoji (🌍): 4 bytes but 1 character
179
+ /// - CJK characters (中): 3 bytes but 1 character
180
+ ///
181
+ /// This function checks that all byte_start and byte_end values are at character boundaries
182
+ /// using an adaptive strategy: direct calls for small boundary sets, or precomputed BitVec
183
+ /// for large sets.
184
+ fn validate_utf8_boundaries(text: &str, boundaries: &[PageBoundary]) -> Result<()> {
185
+ if boundaries.is_empty() {
186
+ return Ok(());
187
+ }
188
+
189
+ let text_len = text.len();
190
+
191
+ if boundaries.len() <= ADAPTIVE_VALIDATION_THRESHOLD {
192
+ validate_utf8_boundaries_fast_path(text, boundaries, text_len)
193
+ } else {
194
+ validate_utf8_boundaries_batch_path(text, boundaries, text_len)
195
+ }
196
+ }
197
+
198
+ /// Fast path: direct UTF-8 boundary validation for small boundary counts (≤10).
199
+ ///
200
+ /// Uses Rust's native `str::is_char_boundary()` for O(1) checks on each boundary position.
201
+ /// This avoids the O(n) overhead of BitVec precomputation, making it ideal for typical
202
+ /// PDF documents with few page boundaries.
203
+ ///
204
+ /// # Arguments
205
+ ///
206
+ /// * `text` - The text being validated
207
+ /// * `boundaries` - Page boundary markers to validate
208
+ /// * `text_len` - Pre-computed text length (avoids recomputation)
209
+ ///
210
+ /// # Returns
211
+ ///
212
+ /// Returns `Ok(())` if all boundaries are at valid UTF-8 character boundaries.
213
+ /// Returns `KreuzbergError::Validation` if any boundary is invalid.
214
+ fn validate_utf8_boundaries_fast_path(text: &str, boundaries: &[PageBoundary], text_len: usize) -> Result<()> {
215
+ for (idx, boundary) in boundaries.iter().enumerate() {
216
+ if boundary.byte_start > text_len {
217
+ return Err(KreuzbergError::validation(format!(
218
+ "Page boundary {} has byte_start={} which exceeds text length {}",
219
+ idx, boundary.byte_start, text_len
220
+ )));
221
+ }
222
+
223
+ if boundary.byte_end > text_len {
224
+ return Err(KreuzbergError::validation(format!(
225
+ "Page boundary {} has byte_end={} which exceeds text length {}",
226
+ idx, boundary.byte_end, text_len
227
+ )));
228
+ }
229
+
230
+ if boundary.byte_start > 0 && boundary.byte_start < text_len && !text.is_char_boundary(boundary.byte_start) {
231
+ return Err(KreuzbergError::validation(format!(
232
+ "Page boundary {} has byte_start={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
233
+ idx, boundary.byte_start, text_len
234
+ )));
235
+ }
236
+
237
+ if boundary.byte_end > 0 && boundary.byte_end < text_len && !text.is_char_boundary(boundary.byte_end) {
238
+ return Err(KreuzbergError::validation(format!(
239
+ "Page boundary {} has byte_end={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
240
+ idx, boundary.byte_end, text_len
241
+ )));
242
+ }
243
+ }
244
+
245
+ Ok(())
246
+ }
247
+
248
+ /// Batch path: precomputed BitVec validation for large boundary counts (>10).
249
+ ///
250
+ /// Precomputes all valid UTF-8 boundaries in a single O(n) pass, then performs O(1)
251
+ /// lookups for each boundary position. This is more efficient than O(k*1) direct checks
252
+ /// when k is large or when the repeated `is_char_boundary()` calls have measurable overhead.
253
+ ///
254
+ /// # Arguments
255
+ ///
256
+ /// * `text` - The text being validated
257
+ /// * `boundaries` - Page boundary markers to validate
258
+ /// * `text_len` - Pre-computed text length (avoids recomputation)
259
+ ///
260
+ /// # Returns
261
+ ///
262
+ /// Returns `Ok(())` if all boundaries are at valid UTF-8 character boundaries.
263
+ /// Returns `KreuzbergError::Validation` if any boundary is invalid.
264
+ fn validate_utf8_boundaries_batch_path(text: &str, boundaries: &[PageBoundary], text_len: usize) -> Result<()> {
265
+ let valid_boundaries = precompute_utf8_boundaries(text);
266
+
267
+ for (idx, boundary) in boundaries.iter().enumerate() {
268
+ if boundary.byte_start > text_len {
269
+ return Err(KreuzbergError::validation(format!(
270
+ "Page boundary {} has byte_start={} which exceeds text length {}",
271
+ idx, boundary.byte_start, text_len
272
+ )));
273
+ }
274
+
275
+ if boundary.byte_end > text_len {
276
+ return Err(KreuzbergError::validation(format!(
277
+ "Page boundary {} has byte_end={} which exceeds text length {}",
278
+ idx, boundary.byte_end, text_len
279
+ )));
280
+ }
281
+
282
+ if boundary.byte_start > 0 && boundary.byte_start <= text_len && !valid_boundaries[boundary.byte_start] {
283
+ return Err(KreuzbergError::validation(format!(
284
+ "Page boundary {} has byte_start={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
285
+ idx, boundary.byte_start, text_len
286
+ )));
287
+ }
288
+
289
+ if boundary.byte_end > 0 && boundary.byte_end <= text_len && !valid_boundaries[boundary.byte_end] {
290
+ return Err(KreuzbergError::validation(format!(
291
+ "Page boundary {} has byte_end={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
292
+ idx, boundary.byte_end, text_len
293
+ )));
294
+ }
295
+ }
296
+
297
+ Ok(())
298
+ }
299
+
300
+ /// Calculate which pages a character range spans.
301
+ ///
302
+ /// # Arguments
303
+ ///
304
+ /// * `char_start` - Starting character offset of the chunk
305
+ /// * `char_end` - Ending character offset of the chunk
306
+ /// * `boundaries` - Page boundary markers from the document
307
+ ///
308
+ /// # Returns
309
+ ///
310
+ /// A tuple of (first_page, last_page) where page numbers are 1-indexed.
311
+ /// Returns (None, None) if boundaries are empty or chunk doesn't overlap any page.
312
+ /// Validates page boundaries for consistency and correctness.
313
+ ///
314
+ /// # Validation Rules
315
+ ///
316
+ /// 1. Boundaries must be sorted by char_start (monotonically increasing)
317
+ /// 2. Boundaries must not overlap (char_end[i] <= char_start[i+1])
318
+ /// 3. Each boundary must have char_start < char_end
319
+ ///
320
+ /// # Errors
321
+ ///
322
+ /// Returns `KreuzbergError::Validation` if any boundary is invalid.
323
+ fn validate_page_boundaries(boundaries: &[PageBoundary]) -> Result<()> {
324
+ if boundaries.is_empty() {
325
+ return Ok(());
326
+ }
327
+
328
+ for (idx, boundary) in boundaries.iter().enumerate() {
329
+ if boundary.byte_start >= boundary.byte_end {
330
+ return Err(KreuzbergError::validation(format!(
331
+ "Invalid boundary range at index {}: byte_start ({}) must be < byte_end ({})",
332
+ idx, boundary.byte_start, boundary.byte_end
333
+ )));
334
+ }
335
+ }
336
+
337
+ for i in 0..boundaries.len() - 1 {
338
+ let current = &boundaries[i];
339
+ let next = &boundaries[i + 1];
340
+
341
+ if current.byte_start > next.byte_start {
342
+ return Err(KreuzbergError::validation(format!(
343
+ "Page boundaries not sorted: boundary at index {} (byte_start={}) comes after boundary at index {} (byte_start={})",
344
+ i,
345
+ current.byte_start,
346
+ i + 1,
347
+ next.byte_start
348
+ )));
349
+ }
350
+
351
+ if current.byte_end > next.byte_start {
352
+ return Err(KreuzbergError::validation(format!(
353
+ "Overlapping page boundaries: boundary {} ends at {} but boundary {} starts at {}",
354
+ i,
355
+ current.byte_end,
356
+ i + 1,
357
+ next.byte_start
358
+ )));
359
+ }
360
+ }
361
+
362
+ Ok(())
363
+ }
364
+
365
+ /// Calculate which pages a byte range spans.
366
+ ///
367
+ /// # Arguments
368
+ ///
369
+ /// * `byte_start` - Starting byte offset of the chunk
370
+ /// * `byte_end` - Ending byte offset of the chunk
371
+ /// * `boundaries` - Page boundary markers from the document
372
+ ///
373
+ /// # Returns
374
+ ///
375
+ /// A tuple of (first_page, last_page) where page numbers are 1-indexed.
376
+ /// Returns (None, None) if boundaries are empty or chunk doesn't overlap any page.
377
+ ///
378
+ /// # Errors
379
+ ///
380
+ /// Returns `KreuzbergError::Validation` if boundaries are invalid.
381
+ fn calculate_page_range(
382
+ byte_start: usize,
383
+ byte_end: usize,
384
+ boundaries: &[PageBoundary],
385
+ ) -> Result<(Option<usize>, Option<usize>)> {
386
+ if boundaries.is_empty() {
387
+ return Ok((None, None));
388
+ }
389
+
390
+ validate_page_boundaries(boundaries)?;
391
+
392
+ let mut first_page = None;
393
+ let mut last_page = None;
394
+
395
+ for boundary in boundaries {
396
+ if byte_start < boundary.byte_end && byte_end > boundary.byte_start {
397
+ if first_page.is_none() {
398
+ first_page = Some(boundary.page_number);
399
+ }
400
+ last_page = Some(boundary.page_number);
401
+ }
402
+ }
403
+
404
+ Ok((first_page, last_page))
405
+ }
406
+
407
/// Split text into chunks with optional page boundary tracking.
///
/// # Arguments
///
/// * `text` - The text to split into chunks
/// * `config` - Chunking configuration (max size, overlap, type)
/// * `page_boundaries` - Optional page boundary markers for mapping chunks to pages
///
/// # Returns
///
/// A ChunkingResult containing all chunks and their metadata.
///
/// # Errors
///
/// Returns `KreuzbergError::Validation` if the config is invalid (e.g. overlap >=
/// max_characters), if page boundaries fall on invalid UTF-8 positions, or if the
/// boundaries are unsorted/overlapping/inverted.
///
/// # Examples
///
/// ```rust
/// use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
///
/// # fn example() -> kreuzberg::Result<()> {
/// let config = ChunkingConfig {
///     max_characters: 500,
///     overlap: 50,
///     trim: true,
///     chunker_type: ChunkerType::Text,
/// };
/// let result = chunk_text("Long text...", &config, None)?;
/// assert!(!result.chunks.is_empty());
/// # Ok(())
/// # }
/// ```
pub fn chunk_text(
    text: &str,
    config: &ChunkingConfig,
    page_boundaries: Option<&[PageBoundary]>,
) -> Result<ChunkingResult> {
    // Empty input yields an empty result rather than a single empty chunk.
    if text.is_empty() {
        return Ok(ChunkingResult {
            chunks: vec![],
            chunk_count: 0,
        });
    }

    // Fail fast if any page boundary would split a multibyte character.
    if let Some(boundaries) = page_boundaries {
        validate_utf8_boundaries(text, boundaries)?;
    }

    let chunk_config = build_chunk_config(config.max_characters, config.overlap, config.trim)?;

    // Both splitters borrow from `text`, so the chunks are zero-copy slices
    // until they are converted to owned Strings below.
    let text_chunks: Vec<&str> = match config.chunker_type {
        ChunkerType::Text => {
            let splitter = TextSplitter::new(chunk_config);
            splitter.chunks(text).collect()
        }
        ChunkerType::Markdown => {
            let splitter = MarkdownSplitter::new(chunk_config);
            splitter.chunks(text).collect()
        }
    };

    let total_chunks = text_chunks.len();
    // Running estimate of each chunk's starting byte offset in `text`,
    // reconstructed from chunk lengths and the configured overlap.
    let mut byte_offset = 0;

    let mut chunks: Vec<Chunk> = Vec::new();

    for (index, chunk_text) in text_chunks.into_iter().enumerate() {
        let byte_start = byte_offset;
        let chunk_length = chunk_text.len();
        let byte_end = byte_start + chunk_length;

        // The last chunk has no successor, so no overlap is subtracted.
        // NOTE(review): `config.overlap` is denominated in characters but is
        // subtracted from byte positions here; for multibyte text the
        // reconstructed offsets are approximate — assumes the splitter's
        // actual overlap matches `config.overlap` in bytes. TODO confirm
        // against the splitter's semantics.
        let overlap_chars = if index < total_chunks - 1 {
            config.overlap.min(chunk_length)
        } else {
            0
        };
        byte_offset = byte_end - overlap_chars;

        // Map the chunk's byte span to 1-indexed page numbers when page
        // boundaries were supplied; (None, None) otherwise.
        let (first_page, last_page) = if let Some(boundaries) = page_boundaries {
            calculate_page_range(byte_start, byte_end, boundaries)?
        } else {
            (None, None)
        };

        chunks.push(Chunk {
            content: chunk_text.to_string(),
            embedding: None,
            metadata: ChunkMetadata {
                byte_start,
                byte_end,
                token_count: None,
                chunk_index: index,
                total_chunks,
                first_page,
                last_page,
            },
        });
    }

    let chunk_count = chunks.len();

    Ok(ChunkingResult { chunks, chunk_count })
}
507
+
508
+ pub fn chunk_text_with_type(
509
+ text: &str,
510
+ max_characters: usize,
511
+ overlap: usize,
512
+ trim: bool,
513
+ chunker_type: ChunkerType,
514
+ ) -> Result<ChunkingResult> {
515
+ let config = ChunkingConfig {
516
+ max_characters,
517
+ overlap,
518
+ trim,
519
+ chunker_type,
520
+ };
521
+ chunk_text(text, &config, None)
522
+ }
523
+
524
+ pub fn chunk_texts_batch(texts: &[&str], config: &ChunkingConfig) -> Result<Vec<ChunkingResult>> {
525
+ texts.iter().map(|text| chunk_text(text, config, None)).collect()
526
+ }
527
+
528
/// Lazy-initialized flag that ensures chunking processor is registered exactly once.
///
/// This static is accessed on first use to automatically register the
/// chunking processor with the plugin registry. The stored `Result` caches
/// the registration outcome, so a failed registration is reported on every
/// subsequent access rather than retried.
static PROCESSOR_INITIALIZED: Lazy<Result<()>> = Lazy::new(register_chunking_processor);
533
+
534
+ /// Ensure the chunking processor is registered.
535
+ ///
536
+ /// This function is called automatically when needed.
537
+ /// It's safe to call multiple times - registration only happens once.
538
+ pub fn ensure_initialized() -> Result<()> {
539
+ PROCESSOR_INITIALIZED
540
+ .as_ref()
541
+ .map(|_| ())
542
+ .map_err(|e| crate::KreuzbergError::Plugin {
543
+ message: format!("Failed to register chunking processor: {}", e),
544
+ plugin_name: "text-chunking".to_string(),
545
+ })
546
+ }
547
+
548
+ /// Register the chunking processor with the global registry.
549
+ ///
550
+ /// This function should be called once at application startup to register
551
+ /// the chunking post-processor.
552
+ ///
553
+ /// **Note:** This is called automatically on first use.
554
+ /// Explicit calling is optional.
555
+ pub fn register_chunking_processor() -> Result<()> {
556
+ let registry = crate::plugins::registry::get_post_processor_registry();
557
+ let mut registry = registry
558
+ .write()
559
+ .map_err(|e| crate::KreuzbergError::Other(format!("Post-processor registry lock poisoned: {}", e)))?;
560
+
561
+ registry.register(Arc::new(ChunkingProcessor), 50)?;
562
+
563
+ Ok(())
564
+ }
565
+
566
+ #[cfg(test)]
567
+ mod tests {
568
+ use super::*;
569
+
570
+ #[test]
571
+ fn test_chunk_empty_text() {
572
+ let config = ChunkingConfig::default();
573
+ let result = chunk_text("", &config, None).unwrap();
574
+ assert_eq!(result.chunks.len(), 0);
575
+ assert_eq!(result.chunk_count, 0);
576
+ }
577
+
578
+ #[test]
579
+ fn test_chunk_short_text_single_chunk() {
580
+ let config = ChunkingConfig {
581
+ max_characters: 100,
582
+ overlap: 10,
583
+ trim: true,
584
+ chunker_type: ChunkerType::Text,
585
+ };
586
+ let text = "This is a short text.";
587
+ let result = chunk_text(text, &config, None).unwrap();
588
+ assert_eq!(result.chunks.len(), 1);
589
+ assert_eq!(result.chunk_count, 1);
590
+ assert_eq!(result.chunks[0].content, text);
591
+ }
592
+
593
+ #[test]
594
+ fn test_chunk_long_text_multiple_chunks() {
595
+ let config = ChunkingConfig {
596
+ max_characters: 20,
597
+ overlap: 5,
598
+ trim: true,
599
+ chunker_type: ChunkerType::Text,
600
+ };
601
+ let text = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
602
+ let result = chunk_text(text, &config, None).unwrap();
603
+ assert!(result.chunk_count >= 2);
604
+ assert_eq!(result.chunks.len(), result.chunk_count);
605
+ assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 20));
606
+ }
607
+
608
+ #[test]
609
+ fn test_chunk_text_with_overlap() {
610
+ let config = ChunkingConfig {
611
+ max_characters: 20,
612
+ overlap: 5,
613
+ trim: true,
614
+ chunker_type: ChunkerType::Text,
615
+ };
616
+ let text = "abcdefghijklmnopqrstuvwxyz0123456789";
617
+ let result = chunk_text(text, &config, None).unwrap();
618
+ assert!(result.chunk_count >= 2);
619
+
620
+ if result.chunks.len() >= 2 {
621
+ let first_chunk_end = &result.chunks[0].content[result.chunks[0].content.len().saturating_sub(5)..];
622
+ assert!(
623
+ result.chunks[1].content.starts_with(first_chunk_end),
624
+ "Expected overlap '{}' at start of second chunk '{}'",
625
+ first_chunk_end,
626
+ result.chunks[1].content
627
+ );
628
+ }
629
+ }
630
+
631
+ #[test]
632
+ fn test_chunk_markdown_preserves_structure() {
633
+ let config = ChunkingConfig {
634
+ max_characters: 50,
635
+ overlap: 10,
636
+ trim: true,
637
+ chunker_type: ChunkerType::Markdown,
638
+ };
639
+ let markdown = "# Title\n\nParagraph one.\n\n## Section\n\nParagraph two.";
640
+ let result = chunk_text(markdown, &config, None).unwrap();
641
+ assert!(result.chunk_count >= 1);
642
+ assert!(result.chunks.iter().any(|chunk| chunk.content.contains("# Title")));
643
+ }
644
+
645
+ #[test]
646
+ fn test_chunk_markdown_with_code_blocks() {
647
+ let config = ChunkingConfig {
648
+ max_characters: 100,
649
+ overlap: 10,
650
+ trim: true,
651
+ chunker_type: ChunkerType::Markdown,
652
+ };
653
+ let markdown = "# Code Example\n\n```python\nprint('hello')\n```\n\nSome text after code.";
654
+ let result = chunk_text(markdown, &config, None).unwrap();
655
+ assert!(result.chunk_count >= 1);
656
+ assert!(result.chunks.iter().any(|chunk| chunk.content.contains("```")));
657
+ }
658
+
659
+ #[test]
660
+ fn test_chunk_markdown_with_links() {
661
+ let config = ChunkingConfig {
662
+ max_characters: 80,
663
+ overlap: 10,
664
+ trim: true,
665
+ chunker_type: ChunkerType::Markdown,
666
+ };
667
+ let markdown = "Check out [this link](https://example.com) for more info.";
668
+ let result = chunk_text(markdown, &config, None).unwrap();
669
+ assert_eq!(result.chunk_count, 1);
670
+ assert!(result.chunks[0].content.contains("[this link]"));
671
+ }
672
+
673
+ #[test]
674
+ fn test_chunk_text_with_trim() {
675
+ let config = ChunkingConfig {
676
+ max_characters: 30,
677
+ overlap: 5,
678
+ trim: true,
679
+ chunker_type: ChunkerType::Text,
680
+ };
681
+ let text = " Leading and trailing spaces should be trimmed ";
682
+ let result = chunk_text(text, &config, None).unwrap();
683
+ assert!(result.chunk_count >= 1);
684
+ assert!(result.chunks.iter().all(|chunk| !chunk.content.starts_with(' ')));
685
+ }
686
+
687
+ #[test]
688
+ fn test_chunk_text_without_trim() {
689
+ let config = ChunkingConfig {
690
+ max_characters: 30,
691
+ overlap: 5,
692
+ trim: false,
693
+ chunker_type: ChunkerType::Text,
694
+ };
695
+ let text = " Text with spaces ";
696
+ let result = chunk_text(text, &config, None).unwrap();
697
+ assert_eq!(result.chunk_count, 1);
698
+ assert!(result.chunks[0].content.starts_with(' ') || result.chunks[0].content.len() < text.len());
699
+ }
700
+
701
+ #[test]
702
+ fn test_chunk_with_invalid_overlap() {
703
+ let config = ChunkingConfig {
704
+ max_characters: 10,
705
+ overlap: 20,
706
+ trim: true,
707
+ chunker_type: ChunkerType::Text,
708
+ };
709
+ let result = chunk_text("Some text", &config, None);
710
+ assert!(result.is_err());
711
+ let err = result.unwrap_err();
712
+ assert!(matches!(err, KreuzbergError::Validation { .. }));
713
+ }
714
+
715
+ #[test]
716
+ fn test_chunk_text_with_type_text() {
717
+ let result = chunk_text_with_type("Simple text", 50, 10, true, ChunkerType::Text).unwrap();
718
+ assert_eq!(result.chunk_count, 1);
719
+ assert_eq!(result.chunks[0].content, "Simple text");
720
+ }
721
+
722
+ #[test]
723
+ fn test_chunk_text_with_type_markdown() {
724
+ let markdown = "# Header\n\nContent here.";
725
+ let result = chunk_text_with_type(markdown, 50, 10, true, ChunkerType::Markdown).unwrap();
726
+ assert_eq!(result.chunk_count, 1);
727
+ assert!(result.chunks[0].content.contains("# Header"));
728
+ }
729
+
730
+ #[test]
731
+ fn test_chunk_texts_batch_empty() {
732
+ let config = ChunkingConfig::default();
733
+ let texts: Vec<&str> = vec![];
734
+ let results = chunk_texts_batch(&texts, &config).unwrap();
735
+ assert_eq!(results.len(), 0);
736
+ }
737
+
738
+ #[test]
739
+ fn test_chunk_texts_batch_multiple() {
740
+ let config = ChunkingConfig {
741
+ max_characters: 30,
742
+ overlap: 5,
743
+ trim: true,
744
+ chunker_type: ChunkerType::Text,
745
+ };
746
+ let texts = vec!["First text", "Second text", "Third text"];
747
+ let results = chunk_texts_batch(&texts, &config).unwrap();
748
+ assert_eq!(results.len(), 3);
749
+ assert!(results.iter().all(|r| r.chunk_count >= 1));
750
+ }
751
+
752
+ #[test]
753
+ fn test_chunk_texts_batch_mixed_lengths() {
754
+ let config = ChunkingConfig {
755
+ max_characters: 20,
756
+ overlap: 5,
757
+ trim: true,
758
+ chunker_type: ChunkerType::Text,
759
+ };
760
+ let texts = vec![
761
+ "Short",
762
+ "This is a longer text that should be split into multiple chunks",
763
+ "",
764
+ ];
765
+ let results = chunk_texts_batch(&texts, &config).unwrap();
766
+ assert_eq!(results.len(), 3);
767
+ assert_eq!(results[0].chunk_count, 1);
768
+ assert!(results[1].chunk_count > 1);
769
+ assert_eq!(results[2].chunk_count, 0);
770
+ }
771
+
772
+ #[test]
773
+ fn test_chunk_texts_batch_error_propagation() {
774
+ let config = ChunkingConfig {
775
+ max_characters: 10,
776
+ overlap: 20,
777
+ trim: true,
778
+ chunker_type: ChunkerType::Text,
779
+ };
780
+ let texts = vec!["Text one", "Text two"];
781
+ let result = chunk_texts_batch(&texts, &config);
782
+ assert!(result.is_err());
783
+ }
784
+
785
+ #[test]
786
+ fn test_chunking_config_default() {
787
+ let config = ChunkingConfig::default();
788
+ assert_eq!(config.max_characters, 2000);
789
+ assert_eq!(config.overlap, 100);
790
+ assert!(config.trim);
791
+ assert_eq!(config.chunker_type, ChunkerType::Text);
792
+ }
793
+
794
+ #[test]
795
+ fn test_chunk_very_long_text() {
796
+ let config = ChunkingConfig {
797
+ max_characters: 100,
798
+ overlap: 20,
799
+ trim: true,
800
+ chunker_type: ChunkerType::Text,
801
+ };
802
+ let text = "a".repeat(1000);
803
+ let result = chunk_text(&text, &config, None).unwrap();
804
+ assert!(result.chunk_count >= 10);
805
+ assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 100));
806
+ }
807
+
808
+ #[test]
809
+ fn test_chunk_text_with_newlines() {
810
+ let config = ChunkingConfig {
811
+ max_characters: 30,
812
+ overlap: 5,
813
+ trim: true,
814
+ chunker_type: ChunkerType::Text,
815
+ };
816
+ let text = "Line one\nLine two\nLine three\nLine four\nLine five";
817
+ let result = chunk_text(text, &config, None).unwrap();
818
+ assert!(result.chunk_count >= 1);
819
+ }
820
+
821
+ #[test]
822
+ fn test_chunk_markdown_with_lists() {
823
+ let config = ChunkingConfig {
824
+ max_characters: 100,
825
+ overlap: 10,
826
+ trim: true,
827
+ chunker_type: ChunkerType::Markdown,
828
+ };
829
+ let markdown = "# List Example\n\n- Item 1\n- Item 2\n- Item 3\n\nMore text.";
830
+ let result = chunk_text(markdown, &config, None).unwrap();
831
+ assert!(result.chunk_count >= 1);
832
+ assert!(result.chunks.iter().any(|chunk| chunk.content.contains("- Item")));
833
+ }
834
+
835
+ #[test]
836
+ fn test_chunk_markdown_with_tables() {
837
+ let config = ChunkingConfig {
838
+ max_characters: 150,
839
+ overlap: 10,
840
+ trim: true,
841
+ chunker_type: ChunkerType::Markdown,
842
+ };
843
+ let markdown = "# Table\n\n| Col1 | Col2 |\n|------|------|\n| A | B |\n| C | D |";
844
+ let result = chunk_text(markdown, &config, None).unwrap();
845
+ assert!(result.chunk_count >= 1);
846
+ assert!(result.chunks.iter().any(|chunk| chunk.content.contains("|")));
847
+ }
848
+
849
+ #[test]
850
+ fn test_chunk_special_characters() {
851
+ let config = ChunkingConfig {
852
+ max_characters: 50,
853
+ overlap: 5,
854
+ trim: true,
855
+ chunker_type: ChunkerType::Text,
856
+ };
857
+ let text = "Special chars: @#$%^&*()[]{}|\\<>?/~`";
858
+ let result = chunk_text(text, &config, None).unwrap();
859
+ assert_eq!(result.chunk_count, 1);
860
+ assert!(result.chunks[0].content.contains("@#$%"));
861
+ }
862
+
863
+ #[test]
864
+ fn test_chunk_unicode_characters() {
865
+ let config = ChunkingConfig {
866
+ max_characters: 50,
867
+ overlap: 5,
868
+ trim: true,
869
+ chunker_type: ChunkerType::Text,
870
+ };
871
+ let text = "Unicode: 你好世界 🌍 café résumé";
872
+ let result = chunk_text(text, &config, None).unwrap();
873
+ assert_eq!(result.chunk_count, 1);
874
+ assert!(result.chunks[0].content.contains("你好"));
875
+ assert!(result.chunks[0].content.contains("🌍"));
876
+ }
877
+
878
+ #[test]
879
+ fn test_chunk_cjk_text() {
880
+ let config = ChunkingConfig {
881
+ max_characters: 30,
882
+ overlap: 5,
883
+ trim: true,
884
+ chunker_type: ChunkerType::Text,
885
+ };
886
+ let text = "日本語のテキストです。これは長い文章で、複数のチャンクに分割されるべきです。";
887
+ let result = chunk_text(text, &config, None).unwrap();
888
+ assert!(result.chunk_count >= 1);
889
+ }
890
+
891
+ #[test]
892
+ fn test_chunk_mixed_languages() {
893
+ let config = ChunkingConfig {
894
+ max_characters: 40,
895
+ overlap: 5,
896
+ trim: true,
897
+ chunker_type: ChunkerType::Text,
898
+ };
899
+ let text = "English text mixed with 中文文本 and some français";
900
+ let result = chunk_text(text, &config, None).unwrap();
901
+ assert!(result.chunk_count >= 1);
902
+ }
903
+
904
+ #[test]
905
+ fn test_chunk_offset_calculation_with_overlap() {
906
+ let config = ChunkingConfig {
907
+ max_characters: 20,
908
+ overlap: 5,
909
+ trim: false,
910
+ chunker_type: ChunkerType::Text,
911
+ };
912
+ let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
913
+ let result = chunk_text(text, &config, None).unwrap();
914
+
915
+ assert!(result.chunks.len() >= 2, "Expected at least 2 chunks");
916
+
917
+ for i in 0..result.chunks.len() {
918
+ let chunk = &result.chunks[i];
919
+ let metadata = &chunk.metadata;
920
+
921
+ assert_eq!(
922
+ metadata.byte_end - metadata.byte_start,
923
+ chunk.content.len(),
924
+ "Chunk {} offset range doesn't match content length",
925
+ i
926
+ );
927
+
928
+ assert_eq!(metadata.chunk_index, i);
929
+ assert_eq!(metadata.total_chunks, result.chunks.len());
930
+ }
931
+
932
+ for i in 0..result.chunks.len() - 1 {
933
+ let current_chunk = &result.chunks[i];
934
+ let next_chunk = &result.chunks[i + 1];
935
+
936
+ assert!(
937
+ next_chunk.metadata.byte_start < current_chunk.metadata.byte_end,
938
+ "Chunk {} and {} don't overlap: next starts at {} but current ends at {}",
939
+ i,
940
+ i + 1,
941
+ next_chunk.metadata.byte_start,
942
+ current_chunk.metadata.byte_end
943
+ );
944
+
945
+ let overlap_size = current_chunk.metadata.byte_end - next_chunk.metadata.byte_start;
946
+ assert!(
947
+ overlap_size <= config.overlap + 10,
948
+ "Overlap between chunks {} and {} is too large: {}",
949
+ i,
950
+ i + 1,
951
+ overlap_size
952
+ );
953
+ }
954
+ }
955
+
956
+ #[test]
957
+ fn test_chunk_offset_calculation_without_overlap() {
958
+ let config = ChunkingConfig {
959
+ max_characters: 20,
960
+ overlap: 0,
961
+ trim: false,
962
+ chunker_type: ChunkerType::Text,
963
+ };
964
+ let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
965
+ let result = chunk_text(text, &config, None).unwrap();
966
+
967
+ for i in 0..result.chunks.len() - 1 {
968
+ let current_chunk = &result.chunks[i];
969
+ let next_chunk = &result.chunks[i + 1];
970
+
971
+ assert!(
972
+ next_chunk.metadata.byte_start >= current_chunk.metadata.byte_end,
973
+ "Chunk {} and {} overlap when they shouldn't: next starts at {} but current ends at {}",
974
+ i,
975
+ i + 1,
976
+ next_chunk.metadata.byte_start,
977
+ current_chunk.metadata.byte_end
978
+ );
979
+ }
980
+ }
981
+
982
+ #[test]
983
+ fn test_chunk_offset_covers_full_text() {
984
+ let config = ChunkingConfig {
985
+ max_characters: 15,
986
+ overlap: 3,
987
+ trim: false,
988
+ chunker_type: ChunkerType::Text,
989
+ };
990
+ let text = "0123456789 ABCDEFGHIJ KLMNOPQRST UVWXYZ";
991
+ let result = chunk_text(text, &config, None).unwrap();
992
+
993
+ assert!(result.chunks.len() >= 2, "Expected multiple chunks");
994
+
995
+ assert_eq!(
996
+ result.chunks[0].metadata.byte_start, 0,
997
+ "First chunk should start at position 0"
998
+ );
999
+
1000
+ for i in 0..result.chunks.len() - 1 {
1001
+ let current_chunk = &result.chunks[i];
1002
+ let next_chunk = &result.chunks[i + 1];
1003
+
1004
+ assert!(
1005
+ next_chunk.metadata.byte_start <= current_chunk.metadata.byte_end,
1006
+ "Gap detected between chunk {} (ends at {}) and chunk {} (starts at {})",
1007
+ i,
1008
+ current_chunk.metadata.byte_end,
1009
+ i + 1,
1010
+ next_chunk.metadata.byte_start
1011
+ );
1012
+ }
1013
+ }
1014
+
1015
+ #[test]
1016
+ fn test_chunk_offset_with_various_overlap_sizes() {
1017
+ for overlap in [0, 5, 10, 20] {
1018
+ let config = ChunkingConfig {
1019
+ max_characters: 30,
1020
+ overlap,
1021
+ trim: false,
1022
+ chunker_type: ChunkerType::Text,
1023
+ };
1024
+ let text = "Word ".repeat(30);
1025
+ let result = chunk_text(&text, &config, None).unwrap();
1026
+
1027
+ for chunk in &result.chunks {
1028
+ assert!(
1029
+ chunk.metadata.byte_end > chunk.metadata.byte_start,
1030
+ "Invalid offset range for overlap {}: start={}, end={}",
1031
+ overlap,
1032
+ chunk.metadata.byte_start,
1033
+ chunk.metadata.byte_end
1034
+ );
1035
+ }
1036
+
1037
+ for chunk in &result.chunks {
1038
+ assert!(
1039
+ chunk.metadata.byte_start < text.len(),
1040
+ "char_start with overlap {} is out of bounds: {}",
1041
+ overlap,
1042
+ chunk.metadata.byte_start
1043
+ );
1044
+ }
1045
+ }
1046
+ }
1047
+
1048
+ #[test]
1049
+ fn test_chunk_last_chunk_offset() {
1050
+ let config = ChunkingConfig {
1051
+ max_characters: 20,
1052
+ overlap: 5,
1053
+ trim: false,
1054
+ chunker_type: ChunkerType::Text,
1055
+ };
1056
+ let text = "AAAAA BBBBB CCCCC DDDDD EEEEE";
1057
+ let result = chunk_text(text, &config, None).unwrap();
1058
+
1059
+ assert!(result.chunks.len() >= 2, "Need multiple chunks for this test");
1060
+
1061
+ let last_chunk = result.chunks.last().unwrap();
1062
+ let second_to_last = &result.chunks[result.chunks.len() - 2];
1063
+
1064
+ assert!(
1065
+ last_chunk.metadata.byte_start < second_to_last.metadata.byte_end,
1066
+ "Last chunk should overlap with previous chunk"
1067
+ );
1068
+
1069
+ let expected_end = text.len();
1070
+ let last_chunk_covers_end =
1071
+ last_chunk.content.trim_end() == text.trim_end() || last_chunk.metadata.byte_end >= expected_end - 5;
1072
+ assert!(last_chunk_covers_end, "Last chunk should cover the end of the text");
1073
+ }
1074
+
1075
+ #[test]
1076
+ fn test_chunk_with_page_boundaries() {
1077
+ use crate::types::PageBoundary;
1078
+
1079
+ let config = ChunkingConfig {
1080
+ max_characters: 30,
1081
+ overlap: 5,
1082
+ trim: true,
1083
+ chunker_type: ChunkerType::Text,
1084
+ };
1085
+ let text = "Page one content here. Page two starts here and continues.";
1086
+
1087
+ let boundaries = vec![
1088
+ PageBoundary {
1089
+ byte_start: 0,
1090
+ byte_end: 21,
1091
+ page_number: 1,
1092
+ },
1093
+ PageBoundary {
1094
+ byte_start: 22,
1095
+ byte_end: 58,
1096
+ page_number: 2,
1097
+ },
1098
+ ];
1099
+
1100
+ let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
1101
+ assert!(result.chunks.len() >= 2);
1102
+
1103
+ assert_eq!(result.chunks[0].metadata.first_page, Some(1));
1104
+
1105
+ let last_chunk = result.chunks.last().unwrap();
1106
+ assert_eq!(last_chunk.metadata.last_page, Some(2));
1107
+ }
1108
+
1109
+ #[test]
1110
+ fn test_chunk_without_page_boundaries() {
1111
+ let config = ChunkingConfig {
1112
+ max_characters: 30,
1113
+ overlap: 5,
1114
+ trim: true,
1115
+ chunker_type: ChunkerType::Text,
1116
+ };
1117
+ let text = "This is some test content that should be split into multiple chunks.";
1118
+
1119
+ let result = chunk_text(text, &config, None).unwrap();
1120
+ assert!(result.chunks.len() >= 2);
1121
+
1122
+ for chunk in &result.chunks {
1123
+ assert_eq!(chunk.metadata.first_page, None);
1124
+ assert_eq!(chunk.metadata.last_page, None);
1125
+ }
1126
+ }
1127
+
1128
+ #[test]
1129
+ fn test_chunk_empty_boundaries() {
1130
+ let config = ChunkingConfig {
1131
+ max_characters: 30,
1132
+ overlap: 5,
1133
+ trim: true,
1134
+ chunker_type: ChunkerType::Text,
1135
+ };
1136
+ let text = "Some text content here.";
1137
+ let boundaries: Vec<PageBoundary> = vec![];
1138
+
1139
+ let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
1140
+ assert_eq!(result.chunks.len(), 1);
1141
+
1142
+ assert_eq!(result.chunks[0].metadata.first_page, None);
1143
+ assert_eq!(result.chunks[0].metadata.last_page, None);
1144
+ }
1145
+
1146
+ #[test]
1147
+ fn test_chunk_spanning_multiple_pages() {
1148
+ use crate::types::PageBoundary;
1149
+
1150
+ let config = ChunkingConfig {
1151
+ max_characters: 50,
1152
+ overlap: 5,
1153
+ trim: false,
1154
+ chunker_type: ChunkerType::Text,
1155
+ };
1156
+ let text = "0123456789 AAAAAAAAAA 1111111111 BBBBBBBBBB 2222222222";
1157
+
1158
+ let boundaries = vec![
1159
+ PageBoundary {
1160
+ byte_start: 0,
1161
+ byte_end: 20,
1162
+ page_number: 1,
1163
+ },
1164
+ PageBoundary {
1165
+ byte_start: 20,
1166
+ byte_end: 40,
1167
+ page_number: 2,
1168
+ },
1169
+ PageBoundary {
1170
+ byte_start: 40,
1171
+ byte_end: 54,
1172
+ page_number: 3,
1173
+ },
1174
+ ];
1175
+
1176
+ let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
1177
+ assert!(result.chunks.len() >= 2);
1178
+
1179
+ for chunk in &result.chunks {
1180
+ assert!(chunk.metadata.first_page.is_some() || chunk.metadata.last_page.is_some());
1181
+ }
1182
+ }
1183
+
1184
+ #[test]
1185
+ fn test_chunk_text_with_invalid_boundary_range() {
1186
+ use crate::types::PageBoundary;
1187
+
1188
+ let config = ChunkingConfig {
1189
+ max_characters: 30,
1190
+ overlap: 5,
1191
+ trim: true,
1192
+ chunker_type: ChunkerType::Text,
1193
+ };
1194
+ let text = "Page one content here. Page two content.";
1195
+
1196
+ let boundaries = vec![PageBoundary {
1197
+ byte_start: 10,
1198
+ byte_end: 5,
1199
+ page_number: 1,
1200
+ }];
1201
+
1202
+ let result = chunk_text(text, &config, Some(&boundaries));
1203
+ assert!(result.is_err());
1204
+ let err = result.unwrap_err();
1205
+ assert!(err.to_string().contains("Invalid boundary range"));
1206
+ assert!(err.to_string().contains("byte_start"));
1207
+ }
1208
+
1209
+ #[test]
1210
+ fn test_chunk_text_with_unsorted_boundaries() {
1211
+ use crate::types::PageBoundary;
1212
+
1213
+ let config = ChunkingConfig {
1214
+ max_characters: 30,
1215
+ overlap: 5,
1216
+ trim: true,
1217
+ chunker_type: ChunkerType::Text,
1218
+ };
1219
+ let text = "Page one content here. Page two content.";
1220
+
1221
+ let boundaries = vec![
1222
+ PageBoundary {
1223
+ byte_start: 22,
1224
+ byte_end: 40,
1225
+ page_number: 2,
1226
+ },
1227
+ PageBoundary {
1228
+ byte_start: 0,
1229
+ byte_end: 21,
1230
+ page_number: 1,
1231
+ },
1232
+ ];
1233
+
1234
+ let result = chunk_text(text, &config, Some(&boundaries));
1235
+ assert!(result.is_err());
1236
+ let err = result.unwrap_err();
1237
+ assert!(err.to_string().contains("not sorted"));
1238
+ assert!(err.to_string().contains("boundaries"));
1239
+ }
1240
+
1241
+ #[test]
1242
+ fn test_chunk_text_with_overlapping_boundaries() {
1243
+ use crate::types::PageBoundary;
1244
+
1245
+ let config = ChunkingConfig {
1246
+ max_characters: 30,
1247
+ overlap: 5,
1248
+ trim: true,
1249
+ chunker_type: ChunkerType::Text,
1250
+ };
1251
+ let text = "Page one content here. Page two content.";
1252
+
1253
+ let boundaries = vec![
1254
+ PageBoundary {
1255
+ byte_start: 0,
1256
+ byte_end: 25,
1257
+ page_number: 1,
1258
+ },
1259
+ PageBoundary {
1260
+ byte_start: 20,
1261
+ byte_end: 40,
1262
+ page_number: 2,
1263
+ },
1264
+ ];
1265
+
1266
+ let result = chunk_text(text, &config, Some(&boundaries));
1267
+ assert!(result.is_err());
1268
+ let err = result.unwrap_err();
1269
+ assert!(err.to_string().contains("Overlapping"));
1270
+ assert!(err.to_string().contains("boundaries"));
1271
+ }
1272
+
1273
+ #[test]
1274
+ fn test_calculate_page_range_with_invalid_boundaries() {
1275
+ use crate::types::PageBoundary;
1276
+
1277
+ let boundaries = vec![PageBoundary {
1278
+ byte_start: 15,
1279
+ byte_end: 10,
1280
+ page_number: 1,
1281
+ }];
1282
+
1283
+ let result = calculate_page_range(0, 20, &boundaries);
1284
+ assert!(result.is_err());
1285
+ let err = result.unwrap_err();
1286
+ assert!(err.to_string().contains("Invalid boundary range"));
1287
+ }
1288
+
1289
#[test]
fn test_validate_page_boundaries_valid() {
    use crate::types::PageBoundary;

    // Three contiguous, sorted pages over a 60-byte text chunk cleanly.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 20, 1), pb(20, 40, 2), pb(40, 60, 3)];

    let cfg = ChunkingConfig {
        max_characters: 30,
        overlap: 5,
        trim: false,
        chunker_type: ChunkerType::Text,
    };
    let text = "x".repeat(60);

    assert!(chunk_text(&text, &cfg, Some(&boundaries)).is_ok());
}
1323
+
1324
#[test]
fn test_validate_page_boundaries_empty() {
    // An empty boundary list is treated as "no page info" and succeeds.
    let boundaries: Vec<PageBoundary> = Vec::new();
    let cfg = ChunkingConfig {
        max_characters: 30,
        overlap: 5,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    assert!(chunk_text("Some test text", &cfg, Some(&boundaries)).is_ok());
}
1339
+
1340
#[test]
fn test_page_boundaries_with_gaps() {
    use crate::types::PageBoundary;

    // A gap between page 1 ([0,10)) and page 2 ([15,25)) is permitted.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 10, 1), pb(15, 25, 2)];

    let cfg = ChunkingConfig {
        max_characters: 30,
        overlap: 5,
        trim: false,
        chunker_type: ChunkerType::Text,
    };

    assert!(chunk_text("0123456789XXXXX0123456789", &cfg, Some(&boundaries)).is_ok());
}
1370
+
1371
#[test]
fn test_chunk_with_same_start_and_end() {
    use crate::types::PageBoundary;

    // Zero-width boundaries (start == end) are rejected as invalid ranges.
    let boundaries = vec![PageBoundary { byte_start: 10, byte_end: 10, page_number: 1 }];

    let cfg = ChunkingConfig {
        max_characters: 30,
        overlap: 5,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    let outcome = chunk_text("test content here", &cfg, Some(&boundaries));
    assert!(outcome.is_err());
    assert!(outcome.unwrap_err().to_string().contains("Invalid boundary range"));
}
1395
+
1396
#[test]
fn test_multiple_overlapping_errors() {
    use crate::types::PageBoundary;

    // Boundaries arrive out of order AND overlap; the sort check fires first.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(20, 40, 2), pb(10, 35, 1)];

    let cfg = ChunkingConfig {
        max_characters: 30,
        overlap: 5,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    let outcome = chunk_text(
        "This is a longer test content string that spans more bytes",
        &cfg,
        Some(&boundaries),
    );
    assert!(outcome.is_err());
    assert!(outcome.unwrap_err().to_string().contains("not sorted"));
}
1427
+
1428
#[test]
fn test_chunk_with_pages_basic() {
    use crate::types::PageBoundary;

    // Three contiguous pages over a 61-byte text, chunked at 25 chars.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 24, 1), pb(24, 50, 2), pb(50, 60, 3)];

    let cfg = ChunkingConfig {
        max_characters: 25,
        overlap: 5,
        trim: true,
        chunker_type: ChunkerType::Text,
    };
    let text = "First page content here.Second page content here.Third page.";

    let result = chunk_text(text, &cfg, Some(&boundaries)).unwrap();

    // Whenever chunks are produced, the first chunk must carry page metadata.
    if let Some(first_chunk) = result.chunks.first() {
        assert!(first_chunk.metadata.first_page.is_some());
    }
}
1464
+
1465
#[test]
fn test_chunk_with_pages_single_page_chunk() {
    use crate::types::PageBoundary;

    // Text small enough for one chunk, covered entirely by page 1.
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: 45, page_number: 1 }];

    let cfg = ChunkingConfig {
        max_characters: 100,
        overlap: 10,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    let result =
        chunk_text("All content on single page fits in one chunk.", &cfg, Some(&boundaries)).unwrap();
    assert_eq!(result.chunks.len(), 1);
    let meta = &result.chunks[0].metadata;
    assert_eq!(meta.first_page, Some(1));
    assert_eq!(meta.last_page, Some(1));
}
1488
+
1489
#[test]
fn test_chunk_with_pages_no_overlap() {
    use crate::types::PageBoundary;

    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 11, 1), pb(11, 23, 2)];

    let cfg = ChunkingConfig {
        max_characters: 20,
        overlap: 0,
        trim: false,
        chunker_type: ChunkerType::Text,
    };

    let result = chunk_text("AAAAA BBBBB CCCCC DDDDD", &cfg, Some(&boundaries)).unwrap();
    assert!(!result.chunks.is_empty());

    // Page ranges, when present, must be well ordered.
    for chunk in &result.chunks {
        if let (Some(first), Some(last)) = (chunk.metadata.first_page, chunk.metadata.last_page) {
            assert!(first <= last);
        }
    }
}
1523
+
1524
#[test]
fn test_calculate_page_range_within_page() {
    // A span fully inside page 1 reports (Some(1), Some(1)).
    let boundaries = [
        PageBoundary { byte_start: 0, byte_end: 100, page_number: 1 },
        PageBoundary { byte_start: 100, byte_end: 200, page_number: 2 },
    ];

    let (first, last) = calculate_page_range(10, 50, &boundaries).unwrap();
    assert_eq!((first, last), (Some(1), Some(1)));
}
1543
+
1544
#[test]
fn test_calculate_page_range_spanning_pages() {
    // A span crossing the page-1/page-2 split reports (Some(1), Some(2)).
    let boundaries = [
        PageBoundary { byte_start: 0, byte_end: 100, page_number: 1 },
        PageBoundary { byte_start: 100, byte_end: 200, page_number: 2 },
    ];

    let (first, last) = calculate_page_range(50, 150, &boundaries).unwrap();
    assert_eq!((first, last), (Some(1), Some(2)));
}
1563
+
1564
#[test]
fn test_calculate_page_range_empty_boundaries() {
    // No boundaries at all -> no page information in either direction.
    let boundaries: Vec<PageBoundary> = Vec::new();

    let (first, last) = calculate_page_range(0, 50, &boundaries).unwrap();
    assert_eq!((first, last), (None, None));
}
1572
+
1573
#[test]
fn test_calculate_page_range_no_overlap() {
    // A byte span entirely past the last page maps to no page at all.
    let boundaries = [
        PageBoundary { byte_start: 0, byte_end: 100, page_number: 1 },
        PageBoundary { byte_start: 100, byte_end: 200, page_number: 2 },
    ];

    let (first, last) = calculate_page_range(200, 250, &boundaries).unwrap();
    assert_eq!((first, last), (None, None));
}
1592
+
1593
#[test]
fn test_calculate_page_range_three_pages() {
    // A span touching all three pages reports the full (1, 3) range.
    let boundaries = [
        PageBoundary { byte_start: 0, byte_end: 100, page_number: 1 },
        PageBoundary { byte_start: 100, byte_end: 200, page_number: 2 },
        PageBoundary { byte_start: 200, byte_end: 300, page_number: 3 },
    ];

    let (first, last) = calculate_page_range(50, 250, &boundaries).unwrap();
    assert_eq!((first, last), (Some(1), Some(3)));
}
1617
+
1618
#[test]
fn test_chunk_metadata_page_range_accuracy() {
    use crate::types::PageBoundary;

    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 21, 1), pb(21, 31, 2)];

    let cfg = ChunkingConfig {
        max_characters: 30,
        overlap: 5,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    let result = chunk_text("Page One Content Here.Page Two.", &cfg, Some(&boundaries)).unwrap();

    // Recorded byte span must match each chunk's actual content length.
    for chunk in &result.chunks {
        assert_eq!(chunk.metadata.byte_end - chunk.metadata.byte_start, chunk.content.len());
    }
}
1649
+
1650
#[test]
fn test_chunk_page_range_boundary_edge_cases() {
    use crate::types::PageBoundary;

    // Two 10-byte pages; 10-char chunks with 2-char overlap can straddle the split.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 10, 1), pb(10, 20, 2)];

    let cfg = ChunkingConfig {
        max_characters: 10,
        overlap: 2,
        trim: false,
        chunker_type: ChunkerType::Text,
    };

    let result = chunk_text("0123456789ABCDEFGHIJ", &cfg, Some(&boundaries)).unwrap();

    for chunk in &result.chunks {
        let touches_page1 = chunk.metadata.byte_start < 10;
        let touches_page2 = chunk.metadata.byte_end > 10;

        if touches_page1 && touches_page2 {
            // Straddles the page split: range must cover both pages.
            assert_eq!(chunk.metadata.first_page, Some(1));
            assert_eq!(chunk.metadata.last_page, Some(2));
        } else if touches_page1 {
            assert_eq!(chunk.metadata.first_page, Some(1));
        } else if touches_page2 {
            assert_eq!(chunk.metadata.first_page, Some(2));
        }
    }
}
1691
+
1692
#[test]
fn test_validate_utf8_boundaries_valid_ascii() {
    use crate::types::PageBoundary;

    // Pure ASCII: every byte offset is a valid char boundary.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 10, 1), pb(10, 19, 2)];

    assert!(chunk_text("This is ASCII text.", &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1713
+
1714
#[test]
fn test_validate_utf8_boundaries_valid_emoji() {
    use crate::types::PageBoundary;

    // Both cut points land on valid char boundaries despite the 4-byte emoji.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 11, 1), pb(11, 25, 2)];

    assert!(chunk_text("Hello 👋 World 🌍 End", &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1737
+
1738
#[test]
fn test_validate_utf8_boundaries_valid_cjk() {
    use crate::types::PageBoundary;

    // Multi-byte CJK text split at offsets that fall on char boundaries.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 13, 1), pb(13, 44, 2)];

    assert!(chunk_text("你好世界 こんにちは 안녕하세요", &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1761
+
1762
#[test]
fn test_validate_utf8_boundaries_invalid_mid_emoji() {
    use crate::types::PageBoundary;

    // Byte 7 lands inside the 4-byte wave emoji (bytes 6..10).
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: 7, page_number: 1 }];

    let outcome = chunk_text("Hello 👋 World", &ChunkingConfig::default(), Some(&boundaries));
    assert!(outcome.is_err());
    let msg = outcome.unwrap_err().to_string();
    assert!(msg.contains("UTF-8 character boundary"));
    assert!(msg.contains("byte_end=7"));
}
1780
+
1781
#[test]
fn test_validate_utf8_boundaries_invalid_mid_multibyte_cjk() {
    use crate::types::PageBoundary;

    // Byte 1 splits the first 3-byte CJK character.
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: 1, page_number: 1 }];

    let outcome = chunk_text("中文文本", &ChunkingConfig::default(), Some(&boundaries));
    assert!(outcome.is_err());
    assert!(outcome.unwrap_err().to_string().contains("UTF-8 character boundary"));
}
1798
+
1799
#[test]
fn test_validate_utf8_boundaries_byte_start_exceeds_length() {
    use crate::types::PageBoundary;

    // Second boundary starts past the end of the 5-byte text.
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 3, 1), pb(10, 15, 2)];

    let outcome = chunk_text("Short", &ChunkingConfig::default(), Some(&boundaries));
    assert!(outcome.is_err());
    assert!(outcome.unwrap_err().to_string().contains("exceeds text length"));
}
1823
+
1824
#[test]
fn test_validate_utf8_boundaries_byte_end_exceeds_length() {
    use crate::types::PageBoundary;

    // byte_end of 100 overshoots the 5-byte text.
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: 100, page_number: 1 }];

    let outcome = chunk_text("Short", &ChunkingConfig::default(), Some(&boundaries));
    assert!(outcome.is_err());
    assert!(outcome.unwrap_err().to_string().contains("exceeds text length"));
}
1841
+
1842
#[test]
fn test_validate_utf8_boundaries_empty_boundaries() {
    use crate::types::PageBoundary;

    // Nothing to validate: an empty boundary list always passes.
    let boundaries: Vec<PageBoundary> = Vec::new();

    assert!(chunk_text("Some text", &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1853
+
1854
#[test]
fn test_validate_utf8_boundaries_at_text_boundaries() {
    use crate::types::PageBoundary;

    // byte_end equal to text.len() is a legal (exclusive) end offset.
    let text = "Exact boundary test";
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: text.len(), page_number: 1 }];

    assert!(chunk_text(text, &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1870
+
1871
#[test]
fn test_validate_utf8_boundaries_mixed_languages() {
    use crate::types::PageBoundary;

    // The split at byte 24 sits in the ASCII run before the CJK segment.
    let text = "English text mixed with 中文 and français";
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 24, 1), pb(24, text.len(), 2)];

    assert!(chunk_text(text, &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1894
+
1895
#[test]
fn test_chunk_text_rejects_invalid_utf8_boundaries() {
    use crate::types::PageBoundary;

    // byte_end far beyond the text length must be rejected outright.
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: 1000, page_number: 1 }];

    assert!(chunk_text("🌍🌎🌏 Three emoji planets", &ChunkingConfig::default(), Some(&boundaries)).is_err());
}
1911
+
1912
#[test]
fn test_validate_utf8_boundaries_combining_diacriticals() {
    use crate::types::PageBoundary;

    // Split inside the ASCII prefix ("ca|fé"); the accented char stays intact.
    let text = "café";
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 2, 1), pb(2, text.len(), 2)];

    assert!(chunk_text(text, &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1935
+
1936
#[test]
fn test_validate_utf8_boundaries_error_messages_are_clear() {
    use crate::types::PageBoundary;

    // Byte 6 falls inside the 4-byte emoji (bytes 5..9); the error must
    // mention both the problem class and the offending offset.
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: 6, page_number: 1 }];

    let outcome = chunk_text("Test 👋 text", &ChunkingConfig::default(), Some(&boundaries));
    assert!(outcome.is_err());
    let msg = outcome.unwrap_err().to_string();
    assert!(msg.contains("UTF-8"));
    assert!(msg.contains("boundary"));
    assert!(msg.contains("6"));
}
1957
+
1958
#[test]
fn test_validate_utf8_boundaries_multiple_valid_boundaries() {
    use crate::types::PageBoundary;

    // Five consecutive pages whose cut points all sit on char boundaries.
    let text = "First👋Second🌍Third";
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![
        pb(0, 5, 1),
        pb(5, 9, 2),
        pb(9, 15, 3),
        pb(15, 19, 4),
        pb(19, text.len(), 5),
    ];

    assert!(chunk_text(text, &ChunkingConfig::default(), Some(&boundaries)).is_ok());
}
1996
+
1997
#[test]
fn test_validate_utf8_boundaries_zero_start_and_end() {
    use crate::types::PageBoundary;

    // A zero-width boundary at offset 0 is still an invalid range.
    let boundaries = vec![PageBoundary { byte_start: 0, byte_end: 0, page_number: 1 }];

    assert!(chunk_text("Text", &ChunkingConfig::default(), Some(&boundaries)).is_err());
}
2013
+
2014
#[test]
fn test_utf8_boundaries_caching_with_many_boundaries() {
    use crate::types::PageBoundary;

    let cfg = ChunkingConfig {
        max_characters: 500,
        overlap: 50,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    let text = "🌍 Hello World ".repeat(200);
    let text_len = text.len();

    // Derive ~10 evenly spaced page boundaries, snapping each raw offset back
    // to the last char start strictly before it so every boundary is UTF-8 safe.
    let boundary_count = 10;
    let step = text_len / boundary_count;
    let mut boundaries = Vec::new();

    for i in 0..boundary_count {
        let start = i * step;
        let end = if i + 1 == boundary_count { text_len } else { (i + 1) * step };
        if start >= end || start > text_len || end > text_len {
            continue;
        }
        let snap = |upper: usize| text[..upper].char_indices().last().map(|(idx, _)| idx);
        if let (Some(byte_start), Some(byte_end)) = (snap(start), snap(end)) {
            boundaries.push(PageBoundary { byte_start, byte_end, page_number: i + 1 });
        }
    }

    if !boundaries.is_empty() {
        let result = chunk_text(&text, &cfg, Some(&boundaries));
        assert!(
            result.is_ok(),
            "Failed to chunk text with {} boundaries",
            boundaries.len()
        );
        assert!(result.unwrap().chunk_count > 0);
    }
}
2066
+
2067
#[test]
fn test_utf8_boundaries_caching_large_document_with_emojis() {
    use crate::types::PageBoundary;

    let cfg = ChunkingConfig {
        max_characters: 1000,
        overlap: 100,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    let large_text = "This is a large document with lots of emoji: 🌍 🚀 💻 🎉 🔥 ✨ 🎨 🌟 ".repeat(100);

    // Pick char-aligned split points at roughly 1/3 and 2/3 of the document,
    // falling back to the full length if the index is out of range.
    let all_indices: Vec<usize> = large_text.char_indices().map(|(idx, _)| idx).collect();
    let pick = |pos: usize| all_indices.get(pos).copied().unwrap_or(large_text.len());
    let split_a = pick(all_indices.len() / 3);
    let split_b = pick((2 * all_indices.len()) / 3);

    let boundaries = vec![
        PageBoundary { byte_start: 0, byte_end: split_a, page_number: 1 },
        PageBoundary { byte_start: split_a, byte_end: split_b, page_number: 2 },
        PageBoundary { byte_start: split_b, byte_end: large_text.len(), page_number: 3 },
    ];

    let result = chunk_text(&large_text, &cfg, Some(&boundaries));
    assert!(result.is_ok());

    let chunks = result.unwrap();
    assert!(!chunks.chunks.is_empty());

    // Every chunk carries content, and any page range is well ordered.
    for chunk in &chunks.chunks {
        assert!(!chunk.content.is_empty());
        if let (Some(first), Some(last)) = (chunk.metadata.first_page, chunk.metadata.last_page) {
            assert!(first <= last);
        }
    }
}
2128
+
2129
#[test]
fn test_adaptive_validation_small_boundary_set() {
    use crate::types::PageBoundary;

    // A handful of boundaries: the small-set validation path.
    let text = "Hello 👋 World 🌍 End";
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![pb(0, 6, 1), pb(6, 15, 2), pb(15, text.len(), 3)];

    let cfg = ChunkingConfig {
        max_characters: 100,
        overlap: 10,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    assert!(chunk_text(text, &cfg, Some(&boundaries)).is_ok());
}
2162
+
2163
#[test]
fn test_adaptive_validation_threshold_boundary() {
    use crate::types::PageBoundary;

    let cfg = ChunkingConfig {
        max_characters: 200,
        overlap: 20,
        trim: true,
        chunker_type: ChunkerType::Text,
    };
    let text = "Test text ".repeat(50);
    let text_len = text.len();

    // Build exactly ADAPTIVE_VALIDATION_THRESHOLD boundaries so validation is
    // exercised right at its strategy switch-over point.
    let step = text_len / ADAPTIVE_VALIDATION_THRESHOLD;
    let mut boundaries = Vec::new();

    for i in 0..ADAPTIVE_VALIDATION_THRESHOLD {
        let start = i * step;
        let end = if i + 1 == ADAPTIVE_VALIDATION_THRESHOLD { text_len } else { (i + 1) * step };
        if start >= end || start > text_len || end > text_len {
            continue;
        }
        // Snap each raw offset back to the last char start before it.
        let last_char_before = |upper: usize| text[..upper].char_indices().last().map(|(idx, _)| idx);
        let snapped_start = last_char_before(start.min(text_len - 1));
        let snapped_end = last_char_before(end.min(text_len));
        if let (Some(byte_start), Some(byte_end)) = (snapped_start, snapped_end) {
            if byte_start < byte_end {
                boundaries.push(PageBoundary { byte_start, byte_end, page_number: i + 1 });
            }
        }
    }

    if !boundaries.is_empty() {
        assert!(chunk_text(&text, &cfg, Some(&boundaries)).is_ok());
    }
}
2210
+
2211
#[test]
fn test_adaptive_validation_large_boundary_set() {
    use crate::types::PageBoundary;

    let cfg = ChunkingConfig {
        max_characters: 500,
        overlap: 50,
        trim: true,
        chunker_type: ChunkerType::Text,
    };
    let text = "Lorem ipsum dolor sit amet ".repeat(100);
    let text_len = text.len();

    // 50 boundaries: well past the threshold, so the large-set path runs.
    let boundary_count = 50;
    let step = text_len / boundary_count;
    let mut boundaries = Vec::new();

    for i in 0..boundary_count {
        let start = i * step;
        let end = if i + 1 == boundary_count { text_len } else { (i + 1) * step };
        if start >= end || start > text_len || end > text_len {
            continue;
        }
        // Snap each raw offset back to the last char start before it.
        let last_char_before = |upper: usize| text[..upper].char_indices().last().map(|(idx, _)| idx);
        let snapped_start = last_char_before(start.min(text_len - 1));
        let snapped_end = last_char_before(end.min(text_len));
        if let (Some(byte_start), Some(byte_end)) = (snapped_start, snapped_end) {
            if byte_start < byte_end {
                boundaries.push(PageBoundary { byte_start, byte_end, page_number: i + 1 });
            }
        }
    }

    if !boundaries.is_empty() {
        assert!(chunk_text(&text, &cfg, Some(&boundaries)).is_ok());
    }
}
2259
+
2260
#[test]
fn test_adaptive_validation_consistency() {
    use crate::types::PageBoundary;

    let text = "Mixed language: 你好 مرحبا Здравствуй ".repeat(50);
    let pb = |byte_start, byte_end, page_number| PageBoundary { byte_start, byte_end, page_number };
    let boundaries = vec![
        pb(0, 50, 1),
        pb(50, 100, 2),
        pb(100, 150, 3),
        pb(150, 200, 4),
        pb(200, text.len(), 5),
    ];

    let cfg = ChunkingConfig {
        max_characters: 300,
        overlap: 30,
        trim: true,
        chunker_type: ChunkerType::Text,
    };

    // Fixed byte cuts over multilingual text may or may not land on valid
    // UTF-8 boundaries; the result is intentionally ignored — the test only
    // checks that validation completes without panicking.
    let _ = chunk_text(&text, &cfg, Some(&boundaries));
}
2303
+ }