kreuzberg 4.0.0.pre.rc.8 → 4.0.0.pre.rc.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -14
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -1
- data/.rubocop.yml +538 -538
- data/Gemfile +8 -8
- data/Gemfile.lock +4 -104
- data/README.md +454 -432
- data/Rakefile +25 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -341
- data/ext/kreuzberg_rb/extconf.rb +45 -45
- data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
- data/ext/kreuzberg_rb/native/Cargo.lock +6941 -6721
- data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -15
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3135
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +214 -182
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +81 -46
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +724 -724
- data/lib/kreuzberg/error_context.rb +80 -32
- data/lib/kreuzberg/errors.rb +118 -118
- data/lib/kreuzberg/extraction_api.rb +340 -85
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +279 -279
- data/lib/kreuzberg/setup_lib_path.rb +80 -80
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +109 -103
- data/lib/pdfium.dll +0 -0
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +546 -537
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -85
- data/spec/binding/cli_spec.rb +55 -55
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -41
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/Cargo.toml +45 -0
- data/vendor/kreuzberg/Cargo.toml +61 -38
- data/vendor/kreuzberg/README.md +230 -221
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
- data/vendor/kreuzberg/build.rs +843 -891
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
- data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
- data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
- data/vendor/kreuzberg/src/core/config.rs +1080 -1080
- data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
- data/vendor/kreuzberg/src/core/io.rs +329 -329
- data/vendor/kreuzberg/src/core/mime.rs +605 -605
- data/vendor/kreuzberg/src/core/mod.rs +47 -47
- data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1171
- data/vendor/kreuzberg/src/embeddings.rs +500 -432
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +601 -569
- data/vendor/kreuzberg/src/extraction/image.rs +491 -491
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -562
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
- data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
- data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
- data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
- data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
- data/vendor/kreuzberg/src/extractors/email.rs +157 -157
- data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
- data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
- data/vendor/kreuzberg/src/extractors/html.rs +407 -407
- data/vendor/kreuzberg/src/extractors/image.rs +219 -219
- data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
- data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
- data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
- data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
- data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
- data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
- data/vendor/kreuzberg/src/extractors/pdf.rs +749 -673
- data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
- data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
- data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
- data/vendor/kreuzberg/src/extractors/security.rs +484 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
- data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
- data/vendor/kreuzberg/src/extractors/text.rs +265 -265
- data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
- data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
- data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
- data/vendor/kreuzberg/src/lib.rs +113 -113
- data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
- data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -154
- data/vendor/kreuzberg/src/pdf/bindings.rs +44 -0
- data/vendor/kreuzberg/src/pdf/bundled.rs +346 -328
- data/vendor/kreuzberg/src/pdf/error.rs +130 -130
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
- data/vendor/kreuzberg/src/pdf/mod.rs +68 -66
- data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
- data/vendor/kreuzberg/src/pdf/table.rs +420 -417
- data/vendor/kreuzberg/src/pdf/text.rs +240 -240
- data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
- data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
- data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
- data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
- data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +25 -25
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +1055 -1055
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
- data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
- data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
- data/vendor/kreuzberg/tests/config_features.rs +612 -612
- data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
- data/vendor/kreuzberg/tests/core_integration.rs +510 -510
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
- data/vendor/kreuzberg/tests/email_integration.rs +327 -327
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
- data/vendor/kreuzberg/tests/error_handling.rs +402 -402
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
- data/vendor/kreuzberg/tests/format_integration.rs +164 -161
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
- data/vendor/kreuzberg/tests/image_integration.rs +255 -255
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
- data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
- data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
- data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
- data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
- data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
- data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
- data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
- data/vendor/kreuzberg/tests/security_validation.rs +416 -416
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/kreuzberg-ffi/Cargo.toml +63 -0
- data/vendor/kreuzberg-ffi/README.md +851 -0
- data/vendor/kreuzberg-ffi/build.rs +176 -0
- data/vendor/kreuzberg-ffi/cbindgen.toml +27 -0
- data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +12 -0
- data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -0
- data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -0
- data/vendor/kreuzberg-ffi/src/lib.rs +3616 -0
- data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -0
- data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -0
- data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -0
- data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -0
- data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -0
- data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -0
- data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -0
- data/vendor/kreuzberg-tesseract/.crate-ignore +2 -0
- data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -0
- data/vendor/kreuzberg-tesseract/Cargo.toml +48 -0
- data/vendor/kreuzberg-tesseract/LICENSE +22 -0
- data/vendor/kreuzberg-tesseract/README.md +399 -0
- data/vendor/kreuzberg-tesseract/build.rs +1354 -0
- data/vendor/kreuzberg-tesseract/patches/README.md +71 -0
- data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -0
- data/vendor/kreuzberg-tesseract/src/api.rs +1371 -0
- data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -0
- data/vendor/kreuzberg-tesseract/src/enums.rs +297 -0
- data/vendor/kreuzberg-tesseract/src/error.rs +81 -0
- data/vendor/kreuzberg-tesseract/src/lib.rs +145 -0
- data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -0
- data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -0
- data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -0
- data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -0
- data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -0
- data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -0
- data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
- data/vendor/rb-sys/Cargo.lock +393 -393
- data/vendor/rb-sys/Cargo.toml +70 -70
- data/vendor/rb-sys/Cargo.toml.orig +57 -57
- data/vendor/rb-sys/LICENSE-APACHE +190 -190
- data/vendor/rb-sys/LICENSE-MIT +21 -21
- data/vendor/rb-sys/build/features.rs +111 -111
- data/vendor/rb-sys/build/main.rs +286 -286
- data/vendor/rb-sys/build/stable_api_config.rs +155 -155
- data/vendor/rb-sys/build/version.rs +50 -50
- data/vendor/rb-sys/readme.md +36 -36
- data/vendor/rb-sys/src/bindings.rs +21 -21
- data/vendor/rb-sys/src/hidden.rs +11 -11
- data/vendor/rb-sys/src/lib.rs +35 -35
- data/vendor/rb-sys/src/macros.rs +371 -371
- data/vendor/rb-sys/src/memory.rs +53 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
- data/vendor/rb-sys/src/special_consts.rs +31 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
- data/vendor/rb-sys/src/stable_api.rs +260 -260
- data/vendor/rb-sys/src/symbol.rs +31 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
- data/vendor/rb-sys/src/utils.rs +89 -89
- data/vendor/rb-sys/src/value_type.rs +7 -7
- metadata +44 -81
- data/vendor/rb-sys/bin/release.sh +0 -21
|
@@ -1,1877 +1,1877 @@
|
|
|
1
|
-
//! Text chunking utilities.
|
|
2
|
-
//!
|
|
3
|
-
//! This module provides text chunking functionality using the `text-splitter` library.
|
|
4
|
-
//! It splits long text into smaller chunks while preserving semantic boundaries.
|
|
5
|
-
//!
|
|
6
|
-
//! # Features
|
|
7
|
-
//!
|
|
8
|
-
//! - **Smart splitting**: Respects word and sentence boundaries
|
|
9
|
-
//! - **Markdown-aware**: Preserves Markdown structure (headings, code blocks, lists)
|
|
10
|
-
//! - **Configurable overlap**: Overlap chunks to maintain context
|
|
11
|
-
//! - **Unicode support**: Handles CJK characters and emojis correctly
|
|
12
|
-
//! - **Batch processing**: Process multiple texts efficiently
|
|
13
|
-
//!
|
|
14
|
-
//! # Chunker Types
|
|
15
|
-
//!
|
|
16
|
-
//! - **Text**: Generic text splitter, splits on whitespace and punctuation
|
|
17
|
-
//! - **Markdown**: Markdown-aware splitter, preserves formatting and structure
|
|
18
|
-
//!
|
|
19
|
-
//! # Example
|
|
20
|
-
//!
|
|
21
|
-
//! ```rust
|
|
22
|
-
//! use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
|
|
23
|
-
//!
|
|
24
|
-
//! # fn example() -> kreuzberg::Result<()> {
|
|
25
|
-
//! let config = ChunkingConfig {
|
|
26
|
-
//! max_characters: 500,
|
|
27
|
-
//! overlap: 50,
|
|
28
|
-
//! trim: true,
|
|
29
|
-
//! chunker_type: ChunkerType::Text,
|
|
30
|
-
//! };
|
|
31
|
-
//!
|
|
32
|
-
//! let long_text = "This is a very long document...".repeat(100);
|
|
33
|
-
//! let result = chunk_text(&long_text, &config, None)?;
|
|
34
|
-
//!
|
|
35
|
-
//! println!("Split into {} chunks", result.chunk_count);
|
|
36
|
-
//! for (i, chunk) in result.chunks.iter().enumerate() {
|
|
37
|
-
//! println!("Chunk {}: {} chars", i + 1, chunk.content.len());
|
|
38
|
-
//! }
|
|
39
|
-
//! # Ok(())
|
|
40
|
-
//! # }
|
|
41
|
-
//! ```
|
|
42
|
-
//!
|
|
43
|
-
//! # Use Cases
|
|
44
|
-
//!
|
|
45
|
-
//! - Splitting documents for LLM context windows
|
|
46
|
-
//! - Creating overlapping chunks for semantic search
|
|
47
|
-
//! - Processing large documents in batches
|
|
48
|
-
//! - Maintaining context across chunk boundaries
|
|
49
|
-
use crate::error::{KreuzbergError, Result};
|
|
50
|
-
use crate::types::{Chunk, ChunkMetadata, PageBoundary};
|
|
51
|
-
use once_cell::sync::Lazy;
|
|
52
|
-
use serde::{Deserialize, Serialize};
|
|
53
|
-
use std::sync::Arc;
|
|
54
|
-
use text_splitter::{Characters, ChunkCapacity, ChunkConfig, MarkdownSplitter, TextSplitter};
|
|
55
|
-
|
|
56
|
-
pub mod processor;
|
|
57
|
-
pub use processor::ChunkingProcessor;
|
|
58
|
-
|
|
59
|
-
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
|
60
|
-
pub enum ChunkerType {
|
|
61
|
-
Text,
|
|
62
|
-
Markdown,
|
|
63
|
-
}
|
|
64
|
-
|
|
65
|
-
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
66
|
-
pub struct ChunkingResult {
|
|
67
|
-
pub chunks: Vec<Chunk>,
|
|
68
|
-
pub chunk_count: usize,
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
pub struct ChunkingConfig {
|
|
72
|
-
pub max_characters: usize,
|
|
73
|
-
pub overlap: usize,
|
|
74
|
-
pub trim: bool,
|
|
75
|
-
pub chunker_type: ChunkerType,
|
|
76
|
-
}
|
|
77
|
-
|
|
78
|
-
impl Default for ChunkingConfig {
|
|
79
|
-
fn default() -> Self {
|
|
80
|
-
Self {
|
|
81
|
-
max_characters: 2000,
|
|
82
|
-
overlap: 100,
|
|
83
|
-
trim: true,
|
|
84
|
-
chunker_type: ChunkerType::Text,
|
|
85
|
-
}
|
|
86
|
-
}
|
|
87
|
-
}
|
|
88
|
-
|
|
89
|
-
fn build_chunk_config(max_characters: usize, overlap: usize, trim: bool) -> Result<ChunkConfig<Characters>> {
|
|
90
|
-
ChunkConfig::new(ChunkCapacity::new(max_characters))
|
|
91
|
-
.with_overlap(overlap)
|
|
92
|
-
.map(|config| config.with_trim(trim))
|
|
93
|
-
.map_err(|e| KreuzbergError::validation(format!("Invalid chunking configuration: {}", e)))
|
|
94
|
-
}
|
|
95
|
-
|
|
96
|
-
/// Validates that byte offsets in page boundaries fall on valid UTF-8 character boundaries.
|
|
97
|
-
///
|
|
98
|
-
/// This function ensures that all page boundary positions are at valid UTF-8 character
|
|
99
|
-
/// boundaries within the text. This is CRITICAL to prevent text corruption when boundaries
|
|
100
|
-
/// are created from language bindings or external sources, particularly with multibyte
|
|
101
|
-
/// UTF-8 characters (emoji, CJK characters, combining marks, etc.).
|
|
102
|
-
///
|
|
103
|
-
/// # Arguments
|
|
104
|
-
///
|
|
105
|
-
/// * `text` - The text being chunked
|
|
106
|
-
/// * `boundaries` - Page boundary markers to validate
|
|
107
|
-
///
|
|
108
|
-
/// # Returns
|
|
109
|
-
///
|
|
110
|
-
/// Returns `Ok(())` if all boundaries are at valid UTF-8 character boundaries.
|
|
111
|
-
/// Returns `KreuzbergError::Validation` if any boundary is at an invalid position.
|
|
112
|
-
///
|
|
113
|
-
/// # UTF-8 Boundary Safety
|
|
114
|
-
///
|
|
115
|
-
/// Rust strings use UTF-8 encoding where characters can be 1-4 bytes. For example:
|
|
116
|
-
/// - ASCII letters: 1 byte each
|
|
117
|
-
/// - Emoji (🌍): 4 bytes but 1 character
|
|
118
|
-
/// - CJK characters (中): 3 bytes but 1 character
|
|
119
|
-
///
|
|
120
|
-
/// This function checks that all byte_start and byte_end values are at character
|
|
121
|
-
/// boundaries using Rust's `is_char_boundary()` method.
|
|
122
|
-
fn validate_utf8_boundaries(text: &str, boundaries: &[PageBoundary]) -> Result<()> {
|
|
123
|
-
for (idx, boundary) in boundaries.iter().enumerate() {
|
|
124
|
-
if boundary.byte_start > 0 && boundary.byte_start <= text.len() {
|
|
125
|
-
if !text.is_char_boundary(boundary.byte_start) {
|
|
126
|
-
return Err(KreuzbergError::validation(format!(
|
|
127
|
-
"Page boundary {} has byte_start={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
|
|
128
|
-
idx,
|
|
129
|
-
boundary.byte_start,
|
|
130
|
-
text.len()
|
|
131
|
-
)));
|
|
132
|
-
}
|
|
133
|
-
} else if boundary.byte_start > text.len() {
|
|
134
|
-
return Err(KreuzbergError::validation(format!(
|
|
135
|
-
"Page boundary {} has byte_start={} which exceeds text length {}",
|
|
136
|
-
idx,
|
|
137
|
-
boundary.byte_start,
|
|
138
|
-
text.len()
|
|
139
|
-
)));
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
if boundary.byte_end > 0 && boundary.byte_end <= text.len() {
|
|
143
|
-
if !text.is_char_boundary(boundary.byte_end) {
|
|
144
|
-
return Err(KreuzbergError::validation(format!(
|
|
145
|
-
"Page boundary {} has byte_end={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
|
|
146
|
-
idx,
|
|
147
|
-
boundary.byte_end,
|
|
148
|
-
text.len()
|
|
149
|
-
)));
|
|
150
|
-
}
|
|
151
|
-
} else if boundary.byte_end > text.len() {
|
|
152
|
-
return Err(KreuzbergError::validation(format!(
|
|
153
|
-
"Page boundary {} has byte_end={} which exceeds text length {}",
|
|
154
|
-
idx,
|
|
155
|
-
boundary.byte_end,
|
|
156
|
-
text.len()
|
|
157
|
-
)));
|
|
158
|
-
}
|
|
159
|
-
}
|
|
160
|
-
|
|
161
|
-
Ok(())
|
|
162
|
-
}
|
|
163
|
-
|
|
164
|
-
/// Calculate which pages a character range spans.
|
|
165
|
-
///
|
|
166
|
-
/// # Arguments
|
|
167
|
-
///
|
|
168
|
-
/// * `char_start` - Starting character offset of the chunk
|
|
169
|
-
/// * `char_end` - Ending character offset of the chunk
|
|
170
|
-
/// * `boundaries` - Page boundary markers from the document
|
|
171
|
-
///
|
|
172
|
-
/// # Returns
|
|
173
|
-
///
|
|
174
|
-
/// A tuple of (first_page, last_page) where page numbers are 1-indexed.
|
|
175
|
-
/// Returns (None, None) if boundaries are empty or chunk doesn't overlap any page.
|
|
176
|
-
/// Validates page boundaries for consistency and correctness.
|
|
177
|
-
///
|
|
178
|
-
/// # Validation Rules
|
|
179
|
-
///
|
|
180
|
-
/// 1. Boundaries must be sorted by char_start (monotonically increasing)
|
|
181
|
-
/// 2. Boundaries must not overlap (char_end[i] <= char_start[i+1])
|
|
182
|
-
/// 3. Each boundary must have char_start < char_end
|
|
183
|
-
///
|
|
184
|
-
/// # Errors
|
|
185
|
-
///
|
|
186
|
-
/// Returns `KreuzbergError::Validation` if any boundary is invalid.
|
|
187
|
-
fn validate_page_boundaries(boundaries: &[PageBoundary]) -> Result<()> {
|
|
188
|
-
if boundaries.is_empty() {
|
|
189
|
-
return Ok(());
|
|
190
|
-
}
|
|
191
|
-
|
|
192
|
-
for (idx, boundary) in boundaries.iter().enumerate() {
|
|
193
|
-
if boundary.byte_start >= boundary.byte_end {
|
|
194
|
-
return Err(KreuzbergError::validation(format!(
|
|
195
|
-
"Invalid boundary range at index {}: byte_start ({}) must be < byte_end ({})",
|
|
196
|
-
idx, boundary.byte_start, boundary.byte_end
|
|
197
|
-
)));
|
|
198
|
-
}
|
|
199
|
-
}
|
|
200
|
-
|
|
201
|
-
for i in 0..boundaries.len() - 1 {
|
|
202
|
-
let current = &boundaries[i];
|
|
203
|
-
let next = &boundaries[i + 1];
|
|
204
|
-
|
|
205
|
-
if current.byte_start > next.byte_start {
|
|
206
|
-
return Err(KreuzbergError::validation(format!(
|
|
207
|
-
"Page boundaries not sorted: boundary at index {} (byte_start={}) comes after boundary at index {} (byte_start={})",
|
|
208
|
-
i,
|
|
209
|
-
current.byte_start,
|
|
210
|
-
i + 1,
|
|
211
|
-
next.byte_start
|
|
212
|
-
)));
|
|
213
|
-
}
|
|
214
|
-
|
|
215
|
-
if current.byte_end > next.byte_start {
|
|
216
|
-
return Err(KreuzbergError::validation(format!(
|
|
217
|
-
"Overlapping page boundaries: boundary {} ends at {} but boundary {} starts at {}",
|
|
218
|
-
i,
|
|
219
|
-
current.byte_end,
|
|
220
|
-
i + 1,
|
|
221
|
-
next.byte_start
|
|
222
|
-
)));
|
|
223
|
-
}
|
|
224
|
-
}
|
|
225
|
-
|
|
226
|
-
Ok(())
|
|
227
|
-
}
|
|
228
|
-
|
|
229
|
-
/// Calculate which pages a byte range spans.
|
|
230
|
-
///
|
|
231
|
-
/// # Arguments
|
|
232
|
-
///
|
|
233
|
-
/// * `byte_start` - Starting byte offset of the chunk
|
|
234
|
-
/// * `byte_end` - Ending byte offset of the chunk
|
|
235
|
-
/// * `boundaries` - Page boundary markers from the document
|
|
236
|
-
///
|
|
237
|
-
/// # Returns
|
|
238
|
-
///
|
|
239
|
-
/// A tuple of (first_page, last_page) where page numbers are 1-indexed.
|
|
240
|
-
/// Returns (None, None) if boundaries are empty or chunk doesn't overlap any page.
|
|
241
|
-
///
|
|
242
|
-
/// # Errors
|
|
243
|
-
///
|
|
244
|
-
/// Returns `KreuzbergError::Validation` if boundaries are invalid.
|
|
245
|
-
fn calculate_page_range(
|
|
246
|
-
byte_start: usize,
|
|
247
|
-
byte_end: usize,
|
|
248
|
-
boundaries: &[PageBoundary],
|
|
249
|
-
) -> Result<(Option<usize>, Option<usize>)> {
|
|
250
|
-
if boundaries.is_empty() {
|
|
251
|
-
return Ok((None, None));
|
|
252
|
-
}
|
|
253
|
-
|
|
254
|
-
validate_page_boundaries(boundaries)?;
|
|
255
|
-
|
|
256
|
-
let mut first_page = None;
|
|
257
|
-
let mut last_page = None;
|
|
258
|
-
|
|
259
|
-
for boundary in boundaries {
|
|
260
|
-
if byte_start < boundary.byte_end && byte_end > boundary.byte_start {
|
|
261
|
-
if first_page.is_none() {
|
|
262
|
-
first_page = Some(boundary.page_number);
|
|
263
|
-
}
|
|
264
|
-
last_page = Some(boundary.page_number);
|
|
265
|
-
}
|
|
266
|
-
}
|
|
267
|
-
|
|
268
|
-
Ok((first_page, last_page))
|
|
269
|
-
}
|
|
270
|
-
|
|
271
|
-
/// Split text into chunks with optional page boundary tracking.
|
|
272
|
-
///
|
|
273
|
-
/// # Arguments
|
|
274
|
-
///
|
|
275
|
-
/// * `text` - The text to split into chunks
|
|
276
|
-
/// * `config` - Chunking configuration (max size, overlap, type)
|
|
277
|
-
/// * `page_boundaries` - Optional page boundary markers for mapping chunks to pages
|
|
278
|
-
///
|
|
279
|
-
/// # Returns
|
|
280
|
-
///
|
|
281
|
-
/// A ChunkingResult containing all chunks and their metadata.
|
|
282
|
-
///
|
|
283
|
-
/// # Examples
|
|
284
|
-
///
|
|
285
|
-
/// ```rust
|
|
286
|
-
/// use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
|
|
287
|
-
///
|
|
288
|
-
/// # fn example() -> kreuzberg::Result<()> {
|
|
289
|
-
/// let config = ChunkingConfig {
|
|
290
|
-
/// max_characters: 500,
|
|
291
|
-
/// overlap: 50,
|
|
292
|
-
/// trim: true,
|
|
293
|
-
/// chunker_type: ChunkerType::Text,
|
|
294
|
-
/// };
|
|
295
|
-
/// let result = chunk_text("Long text...", &config, None)?;
|
|
296
|
-
/// assert!(!result.chunks.is_empty());
|
|
297
|
-
/// # Ok(())
|
|
298
|
-
/// # }
|
|
299
|
-
/// ```
|
|
300
|
-
pub fn chunk_text(
|
|
301
|
-
text: &str,
|
|
302
|
-
config: &ChunkingConfig,
|
|
303
|
-
page_boundaries: Option<&[PageBoundary]>,
|
|
304
|
-
) -> Result<ChunkingResult> {
|
|
305
|
-
if text.is_empty() {
|
|
306
|
-
return Ok(ChunkingResult {
|
|
307
|
-
chunks: vec![],
|
|
308
|
-
chunk_count: 0,
|
|
309
|
-
});
|
|
310
|
-
}
|
|
311
|
-
|
|
312
|
-
if let Some(boundaries) = page_boundaries {
|
|
313
|
-
validate_utf8_boundaries(text, boundaries)?;
|
|
314
|
-
}
|
|
315
|
-
|
|
316
|
-
let chunk_config = build_chunk_config(config.max_characters, config.overlap, config.trim)?;
|
|
317
|
-
|
|
318
|
-
let text_chunks: Vec<&str> = match config.chunker_type {
|
|
319
|
-
ChunkerType::Text => {
|
|
320
|
-
let splitter = TextSplitter::new(chunk_config);
|
|
321
|
-
splitter.chunks(text).collect()
|
|
322
|
-
}
|
|
323
|
-
ChunkerType::Markdown => {
|
|
324
|
-
let splitter = MarkdownSplitter::new(chunk_config);
|
|
325
|
-
splitter.chunks(text).collect()
|
|
326
|
-
}
|
|
327
|
-
};
|
|
328
|
-
|
|
329
|
-
let total_chunks = text_chunks.len();
|
|
330
|
-
let mut byte_offset = 0;
|
|
331
|
-
|
|
332
|
-
let mut chunks: Vec<Chunk> = Vec::new();
|
|
333
|
-
|
|
334
|
-
for (index, chunk_text) in text_chunks.into_iter().enumerate() {
|
|
335
|
-
let byte_start = byte_offset;
|
|
336
|
-
let chunk_length = chunk_text.len();
|
|
337
|
-
let byte_end = byte_start + chunk_length;
|
|
338
|
-
|
|
339
|
-
let overlap_chars = if index < total_chunks - 1 {
|
|
340
|
-
config.overlap.min(chunk_length)
|
|
341
|
-
} else {
|
|
342
|
-
0
|
|
343
|
-
};
|
|
344
|
-
byte_offset = byte_end - overlap_chars;
|
|
345
|
-
|
|
346
|
-
let (first_page, last_page) = if let Some(boundaries) = page_boundaries {
|
|
347
|
-
calculate_page_range(byte_start, byte_end, boundaries)?
|
|
348
|
-
} else {
|
|
349
|
-
(None, None)
|
|
350
|
-
};
|
|
351
|
-
|
|
352
|
-
chunks.push(Chunk {
|
|
353
|
-
content: chunk_text.to_string(),
|
|
354
|
-
embedding: None,
|
|
355
|
-
metadata: ChunkMetadata {
|
|
356
|
-
byte_start,
|
|
357
|
-
byte_end,
|
|
358
|
-
token_count: None,
|
|
359
|
-
chunk_index: index,
|
|
360
|
-
total_chunks,
|
|
361
|
-
first_page,
|
|
362
|
-
last_page,
|
|
363
|
-
},
|
|
364
|
-
});
|
|
365
|
-
}
|
|
366
|
-
|
|
367
|
-
let chunk_count = chunks.len();
|
|
368
|
-
|
|
369
|
-
Ok(ChunkingResult { chunks, chunk_count })
|
|
370
|
-
}
|
|
371
|
-
|
|
372
|
-
pub fn chunk_text_with_type(
|
|
373
|
-
text: &str,
|
|
374
|
-
max_characters: usize,
|
|
375
|
-
overlap: usize,
|
|
376
|
-
trim: bool,
|
|
377
|
-
chunker_type: ChunkerType,
|
|
378
|
-
) -> Result<ChunkingResult> {
|
|
379
|
-
let config = ChunkingConfig {
|
|
380
|
-
max_characters,
|
|
381
|
-
overlap,
|
|
382
|
-
trim,
|
|
383
|
-
chunker_type,
|
|
384
|
-
};
|
|
385
|
-
chunk_text(text, &config, None)
|
|
386
|
-
}
|
|
387
|
-
|
|
388
|
-
pub fn chunk_texts_batch(texts: &[&str], config: &ChunkingConfig) -> Result<Vec<ChunkingResult>> {
|
|
389
|
-
texts.iter().map(|text| chunk_text(text, config, None)).collect()
|
|
390
|
-
}
|
|
391
|
-
|
|
392
|
-
#[cfg(test)]
|
|
393
|
-
mod tests {
|
|
394
|
-
use super::*;
|
|
395
|
-
|
|
396
|
-
#[test]
|
|
397
|
-
fn test_chunk_empty_text() {
|
|
398
|
-
let config = ChunkingConfig::default();
|
|
399
|
-
let result = chunk_text("", &config, None).unwrap();
|
|
400
|
-
assert_eq!(result.chunks.len(), 0);
|
|
401
|
-
assert_eq!(result.chunk_count, 0);
|
|
402
|
-
}
|
|
403
|
-
|
|
404
|
-
#[test]
|
|
405
|
-
fn test_chunk_short_text_single_chunk() {
|
|
406
|
-
let config = ChunkingConfig {
|
|
407
|
-
max_characters: 100,
|
|
408
|
-
overlap: 10,
|
|
409
|
-
trim: true,
|
|
410
|
-
chunker_type: ChunkerType::Text,
|
|
411
|
-
};
|
|
412
|
-
let text = "This is a short text.";
|
|
413
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
414
|
-
assert_eq!(result.chunks.len(), 1);
|
|
415
|
-
assert_eq!(result.chunk_count, 1);
|
|
416
|
-
assert_eq!(result.chunks[0].content, text);
|
|
417
|
-
}
|
|
418
|
-
|
|
419
|
-
#[test]
|
|
420
|
-
fn test_chunk_long_text_multiple_chunks() {
|
|
421
|
-
let config = ChunkingConfig {
|
|
422
|
-
max_characters: 20,
|
|
423
|
-
overlap: 5,
|
|
424
|
-
trim: true,
|
|
425
|
-
chunker_type: ChunkerType::Text,
|
|
426
|
-
};
|
|
427
|
-
let text = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
|
|
428
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
429
|
-
assert!(result.chunk_count >= 2);
|
|
430
|
-
assert_eq!(result.chunks.len(), result.chunk_count);
|
|
431
|
-
assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 20));
|
|
432
|
-
}
|
|
433
|
-
|
|
434
|
-
#[test]
|
|
435
|
-
fn test_chunk_text_with_overlap() {
|
|
436
|
-
let config = ChunkingConfig {
|
|
437
|
-
max_characters: 20,
|
|
438
|
-
overlap: 5,
|
|
439
|
-
trim: true,
|
|
440
|
-
chunker_type: ChunkerType::Text,
|
|
441
|
-
};
|
|
442
|
-
let text = "abcdefghijklmnopqrstuvwxyz0123456789";
|
|
443
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
444
|
-
assert!(result.chunk_count >= 2);
|
|
445
|
-
|
|
446
|
-
if result.chunks.len() >= 2 {
|
|
447
|
-
let first_chunk_end = &result.chunks[0].content[result.chunks[0].content.len().saturating_sub(5)..];
|
|
448
|
-
assert!(
|
|
449
|
-
result.chunks[1].content.starts_with(first_chunk_end),
|
|
450
|
-
"Expected overlap '{}' at start of second chunk '{}'",
|
|
451
|
-
first_chunk_end,
|
|
452
|
-
result.chunks[1].content
|
|
453
|
-
);
|
|
454
|
-
}
|
|
455
|
-
}
|
|
456
|
-
|
|
457
|
-
#[test]
|
|
458
|
-
fn test_chunk_markdown_preserves_structure() {
|
|
459
|
-
let config = ChunkingConfig {
|
|
460
|
-
max_characters: 50,
|
|
461
|
-
overlap: 10,
|
|
462
|
-
trim: true,
|
|
463
|
-
chunker_type: ChunkerType::Markdown,
|
|
464
|
-
};
|
|
465
|
-
let markdown = "# Title\n\nParagraph one.\n\n## Section\n\nParagraph two.";
|
|
466
|
-
let result = chunk_text(markdown, &config, None).unwrap();
|
|
467
|
-
assert!(result.chunk_count >= 1);
|
|
468
|
-
assert!(result.chunks.iter().any(|chunk| chunk.content.contains("# Title")));
|
|
469
|
-
}
|
|
470
|
-
|
|
471
|
-
#[test]
|
|
472
|
-
fn test_chunk_markdown_with_code_blocks() {
|
|
473
|
-
let config = ChunkingConfig {
|
|
474
|
-
max_characters: 100,
|
|
475
|
-
overlap: 10,
|
|
476
|
-
trim: true,
|
|
477
|
-
chunker_type: ChunkerType::Markdown,
|
|
478
|
-
};
|
|
479
|
-
let markdown = "# Code Example\n\n```python\nprint('hello')\n```\n\nSome text after code.";
|
|
480
|
-
let result = chunk_text(markdown, &config, None).unwrap();
|
|
481
|
-
assert!(result.chunk_count >= 1);
|
|
482
|
-
assert!(result.chunks.iter().any(|chunk| chunk.content.contains("```")));
|
|
483
|
-
}
|
|
484
|
-
|
|
485
|
-
#[test]
|
|
486
|
-
fn test_chunk_markdown_with_links() {
|
|
487
|
-
let config = ChunkingConfig {
|
|
488
|
-
max_characters: 80,
|
|
489
|
-
overlap: 10,
|
|
490
|
-
trim: true,
|
|
491
|
-
chunker_type: ChunkerType::Markdown,
|
|
492
|
-
};
|
|
493
|
-
let markdown = "Check out [this link](https://example.com) for more info.";
|
|
494
|
-
let result = chunk_text(markdown, &config, None).unwrap();
|
|
495
|
-
assert_eq!(result.chunk_count, 1);
|
|
496
|
-
assert!(result.chunks[0].content.contains("[this link]"));
|
|
497
|
-
}
|
|
498
|
-
|
|
499
|
-
#[test]
|
|
500
|
-
fn test_chunk_text_with_trim() {
|
|
501
|
-
let config = ChunkingConfig {
|
|
502
|
-
max_characters: 30,
|
|
503
|
-
overlap: 5,
|
|
504
|
-
trim: true,
|
|
505
|
-
chunker_type: ChunkerType::Text,
|
|
506
|
-
};
|
|
507
|
-
let text = " Leading and trailing spaces should be trimmed ";
|
|
508
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
509
|
-
assert!(result.chunk_count >= 1);
|
|
510
|
-
assert!(result.chunks.iter().all(|chunk| !chunk.content.starts_with(' ')));
|
|
511
|
-
}
|
|
512
|
-
|
|
513
|
-
#[test]
|
|
514
|
-
fn test_chunk_text_without_trim() {
|
|
515
|
-
let config = ChunkingConfig {
|
|
516
|
-
max_characters: 30,
|
|
517
|
-
overlap: 5,
|
|
518
|
-
trim: false,
|
|
519
|
-
chunker_type: ChunkerType::Text,
|
|
520
|
-
};
|
|
521
|
-
let text = " Text with spaces ";
|
|
522
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
523
|
-
assert_eq!(result.chunk_count, 1);
|
|
524
|
-
assert!(result.chunks[0].content.starts_with(' ') || result.chunks[0].content.len() < text.len());
|
|
525
|
-
}
|
|
526
|
-
|
|
527
|
-
#[test]
|
|
528
|
-
fn test_chunk_with_invalid_overlap() {
|
|
529
|
-
let config = ChunkingConfig {
|
|
530
|
-
max_characters: 10,
|
|
531
|
-
overlap: 20,
|
|
532
|
-
trim: true,
|
|
533
|
-
chunker_type: ChunkerType::Text,
|
|
534
|
-
};
|
|
535
|
-
let result = chunk_text("Some text", &config, None);
|
|
536
|
-
assert!(result.is_err());
|
|
537
|
-
let err = result.unwrap_err();
|
|
538
|
-
assert!(matches!(err, KreuzbergError::Validation { .. }));
|
|
539
|
-
}
|
|
540
|
-
|
|
541
|
-
#[test]
|
|
542
|
-
fn test_chunk_text_with_type_text() {
|
|
543
|
-
let result = chunk_text_with_type("Simple text", 50, 10, true, ChunkerType::Text).unwrap();
|
|
544
|
-
assert_eq!(result.chunk_count, 1);
|
|
545
|
-
assert_eq!(result.chunks[0].content, "Simple text");
|
|
546
|
-
}
|
|
547
|
-
|
|
548
|
-
#[test]
|
|
549
|
-
fn test_chunk_text_with_type_markdown() {
|
|
550
|
-
let markdown = "# Header\n\nContent here.";
|
|
551
|
-
let result = chunk_text_with_type(markdown, 50, 10, true, ChunkerType::Markdown).unwrap();
|
|
552
|
-
assert_eq!(result.chunk_count, 1);
|
|
553
|
-
assert!(result.chunks[0].content.contains("# Header"));
|
|
554
|
-
}
|
|
555
|
-
|
|
556
|
-
#[test]
|
|
557
|
-
fn test_chunk_texts_batch_empty() {
|
|
558
|
-
let config = ChunkingConfig::default();
|
|
559
|
-
let texts: Vec<&str> = vec![];
|
|
560
|
-
let results = chunk_texts_batch(&texts, &config).unwrap();
|
|
561
|
-
assert_eq!(results.len(), 0);
|
|
562
|
-
}
|
|
563
|
-
|
|
564
|
-
#[test]
|
|
565
|
-
fn test_chunk_texts_batch_multiple() {
|
|
566
|
-
let config = ChunkingConfig {
|
|
567
|
-
max_characters: 30,
|
|
568
|
-
overlap: 5,
|
|
569
|
-
trim: true,
|
|
570
|
-
chunker_type: ChunkerType::Text,
|
|
571
|
-
};
|
|
572
|
-
let texts = vec!["First text", "Second text", "Third text"];
|
|
573
|
-
let results = chunk_texts_batch(&texts, &config).unwrap();
|
|
574
|
-
assert_eq!(results.len(), 3);
|
|
575
|
-
assert!(results.iter().all(|r| r.chunk_count >= 1));
|
|
576
|
-
}
|
|
577
|
-
|
|
578
|
-
#[test]
|
|
579
|
-
fn test_chunk_texts_batch_mixed_lengths() {
|
|
580
|
-
let config = ChunkingConfig {
|
|
581
|
-
max_characters: 20,
|
|
582
|
-
overlap: 5,
|
|
583
|
-
trim: true,
|
|
584
|
-
chunker_type: ChunkerType::Text,
|
|
585
|
-
};
|
|
586
|
-
let texts = vec![
|
|
587
|
-
"Short",
|
|
588
|
-
"This is a longer text that should be split into multiple chunks",
|
|
589
|
-
"",
|
|
590
|
-
];
|
|
591
|
-
let results = chunk_texts_batch(&texts, &config).unwrap();
|
|
592
|
-
assert_eq!(results.len(), 3);
|
|
593
|
-
assert_eq!(results[0].chunk_count, 1);
|
|
594
|
-
assert!(results[1].chunk_count > 1);
|
|
595
|
-
assert_eq!(results[2].chunk_count, 0);
|
|
596
|
-
}
|
|
597
|
-
|
|
598
|
-
#[test]
|
|
599
|
-
fn test_chunk_texts_batch_error_propagation() {
|
|
600
|
-
let config = ChunkingConfig {
|
|
601
|
-
max_characters: 10,
|
|
602
|
-
overlap: 20,
|
|
603
|
-
trim: true,
|
|
604
|
-
chunker_type: ChunkerType::Text,
|
|
605
|
-
};
|
|
606
|
-
let texts = vec!["Text one", "Text two"];
|
|
607
|
-
let result = chunk_texts_batch(&texts, &config);
|
|
608
|
-
assert!(result.is_err());
|
|
609
|
-
}
|
|
610
|
-
|
|
611
|
-
#[test]
|
|
612
|
-
fn test_chunking_config_default() {
|
|
613
|
-
let config = ChunkingConfig::default();
|
|
614
|
-
assert_eq!(config.max_characters, 2000);
|
|
615
|
-
assert_eq!(config.overlap, 100);
|
|
616
|
-
assert!(config.trim);
|
|
617
|
-
assert_eq!(config.chunker_type, ChunkerType::Text);
|
|
618
|
-
}
|
|
619
|
-
|
|
620
|
-
#[test]
|
|
621
|
-
fn test_chunk_very_long_text() {
|
|
622
|
-
let config = ChunkingConfig {
|
|
623
|
-
max_characters: 100,
|
|
624
|
-
overlap: 20,
|
|
625
|
-
trim: true,
|
|
626
|
-
chunker_type: ChunkerType::Text,
|
|
627
|
-
};
|
|
628
|
-
let text = "a".repeat(1000);
|
|
629
|
-
let result = chunk_text(&text, &config, None).unwrap();
|
|
630
|
-
assert!(result.chunk_count >= 10);
|
|
631
|
-
assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 100));
|
|
632
|
-
}
|
|
633
|
-
|
|
634
|
-
#[test]
|
|
635
|
-
fn test_chunk_text_with_newlines() {
|
|
636
|
-
let config = ChunkingConfig {
|
|
637
|
-
max_characters: 30,
|
|
638
|
-
overlap: 5,
|
|
639
|
-
trim: true,
|
|
640
|
-
chunker_type: ChunkerType::Text,
|
|
641
|
-
};
|
|
642
|
-
let text = "Line one\nLine two\nLine three\nLine four\nLine five";
|
|
643
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
644
|
-
assert!(result.chunk_count >= 1);
|
|
645
|
-
}
|
|
646
|
-
|
|
647
|
-
#[test]
|
|
648
|
-
fn test_chunk_markdown_with_lists() {
|
|
649
|
-
let config = ChunkingConfig {
|
|
650
|
-
max_characters: 100,
|
|
651
|
-
overlap: 10,
|
|
652
|
-
trim: true,
|
|
653
|
-
chunker_type: ChunkerType::Markdown,
|
|
654
|
-
};
|
|
655
|
-
let markdown = "# List Example\n\n- Item 1\n- Item 2\n- Item 3\n\nMore text.";
|
|
656
|
-
let result = chunk_text(markdown, &config, None).unwrap();
|
|
657
|
-
assert!(result.chunk_count >= 1);
|
|
658
|
-
assert!(result.chunks.iter().any(|chunk| chunk.content.contains("- Item")));
|
|
659
|
-
}
|
|
660
|
-
|
|
661
|
-
#[test]
|
|
662
|
-
fn test_chunk_markdown_with_tables() {
|
|
663
|
-
let config = ChunkingConfig {
|
|
664
|
-
max_characters: 150,
|
|
665
|
-
overlap: 10,
|
|
666
|
-
trim: true,
|
|
667
|
-
chunker_type: ChunkerType::Markdown,
|
|
668
|
-
};
|
|
669
|
-
let markdown = "# Table\n\n| Col1 | Col2 |\n|------|------|\n| A | B |\n| C | D |";
|
|
670
|
-
let result = chunk_text(markdown, &config, None).unwrap();
|
|
671
|
-
assert!(result.chunk_count >= 1);
|
|
672
|
-
assert!(result.chunks.iter().any(|chunk| chunk.content.contains("|")));
|
|
673
|
-
}
|
|
674
|
-
|
|
675
|
-
#[test]
|
|
676
|
-
fn test_chunk_special_characters() {
|
|
677
|
-
let config = ChunkingConfig {
|
|
678
|
-
max_characters: 50,
|
|
679
|
-
overlap: 5,
|
|
680
|
-
trim: true,
|
|
681
|
-
chunker_type: ChunkerType::Text,
|
|
682
|
-
};
|
|
683
|
-
let text = "Special chars: @#$%^&*()[]{}|\\<>?/~`";
|
|
684
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
685
|
-
assert_eq!(result.chunk_count, 1);
|
|
686
|
-
assert!(result.chunks[0].content.contains("@#$%"));
|
|
687
|
-
}
|
|
688
|
-
|
|
689
|
-
#[test]
|
|
690
|
-
fn test_chunk_unicode_characters() {
|
|
691
|
-
let config = ChunkingConfig {
|
|
692
|
-
max_characters: 50,
|
|
693
|
-
overlap: 5,
|
|
694
|
-
trim: true,
|
|
695
|
-
chunker_type: ChunkerType::Text,
|
|
696
|
-
};
|
|
697
|
-
let text = "Unicode: 你好世界 🌍 café résumé";
|
|
698
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
699
|
-
assert_eq!(result.chunk_count, 1);
|
|
700
|
-
assert!(result.chunks[0].content.contains("你好"));
|
|
701
|
-
assert!(result.chunks[0].content.contains("🌍"));
|
|
702
|
-
}
|
|
703
|
-
|
|
704
|
-
#[test]
|
|
705
|
-
fn test_chunk_cjk_text() {
|
|
706
|
-
let config = ChunkingConfig {
|
|
707
|
-
max_characters: 30,
|
|
708
|
-
overlap: 5,
|
|
709
|
-
trim: true,
|
|
710
|
-
chunker_type: ChunkerType::Text,
|
|
711
|
-
};
|
|
712
|
-
let text = "日本語のテキストです。これは長い文章で、複数のチャンクに分割されるべきです。";
|
|
713
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
714
|
-
assert!(result.chunk_count >= 1);
|
|
715
|
-
}
|
|
716
|
-
|
|
717
|
-
#[test]
|
|
718
|
-
fn test_chunk_mixed_languages() {
|
|
719
|
-
let config = ChunkingConfig {
|
|
720
|
-
max_characters: 40,
|
|
721
|
-
overlap: 5,
|
|
722
|
-
trim: true,
|
|
723
|
-
chunker_type: ChunkerType::Text,
|
|
724
|
-
};
|
|
725
|
-
let text = "English text mixed with 中文文本 and some français";
|
|
726
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
727
|
-
assert!(result.chunk_count >= 1);
|
|
728
|
-
}
|
|
729
|
-
|
|
730
|
-
#[test]
|
|
731
|
-
fn test_chunk_offset_calculation_with_overlap() {
|
|
732
|
-
let config = ChunkingConfig {
|
|
733
|
-
max_characters: 20,
|
|
734
|
-
overlap: 5,
|
|
735
|
-
trim: false,
|
|
736
|
-
chunker_type: ChunkerType::Text,
|
|
737
|
-
};
|
|
738
|
-
let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
|
|
739
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
740
|
-
|
|
741
|
-
assert!(result.chunks.len() >= 2, "Expected at least 2 chunks");
|
|
742
|
-
|
|
743
|
-
for i in 0..result.chunks.len() {
|
|
744
|
-
let chunk = &result.chunks[i];
|
|
745
|
-
let metadata = &chunk.metadata;
|
|
746
|
-
|
|
747
|
-
assert_eq!(
|
|
748
|
-
metadata.byte_end - metadata.byte_start,
|
|
749
|
-
chunk.content.len(),
|
|
750
|
-
"Chunk {} offset range doesn't match content length",
|
|
751
|
-
i
|
|
752
|
-
);
|
|
753
|
-
|
|
754
|
-
assert_eq!(metadata.chunk_index, i);
|
|
755
|
-
assert_eq!(metadata.total_chunks, result.chunks.len());
|
|
756
|
-
}
|
|
757
|
-
|
|
758
|
-
for i in 0..result.chunks.len() - 1 {
|
|
759
|
-
let current_chunk = &result.chunks[i];
|
|
760
|
-
let next_chunk = &result.chunks[i + 1];
|
|
761
|
-
|
|
762
|
-
assert!(
|
|
763
|
-
next_chunk.metadata.byte_start < current_chunk.metadata.byte_end,
|
|
764
|
-
"Chunk {} and {} don't overlap: next starts at {} but current ends at {}",
|
|
765
|
-
i,
|
|
766
|
-
i + 1,
|
|
767
|
-
next_chunk.metadata.byte_start,
|
|
768
|
-
current_chunk.metadata.byte_end
|
|
769
|
-
);
|
|
770
|
-
|
|
771
|
-
let overlap_size = current_chunk.metadata.byte_end - next_chunk.metadata.byte_start;
|
|
772
|
-
assert!(
|
|
773
|
-
overlap_size <= config.overlap + 10,
|
|
774
|
-
"Overlap between chunks {} and {} is too large: {}",
|
|
775
|
-
i,
|
|
776
|
-
i + 1,
|
|
777
|
-
overlap_size
|
|
778
|
-
);
|
|
779
|
-
}
|
|
780
|
-
}
|
|
781
|
-
|
|
782
|
-
#[test]
|
|
783
|
-
fn test_chunk_offset_calculation_without_overlap() {
|
|
784
|
-
let config = ChunkingConfig {
|
|
785
|
-
max_characters: 20,
|
|
786
|
-
overlap: 0,
|
|
787
|
-
trim: false,
|
|
788
|
-
chunker_type: ChunkerType::Text,
|
|
789
|
-
};
|
|
790
|
-
let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
|
|
791
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
792
|
-
|
|
793
|
-
for i in 0..result.chunks.len() - 1 {
|
|
794
|
-
let current_chunk = &result.chunks[i];
|
|
795
|
-
let next_chunk = &result.chunks[i + 1];
|
|
796
|
-
|
|
797
|
-
assert!(
|
|
798
|
-
next_chunk.metadata.byte_start >= current_chunk.metadata.byte_end,
|
|
799
|
-
"Chunk {} and {} overlap when they shouldn't: next starts at {} but current ends at {}",
|
|
800
|
-
i,
|
|
801
|
-
i + 1,
|
|
802
|
-
next_chunk.metadata.byte_start,
|
|
803
|
-
current_chunk.metadata.byte_end
|
|
804
|
-
);
|
|
805
|
-
}
|
|
806
|
-
}
|
|
807
|
-
|
|
808
|
-
#[test]
|
|
809
|
-
fn test_chunk_offset_covers_full_text() {
|
|
810
|
-
let config = ChunkingConfig {
|
|
811
|
-
max_characters: 15,
|
|
812
|
-
overlap: 3,
|
|
813
|
-
trim: false,
|
|
814
|
-
chunker_type: ChunkerType::Text,
|
|
815
|
-
};
|
|
816
|
-
let text = "0123456789 ABCDEFGHIJ KLMNOPQRST UVWXYZ";
|
|
817
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
818
|
-
|
|
819
|
-
assert!(result.chunks.len() >= 2, "Expected multiple chunks");
|
|
820
|
-
|
|
821
|
-
assert_eq!(
|
|
822
|
-
result.chunks[0].metadata.byte_start, 0,
|
|
823
|
-
"First chunk should start at position 0"
|
|
824
|
-
);
|
|
825
|
-
|
|
826
|
-
for i in 0..result.chunks.len() - 1 {
|
|
827
|
-
let current_chunk = &result.chunks[i];
|
|
828
|
-
let next_chunk = &result.chunks[i + 1];
|
|
829
|
-
|
|
830
|
-
assert!(
|
|
831
|
-
next_chunk.metadata.byte_start <= current_chunk.metadata.byte_end,
|
|
832
|
-
"Gap detected between chunk {} (ends at {}) and chunk {} (starts at {})",
|
|
833
|
-
i,
|
|
834
|
-
current_chunk.metadata.byte_end,
|
|
835
|
-
i + 1,
|
|
836
|
-
next_chunk.metadata.byte_start
|
|
837
|
-
);
|
|
838
|
-
}
|
|
839
|
-
}
|
|
840
|
-
|
|
841
|
-
#[test]
|
|
842
|
-
fn test_chunk_offset_with_various_overlap_sizes() {
|
|
843
|
-
for overlap in [0, 5, 10, 20] {
|
|
844
|
-
let config = ChunkingConfig {
|
|
845
|
-
max_characters: 30,
|
|
846
|
-
overlap,
|
|
847
|
-
trim: false,
|
|
848
|
-
chunker_type: ChunkerType::Text,
|
|
849
|
-
};
|
|
850
|
-
let text = "Word ".repeat(30);
|
|
851
|
-
let result = chunk_text(&text, &config, None).unwrap();
|
|
852
|
-
|
|
853
|
-
for chunk in &result.chunks {
|
|
854
|
-
assert!(
|
|
855
|
-
chunk.metadata.byte_end > chunk.metadata.byte_start,
|
|
856
|
-
"Invalid offset range for overlap {}: start={}, end={}",
|
|
857
|
-
overlap,
|
|
858
|
-
chunk.metadata.byte_start,
|
|
859
|
-
chunk.metadata.byte_end
|
|
860
|
-
);
|
|
861
|
-
}
|
|
862
|
-
|
|
863
|
-
for chunk in &result.chunks {
|
|
864
|
-
assert!(
|
|
865
|
-
chunk.metadata.byte_start < text.len(),
|
|
866
|
-
"char_start with overlap {} is out of bounds: {}",
|
|
867
|
-
overlap,
|
|
868
|
-
chunk.metadata.byte_start
|
|
869
|
-
);
|
|
870
|
-
}
|
|
871
|
-
}
|
|
872
|
-
}
|
|
873
|
-
|
|
874
|
-
#[test]
|
|
875
|
-
fn test_chunk_last_chunk_offset() {
|
|
876
|
-
let config = ChunkingConfig {
|
|
877
|
-
max_characters: 20,
|
|
878
|
-
overlap: 5,
|
|
879
|
-
trim: false,
|
|
880
|
-
chunker_type: ChunkerType::Text,
|
|
881
|
-
};
|
|
882
|
-
let text = "AAAAA BBBBB CCCCC DDDDD EEEEE";
|
|
883
|
-
let result = chunk_text(text, &config, None).unwrap();
|
|
884
|
-
|
|
885
|
-
assert!(result.chunks.len() >= 2, "Need multiple chunks for this test");
|
|
886
|
-
|
|
887
|
-
let last_chunk = result.chunks.last().unwrap();
|
|
888
|
-
let second_to_last = &result.chunks[result.chunks.len() - 2];
|
|
889
|
-
|
|
890
|
-
assert!(
|
|
891
|
-
last_chunk.metadata.byte_start < second_to_last.metadata.byte_end,
|
|
892
|
-
"Last chunk should overlap with previous chunk"
|
|
893
|
-
);
|
|
894
|
-
|
|
895
|
-
let expected_end = text.len();
|
|
896
|
-
let last_chunk_covers_end =
|
|
897
|
-
last_chunk.content.trim_end() == text.trim_end() || last_chunk.metadata.byte_end >= expected_end - 5;
|
|
898
|
-
assert!(last_chunk_covers_end, "Last chunk should cover the end of the text");
|
|
899
|
-
}
|
|
900
|
-
|
|
901
|
-
#[test]
|
|
902
|
-
-    fn test_chunk_with_page_boundaries() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Page one content here. Page two starts here and continues.";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 21,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 22,
-                byte_end: 58,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-        assert!(result.chunks.len() >= 2);
-
-        assert_eq!(result.chunks[0].metadata.first_page, Some(1));
-
-        let last_chunk = result.chunks.last().unwrap();
-        assert_eq!(last_chunk.metadata.last_page, Some(2));
-    }
-
-    #[test]
-    fn test_chunk_without_page_boundaries() {
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "This is some test content that should be split into multiple chunks.";
-
-        let result = chunk_text(text, &config, None).unwrap();
-        assert!(result.chunks.len() >= 2);
-
-        for chunk in &result.chunks {
-            assert_eq!(chunk.metadata.first_page, None);
-            assert_eq!(chunk.metadata.last_page, None);
-        }
-    }
-
-    #[test]
-    fn test_chunk_empty_boundaries() {
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Some text content here.";
-        let boundaries: Vec<PageBoundary> = vec![];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-        assert_eq!(result.chunks.len(), 1);
-
-        assert_eq!(result.chunks[0].metadata.first_page, None);
-        assert_eq!(result.chunks[0].metadata.last_page, None);
-    }
-
-    #[test]
-    fn test_chunk_spanning_multiple_pages() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 50,
-            overlap: 5,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "0123456789 AAAAAAAAAA 1111111111 BBBBBBBBBB 2222222222";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 20,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 20,
-                byte_end: 40,
-                page_number: 2,
-            },
-            PageBoundary {
-                byte_start: 40,
-                byte_end: 54,
-                page_number: 3,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-        assert!(result.chunks.len() >= 2);
-
-        for chunk in &result.chunks {
-            assert!(chunk.metadata.first_page.is_some() || chunk.metadata.last_page.is_some());
-        }
-    }
-
-    #[test]
-    fn test_chunk_text_with_invalid_boundary_range() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Page one content here. Page two content.";
-
-        let boundaries = vec![PageBoundary {
-            byte_start: 10,
-            byte_end: 5,
-            page_number: 1,
-        }];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("Invalid boundary range"));
-        assert!(err.to_string().contains("byte_start"));
-    }
-
-    #[test]
-    fn test_chunk_text_with_unsorted_boundaries() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Page one content here. Page two content.";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 22,
-                byte_end: 40,
-                page_number: 2,
-            },
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 21,
-                page_number: 1,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("not sorted"));
-        assert!(err.to_string().contains("boundaries"));
-    }
-
-    #[test]
-    fn test_chunk_text_with_overlapping_boundaries() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Page one content here. Page two content.";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 25,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 20,
-                byte_end: 40,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("Overlapping"));
-        assert!(err.to_string().contains("boundaries"));
-    }
-
-    #[test]
-    fn test_calculate_page_range_with_invalid_boundaries() {
-        use crate::types::PageBoundary;
-
-        let boundaries = vec![PageBoundary {
-            byte_start: 15,
-            byte_end: 10,
-            page_number: 1,
-        }];
-
-        let result = calculate_page_range(0, 20, &boundaries);
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("Invalid boundary range"));
-    }
-
-    #[test]
-    fn test_validate_page_boundaries_valid() {
-        use crate::types::PageBoundary;
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 20,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 20,
-                byte_end: 40,
-                page_number: 2,
-            },
-            PageBoundary {
-                byte_start: 40,
-                byte_end: 60,
-                page_number: 3,
-            },
-        ];
-
-        let result = chunk_text(
-            "x".repeat(60).as_str(),
-            &ChunkingConfig {
-                max_characters: 30,
-                overlap: 5,
-                trim: false,
-                chunker_type: ChunkerType::Text,
-            },
-            Some(&boundaries),
-        );
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_page_boundaries_empty() {
-        let boundaries: Vec<PageBoundary> = vec![];
-        let result = chunk_text(
-            "Some test text",
-            &ChunkingConfig {
-                max_characters: 30,
-                overlap: 5,
-                trim: true,
-                chunker_type: ChunkerType::Text,
-            },
-            Some(&boundaries),
-        );
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_page_boundaries_with_gaps() {
-        use crate::types::PageBoundary;
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 10,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 15,
-                byte_end: 25,
-                page_number: 2,
-            },
-        ];
-
-        let text = "0123456789XXXXX0123456789";
-        let result = chunk_text(
-            text,
-            &ChunkingConfig {
-                max_characters: 30,
-                overlap: 5,
-                trim: false,
-                chunker_type: ChunkerType::Text,
-            },
-            Some(&boundaries),
-        );
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_chunk_with_same_start_and_end() {
-        use crate::types::PageBoundary;
-
-        let boundaries = vec![PageBoundary {
-            byte_start: 10,
-            byte_end: 10,
-            page_number: 1,
-        }];
-
-        let result = chunk_text(
-            "test content here",
-            &ChunkingConfig {
-                max_characters: 30,
-                overlap: 5,
-                trim: true,
-                chunker_type: ChunkerType::Text,
-            },
-            Some(&boundaries),
-        );
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("Invalid boundary range"));
-    }
-
-    #[test]
-    fn test_multiple_overlapping_errors() {
-        use crate::types::PageBoundary;
-
-        let text = "This is a longer test content string that spans more bytes";
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 20,
-                byte_end: 40,
-                page_number: 2,
-            },
-            PageBoundary {
-                byte_start: 10,
-                byte_end: 35,
-                page_number: 1,
-            },
-        ];
-
-        let result = chunk_text(
-            text,
-            &ChunkingConfig {
-                max_characters: 30,
-                overlap: 5,
-                trim: true,
-                chunker_type: ChunkerType::Text,
-            },
-            Some(&boundaries),
-        );
-        assert!(result.is_err());
-        assert!(result.unwrap_err().to_string().contains("not sorted"));
-    }
-
-    #[test]
-    fn test_chunk_with_pages_basic() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 25,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "First page content here.Second page content here.Third page.";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 24,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 24,
-                byte_end: 50,
-                page_number: 2,
-            },
-            PageBoundary {
-                byte_start: 50,
-                byte_end: 60,
-                page_number: 3,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-
-        if !result.chunks.is_empty() {
-            assert!(result.chunks[0].metadata.first_page.is_some());
-        }
-    }
-
-    #[test]
-    fn test_chunk_with_pages_single_page_chunk() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 100,
-            overlap: 10,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "All content on single page fits in one chunk.";
-
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: 45,
-            page_number: 1,
-        }];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-        assert_eq!(result.chunks.len(), 1);
-        assert_eq!(result.chunks[0].metadata.first_page, Some(1));
-        assert_eq!(result.chunks[0].metadata.last_page, Some(1));
-    }
-
-    #[test]
-    fn test_chunk_with_pages_no_overlap() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 20,
-            overlap: 0,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "AAAAA BBBBB CCCCC DDDDD";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 11,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 11,
-                byte_end: 23,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-        assert!(!result.chunks.is_empty());
-
-        for chunk in &result.chunks {
-            if let (Some(first), Some(last)) = (chunk.metadata.first_page, chunk.metadata.last_page) {
-                assert!(first <= last);
-            }
-        }
-    }
-
-    #[test]
-    fn test_calculate_page_range_within_page() {
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 100,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 100,
-                byte_end: 200,
-                page_number: 2,
-            },
-        ];
-
-        let (first, last) = calculate_page_range(10, 50, &boundaries).unwrap();
-        assert_eq!(first, Some(1));
-        assert_eq!(last, Some(1));
-    }
-
-    #[test]
-    fn test_calculate_page_range_spanning_pages() {
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 100,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 100,
-                byte_end: 200,
-                page_number: 2,
-            },
-        ];
-
-        let (first, last) = calculate_page_range(50, 150, &boundaries).unwrap();
-        assert_eq!(first, Some(1));
-        assert_eq!(last, Some(2));
-    }
-
-    #[test]
-    fn test_calculate_page_range_empty_boundaries() {
-        let boundaries: Vec<PageBoundary> = vec![];
-
-        let (first, last) = calculate_page_range(0, 50, &boundaries).unwrap();
-        assert_eq!(first, None);
-        assert_eq!(last, None);
-    }
-
-    #[test]
-    fn test_calculate_page_range_no_overlap() {
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 100,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 100,
-                byte_end: 200,
-                page_number: 2,
-            },
-        ];
-
-        let (first, last) = calculate_page_range(200, 250, &boundaries).unwrap();
-        assert_eq!(first, None);
-        assert_eq!(last, None);
-    }
-
-    #[test]
-    fn test_calculate_page_range_three_pages() {
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 100,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 100,
-                byte_end: 200,
-                page_number: 2,
-            },
-            PageBoundary {
-                byte_start: 200,
-                byte_end: 300,
-                page_number: 3,
-            },
-        ];
-
-        let (first, last) = calculate_page_range(50, 250, &boundaries).unwrap();
-        assert_eq!(first, Some(1));
-        assert_eq!(last, Some(3));
-    }
-
-    #[test]
-    fn test_chunk_metadata_page_range_accuracy() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Page One Content Here.Page Two.";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 21,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 21,
-                byte_end: 31,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-
-        for chunk in &result.chunks {
-            assert_eq!(chunk.metadata.byte_end - chunk.metadata.byte_start, chunk.content.len());
-        }
-    }
-
-    #[test]
-    fn test_chunk_page_range_boundary_edge_cases() {
-        use crate::types::PageBoundary;
-
-        let config = ChunkingConfig {
-            max_characters: 10,
-            overlap: 2,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "0123456789ABCDEFGHIJ";
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 10,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 10,
-                byte_end: 20,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
-
-        for chunk in &result.chunks {
-            let on_page1 = chunk.metadata.byte_start < 10;
-            let on_page2 = chunk.metadata.byte_end > 10;
-
-            if on_page1 && on_page2 {
-                assert_eq!(chunk.metadata.first_page, Some(1));
-                assert_eq!(chunk.metadata.last_page, Some(2));
-            } else if on_page1 {
-                assert_eq!(chunk.metadata.first_page, Some(1));
-            } else if on_page2 {
-                assert_eq!(chunk.metadata.first_page, Some(2));
-            }
-        }
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_valid_ascii() {
-        use crate::types::PageBoundary;
-
-        let text = "This is ASCII text.";
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 10,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 10,
-                byte_end: 19,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &ChunkingConfig::default(), Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_valid_emoji() {
-        use crate::types::PageBoundary;
-
-        let text = "Hello 👋 World 🌍 End";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 11,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 11,
-                byte_end: 25,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_valid_cjk() {
-        use crate::types::PageBoundary;
-
-        let text = "你好世界 こんにちは 안녕하세요";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 13,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 13,
-                byte_end: 44,
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_invalid_mid_emoji() {
-        use crate::types::PageBoundary;
-
-        let text = "Hello 👋 World";
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: 7,
-            page_number: 1,
-        }];
-
-        let config = ChunkingConfig::default();
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("UTF-8 character boundary"));
-        assert!(err.to_string().contains("byte_end=7"));
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_invalid_mid_multibyte_cjk() {
-        use crate::types::PageBoundary;
-
-        let text = "中文文本";
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: 1,
-            page_number: 1,
-        }];
-
-        let config = ChunkingConfig::default();
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("UTF-8 character boundary"));
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_byte_start_exceeds_length() {
-        use crate::types::PageBoundary;
-
-        let text = "Short";
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 3,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 10,
-                byte_end: 15,
-                page_number: 2,
-            },
-        ];
-
-        let config = ChunkingConfig::default();
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("exceeds text length"));
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_byte_end_exceeds_length() {
-        use crate::types::PageBoundary;
-
-        let text = "Short";
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: 100,
-            page_number: 1,
-        }];
-
-        let config = ChunkingConfig::default();
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(err.to_string().contains("exceeds text length"));
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_empty_boundaries() {
-        use crate::types::PageBoundary;
-
-        let text = "Some text";
-        let boundaries: Vec<PageBoundary> = vec![];
-
-        let config = ChunkingConfig::default();
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_at_text_boundaries() {
-        use crate::types::PageBoundary;
-
-        let text = "Exact boundary test";
-        let text_len = text.len();
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: text_len,
-            page_number: 1,
-        }];
-
-        let config = ChunkingConfig::default();
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_mixed_languages() {
-        use crate::types::PageBoundary;
-
-        let text = "English text mixed with 中文 and français";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 24,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 24,
-                byte_end: text.len(),
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_chunk_text_rejects_invalid_utf8_boundaries() {
-        use crate::types::PageBoundary;
-
-        let text = "🌍🌎🌏 Three emoji planets";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: 1000,
-            page_number: 1,
-        }];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_combining_diacriticals() {
-        use crate::types::PageBoundary;
-
-        let text = "café";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 2,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 2,
-                byte_end: text.len(),
-                page_number: 2,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_error_messages_are_clear() {
-        use crate::types::PageBoundary;
-
-        let text = "Test 👋 text";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: 6,
-            page_number: 1,
-        }];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        let err_msg = err.to_string();
-        assert!(err_msg.contains("UTF-8"));
-        assert!(err_msg.contains("boundary"));
-        assert!(err_msg.contains("6"));
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_multiple_valid_boundaries() {
-        use crate::types::PageBoundary;
-
-        let text = "First👋Second🌍Third";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![
-            PageBoundary {
-                byte_start: 0,
-                byte_end: 5,
-                page_number: 1,
-            },
-            PageBoundary {
-                byte_start: 5,
-                byte_end: 9,
-                page_number: 2,
-            },
-            PageBoundary {
-                byte_start: 9,
-                byte_end: 15,
-                page_number: 3,
-            },
-            PageBoundary {
-                byte_start: 15,
-                byte_end: 19,
-                page_number: 4,
-            },
-            PageBoundary {
-                byte_start: 19,
-                byte_end: text.len(),
-                page_number: 5,
-            },
-        ];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_ok());
-    }
-
-    #[test]
-    fn test_validate_utf8_boundaries_zero_start_and_end() {
-        use crate::types::PageBoundary;
-
-        let text = "Text";
-        let config = ChunkingConfig::default();
-
-        let boundaries = vec![PageBoundary {
-            byte_start: 0,
-            byte_end: 0,
-            page_number: 1,
-        }];
-
-        let result = chunk_text(text, &config, Some(&boundaries));
-        assert!(result.is_err());
-    }
-}
-
-/// Lazy-initialized flag that ensures chunking processor is registered exactly once.
-///
-/// This static is accessed on first use to automatically register the
-/// chunking processor with the plugin registry.
-static PROCESSOR_INITIALIZED: Lazy<Result<()>> = Lazy::new(register_chunking_processor);
-
-/// Ensure the chunking processor is registered.
-///
-/// This function is called automatically when needed.
-/// It's safe to call multiple times - registration only happens once.
-pub fn ensure_initialized() -> Result<()> {
-    PROCESSOR_INITIALIZED
-        .as_ref()
-        .map(|_| ())
-        .map_err(|e| crate::KreuzbergError::Plugin {
-            message: format!("Failed to register chunking processor: {}", e),
-            plugin_name: "text-chunking".to_string(),
-        })
-}
-
-/// Register the chunking processor with the global registry.
-///
-/// This function should be called once at application startup to register
-/// the chunking post-processor.
-///
-/// **Note:** This is called automatically on first use.
-/// Explicit calling is optional.
-pub fn register_chunking_processor() -> Result<()> {
-    let registry = crate::plugins::registry::get_post_processor_registry();
-    let mut registry = registry
-        .write()
-        .map_err(|e| crate::KreuzbergError::Other(format!("Post-processor registry lock poisoned: {}", e)))?;
-
-    registry.register(Arc::new(ChunkingProcessor), 50)?;
-
-    Ok(())
-}
+//! Text chunking utilities.
+//!
+//! This module provides text chunking functionality using the `text-splitter` library.
+//! It splits long text into smaller chunks while preserving semantic boundaries.
+//!
+//! # Features
+//!
+//! - **Smart splitting**: Respects word and sentence boundaries
+//! - **Markdown-aware**: Preserves Markdown structure (headings, code blocks, lists)
+//! - **Configurable overlap**: Overlap chunks to maintain context
+//! - **Unicode support**: Handles CJK characters and emojis correctly
+//! - **Batch processing**: Process multiple texts efficiently
+//!
+//! # Chunker Types
+//!
+//! - **Text**: Generic text splitter, splits on whitespace and punctuation
+//! - **Markdown**: Markdown-aware splitter, preserves formatting and structure
+//!
+//! # Example
+//!
+//! ```rust
+//! use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
+//!
+//! # fn example() -> kreuzberg::Result<()> {
+//! let config = ChunkingConfig {
+//!     max_characters: 500,
+//!     overlap: 50,
+//!     trim: true,
+//!     chunker_type: ChunkerType::Text,
+//! };
+//!
+//! let long_text = "This is a very long document...".repeat(100);
+//! let result = chunk_text(&long_text, &config, None)?;
+//!
+//! println!("Split into {} chunks", result.chunk_count);
+//! for (i, chunk) in result.chunks.iter().enumerate() {
+//!     println!("Chunk {}: {} chars", i + 1, chunk.content.len());
+//! }
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! # Use Cases
+//!
+//! - Splitting documents for LLM context windows
+//! - Creating overlapping chunks for semantic search
+//! - Processing large documents in batches
+//! - Maintaining context across chunk boundaries
+use crate::error::{KreuzbergError, Result};
+use crate::types::{Chunk, ChunkMetadata, PageBoundary};
+use once_cell::sync::Lazy;
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+use text_splitter::{Characters, ChunkCapacity, ChunkConfig, MarkdownSplitter, TextSplitter};
+
+pub mod processor;
+pub use processor::ChunkingProcessor;
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum ChunkerType {
+    Text,
+    Markdown,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ChunkingResult {
+    pub chunks: Vec<Chunk>,
+    pub chunk_count: usize,
+}
+
+pub struct ChunkingConfig {
+    pub max_characters: usize,
+    pub overlap: usize,
+    pub trim: bool,
+    pub chunker_type: ChunkerType,
+}
+
+impl Default for ChunkingConfig {
+    fn default() -> Self {
+        Self {
+            max_characters: 2000,
+            overlap: 100,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        }
+    }
+}
+
+fn build_chunk_config(max_characters: usize, overlap: usize, trim: bool) -> Result<ChunkConfig<Characters>> {
+    ChunkConfig::new(ChunkCapacity::new(max_characters))
+        .with_overlap(overlap)
+        .map(|config| config.with_trim(trim))
+        .map_err(|e| KreuzbergError::validation(format!("Invalid chunking configuration: {}", e)))
+}
+
+/// Validates that byte offsets in page boundaries fall on valid UTF-8 character boundaries.
+///
+/// This function ensures that all page boundary positions are at valid UTF-8 character
+/// boundaries within the text. This is CRITICAL to prevent text corruption when boundaries
+/// are created from language bindings or external sources, particularly with multibyte
+/// UTF-8 characters (emoji, CJK characters, combining marks, etc.).
+///
+/// # Arguments
+///
+/// * `text` - The text being chunked
+/// * `boundaries` - Page boundary markers to validate
+///
+/// # Returns
+///
+/// Returns `Ok(())` if all boundaries are at valid UTF-8 character boundaries.
+/// Returns `KreuzbergError::Validation` if any boundary is at an invalid position.
+///
+/// # UTF-8 Boundary Safety
+///
+/// Rust strings use UTF-8 encoding where characters can be 1-4 bytes. For example:
+/// - ASCII letters: 1 byte each
+/// - Emoji (🌍): 4 bytes but 1 character
+/// - CJK characters (中): 3 bytes but 1 character
+///
+/// This function checks that all byte_start and byte_end values are at character
+/// boundaries using Rust's `is_char_boundary()` method.
+fn validate_utf8_boundaries(text: &str, boundaries: &[PageBoundary]) -> Result<()> {
+    for (idx, boundary) in boundaries.iter().enumerate() {
+        if boundary.byte_start > 0 && boundary.byte_start <= text.len() {
+            if !text.is_char_boundary(boundary.byte_start) {
+                return Err(KreuzbergError::validation(format!(
+                    "Page boundary {} has byte_start={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
+                    idx,
+                    boundary.byte_start,
+                    text.len()
+                )));
+            }
+        } else if boundary.byte_start > text.len() {
+            return Err(KreuzbergError::validation(format!(
+                "Page boundary {} has byte_start={} which exceeds text length {}",
+                idx,
+                boundary.byte_start,
+                text.len()
+            )));
+        }
+
+        if boundary.byte_end > 0 && boundary.byte_end <= text.len() {
+            if !text.is_char_boundary(boundary.byte_end) {
+                return Err(KreuzbergError::validation(format!(
+                    "Page boundary {} has byte_end={} which is not a valid UTF-8 character boundary (text length={}). This may indicate corrupted multibyte characters (emoji, CJK, etc.)",
+                    idx,
+                    boundary.byte_end,
+                    text.len()
+                )));
+            }
+        } else if boundary.byte_end > text.len() {
+            return Err(KreuzbergError::validation(format!(
+                "Page boundary {} has byte_end={} which exceeds text length {}",
+                idx,
+                boundary.byte_end,
+                text.len()
+            )));
+        }
+    }
+
+    Ok(())
+}
+
+/// Validates page boundaries for consistency and correctness.
+///
+/// # Validation Rules
+///
+/// 1. Boundaries must be sorted by byte_start (monotonically increasing)
+/// 2. Boundaries must not overlap (byte_end[i] <= byte_start[i+1])
+/// 3. Each boundary must have byte_start < byte_end
+///
+/// # Errors
+///
+/// Returns `KreuzbergError::Validation` if any boundary is invalid.
+fn validate_page_boundaries(boundaries: &[PageBoundary]) -> Result<()> {
+    if boundaries.is_empty() {
+        return Ok(());
+    }
+
+    for (idx, boundary) in boundaries.iter().enumerate() {
+        if boundary.byte_start >= boundary.byte_end {
+            return Err(KreuzbergError::validation(format!(
+                "Invalid boundary range at index {}: byte_start ({}) must be < byte_end ({})",
+                idx, boundary.byte_start, boundary.byte_end
+            )));
+        }
+    }
+
+    for i in 0..boundaries.len() - 1 {
+        let current = &boundaries[i];
+        let next = &boundaries[i + 1];
+
+        if current.byte_start > next.byte_start {
+            return Err(KreuzbergError::validation(format!(
+                "Page boundaries not sorted: boundary at index {} (byte_start={}) comes after boundary at index {} (byte_start={})",
+                i,
+                current.byte_start,
+                i + 1,
+                next.byte_start
+            )));
+        }
+
+        if current.byte_end > next.byte_start {
+            return Err(KreuzbergError::validation(format!(
+                "Overlapping page boundaries: boundary {} ends at {} but boundary {} starts at {}",
+                i,
+                current.byte_end,
+                i + 1,
+                next.byte_start
+            )));
+        }
+    }
+
+    Ok(())
+}
+
+/// Calculate which pages a byte range spans.
+///
+/// # Arguments
+///
+/// * `byte_start` - Starting byte offset of the chunk
+/// * `byte_end` - Ending byte offset of the chunk
+/// * `boundaries` - Page boundary markers from the document
+///
+/// # Returns
+///
+/// A tuple of (first_page, last_page) where page numbers are 1-indexed.
+/// Returns (None, None) if boundaries are empty or chunk doesn't overlap any page.
+///
+/// # Errors
+///
+/// Returns `KreuzbergError::Validation` if boundaries are invalid.
+fn calculate_page_range(
+    byte_start: usize,
+    byte_end: usize,
+    boundaries: &[PageBoundary],
+) -> Result<(Option<usize>, Option<usize>)> {
+    if boundaries.is_empty() {
+        return Ok((None, None));
+    }
+
+    validate_page_boundaries(boundaries)?;
+
+    let mut first_page = None;
+    let mut last_page = None;
+
+    for boundary in boundaries {
+        if byte_start < boundary.byte_end && byte_end > boundary.byte_start {
+            if first_page.is_none() {
+                first_page = Some(boundary.page_number);
+            }
+            last_page = Some(boundary.page_number);
+        }
+    }
+
+    Ok((first_page, last_page))
+}
+
+/// Split text into chunks with optional page boundary tracking.
+///
+/// # Arguments
+///
+/// * `text` - The text to split into chunks
+/// * `config` - Chunking configuration (max size, overlap, type)
+/// * `page_boundaries` - Optional page boundary markers for mapping chunks to pages
+///
+/// # Returns
+///
+/// A ChunkingResult containing all chunks and their metadata.
+///
+/// # Examples
+///
+/// ```rust
+/// use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
+///
+/// # fn example() -> kreuzberg::Result<()> {
+/// let config = ChunkingConfig {
+///     max_characters: 500,
+///     overlap: 50,
+///     trim: true,
+///     chunker_type: ChunkerType::Text,
+/// };
+/// let result = chunk_text("Long text...", &config, None)?;
+/// assert!(!result.chunks.is_empty());
+/// # Ok(())
+/// # }
+/// ```
+pub fn chunk_text(
+    text: &str,
+    config: &ChunkingConfig,
+    page_boundaries: Option<&[PageBoundary]>,
+) -> Result<ChunkingResult> {
+    if text.is_empty() {
+        return Ok(ChunkingResult {
+            chunks: vec![],
+            chunk_count: 0,
+        });
+    }
+
+    if let Some(boundaries) = page_boundaries {
+        validate_utf8_boundaries(text, boundaries)?;
+    }
+
+    let chunk_config = build_chunk_config(config.max_characters, config.overlap, config.trim)?;
+
+    let text_chunks: Vec<&str> = match config.chunker_type {
+        ChunkerType::Text => {
+            let splitter = TextSplitter::new(chunk_config);
+            splitter.chunks(text).collect()
+        }
+        ChunkerType::Markdown => {
+            let splitter = MarkdownSplitter::new(chunk_config);
+            splitter.chunks(text).collect()
+        }
+    };
+
+    let total_chunks = text_chunks.len();
+    let mut byte_offset = 0;
+
+    let mut chunks: Vec<Chunk> = Vec::new();
+
+    for (index, chunk_text) in text_chunks.into_iter().enumerate() {
+        let byte_start = byte_offset;
+        let chunk_length = chunk_text.len();
+        let byte_end = byte_start + chunk_length;
+
+        let overlap_chars = if index < total_chunks - 1 {
+            config.overlap.min(chunk_length)
+        } else {
+            0
+        };
+        byte_offset = byte_end - overlap_chars;
+
+        let (first_page, last_page) = if let Some(boundaries) = page_boundaries {
+            calculate_page_range(byte_start, byte_end, boundaries)?
+        } else {
+            (None, None)
+        };
+
+        chunks.push(Chunk {
+            content: chunk_text.to_string(),
+            embedding: None,
+            metadata: ChunkMetadata {
+                byte_start,
+                byte_end,
+                token_count: None,
+                chunk_index: index,
+                total_chunks,
+                first_page,
+                last_page,
+            },
+        });
+    }
+
+    let chunk_count = chunks.len();
+
+    Ok(ChunkingResult { chunks, chunk_count })
+}
+
+pub fn chunk_text_with_type(
+    text: &str,
+    max_characters: usize,
+    overlap: usize,
+    trim: bool,
+    chunker_type: ChunkerType,
+) -> Result<ChunkingResult> {
+    let config = ChunkingConfig {
+        max_characters,
+        overlap,
+        trim,
+        chunker_type,
+    };
+    chunk_text(text, &config, None)
+}
+
+pub fn chunk_texts_batch(texts: &[&str], config: &ChunkingConfig) -> Result<Vec<ChunkingResult>> {
+    texts.iter().map(|text| chunk_text(text, config, None)).collect()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_chunk_empty_text() {
+        let config = ChunkingConfig::default();
+        let result = chunk_text("", &config, None).unwrap();
+        assert_eq!(result.chunks.len(), 0);
+        assert_eq!(result.chunk_count, 0);
+    }
+
+    #[test]
+    fn test_chunk_short_text_single_chunk() {
+        let config = ChunkingConfig {
+            max_characters: 100,
+            overlap: 10,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = "This is a short text.";
+        let result = chunk_text(text, &config, None).unwrap();
+        assert_eq!(result.chunks.len(), 1);
+        assert_eq!(result.chunk_count, 1);
+        assert_eq!(result.chunks[0].content, text);
+    }
+
+    #[test]
+    fn test_chunk_long_text_multiple_chunks() {
+        let config = ChunkingConfig {
+            max_characters: 20,
+            overlap: 5,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+        let result = chunk_text(text, &config, None).unwrap();
+        assert!(result.chunk_count >= 2);
+        assert_eq!(result.chunks.len(), result.chunk_count);
+        assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 20));
+    }
+
+    #[test]
+    fn test_chunk_text_with_overlap() {
+        let config = ChunkingConfig {
+            max_characters: 20,
+            overlap: 5,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = "abcdefghijklmnopqrstuvwxyz0123456789";
+        let result = chunk_text(text, &config, None).unwrap();
+        assert!(result.chunk_count >= 2);
+
+        if result.chunks.len() >= 2 {
+            let first_chunk_end = &result.chunks[0].content[result.chunks[0].content.len().saturating_sub(5)..];
+            assert!(
+                result.chunks[1].content.starts_with(first_chunk_end),
+                "Expected overlap '{}' at start of second chunk '{}'",
+                first_chunk_end,
+                result.chunks[1].content
+            );
+        }
+    }
+
+    #[test]
+    fn test_chunk_markdown_preserves_structure() {
+        let config = ChunkingConfig {
+            max_characters: 50,
+            overlap: 10,
+            trim: true,
+            chunker_type: ChunkerType::Markdown,
+        };
+        let markdown = "# Title\n\nParagraph one.\n\n## Section\n\nParagraph two.";
+        let result = chunk_text(markdown, &config, None).unwrap();
+        assert!(result.chunk_count >= 1);
+        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("# Title")));
+    }
+
+    #[test]
+    fn test_chunk_markdown_with_code_blocks() {
+        let config = ChunkingConfig {
+            max_characters: 100,
+            overlap: 10,
+            trim: true,
+            chunker_type: ChunkerType::Markdown,
+        };
+        let markdown = "# Code Example\n\n```python\nprint('hello')\n```\n\nSome text after code.";
+        let result = chunk_text(markdown, &config, None).unwrap();
+        assert!(result.chunk_count >= 1);
+        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("```")));
+    }
+
+    #[test]
+    fn test_chunk_markdown_with_links() {
+        let config = ChunkingConfig {
+            max_characters: 80,
+            overlap: 10,
+            trim: true,
+            chunker_type: ChunkerType::Markdown,
+        };
+        let markdown = "Check out [this link](https://example.com) for more info.";
+        let result = chunk_text(markdown, &config, None).unwrap();
+        assert_eq!(result.chunk_count, 1);
+        assert!(result.chunks[0].content.contains("[this link]"));
+    }
+
+    #[test]
+    fn test_chunk_text_with_trim() {
+        let config = ChunkingConfig {
+            max_characters: 30,
+            overlap: 5,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = " Leading and trailing spaces should be trimmed ";
+        let result = chunk_text(text, &config, None).unwrap();
+        assert!(result.chunk_count >= 1);
+        assert!(result.chunks.iter().all(|chunk| !chunk.content.starts_with(' ')));
+    }
+
+    #[test]
+    fn test_chunk_text_without_trim() {
+        let config = ChunkingConfig {
+            max_characters: 30,
+            overlap: 5,
+            trim: false,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = " Text with spaces ";
+        let result = chunk_text(text, &config, None).unwrap();
+        assert_eq!(result.chunk_count, 1);
+        assert!(result.chunks[0].content.starts_with(' ') || result.chunks[0].content.len() < text.len());
+    }
+
+    #[test]
+    fn test_chunk_with_invalid_overlap() {
+        let config = ChunkingConfig {
+            max_characters: 10,
+            overlap: 20,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let result = chunk_text("Some text", &config, None);
+        assert!(result.is_err());
+        let err = result.unwrap_err();
+        assert!(matches!(err, KreuzbergError::Validation { .. }));
+    }
+
+    #[test]
+    fn test_chunk_text_with_type_text() {
+        let result = chunk_text_with_type("Simple text", 50, 10, true, ChunkerType::Text).unwrap();
+        assert_eq!(result.chunk_count, 1);
+        assert_eq!(result.chunks[0].content, "Simple text");
+    }
+
+    #[test]
+    fn test_chunk_text_with_type_markdown() {
+        let markdown = "# Header\n\nContent here.";
+        let result = chunk_text_with_type(markdown, 50, 10, true, ChunkerType::Markdown).unwrap();
+        assert_eq!(result.chunk_count, 1);
+        assert!(result.chunks[0].content.contains("# Header"));
+    }
+
+    #[test]
+    fn test_chunk_texts_batch_empty() {
+        let config = ChunkingConfig::default();
+        let texts: Vec<&str> = vec![];
+        let results = chunk_texts_batch(&texts, &config).unwrap();
+        assert_eq!(results.len(), 0);
+    }
+
+    #[test]
+    fn test_chunk_texts_batch_multiple() {
+        let config = ChunkingConfig {
+            max_characters: 30,
+            overlap: 5,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let texts = vec!["First text", "Second text", "Third text"];
+        let results = chunk_texts_batch(&texts, &config).unwrap();
+        assert_eq!(results.len(), 3);
+        assert!(results.iter().all(|r| r.chunk_count >= 1));
+    }
+
+    #[test]
+    fn test_chunk_texts_batch_mixed_lengths() {
+        let config = ChunkingConfig {
+            max_characters: 20,
+            overlap: 5,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let texts = vec![
+            "Short",
+            "This is a longer text that should be split into multiple chunks",
+            "",
+        ];
+        let results = chunk_texts_batch(&texts, &config).unwrap();
+        assert_eq!(results.len(), 3);
+        assert_eq!(results[0].chunk_count, 1);
+        assert!(results[1].chunk_count > 1);
+        assert_eq!(results[2].chunk_count, 0);
+    }
+
+    #[test]
+    fn test_chunk_texts_batch_error_propagation() {
+        let config = ChunkingConfig {
+            max_characters: 10,
+            overlap: 20,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let texts = vec!["Text one", "Text two"];
+        let result = chunk_texts_batch(&texts, &config);
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn test_chunking_config_default() {
+        let config = ChunkingConfig::default();
+        assert_eq!(config.max_characters, 2000);
+        assert_eq!(config.overlap, 100);
+        assert!(config.trim);
+        assert_eq!(config.chunker_type, ChunkerType::Text);
+    }
+
+    #[test]
+    fn test_chunk_very_long_text() {
+        let config = ChunkingConfig {
+            max_characters: 100,
+            overlap: 20,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = "a".repeat(1000);
+        let result = chunk_text(&text, &config, None).unwrap();
+        assert!(result.chunk_count >= 10);
+        assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 100));
+    }
+
+    #[test]
+    fn test_chunk_text_with_newlines() {
+        let config = ChunkingConfig {
+            max_characters: 30,
+            overlap: 5,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = "Line one\nLine two\nLine three\nLine four\nLine five";
+        let result = chunk_text(text, &config, None).unwrap();
+        assert!(result.chunk_count >= 1);
+    }
+
+    #[test]
+    fn test_chunk_markdown_with_lists() {
+        let config = ChunkingConfig {
+            max_characters: 100,
+            overlap: 10,
+            trim: true,
+            chunker_type: ChunkerType::Markdown,
+        };
+        let markdown = "# List Example\n\n- Item 1\n- Item 2\n- Item 3\n\nMore text.";
+        let result = chunk_text(markdown, &config, None).unwrap();
+        assert!(result.chunk_count >= 1);
+        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("- Item")));
+    }
+
+    #[test]
+    fn test_chunk_markdown_with_tables() {
+        let config = ChunkingConfig {
+            max_characters: 150,
+            overlap: 10,
+            trim: true,
+            chunker_type: ChunkerType::Markdown,
+        };
+        let markdown = "# Table\n\n| Col1 | Col2 |\n|------|------|\n| A | B |\n| C | D |";
+        let result = chunk_text(markdown, &config, None).unwrap();
+        assert!(result.chunk_count >= 1);
+        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("|")));
+    }
+
+    #[test]
+    fn test_chunk_special_characters() {
+        let config = ChunkingConfig {
+            max_characters: 50,
+            overlap: 5,
+            trim: true,
+            chunker_type: ChunkerType::Text,
+        };
+        let text = "Special chars: @#$%^&*()[]{}|\\<>?/~`";
+        let result = chunk_text(text, &config, None).unwrap();
+        assert_eq!(result.chunk_count, 1);
+        assert!(result.chunks[0].content.contains("@#$%"));
+    }
+
+    #[test]
fn test_chunk_unicode_characters() {
|
|
691
|
+
let config = ChunkingConfig {
|
|
692
|
+
max_characters: 50,
|
|
693
|
+
overlap: 5,
|
|
694
|
+
trim: true,
|
|
695
|
+
chunker_type: ChunkerType::Text,
|
|
696
|
+
};
|
|
697
|
+
let text = "Unicode: 你好世界 🌍 café résumé";
|
|
698
|
+
let result = chunk_text(text, &config, None).unwrap();
|
|
699
|
+
assert_eq!(result.chunk_count, 1);
|
|
700
|
+
assert!(result.chunks[0].content.contains("你好"));
|
|
701
|
+
assert!(result.chunks[0].content.contains("🌍"));
|
|
702
|
+
}
|
|
703
|
+
|
|
704
|
+
#[test]
|
|
705
|
+
fn test_chunk_cjk_text() {
|
|
706
|
+
let config = ChunkingConfig {
|
|
707
|
+
max_characters: 30,
|
|
708
|
+
overlap: 5,
|
|
709
|
+
trim: true,
|
|
710
|
+
chunker_type: ChunkerType::Text,
|
|
711
|
+
};
|
|
712
|
+
let text = "日本語のテキストです。これは長い文章で、複数のチャンクに分割されるべきです。";
|
|
713
|
+
let result = chunk_text(text, &config, None).unwrap();
|
|
714
|
+
assert!(result.chunk_count >= 1);
|
|
715
|
+
}
|
|
716
|
+
|
|
717
|
+
#[test]
|
|
718
|
+
fn test_chunk_mixed_languages() {
|
|
719
|
+
let config = ChunkingConfig {
|
|
720
|
+
max_characters: 40,
|
|
721
|
+
overlap: 5,
|
|
722
|
+
trim: true,
|
|
723
|
+
chunker_type: ChunkerType::Text,
|
|
724
|
+
};
|
|
725
|
+
let text = "English text mixed with 中文文本 and some français";
|
|
726
|
+
let result = chunk_text(text, &config, None).unwrap();
|
|
727
|
+
assert!(result.chunk_count >= 1);
|
|
728
|
+
}
|
|
729
|
+
|
|
730
|
+
#[test]
|
|
731
|
+
fn test_chunk_offset_calculation_with_overlap() {
|
|
732
|
+
let config = ChunkingConfig {
|
|
733
|
+
max_characters: 20,
|
|
734
|
+
overlap: 5,
|
|
735
|
+
trim: false,
|
|
736
|
+
chunker_type: ChunkerType::Text,
|
|
737
|
+
};
|
|
738
|
+
let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
|
|
739
|
+
let result = chunk_text(text, &config, None).unwrap();
|
|
740
|
+
|
|
741
|
+
assert!(result.chunks.len() >= 2, "Expected at least 2 chunks");
|
|
742
|
+
|
|
743
|
+
for i in 0..result.chunks.len() {
|
|
744
|
+
let chunk = &result.chunks[i];
|
|
745
|
+
let metadata = &chunk.metadata;
|
|
746
|
+
|
|
747
|
+
assert_eq!(
|
|
748
|
+
metadata.byte_end - metadata.byte_start,
|
|
749
|
+
chunk.content.len(),
|
|
750
|
+
"Chunk {} offset range doesn't match content length",
|
|
751
|
+
i
|
|
752
|
+
);
|
|
753
|
+
|
|
754
|
+
assert_eq!(metadata.chunk_index, i);
|
|
755
|
+
assert_eq!(metadata.total_chunks, result.chunks.len());
|
|
756
|
+
}
|
|
757
|
+
|
|
758
|
+
for i in 0..result.chunks.len() - 1 {
|
|
759
|
+
let current_chunk = &result.chunks[i];
|
|
760
|
+
let next_chunk = &result.chunks[i + 1];
|
|
761
|
+
|
|
762
|
+
assert!(
|
|
763
|
+
next_chunk.metadata.byte_start < current_chunk.metadata.byte_end,
|
|
764
|
+
"Chunk {} and {} don't overlap: next starts at {} but current ends at {}",
|
|
765
|
+
i,
|
|
766
|
+
i + 1,
|
|
767
|
+
next_chunk.metadata.byte_start,
|
|
768
|
+
current_chunk.metadata.byte_end
|
|
769
|
+
);
|
|
770
|
+
|
|
771
|
+
let overlap_size = current_chunk.metadata.byte_end - next_chunk.metadata.byte_start;
|
|
772
|
+
assert!(
|
|
773
|
+
overlap_size <= config.overlap + 10,
|
|
774
|
+
"Overlap between chunks {} and {} is too large: {}",
|
|
775
|
+
i,
|
|
776
|
+
i + 1,
|
|
777
|
+
overlap_size
|
|
778
|
+
);
|
|
779
|
+
}
|
|
780
|
+
}
|
|
781
|
+
|
|
782
|
+
#[test]
|
|
783
|
+
fn test_chunk_offset_calculation_without_overlap() {
|
|
784
|
+
let config = ChunkingConfig {
|
|
785
|
+
max_characters: 20,
|
|
786
|
+
overlap: 0,
|
|
787
|
+
trim: false,
|
|
788
|
+
chunker_type: ChunkerType::Text,
|
|
789
|
+
};
|
|
790
|
+
let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
|
|
791
|
+
let result = chunk_text(text, &config, None).unwrap();
|
|
792
|
+
|
|
793
|
+
for i in 0..result.chunks.len() - 1 {
|
|
794
|
+
let current_chunk = &result.chunks[i];
|
|
795
|
+
let next_chunk = &result.chunks[i + 1];
|
|
796
|
+
|
|
797
|
+
assert!(
|
|
798
|
+
next_chunk.metadata.byte_start >= current_chunk.metadata.byte_end,
|
|
799
|
+
"Chunk {} and {} overlap when they shouldn't: next starts at {} but current ends at {}",
|
|
800
|
+
i,
|
|
801
|
+
i + 1,
|
|
802
|
+
next_chunk.metadata.byte_start,
|
|
803
|
+
current_chunk.metadata.byte_end
|
|
804
|
+
);
|
|
805
|
+
}
|
|
806
|
+
}
|
|
807
|
+
|
|
808
|
+
#[test]
|
|
809
|
+
fn test_chunk_offset_covers_full_text() {
|
|
810
|
+
let config = ChunkingConfig {
|
|
811
|
+
max_characters: 15,
|
|
812
|
+
overlap: 3,
|
|
813
|
+
trim: false,
|
|
814
|
+
chunker_type: ChunkerType::Text,
|
|
815
|
+
};
|
|
816
|
+
let text = "0123456789 ABCDEFGHIJ KLMNOPQRST UVWXYZ";
|
|
817
|
+
let result = chunk_text(text, &config, None).unwrap();
|
|
818
|
+
|
|
819
|
+
assert!(result.chunks.len() >= 2, "Expected multiple chunks");
|
|
820
|
+
|
|
821
|
+
assert_eq!(
|
|
822
|
+
result.chunks[0].metadata.byte_start, 0,
|
|
823
|
+
"First chunk should start at position 0"
|
|
824
|
+
);
|
|
825
|
+
|
|
826
|
+
for i in 0..result.chunks.len() - 1 {
|
|
827
|
+
let current_chunk = &result.chunks[i];
|
|
828
|
+
let next_chunk = &result.chunks[i + 1];
|
|
829
|
+
|
|
830
|
+
assert!(
|
|
831
|
+
next_chunk.metadata.byte_start <= current_chunk.metadata.byte_end,
|
|
832
|
+
"Gap detected between chunk {} (ends at {}) and chunk {} (starts at {})",
|
|
833
|
+
i,
|
|
834
|
+
current_chunk.metadata.byte_end,
|
|
835
|
+
i + 1,
|
|
836
|
+
next_chunk.metadata.byte_start
|
|
837
|
+
);
|
|
838
|
+
}
|
|
839
|
+
}
|
|
840
|
+
|
|
841
|
+
    #[test]
    fn test_chunk_offset_with_various_overlap_sizes() {
        for overlap in [0, 5, 10, 20] {
            let config = ChunkingConfig {
                max_characters: 30,
                overlap,
                trim: false,
                chunker_type: ChunkerType::Text,
            };
            let text = "Word ".repeat(30);
            let result = chunk_text(&text, &config, None).unwrap();

            for chunk in &result.chunks {
                assert!(
                    chunk.metadata.byte_end > chunk.metadata.byte_start,
                    "Invalid offset range for overlap {}: start={}, end={}",
                    overlap,
                    chunk.metadata.byte_start,
                    chunk.metadata.byte_end
                );
            }

            for chunk in &result.chunks {
                assert!(
                    chunk.metadata.byte_start < text.len(),
                    "byte_start with overlap {} is out of bounds: {}",
                    overlap,
                    chunk.metadata.byte_start
                );
            }
        }
    }

    #[test]
    fn test_chunk_last_chunk_offset() {
        let config = ChunkingConfig {
            max_characters: 20,
            overlap: 5,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "AAAAA BBBBB CCCCC DDDDD EEEEE";
        let result = chunk_text(text, &config, None).unwrap();

        assert!(result.chunks.len() >= 2, "Need multiple chunks for this test");

        let last_chunk = result.chunks.last().unwrap();
        let second_to_last = &result.chunks[result.chunks.len() - 2];

        assert!(
            last_chunk.metadata.byte_start < second_to_last.metadata.byte_end,
            "Last chunk should overlap with previous chunk"
        );

        let expected_end = text.len();
        let last_chunk_covers_end =
            last_chunk.content.trim_end() == text.trim_end() || last_chunk.metadata.byte_end >= expected_end - 5;
        assert!(last_chunk_covers_end, "Last chunk should cover the end of the text");
    }

    #[test]
    fn test_chunk_with_page_boundaries() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Page one content here. Page two starts here and continues.";

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 21,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 22,
                byte_end: 58,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
        assert!(result.chunks.len() >= 2);

        assert_eq!(result.chunks[0].metadata.first_page, Some(1));

        let last_chunk = result.chunks.last().unwrap();
        assert_eq!(last_chunk.metadata.last_page, Some(2));
    }

    #[test]
    fn test_chunk_without_page_boundaries() {
        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "This is some test content that should be split into multiple chunks.";

        let result = chunk_text(text, &config, None).unwrap();
        assert!(result.chunks.len() >= 2);

        for chunk in &result.chunks {
            assert_eq!(chunk.metadata.first_page, None);
            assert_eq!(chunk.metadata.last_page, None);
        }
    }

    #[test]
    fn test_chunk_empty_boundaries() {
        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Some text content here.";
        let boundaries: Vec<PageBoundary> = vec![];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
        assert_eq!(result.chunks.len(), 1);

        assert_eq!(result.chunks[0].metadata.first_page, None);
        assert_eq!(result.chunks[0].metadata.last_page, None);
    }

    #[test]
    fn test_chunk_spanning_multiple_pages() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 50,
            overlap: 5,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "0123456789 AAAAAAAAAA 1111111111 BBBBBBBBBB 2222222222";

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 20,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 20,
                byte_end: 40,
                page_number: 2,
            },
            PageBoundary {
                byte_start: 40,
                byte_end: 54,
                page_number: 3,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
        assert!(result.chunks.len() >= 2);

        for chunk in &result.chunks {
            assert!(chunk.metadata.first_page.is_some() || chunk.metadata.last_page.is_some());
        }
    }

    #[test]
    fn test_chunk_text_with_invalid_boundary_range() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Page one content here. Page two content.";

        let boundaries = vec![PageBoundary {
            byte_start: 10,
            byte_end: 5,
            page_number: 1,
        }];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("Invalid boundary range"));
        assert!(err.to_string().contains("byte_start"));
    }

    #[test]
    fn test_chunk_text_with_unsorted_boundaries() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Page one content here. Page two content.";

        let boundaries = vec![
            PageBoundary {
                byte_start: 22,
                byte_end: 40,
                page_number: 2,
            },
            PageBoundary {
                byte_start: 0,
                byte_end: 21,
                page_number: 1,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("not sorted"));
        assert!(err.to_string().contains("boundaries"));
    }

    #[test]
    fn test_chunk_text_with_overlapping_boundaries() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Page one content here. Page two content.";

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 25,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 20,
                byte_end: 40,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("Overlapping"));
        assert!(err.to_string().contains("boundaries"));
    }

    #[test]
    fn test_calculate_page_range_with_invalid_boundaries() {
        use crate::types::PageBoundary;

        let boundaries = vec![PageBoundary {
            byte_start: 15,
            byte_end: 10,
            page_number: 1,
        }];

        let result = calculate_page_range(0, 20, &boundaries);
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("Invalid boundary range"));
    }

    #[test]
    fn test_validate_page_boundaries_valid() {
        use crate::types::PageBoundary;

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 20,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 20,
                byte_end: 40,
                page_number: 2,
            },
            PageBoundary {
                byte_start: 40,
                byte_end: 60,
                page_number: 3,
            },
        ];

        let result = chunk_text(
            "x".repeat(60).as_str(),
            &ChunkingConfig {
                max_characters: 30,
                overlap: 5,
                trim: false,
                chunker_type: ChunkerType::Text,
            },
            Some(&boundaries),
        );
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_page_boundaries_empty() {
        let boundaries: Vec<PageBoundary> = vec![];
        let result = chunk_text(
            "Some test text",
            &ChunkingConfig {
                max_characters: 30,
                overlap: 5,
                trim: true,
                chunker_type: ChunkerType::Text,
            },
            Some(&boundaries),
        );
        assert!(result.is_ok());
    }

    #[test]
    fn test_page_boundaries_with_gaps() {
        use crate::types::PageBoundary;

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 10,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 15,
                byte_end: 25,
                page_number: 2,
            },
        ];

        let text = "0123456789XXXXX0123456789";
        let result = chunk_text(
            text,
            &ChunkingConfig {
                max_characters: 30,
                overlap: 5,
                trim: false,
                chunker_type: ChunkerType::Text,
            },
            Some(&boundaries),
        );
        assert!(result.is_ok());
    }

    #[test]
    fn test_chunk_with_same_start_and_end() {
        use crate::types::PageBoundary;

        let boundaries = vec![PageBoundary {
            byte_start: 10,
            byte_end: 10,
            page_number: 1,
        }];

        let result = chunk_text(
            "test content here",
            &ChunkingConfig {
                max_characters: 30,
                overlap: 5,
                trim: true,
                chunker_type: ChunkerType::Text,
            },
            Some(&boundaries),
        );
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("Invalid boundary range"));
    }

    #[test]
    fn test_multiple_overlapping_errors() {
        use crate::types::PageBoundary;

        let text = "This is a longer test content string that spans more bytes";
        let boundaries = vec![
            PageBoundary {
                byte_start: 20,
                byte_end: 40,
                page_number: 2,
            },
            PageBoundary {
                byte_start: 10,
                byte_end: 35,
                page_number: 1,
            },
        ];

        let result = chunk_text(
            text,
            &ChunkingConfig {
                max_characters: 30,
                overlap: 5,
                trim: true,
                chunker_type: ChunkerType::Text,
            },
            Some(&boundaries),
        );
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("not sorted"));
    }

    #[test]
    fn test_chunk_with_pages_basic() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 25,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "First page content here.Second page content here.Third page.";

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 24,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 24,
                byte_end: 50,
                page_number: 2,
            },
            PageBoundary {
                byte_start: 50,
                byte_end: 60,
                page_number: 3,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();

        if !result.chunks.is_empty() {
            assert!(result.chunks[0].metadata.first_page.is_some());
        }
    }

    #[test]
    fn test_chunk_with_pages_single_page_chunk() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 100,
            overlap: 10,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "All content on single page fits in one chunk.";

        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: 45,
            page_number: 1,
        }];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
        assert_eq!(result.chunks.len(), 1);
        assert_eq!(result.chunks[0].metadata.first_page, Some(1));
        assert_eq!(result.chunks[0].metadata.last_page, Some(1));
    }

    #[test]
    fn test_chunk_with_pages_no_overlap() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 20,
            overlap: 0,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "AAAAA BBBBB CCCCC DDDDD";

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 11,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 11,
                byte_end: 23,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();
        assert!(!result.chunks.is_empty());

        for chunk in &result.chunks {
            if let (Some(first), Some(last)) = (chunk.metadata.first_page, chunk.metadata.last_page) {
                assert!(first <= last);
            }
        }
    }

    #[test]
    fn test_calculate_page_range_within_page() {
        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 100,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 100,
                byte_end: 200,
                page_number: 2,
            },
        ];

        let (first, last) = calculate_page_range(10, 50, &boundaries).unwrap();
        assert_eq!(first, Some(1));
        assert_eq!(last, Some(1));
    }

    #[test]
    fn test_calculate_page_range_spanning_pages() {
        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 100,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 100,
                byte_end: 200,
                page_number: 2,
            },
        ];

        let (first, last) = calculate_page_range(50, 150, &boundaries).unwrap();
        assert_eq!(first, Some(1));
        assert_eq!(last, Some(2));
    }

    #[test]
    fn test_calculate_page_range_empty_boundaries() {
        let boundaries: Vec<PageBoundary> = vec![];

        let (first, last) = calculate_page_range(0, 50, &boundaries).unwrap();
        assert_eq!(first, None);
        assert_eq!(last, None);
    }

    #[test]
    fn test_calculate_page_range_no_overlap() {
        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 100,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 100,
                byte_end: 200,
                page_number: 2,
            },
        ];

        let (first, last) = calculate_page_range(200, 250, &boundaries).unwrap();
        assert_eq!(first, None);
        assert_eq!(last, None);
    }

    #[test]
    fn test_calculate_page_range_three_pages() {
        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 100,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 100,
                byte_end: 200,
                page_number: 2,
            },
            PageBoundary {
                byte_start: 200,
                byte_end: 300,
                page_number: 3,
            },
        ];

        let (first, last) = calculate_page_range(50, 250, &boundaries).unwrap();
        assert_eq!(first, Some(1));
        assert_eq!(last, Some(3));
    }

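    // Editorial note: the calculate_page_range cases above behave as if page
    // boundaries were half-open [byte_start, byte_end) ranges: a range starting
    // at byte 200 misses page 2 (which ends at 200) but does land on page 3
    // (which starts at 200). This is inferred from the tests, not stated by
    // the module itself.
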
    #[test]
    fn test_chunk_metadata_page_range_accuracy() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Page One Content Here.Page Two.";

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 21,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 21,
                byte_end: 31,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();

        for chunk in &result.chunks {
            assert_eq!(chunk.metadata.byte_end - chunk.metadata.byte_start, chunk.content.len());
        }
    }

    #[test]
    fn test_chunk_page_range_boundary_edge_cases() {
        use crate::types::PageBoundary;

        let config = ChunkingConfig {
            max_characters: 10,
            overlap: 2,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "0123456789ABCDEFGHIJ";

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 10,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 10,
                byte_end: 20,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries)).unwrap();

        for chunk in &result.chunks {
            let on_page1 = chunk.metadata.byte_start < 10;
            let on_page2 = chunk.metadata.byte_end > 10;

            if on_page1 && on_page2 {
                assert_eq!(chunk.metadata.first_page, Some(1));
                assert_eq!(chunk.metadata.last_page, Some(2));
            } else if on_page1 {
                assert_eq!(chunk.metadata.first_page, Some(1));
            } else if on_page2 {
                assert_eq!(chunk.metadata.first_page, Some(2));
            }
        }
    }

    #[test]
    fn test_validate_utf8_boundaries_valid_ascii() {
        use crate::types::PageBoundary;

        let text = "This is ASCII text.";
        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 10,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 10,
                byte_end: 19,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &ChunkingConfig::default(), Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_utf8_boundaries_valid_emoji() {
        use crate::types::PageBoundary;

        let text = "Hello 👋 World 🌍 End";
        let config = ChunkingConfig::default();

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 11,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 11,
                byte_end: 25,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_utf8_boundaries_valid_cjk() {
        use crate::types::PageBoundary;

        let text = "你好世界 こんにちは 안녕하세요";
        let config = ChunkingConfig::default();

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 13,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 13,
                byte_end: 44,
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_utf8_boundaries_invalid_mid_emoji() {
        use crate::types::PageBoundary;

        let text = "Hello 👋 World";
        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: 7,
            page_number: 1,
        }];

        let config = ChunkingConfig::default();
        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("UTF-8 character boundary"));
        assert!(err.to_string().contains("byte_end=7"));
    }

    #[test]
    fn test_validate_utf8_boundaries_invalid_mid_multibyte_cjk() {
        use crate::types::PageBoundary;

        let text = "中文文本";
        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: 1,
            page_number: 1,
        }];

        let config = ChunkingConfig::default();
        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("UTF-8 character boundary"));
    }

    #[test]
    fn test_validate_utf8_boundaries_byte_start_exceeds_length() {
        use crate::types::PageBoundary;

        let text = "Short";
        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 3,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 10,
                byte_end: 15,
                page_number: 2,
            },
        ];

        let config = ChunkingConfig::default();
        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("exceeds text length"));
    }

    #[test]
    fn test_validate_utf8_boundaries_byte_end_exceeds_length() {
        use crate::types::PageBoundary;

        let text = "Short";
        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: 100,
            page_number: 1,
        }];

        let config = ChunkingConfig::default();
        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        assert!(err.to_string().contains("exceeds text length"));
    }

    #[test]
    fn test_validate_utf8_boundaries_empty_boundaries() {
        use crate::types::PageBoundary;

        let text = "Some text";
        let boundaries: Vec<PageBoundary> = vec![];

        let config = ChunkingConfig::default();
        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_utf8_boundaries_at_text_boundaries() {
        use crate::types::PageBoundary;

        let text = "Exact boundary test";
        let text_len = text.len();
        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: text_len,
            page_number: 1,
        }];

        let config = ChunkingConfig::default();
        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_utf8_boundaries_mixed_languages() {
        use crate::types::PageBoundary;

        let text = "English text mixed with 中文 and français";
        let config = ChunkingConfig::default();

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 24,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 24,
                byte_end: text.len(),
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_chunk_text_rejects_invalid_utf8_boundaries() {
        use crate::types::PageBoundary;

        let text = "🌍🌎🌏 Three emoji planets";
        let config = ChunkingConfig::default();

        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: 1000,
            page_number: 1,
        }];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
    }

    #[test]
    fn test_validate_utf8_boundaries_combining_diacriticals() {
        use crate::types::PageBoundary;

        let text = "café";
        let config = ChunkingConfig::default();

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 2,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 2,
                byte_end: text.len(),
                page_number: 2,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_utf8_boundaries_error_messages_are_clear() {
        use crate::types::PageBoundary;

        let text = "Test 👋 text";
        let config = ChunkingConfig::default();

        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: 6,
            page_number: 1,
        }];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
        let err = result.unwrap_err();
        let err_msg = err.to_string();
        assert!(err_msg.contains("UTF-8"));
        assert!(err_msg.contains("boundary"));
        assert!(err_msg.contains("6"));
    }

    #[test]
    fn test_validate_utf8_boundaries_multiple_valid_boundaries() {
        use crate::types::PageBoundary;

        let text = "First👋Second🌍Third";
        let config = ChunkingConfig::default();

        let boundaries = vec![
            PageBoundary {
                byte_start: 0,
                byte_end: 5,
                page_number: 1,
            },
            PageBoundary {
                byte_start: 5,
                byte_end: 9,
                page_number: 2,
            },
            PageBoundary {
                byte_start: 9,
                byte_end: 15,
                page_number: 3,
            },
            PageBoundary {
                byte_start: 15,
                byte_end: 19,
                page_number: 4,
            },
            PageBoundary {
                byte_start: 19,
                byte_end: text.len(),
                page_number: 5,
            },
        ];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_ok());
    }

    #[test]
    fn test_validate_utf8_boundaries_zero_start_and_end() {
        use crate::types::PageBoundary;

        let text = "Text";
        let config = ChunkingConfig::default();

        let boundaries = vec![PageBoundary {
            byte_start: 0,
            byte_end: 0,
            page_number: 1,
        }];

        let result = chunk_text(text, &config, Some(&boundaries));
        assert!(result.is_err());
    }
}

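// Editorial note: taken together, the tests above pin down the chunk_text
// contract: ChunkingConfig controls chunk size and overlap, and optional
// PageBoundary slices (sorted, non-overlapping, ending on UTF-8 character
// boundaries) let each chunk report the page range it covers. The sketch
// below is a hypothetical caller mirroring those tests; it is not part of
// the shipped module, and print_chunks is an illustrative name.
fn print_chunks() -> Result<()> {
    use crate::types::PageBoundary;

    let config = ChunkingConfig {
        max_characters: 200,
        overlap: 20,
        trim: true,
        chunker_type: ChunkerType::Text,
    };
    let text = "Page one text. Page two text.";
    // Boundaries must be sorted, non-overlapping, with byte_start < byte_end.
    let boundaries = vec![
        PageBoundary { byte_start: 0, byte_end: 15, page_number: 1 },
        PageBoundary { byte_start: 15, byte_end: 29, page_number: 2 },
    ];
    let result = chunk_text(text, &config, Some(&boundaries))?;
    for chunk in &result.chunks {
        // Invariant exercised by the tests: byte_end - byte_start == content.len().
        println!(
            "chunk {}/{} pages {:?}..{:?}",
            chunk.metadata.chunk_index + 1,
            chunk.metadata.total_chunks,
            chunk.metadata.first_page,
            chunk.metadata.last_page
        );
    }
    Ok(())
}
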
/// Lazy-initialized flag that ensures the chunking processor is registered exactly once.
///
/// This static is accessed on first use to automatically register the
/// chunking processor with the plugin registry.
static PROCESSOR_INITIALIZED: Lazy<Result<()>> = Lazy::new(register_chunking_processor);

/// Ensure the chunking processor is registered.
///
/// This function is called automatically when needed.
/// It's safe to call multiple times; registration only happens once.
pub fn ensure_initialized() -> Result<()> {
    PROCESSOR_INITIALIZED
        .as_ref()
        .map(|_| ())
        .map_err(|e| crate::KreuzbergError::Plugin {
            message: format!("Failed to register chunking processor: {}", e),
            plugin_name: "text-chunking".to_string(),
        })
}

/// Register the chunking processor with the global registry.
///
/// This function should be called once at application startup to register
/// the chunking post-processor.
///
/// **Note:** This is called automatically on first use.
/// Explicit calling is optional.
pub fn register_chunking_processor() -> Result<()> {
    let registry = crate::plugins::registry::get_post_processor_registry();
    let mut registry = registry
        .write()
        .map_err(|e| crate::KreuzbergError::Other(format!("Post-processor registry lock poisoned: {}", e)))?;

    registry.register(Arc::new(ChunkingProcessor), 50)?;

    Ok(())
}
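
// Editorial note: a minimal sketch of making registration explicit at startup,
// assuming a caller inside this crate; init_chunking is an illustrative name.
// As the doc comments above note, this is optional: ensure_initialized() is
// also invoked automatically on first use, and the Lazy static guarantees
// register_chunking_processor() runs at most once, with later calls simply
// re-inspecting the stored Result.
fn init_chunking() -> Result<()> {
    ensure_initialized()
}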