kreuzberg 4.0.0.rc1 → 4.0.0.rc2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -8
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -534
- data/.rubocop.yml +538 -0
- data/Gemfile +8 -9
- data/Gemfile.lock +9 -109
- data/README.md +426 -421
- data/Rakefile +25 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -340
- data/ext/kreuzberg_rb/extconf.rb +45 -35
- data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
- data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -17
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +148 -105
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +46 -45
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +691 -684
- data/lib/kreuzberg/error_context.rb +32 -0
- data/lib/kreuzberg/errors.rb +118 -50
- data/lib/kreuzberg/extraction_api.rb +85 -84
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +216 -216
- data/lib/kreuzberg/setup_lib_path.rb +80 -79
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +103 -82
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +520 -468
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -87
- data/spec/binding/cli_spec.rb +55 -54
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -42
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/kreuzberg/Cargo.toml +204 -134
- data/vendor/kreuzberg/README.md +175 -175
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
- data/vendor/kreuzberg/build.rs +474 -460
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
- data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
- data/vendor/kreuzberg/src/core/config.rs +1032 -1032
- data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
- data/vendor/kreuzberg/src/core/io.rs +329 -327
- data/vendor/kreuzberg/src/core/mime.rs +605 -615
- data/vendor/kreuzberg/src/core/mod.rs +45 -42
- data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
- data/vendor/kreuzberg/src/embeddings.rs +432 -323
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +553 -553
- data/vendor/kreuzberg/src/extraction/image.rs +368 -368
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
- data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
- data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
- data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
- data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
- data/vendor/kreuzberg/src/extractors/email.rs +143 -129
- data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
- data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
- data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
- data/vendor/kreuzberg/src/extractors/html.rs +393 -410
- data/vendor/kreuzberg/src/extractors/image.rs +198 -195
- data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
- data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
- data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
- data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
- data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
- data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
- data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
- data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
- data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
- data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
- data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
- data/vendor/kreuzberg/src/extractors/security.rs +484 -0
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
- data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
- data/vendor/kreuzberg/src/extractors/text.rs +260 -242
- data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
- data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
- data/vendor/kreuzberg/src/lib.rs +105 -102
- data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
- data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -0
- data/vendor/kreuzberg/src/pdf/error.rs +122 -122
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
- data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
- data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
- data/vendor/kreuzberg/src/pdf/table.rs +393 -420
- data/vendor/kreuzberg/src/pdf/text.rs +158 -161
- data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
- data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
- data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
- data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
- data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +19 -19
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +903 -873
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
- data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
- data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
- data/vendor/kreuzberg/tests/config_features.rs +598 -580
- data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
- data/vendor/kreuzberg/tests/core_integration.rs +510 -493
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
- data/vendor/kreuzberg/tests/email_integration.rs +325 -325
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
- data/vendor/kreuzberg/tests/error_handling.rs +393 -393
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
- data/vendor/kreuzberg/tests/format_integration.rs +159 -159
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
- data/vendor/kreuzberg/tests/image_integration.rs +253 -253
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
- data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
- data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
- data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
- data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
- data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
- data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
- data/vendor/kreuzberg/tests/security_validation.rs +415 -404
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/rb-sys/.cargo-ok +1 -0
- data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
- data/vendor/rb-sys/Cargo.lock +393 -0
- data/vendor/rb-sys/Cargo.toml +70 -0
- data/vendor/rb-sys/Cargo.toml.orig +57 -0
- data/vendor/rb-sys/LICENSE-APACHE +190 -0
- data/vendor/rb-sys/LICENSE-MIT +21 -0
- data/vendor/rb-sys/bin/release.sh +21 -0
- data/vendor/rb-sys/build/features.rs +108 -0
- data/vendor/rb-sys/build/main.rs +246 -0
- data/vendor/rb-sys/build/stable_api_config.rs +153 -0
- data/vendor/rb-sys/build/version.rs +48 -0
- data/vendor/rb-sys/readme.md +36 -0
- data/vendor/rb-sys/src/bindings.rs +21 -0
- data/vendor/rb-sys/src/hidden.rs +11 -0
- data/vendor/rb-sys/src/lib.rs +34 -0
- data/vendor/rb-sys/src/macros.rs +371 -0
- data/vendor/rb-sys/src/memory.rs +53 -0
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
- data/vendor/rb-sys/src/special_consts.rs +31 -0
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
- data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
- data/vendor/rb-sys/src/stable_api.rs +261 -0
- data/vendor/rb-sys/src/symbol.rs +31 -0
- data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
- data/vendor/rb-sys/src/utils.rs +89 -0
- data/vendor/rb-sys/src/value_type.rs +7 -0
- metadata +90 -95
- data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
- data/spec/examples.txt +0 -104
- data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
- data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
- data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
- data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
- data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
- data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
- data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
- data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
- data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
- data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
data/vendor/kreuzberg/src/chunking/mod.rs
@@ -1,677 +1,677 @@
-//! Text chunking utilities.
-//!
-//! This module provides text chunking functionality using the `text-splitter` library.
-//! It splits long text into smaller chunks while preserving semantic boundaries.
-//!
-//! # Features
-//!
-//! - **Smart splitting**: Respects word and sentence boundaries
-//! - **Markdown-aware**: Preserves Markdown structure (headings, code blocks, lists)
-//! - **Configurable overlap**: Overlap chunks to maintain context
-//! - **Unicode support**: Handles CJK characters and emojis correctly
-//! - **Batch processing**: Process multiple texts efficiently
-//!
-//! # Chunker Types
-//!
-//! - **Text**: Generic text splitter, splits on whitespace and punctuation
-//! - **Markdown**: Markdown-aware splitter, preserves formatting and structure
-//!
-//! # Example
-//!
-//! ```rust
-//! use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
-//!
-//! # fn example() -> kreuzberg::Result<()> {
-//! let config = ChunkingConfig {
-//!     max_characters: 500,
-//!     overlap: 50,
-//!     trim: true,
-//!     chunker_type: ChunkerType::Text,
-//! };
-//!
-//! let long_text = "This is a very long document...".repeat(100);
-//! let result = chunk_text(&long_text, &config)?;
-//!
-//! println!("Split into {} chunks", result.chunk_count);
-//! for (i, chunk) in result.chunks.iter().enumerate() {
-//!     println!("Chunk {}: {} chars", i + 1, chunk.content.len());
-//! }
-//! # Ok(())
-//! # }
-//! ```
-//!
-//! # Use Cases
-//!
-//! - Splitting documents for LLM context windows
-//! - Creating overlapping chunks for semantic search
-//! - Processing large documents in batches
-//! - Maintaining context across chunk boundaries
-use crate::error::{KreuzbergError, Result};
-use crate::types::{Chunk, ChunkMetadata};
-use serde::{Deserialize, Serialize};
-use text_splitter::{Characters, ChunkCapacity, ChunkConfig, MarkdownSplitter, TextSplitter};
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
-pub enum ChunkerType {
-    Text,
-    Markdown,
-}
-
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct ChunkingResult {
-    pub chunks: Vec<Chunk>,
-    pub chunk_count: usize,
-}
-
-pub struct ChunkingConfig {
-    pub max_characters: usize,
-    pub overlap: usize,
-    pub trim: bool,
-    pub chunker_type: ChunkerType,
-}
-
-impl Default for ChunkingConfig {
-    fn default() -> Self {
-        Self {
-            max_characters: 2000,
-            overlap: 100,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        }
-    }
-}
-
-fn build_chunk_config(max_characters: usize, overlap: usize, trim: bool) -> Result<ChunkConfig<Characters>> {
-    ChunkConfig::new(ChunkCapacity::new(max_characters))
-        .with_overlap(overlap)
-        .map(|config| config.with_trim(trim))
-        .map_err(|e| KreuzbergError::validation(format!("Invalid chunking configuration: {}", e)))
-}
-
-pub fn chunk_text(text: &str, config: &ChunkingConfig) -> Result<ChunkingResult> {
-    if text.is_empty() {
-        return Ok(ChunkingResult {
-            chunks: vec![],
-            chunk_count: 0,
-        });
-    }
-
-    let chunk_config = build_chunk_config(config.max_characters, config.overlap, config.trim)?;
-
-    let text_chunks: Vec<&str> = match config.chunker_type {
-        ChunkerType::Text => {
-            let splitter = TextSplitter::new(chunk_config);
-            splitter.chunks(text).collect()
-        }
-        ChunkerType::Markdown => {
-            let splitter = MarkdownSplitter::new(chunk_config);
-            splitter.chunks(text).collect()
-        }
-    };
-
-    let total_chunks = text_chunks.len();
-    let mut char_offset = 0;
-
-    let chunks: Vec<Chunk> = text_chunks
-        .into_iter()
-        .enumerate()
-        .map(|(index, chunk_text)| {
-            let char_start = char_offset;
-            let chunk_length = chunk_text.chars().count();
-            let char_end = char_start + chunk_length;
-
-            let overlap_chars = if index < total_chunks - 1 {
-                config.overlap.min(chunk_length)
-            } else {
-                0
-            };
-            char_offset = char_end - overlap_chars;
-
-            Chunk {
-                content: chunk_text.to_string(),
-                embedding: None,
-                metadata: ChunkMetadata {
-                    char_start,
-                    char_end,
-                    token_count: None,
-                    chunk_index: index,
-                    total_chunks,
-                },
-            }
-        })
-        .collect();
-
-    let chunk_count = chunks.len();
-
-    Ok(ChunkingResult { chunks, chunk_count })
-}
-
-pub fn chunk_text_with_type(
-    text: &str,
-    max_characters: usize,
-    overlap: usize,
-    trim: bool,
-    chunker_type: ChunkerType,
-) -> Result<ChunkingResult> {
-    let config = ChunkingConfig {
-        max_characters,
-        overlap,
-        trim,
-        chunker_type,
-    };
-    chunk_text(text, &config)
-}
-
-pub fn chunk_texts_batch(texts: &[&str], config: &ChunkingConfig) -> Result<Vec<ChunkingResult>> {
-    texts.iter().map(|text| chunk_text(text, config)).collect()
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_chunk_empty_text() {
-        let config = ChunkingConfig::default();
-        let result = chunk_text("", &config).unwrap();
-        assert_eq!(result.chunks.len(), 0);
-        assert_eq!(result.chunk_count, 0);
-    }
-
-    #[test]
-    fn test_chunk_short_text_single_chunk() {
-        let config = ChunkingConfig {
-            max_characters: 100,
-            overlap: 10,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "This is a short text.";
-        let result = chunk_text(text, &config).unwrap();
-        assert_eq!(result.chunks.len(), 1);
-        assert_eq!(result.chunk_count, 1);
-        assert_eq!(result.chunks[0].content, text);
-    }
-
-    #[test]
-    fn test_chunk_long_text_multiple_chunks() {
-        let config = ChunkingConfig {
-            max_characters: 20,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-        let result = chunk_text(text, &config).unwrap();
-        assert!(result.chunk_count >= 2);
-        assert_eq!(result.chunks.len(), result.chunk_count);
-        assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 20));
-    }
-
-    #[test]
-    fn test_chunk_text_with_overlap() {
-        let config = ChunkingConfig {
-            max_characters: 20,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "abcdefghijklmnopqrstuvwxyz0123456789";
-        let result = chunk_text(text, &config).unwrap();
-        assert!(result.chunk_count >= 2);
-
-        if result.chunks.len() >= 2 {
-            let first_chunk_end = &result.chunks[0].content[result.chunks[0].content.len().saturating_sub(5)..];
-            assert!(
-                result.chunks[1].content.starts_with(first_chunk_end),
-                "Expected overlap '{}' at start of second chunk '{}'",
-                first_chunk_end,
-                result.chunks[1].content
-            );
-        }
-    }
-
-    #[test]
-    fn test_chunk_markdown_preserves_structure() {
-        let config = ChunkingConfig {
-            max_characters: 50,
-            overlap: 10,
-            trim: true,
-            chunker_type: ChunkerType::Markdown,
-        };
-        let markdown = "# Title\n\nParagraph one.\n\n## Section\n\nParagraph two.";
-        let result = chunk_text(markdown, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("# Title")));
-    }
-
-    #[test]
-    fn test_chunk_markdown_with_code_blocks() {
-        let config = ChunkingConfig {
-            max_characters: 100,
-            overlap: 10,
-            trim: true,
-            chunker_type: ChunkerType::Markdown,
-        };
-        let markdown = "# Code Example\n\n```python\nprint('hello')\n```\n\nSome text after code.";
-        let result = chunk_text(markdown, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("```")));
-    }
-
-    #[test]
-    fn test_chunk_markdown_with_links() {
-        let config = ChunkingConfig {
-            max_characters: 80,
-            overlap: 10,
-            trim: true,
-            chunker_type: ChunkerType::Markdown,
-        };
-        let markdown = "Check out [this link](https://example.com) for more info.";
-        let result = chunk_text(markdown, &config).unwrap();
-        assert_eq!(result.chunk_count, 1);
-        assert!(result.chunks[0].content.contains("[this link]"));
-    }
-
-    #[test]
-    fn test_chunk_text_with_trim() {
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "  Leading and trailing spaces should be trimmed  ";
-        let result = chunk_text(text, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-        assert!(result.chunks.iter().all(|chunk| !chunk.content.starts_with(' ')));
-    }
-
-    #[test]
-    fn test_chunk_text_without_trim() {
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "  Text with spaces  ";
-        let result = chunk_text(text, &config).unwrap();
-        assert_eq!(result.chunk_count, 1);
-        assert!(result.chunks[0].content.starts_with(' ') || result.chunks[0].content.len() < text.len());
-    }
-
-    #[test]
-    fn test_chunk_with_invalid_overlap() {
-        let config = ChunkingConfig {
-            max_characters: 10,
-            overlap: 20,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let result = chunk_text("Some text", &config);
-        assert!(result.is_err());
-        let err = result.unwrap_err();
-        assert!(matches!(err, KreuzbergError::Validation { .. }));
-    }
-
-    #[test]
-    fn test_chunk_text_with_type_text() {
-        let result = chunk_text_with_type("Simple text", 50, 10, true, ChunkerType::Text).unwrap();
-        assert_eq!(result.chunk_count, 1);
-        assert_eq!(result.chunks[0].content, "Simple text");
-    }
-
-    #[test]
-    fn test_chunk_text_with_type_markdown() {
-        let markdown = "# Header\n\nContent here.";
-        let result = chunk_text_with_type(markdown, 50, 10, true, ChunkerType::Markdown).unwrap();
-        assert_eq!(result.chunk_count, 1);
-        assert!(result.chunks[0].content.contains("# Header"));
-    }
-
-    #[test]
-    fn test_chunk_texts_batch_empty() {
-        let config = ChunkingConfig::default();
-        let texts: Vec<&str> = vec![];
-        let results = chunk_texts_batch(&texts, &config).unwrap();
-        assert_eq!(results.len(), 0);
-    }
-
-    #[test]
-    fn test_chunk_texts_batch_multiple() {
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let texts = vec!["First text", "Second text", "Third text"];
-        let results = chunk_texts_batch(&texts, &config).unwrap();
-        assert_eq!(results.len(), 3);
-        assert!(results.iter().all(|r| r.chunk_count >= 1));
-    }
-
-    #[test]
-    fn test_chunk_texts_batch_mixed_lengths() {
-        let config = ChunkingConfig {
-            max_characters: 20,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let texts = vec![
-            "Short",
-            "This is a longer text that should be split into multiple chunks",
-            "",
-        ];
-        let results = chunk_texts_batch(&texts, &config).unwrap();
-        assert_eq!(results.len(), 3);
-        assert_eq!(results[0].chunk_count, 1);
-        assert!(results[1].chunk_count > 1);
-        assert_eq!(results[2].chunk_count, 0);
-    }
-
-    #[test]
-    fn test_chunk_texts_batch_error_propagation() {
-        let config = ChunkingConfig {
-            max_characters: 10,
-            overlap: 20,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let texts = vec!["Text one", "Text two"];
-        let result = chunk_texts_batch(&texts, &config);
-        assert!(result.is_err());
-    }
-
-    #[test]
-    fn test_chunking_config_default() {
-        let config = ChunkingConfig::default();
-        assert_eq!(config.max_characters, 2000);
-        assert_eq!(config.overlap, 100);
-        assert!(config.trim);
-        assert_eq!(config.chunker_type, ChunkerType::Text);
-    }
-
-    #[test]
-    fn test_chunk_very_long_text() {
-        let config = ChunkingConfig {
-            max_characters: 100,
-            overlap: 20,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "a".repeat(1000);
-        let result = chunk_text(&text, &config).unwrap();
-        assert!(result.chunk_count >= 10);
-        assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 100));
-    }
-
-    #[test]
-    fn test_chunk_text_with_newlines() {
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Line one\nLine two\nLine three\nLine four\nLine five";
-        let result = chunk_text(text, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-    }
-
-    #[test]
-    fn test_chunk_markdown_with_lists() {
-        let config = ChunkingConfig {
-            max_characters: 100,
-            overlap: 10,
-            trim: true,
-            chunker_type: ChunkerType::Markdown,
-        };
-        let markdown = "# List Example\n\n- Item 1\n- Item 2\n- Item 3\n\nMore text.";
-        let result = chunk_text(markdown, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("- Item")));
-    }
-
-    #[test]
-    fn test_chunk_markdown_with_tables() {
-        let config = ChunkingConfig {
-            max_characters: 150,
-            overlap: 10,
-            trim: true,
-            chunker_type: ChunkerType::Markdown,
-        };
-        let markdown = "# Table\n\n| Col1 | Col2 |\n|------|------|\n| A | B |\n| C | D |";
-        let result = chunk_text(markdown, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("|")));
-    }
-
-    #[test]
-    fn test_chunk_special_characters() {
-        let config = ChunkingConfig {
-            max_characters: 50,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Special chars: @#$%^&*()[]{}|\\<>?/~`";
-        let result = chunk_text(text, &config).unwrap();
-        assert_eq!(result.chunk_count, 1);
-        assert!(result.chunks[0].content.contains("@#$%"));
-    }
-
-    #[test]
-    fn test_chunk_unicode_characters() {
-        let config = ChunkingConfig {
-            max_characters: 50,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "Unicode: 你好世界 🌍 café résumé";
-        let result = chunk_text(text, &config).unwrap();
-        assert_eq!(result.chunk_count, 1);
-        assert!(result.chunks[0].content.contains("你好"));
-        assert!(result.chunks[0].content.contains("🌍"));
-    }
-
-    #[test]
-    fn test_chunk_cjk_text() {
-        let config = ChunkingConfig {
-            max_characters: 30,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "日本語のテキストです。これは長い文章で、複数のチャンクに分割されるべきです。";
-        let result = chunk_text(text, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-    }
-
-    #[test]
-    fn test_chunk_mixed_languages() {
-        let config = ChunkingConfig {
-            max_characters: 40,
-            overlap: 5,
-            trim: true,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "English text mixed with 中文文本 and some français";
-        let result = chunk_text(text, &config).unwrap();
-        assert!(result.chunk_count >= 1);
-    }
-
-    #[test]
-    fn test_chunk_offset_calculation_with_overlap() {
-        let config = ChunkingConfig {
-            max_characters: 20,
-            overlap: 5,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
-        let result = chunk_text(text, &config).unwrap();
-
-        assert!(result.chunks.len() >= 2, "Expected at least 2 chunks");
-
-        for i in 0..result.chunks.len() {
-            let chunk = &result.chunks[i];
-            let metadata = &chunk.metadata;
-
-            assert_eq!(
-                metadata.char_end - metadata.char_start,
-                chunk.content.chars().count(),
-                "Chunk {} offset range doesn't match content length",
-                i
-            );
-
-            assert_eq!(metadata.chunk_index, i);
-            assert_eq!(metadata.total_chunks, result.chunks.len());
-        }
-
-        for i in 0..result.chunks.len() - 1 {
-            let current_chunk = &result.chunks[i];
-            let next_chunk = &result.chunks[i + 1];
-
-            assert!(
-                next_chunk.metadata.char_start < current_chunk.metadata.char_end,
-                "Chunk {} and {} don't overlap: next starts at {} but current ends at {}",
-                i,
-                i + 1,
-                next_chunk.metadata.char_start,
-                current_chunk.metadata.char_end
-            );
-
-            let overlap_size = current_chunk.metadata.char_end - next_chunk.metadata.char_start;
-            assert!(
-                overlap_size <= config.overlap + 10,
-                "Overlap between chunks {} and {} is too large: {}",
-                i,
-                i + 1,
-                overlap_size
-            );
-        }
-    }
-
-    #[test]
-    fn test_chunk_offset_calculation_without_overlap() {
-        let config = ChunkingConfig {
-            max_characters: 20,
-            overlap: 0,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
-        let result = chunk_text(text, &config).unwrap();
-
-        for i in 0..result.chunks.len() - 1 {
-            let current_chunk = &result.chunks[i];
-            let next_chunk = &result.chunks[i + 1];
-
-            assert!(
-                next_chunk.metadata.char_start >= current_chunk.metadata.char_end,
-                "Chunk {} and {} overlap when they shouldn't: next starts at {} but current ends at {}",
-                i,
-                i + 1,
-                next_chunk.metadata.char_start,
-                current_chunk.metadata.char_end
-            );
-        }
-    }
-
-    #[test]
-    fn test_chunk_offset_covers_full_text() {
-        let config = ChunkingConfig {
-            max_characters: 15,
-            overlap: 3,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "0123456789 ABCDEFGHIJ KLMNOPQRST UVWXYZ";
-        let result = chunk_text(text, &config).unwrap();
-
-        assert!(result.chunks.len() >= 2, "Expected multiple chunks");
-
-        assert_eq!(
-            result.chunks[0].metadata.char_start, 0,
-            "First chunk should start at position 0"
-        );
-
-        for i in 0..result.chunks.len() - 1 {
-            let current_chunk = &result.chunks[i];
-            let next_chunk = &result.chunks[i + 1];
-
-            assert!(
-                next_chunk.metadata.char_start <= current_chunk.metadata.char_end,
-                "Gap detected between chunk {} (ends at {}) and chunk {} (starts at {})",
-                i,
-                current_chunk.metadata.char_end,
-                i + 1,
-                next_chunk.metadata.char_start
-            );
-        }
-    }
-
-    #[test]
-    fn test_chunk_offset_with_various_overlap_sizes() {
-        for overlap in [0, 5, 10, 20] {
-            let config = ChunkingConfig {
-                max_characters: 30,
-                overlap,
-                trim: false,
-                chunker_type: ChunkerType::Text,
-            };
-            let text = "Word ".repeat(30);
-            let result = chunk_text(&text, &config).unwrap();
-
-            for chunk in &result.chunks {
-                assert!(
-                    chunk.metadata.char_end > chunk.metadata.char_start,
-                    "Invalid offset range for overlap {}: start={}, end={}",
-                    overlap,
-                    chunk.metadata.char_start,
-                    chunk.metadata.char_end
-                );
-            }
-
-            for chunk in &result.chunks {
-                assert!(
-                    chunk.metadata.char_start < text.chars().count(),
-                    "char_start with overlap {} is out of bounds: {}",
-                    overlap,
-                    chunk.metadata.char_start
-                );
-            }
-        }
-    }
-
-    #[test]
-    fn test_chunk_last_chunk_offset() {
-        let config = ChunkingConfig {
-            max_characters: 20,
-            overlap: 5,
-            trim: false,
-            chunker_type: ChunkerType::Text,
-        };
-        let text = "AAAAA BBBBB CCCCC DDDDD EEEEE";
-        let result = chunk_text(text, &config).unwrap();
-
-        assert!(result.chunks.len() >= 2, "Need multiple chunks for this test");
-
-        let last_chunk = result.chunks.last().unwrap();
-        let second_to_last = &result.chunks[result.chunks.len() - 2];
-
-        assert!(
-            last_chunk.metadata.char_start < second_to_last.metadata.char_end,
-            "Last chunk should overlap with previous chunk"
-        );
-
-        let expected_end = text.chars().count();
-        let last_chunk_covers_end =
-            last_chunk.content.trim_end() == text.trim_end() || last_chunk.metadata.char_end >= expected_end - 5;
-        assert!(last_chunk_covers_end, "Last chunk should cover the end of the text");
-    }
-}
1
|
+
//! Text chunking utilities.
|
|
2
|
+
//!
|
|
3
|
+
//! This module provides text chunking functionality using the `text-splitter` library.
|
|
4
|
+
//! It splits long text into smaller chunks while preserving semantic boundaries.
|
|
5
|
+
//!
|
|
6
|
+
//! # Features
|
|
7
|
+
//!
|
|
8
|
+
//! - **Smart splitting**: Respects word and sentence boundaries
|
|
9
|
+
//! - **Markdown-aware**: Preserves Markdown structure (headings, code blocks, lists)
|
|
10
|
+
//! - **Configurable overlap**: Overlap chunks to maintain context
|
|
11
|
+
//! - **Unicode support**: Handles CJK characters and emojis correctly
|
|
12
|
+
//! - **Batch processing**: Process multiple texts efficiently
|
|
13
|
+
//!
|
|
14
|
+
//! # Chunker Types
|
|
15
|
+
//!
|
|
16
|
+
//! - **Text**: Generic text splitter, splits on whitespace and punctuation
|
|
17
|
+
//! - **Markdown**: Markdown-aware splitter, preserves formatting and structure
|
|
18
|
+
//!
|
|
19
|
+
//! # Example
|
|
20
|
+
//!
|
|
21
|
+
//! ```rust
|
|
22
|
+
//! use kreuzberg::chunking::{chunk_text, ChunkingConfig, ChunkerType};
|
|
23
|
+
//!
|
|
24
|
+
//! # fn example() -> kreuzberg::Result<()> {
|
|
25
|
+
//! let config = ChunkingConfig {
|
|
26
|
+
//! max_characters: 500,
|
|
27
|
+
//! overlap: 50,
|
|
28
|
+
//! trim: true,
|
|
29
|
+
//! chunker_type: ChunkerType::Text,
|
|
30
|
+
//! };
|
|
31
|
+
//!
|
|
32
|
+
//! let long_text = "This is a very long document...".repeat(100);
|
|
33
|
+
//! let result = chunk_text(&long_text, &config)?;
|
|
34
|
+
//!
|
|
35
|
+
//! println!("Split into {} chunks", result.chunk_count);
|
|
36
|
+
//! for (i, chunk) in result.chunks.iter().enumerate() {
|
|
37
|
+
//! println!("Chunk {}: {} chars", i + 1, chunk.content.len());
|
|
38
|
+
//! }
|
|
39
|
+
//! # Ok(())
|
|
40
|
+
//! # }
|
|
41
|
+
//! ```
|
|
42
|
+
//!
|
|
43
|
+
//! # Use Cases
|
|
44
|
+
//!
|
|
45
|
+
//! - Splitting documents for LLM context windows
|
|
46
|
+
//! - Creating overlapping chunks for semantic search
|
|
47
|
+
//! - Processing large documents in batches
|
|
48
|
+
//! - Maintaining context across chunk boundaries
|
|
49
|
+
use crate::error::{KreuzbergError, Result};
|
|
50
|
+
use crate::types::{Chunk, ChunkMetadata};
|
|
51
|
+
use serde::{Deserialize, Serialize};
|
|
52
|
+
use text_splitter::{Characters, ChunkCapacity, ChunkConfig, MarkdownSplitter, TextSplitter};
|
|
53
|
+
|
|
54
|
+
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
|
55
|
+
pub enum ChunkerType {
|
|
56
|
+
Text,
|
|
57
|
+
Markdown,
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
61
|
+
pub struct ChunkingResult {
|
|
62
|
+
pub chunks: Vec<Chunk>,
|
|
63
|
+
pub chunk_count: usize,
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
pub struct ChunkingConfig {
|
|
67
|
+
pub max_characters: usize,
|
|
68
|
+
pub overlap: usize,
|
|
69
|
+
pub trim: bool,
|
|
70
|
+
pub chunker_type: ChunkerType,
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
impl Default for ChunkingConfig {
|
|
74
|
+
fn default() -> Self {
|
|
75
|
+
Self {
|
|
76
|
+
max_characters: 2000,
|
|
77
|
+
overlap: 100,
|
|
78
|
+
trim: true,
|
|
79
|
+
chunker_type: ChunkerType::Text,
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
fn build_chunk_config(max_characters: usize, overlap: usize, trim: bool) -> Result<ChunkConfig<Characters>> {
|
|
85
|
+
ChunkConfig::new(ChunkCapacity::new(max_characters))
|
|
86
|
+
.with_overlap(overlap)
|
|
87
|
+
.map(|config| config.with_trim(trim))
|
|
88
|
+
.map_err(|e| KreuzbergError::validation(format!("Invalid chunking configuration: {}", e)))
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
pub fn chunk_text(text: &str, config: &ChunkingConfig) -> Result<ChunkingResult> {
|
|
92
|
+
if text.is_empty() {
|
|
93
|
+
return Ok(ChunkingResult {
|
|
94
|
+
chunks: vec![],
|
|
95
|
+
chunk_count: 0,
|
|
96
|
+
});
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
let chunk_config = build_chunk_config(config.max_characters, config.overlap, config.trim)?;
|
|
100
|
+
|
|
101
|
+
let text_chunks: Vec<&str> = match config.chunker_type {
|
|
102
|
+
ChunkerType::Text => {
|
|
103
|
+
let splitter = TextSplitter::new(chunk_config);
|
|
104
|
+
splitter.chunks(text).collect()
|
|
105
|
+
}
|
|
106
|
+
ChunkerType::Markdown => {
|
|
107
|
+
let splitter = MarkdownSplitter::new(chunk_config);
|
|
108
|
+
splitter.chunks(text).collect()
|
|
109
|
+
}
|
|
110
|
+
};
|
|
111
|
+
|
|
112
|
+
let total_chunks = text_chunks.len();
|
|
113
|
+
let mut char_offset = 0;
|
|
114
|
+
|
|
115
|
+
let chunks: Vec<Chunk> = text_chunks
|
|
116
|
+
.into_iter()
|
|
117
|
+
.enumerate()
|
|
118
|
+
.map(|(index, chunk_text)| {
|
|
119
|
+
let char_start = char_offset;
|
|
120
|
+
let chunk_length = chunk_text.chars().count();
|
|
121
|
+
let char_end = char_start + chunk_length;
|
|
122
|
+
|
|
123
|
+
let overlap_chars = if index < total_chunks - 1 {
|
|
124
|
+
config.overlap.min(chunk_length)
|
|
125
|
+
} else {
|
|
126
|
+
0
|
|
127
|
+
};
|
|
128
|
+
char_offset = char_end - overlap_chars;
|
|
129
|
+
|
|
130
|
+
Chunk {
|
|
131
|
+
content: chunk_text.to_string(),
|
|
132
|
+
embedding: None,
|
|
133
|
+
metadata: ChunkMetadata {
|
|
134
|
+
char_start,
|
|
135
|
+
char_end,
|
|
136
|
+
token_count: None,
|
|
137
|
+
chunk_index: index,
|
|
138
|
+
total_chunks,
|
|
139
|
+
},
|
|
140
|
+
}
|
|
141
|
+
})
|
|
142
|
+
.collect();
|
|
143
|
+
|
|
144
|
+
let chunk_count = chunks.len();
|
|
145
|
+
|
|
146
|
+
Ok(ChunkingResult { chunks, chunk_count })
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
pub fn chunk_text_with_type(
|
|
150
|
+
text: &str,
|
|
151
|
+
max_characters: usize,
|
|
152
|
+
overlap: usize,
|
|
153
|
+
trim: bool,
|
|
154
|
+
chunker_type: ChunkerType,
|
|
155
|
+
) -> Result<ChunkingResult> {
|
|
156
|
+
let config = ChunkingConfig {
|
|
157
|
+
max_characters,
|
|
158
|
+
overlap,
|
|
159
|
+
trim,
|
|
160
|
+
chunker_type,
|
|
161
|
+
};
|
|
162
|
+
chunk_text(text, &config)
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
pub fn chunk_texts_batch(texts: &[&str], config: &ChunkingConfig) -> Result<Vec<ChunkingResult>> {
|
|
166
|
+
texts.iter().map(|text| chunk_text(text, config)).collect()
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
#[cfg(test)]
|
|
170
|
+
mod tests {
|
|
171
|
+
use super::*;
|
|
172
|
+
|
|
173
|
+
#[test]
|
|
174
|
+
fn test_chunk_empty_text() {
|
|
175
|
+
let config = ChunkingConfig::default();
|
|
176
|
+
let result = chunk_text("", &config).unwrap();
|
|
177
|
+
assert_eq!(result.chunks.len(), 0);
|
|
178
|
+
assert_eq!(result.chunk_count, 0);
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
#[test]
|
|
182
|
+
fn test_chunk_short_text_single_chunk() {
|
|
183
|
+
let config = ChunkingConfig {
|
|
184
|
+
max_characters: 100,
|
|
185
|
+
overlap: 10,
|
|
186
|
+
trim: true,
|
|
187
|
+
chunker_type: ChunkerType::Text,
|
|
188
|
+
};
|
|
189
|
+
let text = "This is a short text.";
|
|
190
|
+
let result = chunk_text(text, &config).unwrap();
|
|
191
|
+
assert_eq!(result.chunks.len(), 1);
|
|
192
|
+
assert_eq!(result.chunk_count, 1);
|
|
193
|
+
assert_eq!(result.chunks[0].content, text);
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
#[test]
|
|
197
|
+
fn test_chunk_long_text_multiple_chunks() {
|
|
198
|
+
let config = ChunkingConfig {
|
|
199
|
+
max_characters: 20,
|
|
200
|
+
overlap: 5,
|
|
201
|
+
trim: true,
|
|
202
|
+
chunker_type: ChunkerType::Text,
|
|
203
|
+
};
|
|
204
|
+
let text = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
|
|
205
|
+
let result = chunk_text(text, &config).unwrap();
|
|
206
|
+
assert!(result.chunk_count >= 2);
|
|
207
|
+
assert_eq!(result.chunks.len(), result.chunk_count);
|
|
208
|
+
assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 20));
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
#[test]
|
|
212
|
+
fn test_chunk_text_with_overlap() {
|
|
213
|
+
let config = ChunkingConfig {
|
|
214
|
+
max_characters: 20,
|
|
215
|
+
overlap: 5,
|
|
216
|
+
trim: true,
|
|
217
|
+
chunker_type: ChunkerType::Text,
|
|
218
|
+
};
|
|
219
|
+
let text = "abcdefghijklmnopqrstuvwxyz0123456789";
|
|
220
|
+
let result = chunk_text(text, &config).unwrap();
|
|
221
|
+
assert!(result.chunk_count >= 2);
|
|
222
|
+
|
|
223
|
+
if result.chunks.len() >= 2 {
|
|
224
|
+
let first_chunk_end = &result.chunks[0].content[result.chunks[0].content.len().saturating_sub(5)..];
|
|
225
|
+
assert!(
|
|
226
|
+
result.chunks[1].content.starts_with(first_chunk_end),
|
|
227
|
+
"Expected overlap '{}' at start of second chunk '{}'",
|
|
228
|
+
first_chunk_end,
|
|
229
|
+
result.chunks[1].content
|
|
230
|
+
);
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
#[test]
|
|
235
|
+
fn test_chunk_markdown_preserves_structure() {
|
|
236
|
+
let config = ChunkingConfig {
|
|
237
|
+
max_characters: 50,
|
|
238
|
+
overlap: 10,
|
|
239
|
+
trim: true,
|
|
240
|
+
chunker_type: ChunkerType::Markdown,
|
|
241
|
+
};
|
|
242
|
+
let markdown = "# Title\n\nParagraph one.\n\n## Section\n\nParagraph two.";
|
|
243
|
+
let result = chunk_text(markdown, &config).unwrap();
|
|
244
|
+
assert!(result.chunk_count >= 1);
|
|
245
|
+
assert!(result.chunks.iter().any(|chunk| chunk.content.contains("# Title")));
|
|
246
|
+
}
|
|
247
|
+
|
|
248
|
+
#[test]
|
|
249
|
+
fn test_chunk_markdown_with_code_blocks() {
|
|
250
|
+
let config = ChunkingConfig {
|
|
251
|
+
max_characters: 100,
|
|
252
|
+
overlap: 10,
|
|
253
|
+
trim: true,
|
|
254
|
+
chunker_type: ChunkerType::Markdown,
|
|
255
|
+
};
|
|
256
|
+
let markdown = "# Code Example\n\n```python\nprint('hello')\n```\n\nSome text after code.";
|
|
257
|
+
let result = chunk_text(markdown, &config).unwrap();
|
|
258
|
+
assert!(result.chunk_count >= 1);
|
|
259
|
+
assert!(result.chunks.iter().any(|chunk| chunk.content.contains("```")));
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
#[test]
|
|
263
|
+
fn test_chunk_markdown_with_links() {
|
|
264
|
+
let config = ChunkingConfig {
|
|
265
|
+
max_characters: 80,
|
|
266
|
+
overlap: 10,
|
|
267
|
+
trim: true,
|
|
268
|
+
chunker_type: ChunkerType::Markdown,
|
|
269
|
+
};
|
|
270
|
+
let markdown = "Check out [this link](https://example.com) for more info.";
|
|
271
|
+
let result = chunk_text(markdown, &config).unwrap();
|
|
272
|
+
assert_eq!(result.chunk_count, 1);
|
|
273
|
+
assert!(result.chunks[0].content.contains("[this link]"));
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
#[test]
|
|
277
|
+
fn test_chunk_text_with_trim() {
|
|
278
|
+
let config = ChunkingConfig {
|
|
279
|
+
max_characters: 30,
|
|
280
|
+
overlap: 5,
|
|
281
|
+
trim: true,
|
|
282
|
+
chunker_type: ChunkerType::Text,
|
|
283
|
+
};
|
|
284
|
+
let text = " Leading and trailing spaces should be trimmed ";
|
|
285
|
+
let result = chunk_text(text, &config).unwrap();
|
|
286
|
+
assert!(result.chunk_count >= 1);
|
|
287
|
+
assert!(result.chunks.iter().all(|chunk| !chunk.content.starts_with(' ')));
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
#[test]
|
|
291
|
+
fn test_chunk_text_without_trim() {
|
|
292
|
+
let config = ChunkingConfig {
|
|
293
|
+
max_characters: 30,
|
|
294
|
+
overlap: 5,
|
|
295
|
+
trim: false,
|
|
296
|
+
chunker_type: ChunkerType::Text,
|
|
297
|
+
};
|
|
298
|
+
let text = " Text with spaces ";
|
|
299
|
+
let result = chunk_text(text, &config).unwrap();
|
|
300
|
+
assert_eq!(result.chunk_count, 1);
|
|
301
|
+
assert!(result.chunks[0].content.starts_with(' ') || result.chunks[0].content.len() < text.len());
|
|
302
|
+
}
|
|
303
|
+
|
|
304
|
+
#[test]
|
|
305
|
+
fn test_chunk_with_invalid_overlap() {
|
|
306
|
+
let config = ChunkingConfig {
|
|
307
|
+
max_characters: 10,
|
|
308
|
+
overlap: 20,
|
|
309
|
+
trim: true,
|
|
310
|
+
chunker_type: ChunkerType::Text,
|
|
311
|
+
};
|
|
312
|
+
let result = chunk_text("Some text", &config);
|
|
313
|
+
assert!(result.is_err());
|
|
314
|
+
let err = result.unwrap_err();
|
|
315
|
+
assert!(matches!(err, KreuzbergError::Validation { .. }));
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
    #[test]
    fn test_chunk_text_with_type_text() {
        let result = chunk_text_with_type("Simple text", 50, 10, true, ChunkerType::Text).unwrap();
        assert_eq!(result.chunk_count, 1);
        assert_eq!(result.chunks[0].content, "Simple text");
    }

    #[test]
    fn test_chunk_text_with_type_markdown() {
        let markdown = "# Header\n\nContent here.";
        let result = chunk_text_with_type(markdown, 50, 10, true, ChunkerType::Markdown).unwrap();
        assert_eq!(result.chunk_count, 1);
        assert!(result.chunks[0].content.contains("# Header"));
    }

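    // Batch chunking: chunk_texts_batch returns one result per input text (an empty input
    // slice yields no results, an empty string yields zero chunks) and propagates a
    // validation error from the shared config.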
    #[test]
    fn test_chunk_texts_batch_empty() {
        let config = ChunkingConfig::default();
        let texts: Vec<&str> = vec![];
        let results = chunk_texts_batch(&texts, &config).unwrap();
        assert_eq!(results.len(), 0);
    }

    #[test]
    fn test_chunk_texts_batch_multiple() {
        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let texts = vec!["First text", "Second text", "Third text"];
        let results = chunk_texts_batch(&texts, &config).unwrap();
        assert_eq!(results.len(), 3);
        assert!(results.iter().all(|r| r.chunk_count >= 1));
    }

    #[test]
    fn test_chunk_texts_batch_mixed_lengths() {
        let config = ChunkingConfig {
            max_characters: 20,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let texts = vec![
            "Short",
            "This is a longer text that should be split into multiple chunks",
            "",
        ];
        let results = chunk_texts_batch(&texts, &config).unwrap();
        assert_eq!(results.len(), 3);
        assert_eq!(results[0].chunk_count, 1);
        assert!(results[1].chunk_count > 1);
        assert_eq!(results[2].chunk_count, 0);
    }

    #[test]
    fn test_chunk_texts_batch_error_propagation() {
        let config = ChunkingConfig {
            max_characters: 10,
            overlap: 20,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let texts = vec!["Text one", "Text two"];
        let result = chunk_texts_batch(&texts, &config);
        assert!(result.is_err());
    }

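    // Defaults: ChunkingConfig::default() uses max_characters 2000, overlap 100, trim
    // enabled, and the plain-text chunker.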
    #[test]
    fn test_chunking_config_default() {
        let config = ChunkingConfig::default();
        assert_eq!(config.max_characters, 2000);
        assert_eq!(config.overlap, 100);
        assert!(config.trim);
        assert_eq!(config.chunker_type, ChunkerType::Text);
    }

    #[test]
    fn test_chunk_very_long_text() {
        let config = ChunkingConfig {
            max_characters: 100,
            overlap: 20,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "a".repeat(1000);
        let result = chunk_text(&text, &config).unwrap();
        assert!(result.chunk_count >= 10);
        assert!(result.chunks.iter().all(|chunk| chunk.content.len() <= 100));
    }

    #[test]
    fn test_chunk_text_with_newlines() {
        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Line one\nLine two\nLine three\nLine four\nLine five";
        let result = chunk_text(text, &config).unwrap();
        assert!(result.chunk_count >= 1);
    }

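    // Markdown structures beyond headings: list items and table rows must still appear in
    // the chunk content after splitting.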
    #[test]
    fn test_chunk_markdown_with_lists() {
        let config = ChunkingConfig {
            max_characters: 100,
            overlap: 10,
            trim: true,
            chunker_type: ChunkerType::Markdown,
        };
        let markdown = "# List Example\n\n- Item 1\n- Item 2\n- Item 3\n\nMore text.";
        let result = chunk_text(markdown, &config).unwrap();
        assert!(result.chunk_count >= 1);
        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("- Item")));
    }

    #[test]
    fn test_chunk_markdown_with_tables() {
        let config = ChunkingConfig {
            max_characters: 150,
            overlap: 10,
            trim: true,
            chunker_type: ChunkerType::Markdown,
        };
        let markdown = "# Table\n\n| Col1 | Col2 |\n|------|------|\n| A | B |\n| C | D |";
        let result = chunk_text(markdown, &config).unwrap();
        assert!(result.chunk_count >= 1);
        assert!(result.chunks.iter().any(|chunk| chunk.content.contains("|")));
    }

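    // Non-ASCII robustness: special characters, Unicode text (including emoji), CJK text,
    // and mixed-language input must chunk successfully with the original characters
    // preserved in the chunk content.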
    #[test]
    fn test_chunk_special_characters() {
        let config = ChunkingConfig {
            max_characters: 50,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Special chars: @#$%^&*()[]{}|\\<>?/~`";
        let result = chunk_text(text, &config).unwrap();
        assert_eq!(result.chunk_count, 1);
        assert!(result.chunks[0].content.contains("@#$%"));
    }

    #[test]
    fn test_chunk_unicode_characters() {
        let config = ChunkingConfig {
            max_characters: 50,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "Unicode: 你好世界 🌍 café résumé";
        let result = chunk_text(text, &config).unwrap();
        assert_eq!(result.chunk_count, 1);
        assert!(result.chunks[0].content.contains("你好"));
        assert!(result.chunks[0].content.contains("🌍"));
    }

    #[test]
    fn test_chunk_cjk_text() {
        let config = ChunkingConfig {
            max_characters: 30,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "日本語のテキストです。これは長い文章で、複数のチャンクに分割されるべきです。";
        let result = chunk_text(text, &config).unwrap();
        assert!(result.chunk_count >= 1);
    }

    #[test]
    fn test_chunk_mixed_languages() {
        let config = ChunkingConfig {
            max_characters: 40,
            overlap: 5,
            trim: true,
            chunker_type: ChunkerType::Text,
        };
        let text = "English text mixed with 中文文本 and some français";
        let result = chunk_text(text, &config).unwrap();
        assert!(result.chunk_count >= 1);
    }

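    // Offset metadata: the remaining tests check that char_start/char_end are character
    // offsets into the original text consistent with each chunk's length, that chunk_index
    // and total_chunks are populated, that consecutive chunks overlap when overlap > 0 and
    // do not overlap when overlap == 0, that the first chunk starts at offset 0, that there
    // are no gaps between chunks, and that the last chunk reaches the end of the text.
    // For example (hypothetical boundaries; the exact split points depend on the chunker),
    // with max_characters = 20 and overlap = 5, a chunk ending at character 20 could be
    // followed by one starting at character 15.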
    #[test]
    fn test_chunk_offset_calculation_with_overlap() {
        let config = ChunkingConfig {
            max_characters: 20,
            overlap: 5,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
        let result = chunk_text(text, &config).unwrap();

        assert!(result.chunks.len() >= 2, "Expected at least 2 chunks");

        for i in 0..result.chunks.len() {
            let chunk = &result.chunks[i];
            let metadata = &chunk.metadata;

            assert_eq!(
                metadata.char_end - metadata.char_start,
                chunk.content.chars().count(),
                "Chunk {} offset range doesn't match content length",
                i
            );

            assert_eq!(metadata.chunk_index, i);
            assert_eq!(metadata.total_chunks, result.chunks.len());
        }

        for i in 0..result.chunks.len() - 1 {
            let current_chunk = &result.chunks[i];
            let next_chunk = &result.chunks[i + 1];

            assert!(
                next_chunk.metadata.char_start < current_chunk.metadata.char_end,
                "Chunk {} and {} don't overlap: next starts at {} but current ends at {}",
                i,
                i + 1,
                next_chunk.metadata.char_start,
                current_chunk.metadata.char_end
            );

            let overlap_size = current_chunk.metadata.char_end - next_chunk.metadata.char_start;
            assert!(
                overlap_size <= config.overlap + 10,
                "Overlap between chunks {} and {} is too large: {}",
                i,
                i + 1,
                overlap_size
            );
        }
    }

    #[test]
    fn test_chunk_offset_calculation_without_overlap() {
        let config = ChunkingConfig {
            max_characters: 20,
            overlap: 0,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
        let result = chunk_text(text, &config).unwrap();

        for i in 0..result.chunks.len() - 1 {
            let current_chunk = &result.chunks[i];
            let next_chunk = &result.chunks[i + 1];

            assert!(
                next_chunk.metadata.char_start >= current_chunk.metadata.char_end,
                "Chunk {} and {} overlap when they shouldn't: next starts at {} but current ends at {}",
                i,
                i + 1,
                next_chunk.metadata.char_start,
                current_chunk.metadata.char_end
            );
        }
    }

    #[test]
    fn test_chunk_offset_covers_full_text() {
        let config = ChunkingConfig {
            max_characters: 15,
            overlap: 3,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "0123456789 ABCDEFGHIJ KLMNOPQRST UVWXYZ";
        let result = chunk_text(text, &config).unwrap();

        assert!(result.chunks.len() >= 2, "Expected multiple chunks");

        assert_eq!(
            result.chunks[0].metadata.char_start, 0,
            "First chunk should start at position 0"
        );

        for i in 0..result.chunks.len() - 1 {
            let current_chunk = &result.chunks[i];
            let next_chunk = &result.chunks[i + 1];

            assert!(
                next_chunk.metadata.char_start <= current_chunk.metadata.char_end,
                "Gap detected between chunk {} (ends at {}) and chunk {} (starts at {})",
                i,
                current_chunk.metadata.char_end,
                i + 1,
                next_chunk.metadata.char_start
            );
        }
    }

    #[test]
    fn test_chunk_offset_with_various_overlap_sizes() {
        for overlap in [0, 5, 10, 20] {
            let config = ChunkingConfig {
                max_characters: 30,
                overlap,
                trim: false,
                chunker_type: ChunkerType::Text,
            };
            let text = "Word ".repeat(30);
            let result = chunk_text(&text, &config).unwrap();

            for chunk in &result.chunks {
                assert!(
                    chunk.metadata.char_end > chunk.metadata.char_start,
                    "Invalid offset range for overlap {}: start={}, end={}",
                    overlap,
                    chunk.metadata.char_start,
                    chunk.metadata.char_end
                );
            }

            for chunk in &result.chunks {
                assert!(
                    chunk.metadata.char_start < text.chars().count(),
                    "char_start with overlap {} is out of bounds: {}",
                    overlap,
                    chunk.metadata.char_start
                );
            }
        }
    }

    #[test]
    fn test_chunk_last_chunk_offset() {
        let config = ChunkingConfig {
            max_characters: 20,
            overlap: 5,
            trim: false,
            chunker_type: ChunkerType::Text,
        };
        let text = "AAAAA BBBBB CCCCC DDDDD EEEEE";
        let result = chunk_text(text, &config).unwrap();

        assert!(result.chunks.len() >= 2, "Need multiple chunks for this test");

        let last_chunk = result.chunks.last().unwrap();
        let second_to_last = &result.chunks[result.chunks.len() - 2];

        assert!(
            last_chunk.metadata.char_start < second_to_last.metadata.char_end,
            "Last chunk should overlap with previous chunk"
        );

        let expected_end = text.chars().count();
        let last_chunk_covers_end =
            last_chunk.content.trim_end() == text.trim_end() || last_chunk.metadata.char_end >= expected_end - 5;
        assert!(last_chunk_covers_end, "Last chunk should cover the end of the text");
    }
}