kreuzberg 4.0.0.pre.rc.13 → 4.0.0.pre.rc.15
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
- checksums.yaml +4 -4
- data/.gitignore +14 -14
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -1
- data/.rubocop.yml +538 -538
- data/Gemfile +8 -8
- data/Gemfile.lock +104 -2
- data/README.md +454 -454
- data/Rakefile +33 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -341
- data/ext/kreuzberg_rb/extconf.rb +45 -45
- data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
- data/ext/kreuzberg_rb/native/Cargo.lock +6750 -6941
- data/ext/kreuzberg_rb/native/Cargo.toml +53 -54
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +52 -15
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +214 -214
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +81 -81
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +724 -724
- data/lib/kreuzberg/error_context.rb +80 -80
- data/lib/kreuzberg/errors.rb +118 -118
- data/lib/kreuzberg/extraction_api.rb +340 -340
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +279 -279
- data/lib/kreuzberg/setup_lib_path.rb +80 -80
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +109 -109
- data/lib/{pdfium.dll → libpdfium.so} +0 -0
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +546 -546
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -85
- data/spec/binding/cli_spec.rb +55 -55
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -41
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/Cargo.toml +2 -2
- data/vendor/kreuzberg/Cargo.toml +5 -5
- data/vendor/kreuzberg/README.md +230 -230
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
- data/vendor/kreuzberg/build.rs +887 -843
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +87 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
- data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
- data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
- data/vendor/kreuzberg/src/core/config.rs +1080 -1080
- data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
- data/vendor/kreuzberg/src/core/io.rs +329 -329
- data/vendor/kreuzberg/src/core/mime.rs +605 -605
- data/vendor/kreuzberg/src/core/mod.rs +47 -47
- data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
- data/vendor/kreuzberg/src/embeddings.rs +500 -500
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +634 -601
- data/vendor/kreuzberg/src/extraction/image.rs +491 -491
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -574
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
- data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
- data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
- data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
- data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
- data/vendor/kreuzberg/src/extractors/email.rs +157 -157
- data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
- data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
- data/vendor/kreuzberg/src/extractors/html.rs +407 -407
- data/vendor/kreuzberg/src/extractors/image.rs +219 -219
- data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
- data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
- data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
- data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
- data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
- data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
- data/vendor/kreuzberg/src/extractors/pdf.rs +749 -749
- data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
- data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
- data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
- data/vendor/kreuzberg/src/extractors/security.rs +484 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
- data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
- data/vendor/kreuzberg/src/extractors/text.rs +265 -265
- data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
- data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
- data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
- data/vendor/kreuzberg/src/lib.rs +113 -113
- data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
- data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -154
- data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
- data/vendor/kreuzberg/src/pdf/bundled.rs +452 -346
- data/vendor/kreuzberg/src/pdf/error.rs +130 -130
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
- data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
- data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
- data/vendor/kreuzberg/src/pdf/table.rs +420 -420
- data/vendor/kreuzberg/src/pdf/text.rs +240 -240
- data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
- data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
- data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
- data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
- data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +25 -25
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +1055 -1055
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
- data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
- data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
- data/vendor/kreuzberg/tests/config_features.rs +612 -612
- data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
- data/vendor/kreuzberg/tests/core_integration.rs +510 -510
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
- data/vendor/kreuzberg/tests/email_integration.rs +327 -327
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
- data/vendor/kreuzberg/tests/error_handling.rs +402 -402
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
- data/vendor/kreuzberg/tests/format_integration.rs +165 -164
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
- data/vendor/kreuzberg/tests/image_integration.rs +255 -255
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
- data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
- data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
- data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
- data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
- data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
- data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
- data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
- data/vendor/kreuzberg/tests/security_validation.rs +416 -416
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
- data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
- data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
- data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
- data/vendor/kreuzberg-tesseract/LICENSE +22 -22
- data/vendor/kreuzberg-tesseract/README.md +399 -399
- data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
- data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
- data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
- data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
- data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
- data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
- data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
- data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
- data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
- data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
- data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
- data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
- data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
- data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
- data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
- data/vendor/rb-sys/Cargo.lock +393 -393
- data/vendor/rb-sys/Cargo.toml +70 -70
- data/vendor/rb-sys/Cargo.toml.orig +57 -57
- data/vendor/rb-sys/LICENSE-APACHE +190 -190
- data/vendor/rb-sys/LICENSE-MIT +21 -21
- data/vendor/rb-sys/build/features.rs +111 -111
- data/vendor/rb-sys/build/main.rs +286 -286
- data/vendor/rb-sys/build/stable_api_config.rs +155 -155
- data/vendor/rb-sys/build/version.rs +50 -50
- data/vendor/rb-sys/readme.md +36 -36
- data/vendor/rb-sys/src/bindings.rs +21 -21
- data/vendor/rb-sys/src/hidden.rs +11 -11
- data/vendor/rb-sys/src/lib.rs +35 -35
- data/vendor/rb-sys/src/macros.rs +371 -371
- data/vendor/rb-sys/src/memory.rs +53 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
- data/vendor/rb-sys/src/special_consts.rs +31 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
- data/vendor/rb-sys/src/stable_api.rs +260 -260
- data/vendor/rb-sys/src/symbol.rs +31 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
- data/vendor/rb-sys/src/utils.rs +89 -89
- data/vendor/rb-sys/src/value_type.rs +7 -7
- metadata +81 -22
- data/vendor/kreuzberg-ffi/Cargo.toml +0 -63
- data/vendor/kreuzberg-ffi/README.md +0 -851
- data/vendor/kreuzberg-ffi/build.rs +0 -176
- data/vendor/kreuzberg-ffi/cbindgen.toml +0 -27
- data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +0 -12
- data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +0 -12
- data/vendor/kreuzberg-ffi/kreuzberg.h +0 -1087
- data/vendor/kreuzberg-ffi/src/lib.rs +0 -3616
- data/vendor/kreuzberg-ffi/src/panic_shield.rs +0 -247
- data/vendor/kreuzberg-ffi/tests.disabled/README.md +0 -48
- data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +0 -299
- data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +0 -346
- data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +0 -232
- data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +0 -470
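
The fully rewritten test file data/vendor/kreuzberg/tests/batch_orchestration.rs is reproduced below. For orientation, the batch API those tests exercise follows the pattern sketched here. This is a minimal sketch assembled only from the imports and calls visible in the diff, not from the gem's documentation; the input paths and the concurrency cap of 4 are illustrative placeholders.

use kreuzberg::core::config::ExtractionConfig;
use kreuzberg::core::extractor::batch_extract_file;
use std::path::PathBuf;

#[tokio::main]
async fn main() {
    // Cap concurrent extractions; per the doc comment on
    // test_batch_documents_default_concurrency, leaving this unset
    // defaults to num_cpus * 2.
    let config = ExtractionConfig {
        max_concurrent_extractions: Some(4), // placeholder value
        ..Default::default()
    };

    // Placeholder inputs; the tests resolve fixture paths via a helpers module.
    let paths: Vec<PathBuf> = vec![PathBuf::from("a.txt"), PathBuf::from("b.json")];

    // One result per input, in input order. Per-document failures are
    // surfaced through result.metadata.error instead of failing the batch.
    let results = batch_extract_file(paths, &config).await.expect("batch call failed");
    for (i, result) in results.iter().enumerate() {
        if result.metadata.error.is_some() {
            eprintln!("document {} failed to extract", i);
        } else {
            println!("document {}: {} bytes of extracted text", i, result.content.len());
        }
    }
}

The same pattern applies to in-memory data via batch_extract_bytes, which takes (bytes, MIME type) pairs rather than paths, as the tests below show.
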
|
@@ -1,556 +1,556 @@
|
|
|
1
|
-
//! Batch processing orchestration tests.
|
|
2
|
-
//!
|
|
3
|
-
//! Validates efficient parallel processing at multiple levels:
|
|
4
|
-
//! - Multiple documents in parallel
|
|
5
|
-
//! - Multiple pages within PDFs
|
|
6
|
-
//! - OCR across pages
|
|
7
|
-
//! - File I/O optimization
|
|
8
|
-
//! - Resource utilization (CPU cores)
|
|
9
|
-
|
|
10
|
-
use kreuzberg::core::config::ExtractionConfig;
|
|
11
|
-
use kreuzberg::core::extractor::{batch_extract_bytes, batch_extract_file};
|
|
12
|
-
use std::time::{Duration, Instant};
|
|
13
|
-
|
|
14
|
-
#[cfg(feature = "ocr")]
|
|
15
|
-
use kreuzberg::core::config::OcrConfig;
|
|
16
|
-
|
|
17
|
-
#[cfg(feature = "ocr")]
|
|
18
|
-
use kreuzberg::core::extractor::extract_file_sync;
|
|
19
|
-
|
|
20
|
-
mod helpers;
|
|
21
|
-
|
|
22
|
-
fn trim_trailing_newlines(value: &str) -> &str {
|
|
23
|
-
value.trim_end_matches(['\n', '\r'])
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
fn assert_text_content(actual: &str, expected: &str) {
|
|
27
|
-
assert_eq!(
|
|
28
|
-
trim_trailing_newlines(actual),
|
|
29
|
-
expected,
|
|
30
|
-
"Content mismatch after trimming trailing newlines"
|
|
31
|
-
);
|
|
32
|
-
}
|
|
33
|
-
|
|
34
|
-
/// Test that batch extraction processes documents in parallel.
|
|
35
|
-
///
|
|
36
|
-
/// Validates:
|
|
37
|
-
/// - Multiple documents process concurrently
|
|
38
|
-
/// - Parallel processing is faster than sequential
|
|
39
|
-
/// - Results maintain correct order
|
|
40
|
-
#[tokio::test]
|
|
41
|
-
async fn test_batch_documents_parallel_execution() {
|
|
42
|
-
use helpers::get_test_file_path;
|
|
43
|
-
use std::path::PathBuf;
|
|
44
|
-
|
|
45
|
-
let config = ExtractionConfig::default();
|
|
46
|
-
|
|
47
|
-
let test_files = vec![
|
|
48
|
-
"text/contract.txt",
|
|
49
|
-
"json/sample_document.json",
|
|
50
|
-
"xml/simple_note.xml",
|
|
51
|
-
"text/readme.md",
|
|
52
|
-
];
|
|
53
|
-
|
|
54
|
-
let mut paths: Vec<PathBuf> = Vec::new();
|
|
55
|
-
for _ in 0..5 {
|
|
56
|
-
for file in &test_files {
|
|
57
|
-
paths.push(get_test_file_path(file));
|
|
58
|
-
}
|
|
59
|
-
}
|
|
60
|
-
|
|
61
|
-
let parallel_start = Instant::now();
|
|
62
|
-
let results = batch_extract_file(paths.clone(), &config).await;
|
|
63
|
-
let parallel_duration = parallel_start.elapsed();
|
|
64
|
-
|
|
65
|
-
assert!(results.is_ok(), "Batch extraction should succeed");
|
|
66
|
-
let results = results.unwrap();
|
|
67
|
-
assert_eq!(results.len(), 20, "Should process all 20 files");
|
|
68
|
-
|
|
69
|
-
for result in &results {
|
|
70
|
-
assert!(
|
|
71
|
-
!result.content.is_empty() || result.metadata.error.is_some(),
|
|
72
|
-
"Each result should have content or error"
|
|
73
|
-
);
|
|
74
|
-
}
|
|
75
|
-
|
|
76
|
-
assert!(
|
|
77
|
-
parallel_duration < Duration::from_secs(5),
|
|
78
|
-
"Batch processing 20 files should take <5s, took: {:?}",
|
|
79
|
-
parallel_duration
|
|
80
|
-
);
|
|
81
|
-
}
|
|
82
|
-
|
|
83
|
-
/// Test concurrency limiting in batch processing.
|
|
84
|
-
///
|
|
85
|
-
/// Validates that batch extraction respects max_concurrent_extractions config.
|
|
86
|
-
#[tokio::test]
|
|
87
|
-
async fn test_batch_documents_concurrency_limiting() {
|
|
88
|
-
use helpers::get_test_file_path;
|
|
89
|
-
|
|
90
|
-
let config = ExtractionConfig {
|
|
91
|
-
max_concurrent_extractions: Some(2),
|
|
92
|
-
..Default::default()
|
|
93
|
-
};
|
|
94
|
-
|
|
95
|
-
let paths = vec![
|
|
96
|
-
get_test_file_path("text/contract.txt"),
|
|
97
|
-
get_test_file_path("json/sample_document.json"),
|
|
98
|
-
get_test_file_path("xml/simple_note.xml"),
|
|
99
|
-
get_test_file_path("text/readme.md"),
|
|
100
|
-
];
|
|
101
|
-
|
|
102
|
-
let results = batch_extract_file(paths, &config).await;
|
|
103
|
-
|
|
104
|
-
assert!(results.is_ok());
|
|
105
|
-
let results = results.unwrap();
|
|
106
|
-
assert_eq!(results.len(), 4);
|
|
107
|
-
}
|
|
108
|
-
|
|
109
|
-
/// Test batch extraction with CPU-bound limit (default: num_cpus * 2).
|
|
110
|
-
#[tokio::test]
|
|
111
|
-
async fn test_batch_documents_default_concurrency() {
|
|
112
|
-
use helpers::get_test_file_path;
|
|
113
|
-
|
|
114
|
-
let config = ExtractionConfig::default();
|
|
115
|
-
|
|
116
|
-
let mut paths = Vec::new();
|
|
117
|
-
for _ in 0..13 {
|
|
118
|
-
paths.push(get_test_file_path("text/contract.txt"));
|
|
119
|
-
paths.push(get_test_file_path("json/sample_document.json"));
|
|
120
|
-
paths.push(get_test_file_path("xml/simple_note.xml"));
|
|
121
|
-
paths.push(get_test_file_path("text/readme.md"));
|
|
122
|
-
}
|
|
123
|
-
let paths = paths.into_iter().take(50).collect::<Vec<_>>();
|
|
124
|
-
|
|
125
|
-
let start = Instant::now();
|
|
126
|
-
let results = batch_extract_file(paths, &config).await;
|
|
127
|
-
let duration = start.elapsed();
|
|
128
|
-
|
|
129
|
-
assert!(results.is_ok());
|
|
130
|
-
let results = results.unwrap();
|
|
131
|
-
assert_eq!(results.len(), 50);
|
|
132
|
-
|
|
133
|
-
println!("Processed 50 files in {:?}", duration);
|
|
134
|
-
assert!(
|
|
135
|
-
duration < Duration::from_secs(10),
|
|
136
|
-
"50 files should process in <10s with parallelism, took: {:?}",
|
|
137
|
-
duration
|
|
138
|
-
);
|
|
139
|
-
}
|
|
140
|
-
|
|
141
|
-
/// Test that batch processing maintains result order.
|
|
142
|
-
#[cfg(feature = "xml")]
|
|
143
|
-
#[tokio::test]
|
|
144
|
-
async fn test_batch_documents_preserves_order() {
|
|
145
|
-
use helpers::get_test_file_path;
|
|
146
|
-
|
|
147
|
-
let config = ExtractionConfig::default();
|
|
148
|
-
|
|
149
|
-
let paths = vec![
|
|
150
|
-
get_test_file_path("text/contract.txt"),
|
|
151
|
-
get_test_file_path("json/sample_document.json"),
|
|
152
|
-
get_test_file_path("xml/simple_note.xml"),
|
|
153
|
-
];
|
|
154
|
-
|
|
155
|
-
let results = batch_extract_file(paths, &config).await.unwrap();
|
|
156
|
-
|
|
157
|
-
assert_eq!(results.len(), 3, "Should have 3 results");
|
|
158
|
-
|
|
159
|
-
assert!(!results[0].content.is_empty(), "First result should have content");
|
|
160
|
-
assert!(!results[1].content.is_empty(), "Second result should have content");
|
|
161
|
-
assert!(!results[2].content.is_empty(), "Third result should have content");
|
|
162
|
-
|
|
163
|
-
assert!(
|
|
164
|
-
results[0].content.contains("contract"),
|
|
165
|
-
"First result should be from contract.txt, got: '{}'",
|
|
166
|
-
results[0].content
|
|
167
|
-
);
|
|
168
|
-
assert!(
|
|
169
|
-
results[1].content.contains("Sample") || results[1].content.contains("author"),
|
|
170
|
-
"Second result should be from JSON document, got: '{}'",
|
|
171
|
-
results[1].content
|
|
172
|
-
);
|
|
173
|
-
assert!(
|
|
174
|
-
results[2].content.contains("Tove") || results[2].content.contains("note"),
|
|
175
|
-
"Third result should be from XML note, got: '{}'",
|
|
176
|
-
results[2].content
|
|
177
|
-
);
|
|
178
|
-
}
|
|
179
|
-
|
|
180
|
-
/// Test that multi-page PDF extraction is efficient.
|
|
181
|
-
///
|
|
182
|
-
/// Validates:
|
|
183
|
-
/// - Multiple pages are processed
|
|
184
|
-
/// - OCR is applied to all pages if needed
|
|
185
|
-
/// - Content from all pages is combined
|
|
186
|
-
#[cfg(feature = "pdf")]
|
|
187
|
-
#[tokio::test]
|
|
188
|
-
async fn test_multipage_pdf_extraction() {
|
|
189
|
-
use helpers::{get_test_file_path, skip_if_missing};
|
|
190
|
-
|
|
191
|
-
if skip_if_missing("pdfs/multi_page.pdf") {
|
|
192
|
-
tracing::debug!("Skipping multi-page PDF test: test file not available");
|
|
193
|
-
return;
|
|
194
|
-
}
|
|
195
|
-
|
|
196
|
-
let config = ExtractionConfig::default();
|
|
197
|
-
let pdf_path = get_test_file_path("pdfs/multi_page.pdf");
|
|
198
|
-
|
|
199
|
-
let start = Instant::now();
|
|
200
|
-
let result = kreuzberg::core::extractor::extract_file(&pdf_path, None, &config).await;
|
|
201
|
-
let duration = start.elapsed();
|
|
202
|
-
|
|
203
|
-
assert!(result.is_ok(), "Multi-page PDF extraction should succeed");
|
|
204
|
-
let extraction = result.unwrap();
|
|
205
|
-
|
|
206
|
-
assert!(!extraction.content.is_empty(), "Should extract text from all pages");
|
|
207
|
-
println!("Extracted multi-page PDF in {:?}", duration);
|
|
208
|
-
}
|
|
209
|
-
|
|
210
|
-
/// Test concurrent PDF extractions (multiple PDFs at once).
|
|
211
|
-
#[cfg(feature = "pdf")]
|
|
212
|
-
#[tokio::test]
|
|
213
|
-
async fn test_concurrent_pdf_extractions() {
|
|
214
|
-
use helpers::{get_test_file_path, skip_if_missing};
|
|
215
|
-
|
|
216
|
-
if skip_if_missing("pdfs/simple.pdf") {
|
|
217
|
-
tracing::debug!("Skipping concurrent PDF test: test file not available");
|
|
218
|
-
return;
|
|
219
|
-
}
|
|
220
|
-
|
|
221
|
-
let config = ExtractionConfig::default();
|
|
222
|
-
|
|
223
|
-
let mut paths = Vec::new();
|
|
224
|
-
for _ in 0..10 {
|
|
225
|
-
paths.push(get_test_file_path("pdfs/simple.pdf"));
|
|
226
|
-
}
|
|
227
|
-
|
|
228
|
-
let start = Instant::now();
|
|
229
|
-
let results = batch_extract_file(paths, &config).await;
|
|
230
|
-
let duration = start.elapsed();
|
|
231
|
-
|
|
232
|
-
assert!(results.is_ok());
|
|
233
|
-
let results = results.unwrap();
|
|
234
|
-
assert_eq!(results.len(), 10);
|
|
235
|
-
|
|
236
|
-
println!("Processed 10 PDFs in {:?}", duration);
|
|
237
|
-
}
|
|
238
|
-
|
|
239
|
-
/// Test OCR on multi-page scanned document.
|
|
240
|
-
///
|
|
241
|
-
/// Validates:
|
|
242
|
-
/// - All pages are OCR'd
|
|
243
|
-
/// - Results are combined correctly
|
|
244
|
-
/// - Processing is efficient
|
|
245
|
-
#[cfg(feature = "ocr")]
|
|
246
|
-
#[test]
|
|
247
|
-
fn test_ocr_multipage_efficiency() {
|
|
248
|
-
use helpers::{get_test_file_path, skip_if_missing};
|
|
249
|
-
|
|
250
|
-
if skip_if_missing("images/ocr_image.jpg") {
|
|
251
|
-
tracing::debug!("Skipping OCR multi-page test: test file not available");
|
|
252
|
-
return;
|
|
253
|
-
}
|
|
254
|
-
|
|
255
|
-
let config = ExtractionConfig {
|
|
256
|
-
ocr: Some(OcrConfig {
|
|
257
|
-
backend: "tesseract".to_string(),
|
|
258
|
-
language: "eng".to_string(),
|
|
259
|
-
tesseract_config: None,
|
|
260
|
-
}),
|
|
261
|
-
force_ocr: false,
|
|
262
|
-
use_cache: true,
|
|
263
|
-
..Default::default()
|
|
264
|
-
};
|
|
265
|
-
|
|
266
|
-
let file_path = get_test_file_path("images/ocr_image.jpg");
|
|
267
|
-
|
|
268
|
-
let start = Instant::now();
|
|
269
|
-
let result1 = extract_file_sync(&file_path, None, &config);
|
|
270
|
-
let first_duration = start.elapsed();
|
|
271
|
-
|
|
272
|
-
assert!(result1.is_ok(), "First OCR should succeed");
|
|
273
|
-
|
|
274
|
-
let start = Instant::now();
|
|
275
|
-
let result2 = extract_file_sync(&file_path, None, &config);
|
|
276
|
-
let second_duration = start.elapsed();
|
|
277
|
-
|
|
278
|
-
assert!(result2.is_ok(), "Second OCR should succeed");
|
|
279
|
-
|
|
280
|
-
println!(
|
|
281
|
-
"OCR timing: first={:?}, cached={:?}, speedup={:.1}x",
|
|
282
|
-
first_duration,
|
|
283
|
-
second_duration,
|
|
284
|
-
first_duration.as_secs_f64() / second_duration.as_secs_f64().max(0.001)
|
|
285
|
-
);
|
|
286
|
-
|
|
287
|
-
assert!(
|
|
288
|
-
second_duration < first_duration / 2,
|
|
289
|
-
"Cached OCR should be at least 2x faster. First: {:?}, Second: {:?}",
|
|
290
|
-
first_duration,
|
|
291
|
-
second_duration
|
|
292
|
-
);
|
|
293
|
-
}
|
|
294
|
-
|
|
295
|
-
/// Test parallel processing of byte arrays.
|
|
296
|
-
///
|
|
297
|
-
/// Validates that batch_extract_bytes processes data in parallel.
|
|
298
|
-
#[tokio::test]
|
|
299
|
-
async fn test_batch_bytes_parallel_processing() {
|
|
300
|
-
let config = ExtractionConfig::default();
|
|
301
|
-
|
|
302
|
-
let contents: Vec<(Vec<u8>, &str)> = (0..30)
|
|
303
|
-
.map(|i| {
|
|
304
|
-
let content = format!("Test content number {}", i);
|
|
305
|
-
(content.into_bytes(), "text/plain")
|
|
306
|
-
})
|
|
307
|
-
.collect();
|
|
308
|
-
|
|
309
|
-
let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
|
|
310
|
-
|
|
311
|
-
let start = Instant::now();
|
|
312
|
-
let results = batch_extract_bytes(contents_ref, &config).await;
|
|
313
|
-
let duration = start.elapsed();
|
|
314
|
-
|
|
315
|
-
assert!(results.is_ok());
|
|
316
|
-
let results = results.unwrap();
|
|
317
|
-
assert_eq!(results.len(), 30);
|
|
318
|
-
|
|
319
|
-
for (i, result) in results.iter().enumerate() {
|
|
320
|
-
let expected = format!("Test content number {}", i);
|
|
321
|
-
assert_text_content(&result.content, &expected);
|
|
322
|
-
}
|
|
323
|
-
|
|
324
|
-
println!("Batch processed 30 byte arrays in {:?}", duration);
|
|
325
|
-
}
|
|
326
|
-
|
|
327
|
-
/// Test error handling in batch bytes processing.
|
|
328
|
-
#[tokio::test]
|
|
329
|
-
async fn test_batch_bytes_mixed_valid_invalid() {
|
|
330
|
-
let config = ExtractionConfig::default();
|
|
331
|
-
|
|
332
|
-
let contents = vec![
|
|
333
|
-
(b"valid content 1".as_slice(), "text/plain"),
|
|
334
|
-
(b"invalid content".as_slice(), "invalid/mime"),
|
|
335
|
-
(b"valid content 2".as_slice(), "text/plain"),
|
|
336
|
-
(b"more invalid".as_slice(), "bad/type"),
|
|
337
|
-
(b"valid content 3".as_slice(), "text/plain"),
|
|
338
|
-
];
|
|
339
|
-
|
|
340
|
-
let results = batch_extract_bytes(contents, &config).await;
|
|
341
|
-
|
|
342
|
-
assert!(results.is_ok());
|
|
343
|
-
let results = results.unwrap();
|
|
344
|
-
assert_eq!(results.len(), 5);
|
|
345
|
-
|
|
346
|
-
assert_text_content(&results[0].content, "valid content 1");
|
|
347
|
-
assert_text_content(&results[2].content, "valid content 2");
|
|
348
|
-
assert_text_content(&results[4].content, "valid content 3");
|
|
349
|
-
|
|
350
|
-
assert!(results[1].metadata.error.is_some());
|
|
351
|
-
assert!(results[3].metadata.error.is_some());
|
|
352
|
-
}
|
|
353
|
-
|
|
354
|
-
/// Test that batch processing utilizes multiple CPU cores.
|
|
355
|
-
///
|
|
356
|
-
/// Validates that parallel extraction actually runs in parallel,
|
|
357
|
-
/// not just sequentially with fancy task management.
|
|
358
|
-
#[tokio::test]
|
|
359
|
-
async fn test_batch_utilizes_multiple_cores() {
|
|
360
|
-
let config = ExtractionConfig {
|
|
361
|
-
max_concurrent_extractions: Some(num_cpus::get()),
|
|
362
|
-
..Default::default()
|
|
363
|
-
};
|
|
364
|
-
|
|
365
|
-
let mut contents = Vec::new();
|
|
366
|
-
for i in 0..20 {
|
|
367
|
-
let json = format!(
|
|
368
|
-
r#"{{"id": {}, "data": "{}", "nested": {{"value": "{}"}}}}"#,
|
|
369
|
-
i,
|
|
370
|
-
"x".repeat(100),
|
|
371
|
-
"y".repeat(100)
|
|
372
|
-
);
|
|
373
|
-
contents.push((json.into_bytes(), "application/json"));
|
|
374
|
-
}
|
|
375
|
-
|
|
376
|
-
let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
|
|
377
|
-
|
|
378
|
-
let start = Instant::now();
|
|
379
|
-
let results = batch_extract_bytes(contents_ref, &config).await;
|
|
380
|
-
let duration = start.elapsed();
|
|
381
|
-
|
|
382
|
-
assert!(results.is_ok());
|
|
383
|
-
let results = results.unwrap();
|
|
384
|
-
assert_eq!(results.len(), 20);
|
|
385
|
-
|
|
386
|
-
println!(
|
|
387
|
-
"Processed 20 JSON documents in {:?} with {} cores",
|
|
388
|
-
duration,
|
|
389
|
-
num_cpus::get()
|
|
390
|
-
);
|
|
391
|
-
|
|
392
|
-
assert!(
|
|
393
|
-
duration < Duration::from_secs(2),
|
|
394
|
-
"Batch processing should leverage parallelism, took: {:?}",
|
|
395
|
-
duration
|
|
396
|
-
);
|
|
397
|
-
}
|
|
398
|
-
|
|
399
|
-
/// Test batch processing under memory pressure.
|
|
400
|
-
///
|
|
401
|
-
/// Validates that semaphore prevents resource exhaustion.
|
|
402
|
-
#[tokio::test]
|
|
403
|
-
async fn test_batch_memory_pressure_handling() {
|
|
404
|
-
let config = ExtractionConfig {
|
|
405
|
-
max_concurrent_extractions: Some(4),
|
|
406
|
-
..Default::default()
|
|
407
|
-
};
|
|
408
|
-
|
|
409
|
-
let mut contents = Vec::new();
|
|
410
|
-
for i in 0..50 {
|
|
411
|
-
let json = format!(r#"{{"id": {}, "large_data": "{}"}}"#, i, "x".repeat(10000));
|
|
412
|
-
contents.push((json.into_bytes(), "application/json"));
|
|
413
|
-
}
|
|
414
|
-
|
|
415
|
-
let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
|
|
416
|
-
|
|
417
|
-
let start = Instant::now();
|
|
418
|
-
let results = batch_extract_bytes(contents_ref, &config).await;
|
|
419
|
-
let duration = start.elapsed();
|
|
420
|
-
|
|
421
|
-
assert!(results.is_ok());
|
|
422
|
-
let results = results.unwrap();
|
|
423
|
-
assert_eq!(results.len(), 50);
|
|
424
|
-
|
|
425
|
-
println!("Processed 50 large documents with concurrency limit in {:?}", duration);
|
|
426
|
-
|
|
427
|
-
for result in &results {
|
|
428
|
-
assert!(!result.content.is_empty());
|
|
429
|
-
}
|
|
430
|
-
}
|
|
431
|
-
|
|
432
|
-
/// Test that batch processing scales with CPU count.
|
|
433
|
-
#[tokio::test]
|
|
434
|
-
async fn test_batch_scales_with_cpu_count() {
|
|
435
|
-
let cpu_count = num_cpus::get();
|
|
436
|
-
|
|
437
|
-
let contents: Vec<(Vec<u8>, &str)> = (0..30)
|
|
438
|
-
.map(|i| (format!("Content {}", i).into_bytes(), "text/plain"))
|
|
439
|
-
.collect();
|
|
440
|
-
|
|
441
|
-
let config_1 = ExtractionConfig {
|
|
442
|
-
max_concurrent_extractions: Some(1),
|
|
443
|
-
..Default::default()
|
|
444
|
-
};
|
|
445
|
-
|
|
446
|
-
let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
|
|
447
|
-
|
|
448
|
-
let start = Instant::now();
|
|
449
|
-
let _ = batch_extract_bytes(contents_ref.clone(), &config_1).await.unwrap();
|
|
450
|
-
let duration_1 = start.elapsed();
|
|
451
|
-
|
|
452
|
-
let config_full = ExtractionConfig {
|
|
453
|
-
max_concurrent_extractions: Some(cpu_count),
|
|
454
|
-
..Default::default()
|
|
455
|
-
};
|
|
456
|
-
|
|
457
|
-
let start = Instant::now();
|
|
458
|
-
let _ = batch_extract_bytes(contents_ref, &config_full).await.unwrap();
|
|
459
|
-
let duration_full = start.elapsed();
|
|
460
|
-
|
|
461
|
-
println!(
|
|
462
|
-
"Concurrency=1: {:?}, Concurrency={}: {:?}, Speedup: {:.2}x",
|
|
463
|
-
duration_1,
|
|
464
|
-
cpu_count,
|
|
465
|
-
duration_full,
|
|
466
|
-
duration_1.as_secs_f64() / duration_full.as_secs_f64()
|
|
467
|
-
);
|
|
468
|
-
|
|
469
|
-
if cpu_count > 1 {
|
|
470
|
-
let slowdown_ratio = duration_full.as_secs_f64() / duration_1.as_secs_f64();
|
|
471
|
-
assert!(
|
|
472
|
-
slowdown_ratio <= 5.0,
|
|
473
|
-
"Parallel execution should not be excessively slower (got {:.2}x slowdown)",
|
|
474
|
-
slowdown_ratio
|
|
475
|
-
);
|
|
476
|
-
}
|
|
477
|
-
}
|
|
478
|
-
|
|
479
|
-
/// End-to-end test: batch process mixed document types.
|
|
480
|
-
#[cfg(feature = "xml")]
|
|
481
|
-
#[tokio::test]
|
|
482
|
-
async fn test_batch_mixed_document_types() {
|
|
483
|
-
use helpers::get_test_file_path;
|
|
484
|
-
|
|
485
|
-
let config = ExtractionConfig::default();
|
|
486
|
-
|
|
487
|
-
let paths = vec![
|
|
488
|
-
get_test_file_path("text/contract.txt"),
|
|
489
|
-
get_test_file_path("json/sample_document.json"),
|
|
490
|
-
get_test_file_path("xml/simple_note.xml"),
|
|
491
|
-
get_test_file_path("text/readme.md"),
|
|
492
|
-
];
|
|
493
|
-
|
|
494
|
-
let results = batch_extract_file(paths, &config).await;
|
|
495
|
-
|
|
496
|
-
assert!(results.is_ok());
|
|
497
|
-
let results = results.unwrap();
|
|
498
|
-
assert_eq!(results.len(), 4);
|
|
499
|
-
|
|
500
|
-
for (i, result) in results.iter().enumerate() {
|
|
501
|
-
assert!(
|
|
502
|
-
!result.content.is_empty(),
|
|
503
|
-
"Document {} should have extracted content",
|
|
504
|
-
i
|
|
505
|
-
);
|
|
506
|
-
}
|
|
507
|
-
|
|
508
|
-
assert!(
|
|
509
|
-
results[0].content.contains("contract"),
|
|
510
|
-
"First result should be from contract.txt, got: '{}'",
|
|
511
|
-
results[0].content
|
|
512
|
-
);
|
|
513
|
-
assert!(
|
|
514
|
-
results[1].content.contains("Sample") || results[1].content.contains("author"),
|
|
515
|
-
"Second result should be from JSON document, got: '{}'",
|
|
516
|
-
results[1].content
|
|
517
|
-
);
|
|
518
|
-
assert!(
|
|
519
|
-
results[2].content.contains("Tove") || results[2].content.contains("note"),
|
|
520
|
-
"Third result should be from XML, got: '{}'",
|
|
521
|
-
results[2].content
|
|
522
|
-
);
|
|
523
|
-
assert!(
|
|
524
|
-
!results[3].content.is_empty(),
|
|
525
|
-
"Fourth result should be from markdown, got: '{}'",
|
|
526
|
-
results[3].content
|
|
527
|
-
);
|
|
528
|
-
}
|
|
529
|
-
|
|
530
|
-
/// Test batch processing maintains high accuracy under load.
|
|
531
|
-
#[tokio::test]
|
|
532
|
-
async fn test_batch_accuracy_under_load() {
|
|
533
|
-
let config = ExtractionConfig::default();
|
|
534
|
-
|
|
535
|
-
let mut contents = Vec::new();
|
|
536
|
-
for i in 0..100 {
|
|
537
|
-
let content = format!("Document number {} with unique content", i);
|
|
538
|
-
contents.push((content.into_bytes(), "text/plain"));
|
|
539
|
-
}
|
|
540
|
-
|
|
541
|
-
let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
|
|
542
|
-
|
|
543
|
-
let results = batch_extract_bytes(contents_ref, &config).await.unwrap();
|
|
544
|
-
|
|
545
|
-
assert_eq!(results.len(), 100);
|
|
546
|
-
|
|
547
|
-
for (i, result) in results.iter().enumerate() {
|
|
548
|
-
let expected = format!("Document number {} with unique content", i);
|
|
549
|
-
assert_eq!(
|
|
550
|
-
trim_trailing_newlines(&result.content),
|
|
551
|
-
expected,
|
|
552
|
-
"Document {} content mismatch - possible cross-contamination",
|
|
553
|
-
i
|
|
554
|
-
);
|
|
555
|
-
}
|
|
556
|
-
}
|
|
1
|
+
//! Batch processing orchestration tests.
|
|
2
|
+
//!
|
|
3
|
+
//! Validates efficient parallel processing at multiple levels:
|
|
4
|
+
//! - Multiple documents in parallel
|
|
5
|
+
//! - Multiple pages within PDFs
|
|
6
|
+
//! - OCR across pages
|
|
7
|
+
//! - File I/O optimization
|
|
8
|
+
//! - Resource utilization (CPU cores)
|
|
9
|
+
|
|
10
|
+
use kreuzberg::core::config::ExtractionConfig;
|
|
11
|
+
use kreuzberg::core::extractor::{batch_extract_bytes, batch_extract_file};
|
|
12
|
+
use std::time::{Duration, Instant};
|
|
13
|
+
|
|
14
|
+
#[cfg(feature = "ocr")]
|
|
15
|
+
use kreuzberg::core::config::OcrConfig;
|
|
16
|
+
|
|
17
|
+
#[cfg(feature = "ocr")]
|
|
18
|
+
use kreuzberg::core::extractor::extract_file_sync;
|
|
19
|
+
|
|
20
|
+
mod helpers;
|
|
21
|
+
|
|
22
|
+
fn trim_trailing_newlines(value: &str) -> &str {
|
|
23
|
+
value.trim_end_matches(['\n', '\r'])
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
fn assert_text_content(actual: &str, expected: &str) {
|
|
27
|
+
assert_eq!(
|
|
28
|
+
trim_trailing_newlines(actual),
|
|
29
|
+
expected,
|
|
30
|
+
"Content mismatch after trimming trailing newlines"
|
|
31
|
+
);
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
/// Test that batch extraction processes documents in parallel.
|
|
35
|
+
///
|
|
36
|
+
/// Validates:
|
|
37
|
+
/// - Multiple documents process concurrently
|
|
38
|
+
/// - Parallel processing is faster than sequential
|
|
39
|
+
/// - Results maintain correct order
|
|
40
|
+
#[tokio::test]
|
|
41
|
+
async fn test_batch_documents_parallel_execution() {
|
|
42
|
+
use helpers::get_test_file_path;
|
|
43
|
+
use std::path::PathBuf;
|
|
44
|
+
|
|
45
|
+
let config = ExtractionConfig::default();
|
|
46
|
+
|
|
47
|
+
let test_files = vec![
|
|
48
|
+
"text/contract.txt",
|
|
49
|
+
"json/sample_document.json",
|
|
50
|
+
"xml/simple_note.xml",
|
|
51
|
+
"text/readme.md",
|
|
52
|
+
];
|
|
53
|
+
|
|
54
|
+
let mut paths: Vec<PathBuf> = Vec::new();
|
|
55
|
+
for _ in 0..5 {
|
|
56
|
+
for file in &test_files {
|
|
57
|
+
paths.push(get_test_file_path(file));
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
let parallel_start = Instant::now();
|
|
62
|
+
let results = batch_extract_file(paths.clone(), &config).await;
|
|
63
|
+
let parallel_duration = parallel_start.elapsed();
|
|
64
|
+
|
|
65
|
+
assert!(results.is_ok(), "Batch extraction should succeed");
|
|
66
|
+
let results = results.unwrap();
|
|
67
|
+
assert_eq!(results.len(), 20, "Should process all 20 files");
|
|
68
|
+
|
|
69
|
+
for result in &results {
|
|
70
|
+
assert!(
|
|
71
|
+
!result.content.is_empty() || result.metadata.error.is_some(),
|
|
72
|
+
"Each result should have content or error"
|
|
73
|
+
);
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
assert!(
|
|
77
|
+
parallel_duration < Duration::from_secs(5),
|
|
78
|
+
"Batch processing 20 files should take <5s, took: {:?}",
|
|
79
|
+
parallel_duration
|
|
80
|
+
);
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/// Test concurrency limiting in batch processing.
|
|
84
|
+
///
|
|
85
|
+
/// Validates that batch extraction respects max_concurrent_extractions config.
|
|
86
|
+
#[tokio::test]
|
|
87
|
+
async fn test_batch_documents_concurrency_limiting() {
|
|
88
|
+
use helpers::get_test_file_path;
|
|
89
|
+
|
|
90
|
+
let config = ExtractionConfig {
|
|
91
|
+
max_concurrent_extractions: Some(2),
|
|
92
|
+
..Default::default()
|
|
93
|
+
};
|
|
94
|
+
|
|
95
|
+
let paths = vec![
|
|
96
|
+
get_test_file_path("text/contract.txt"),
|
|
97
|
+
get_test_file_path("json/sample_document.json"),
|
|
98
|
+
get_test_file_path("xml/simple_note.xml"),
|
|
99
|
+
get_test_file_path("text/readme.md"),
|
|
100
|
+
];
|
|
101
|
+
|
|
102
|
+
let results = batch_extract_file(paths, &config).await;
|
|
103
|
+
|
|
104
|
+
assert!(results.is_ok());
|
|
105
|
+
let results = results.unwrap();
|
|
106
|
+
assert_eq!(results.len(), 4);
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
/// Test batch extraction with CPU-bound limit (default: num_cpus * 2).
|
|
110
|
+
#[tokio::test]
|
|
111
|
+
async fn test_batch_documents_default_concurrency() {
|
|
112
|
+
use helpers::get_test_file_path;
|
|
113
|
+
|
|
114
|
+
let config = ExtractionConfig::default();
|
|
115
|
+
|
|
116
|
+
let mut paths = Vec::new();
|
|
117
|
+
for _ in 0..13 {
|
|
118
|
+
paths.push(get_test_file_path("text/contract.txt"));
|
|
119
|
+
paths.push(get_test_file_path("json/sample_document.json"));
|
|
120
|
+
paths.push(get_test_file_path("xml/simple_note.xml"));
|
|
121
|
+
paths.push(get_test_file_path("text/readme.md"));
|
|
122
|
+
}
|
|
123
|
+
let paths = paths.into_iter().take(50).collect::<Vec<_>>();
|
|
124
|
+
|
|
125
|
+
let start = Instant::now();
|
|
126
|
+
let results = batch_extract_file(paths, &config).await;
|
|
127
|
+
let duration = start.elapsed();
|
|
128
|
+
|
|
129
|
+
assert!(results.is_ok());
|
|
130
|
+
let results = results.unwrap();
|
|
131
|
+
assert_eq!(results.len(), 50);
|
|
132
|
+
|
|
133
|
+
println!("Processed 50 files in {:?}", duration);
|
|
134
|
+
assert!(
|
|
135
|
+
duration < Duration::from_secs(10),
|
|
136
|
+
"50 files should process in <10s with parallelism, took: {:?}",
|
|
137
|
+
duration
|
|
138
|
+
);
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
/// Test that batch processing maintains result order.
|
|
142
|
+
#[cfg(feature = "xml")]
|
|
143
|
+
#[tokio::test]
|
|
144
|
+
async fn test_batch_documents_preserves_order() {
|
|
145
|
+
use helpers::get_test_file_path;
|
|
146
|
+
|
|
147
|
+
let config = ExtractionConfig::default();
|
|
148
|
+
|
|
149
|
+
let paths = vec![
|
|
150
|
+
get_test_file_path("text/contract.txt"),
|
|
151
|
+
get_test_file_path("json/sample_document.json"),
|
|
152
|
+
get_test_file_path("xml/simple_note.xml"),
|
|
153
|
+
];
|
|
154
|
+
|
|
155
|
+
let results = batch_extract_file(paths, &config).await.unwrap();
|
|
156
|
+
|
|
157
|
+
assert_eq!(results.len(), 3, "Should have 3 results");
|
|
158
|
+
|
|
159
|
+
assert!(!results[0].content.is_empty(), "First result should have content");
|
|
160
|
+
assert!(!results[1].content.is_empty(), "Second result should have content");
|
|
161
|
+
assert!(!results[2].content.is_empty(), "Third result should have content");
|
|
162
|
+
|
|
163
|
+
assert!(
|
|
164
|
+
results[0].content.contains("contract"),
|
|
165
|
+
"First result should be from contract.txt, got: '{}'",
|
|
166
|
+
results[0].content
|
|
167
|
+
);
|
|
168
|
+
assert!(
|
|
169
|
+
results[1].content.contains("Sample") || results[1].content.contains("author"),
|
|
170
|
+
"Second result should be from JSON document, got: '{}'",
|
|
171
|
+
results[1].content
|
|
172
|
+
);
|
|
173
|
+
assert!(
|
|
174
|
+
results[2].content.contains("Tove") || results[2].content.contains("note"),
|
|
175
|
+
"Third result should be from XML note, got: '{}'",
|
|
176
|
+
results[2].content
|
|
177
|
+
);
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
/// Test that multi-page PDF extraction is efficient.
|
|
181
|
+
///
|
|
182
|
+
/// Validates:
|
|
183
|
+
/// - Multiple pages are processed
|
|
184
|
+
/// - OCR is applied to all pages if needed
|
|
185
|
+
/// - Content from all pages is combined
|
|
186
|
+
#[cfg(feature = "pdf")]
|
|
187
|
+
#[tokio::test]
|
|
188
|
+
async fn test_multipage_pdf_extraction() {
|
|
189
|
+
use helpers::{get_test_file_path, skip_if_missing};
|
|
190
|
+
|
|
191
|
+
if skip_if_missing("pdfs/multi_page.pdf") {
|
|
192
|
+
tracing::debug!("Skipping multi-page PDF test: test file not available");
|
|
193
|
+
return;
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
let config = ExtractionConfig::default();
|
|
197
|
+
let pdf_path = get_test_file_path("pdfs/multi_page.pdf");
|
|
198
|
+
|
|
199
|
+
let start = Instant::now();
|
|
200
|
+
let result = kreuzberg::core::extractor::extract_file(&pdf_path, None, &config).await;
|
|
201
|
+
let duration = start.elapsed();
|
|
202
|
+
|
|
203
|
+
assert!(result.is_ok(), "Multi-page PDF extraction should succeed");
|
|
204
|
+
let extraction = result.unwrap();
|
|
205
|
+
|
|
206
|
+
assert!(!extraction.content.is_empty(), "Should extract text from all pages");
|
|
207
|
+
println!("Extracted multi-page PDF in {:?}", duration);
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
/// Test concurrent PDF extractions (multiple PDFs at once).
|
|
211
|
+
#[cfg(feature = "pdf")]
|
|
212
|
+
#[tokio::test]
|
|
213
|
+
async fn test_concurrent_pdf_extractions() {
|
|
214
|
+
use helpers::{get_test_file_path, skip_if_missing};
|
|
215
|
+
|
|
216
|
+
if skip_if_missing("pdfs/simple.pdf") {
|
|
217
|
+
tracing::debug!("Skipping concurrent PDF test: test file not available");
|
|
218
|
+
return;
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
let config = ExtractionConfig::default();
|
|
222
|
+
|
|
223
|
+
let mut paths = Vec::new();
|
|
224
|
+
for _ in 0..10 {
|
|
225
|
+
paths.push(get_test_file_path("pdfs/simple.pdf"));
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
let start = Instant::now();
|
|
229
|
+
let results = batch_extract_file(paths, &config).await;
|
|
230
|
+
let duration = start.elapsed();
|
|
231
|
+
|
|
232
|
+
assert!(results.is_ok());
|
|
233
|
+
let results = results.unwrap();
|
|
234
|
+
assert_eq!(results.len(), 10);
|
|
235
|
+
|
|
236
|
+
println!("Processed 10 PDFs in {:?}", duration);
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
/// Test OCR on multi-page scanned document.
|
|
240
|
+
///
|
|
241
|
+
/// Validates:
|
|
242
|
+
/// - All pages are OCR'd
|
|
243
|
+
/// - Results are combined correctly
|
|
244
|
+
/// - Processing is efficient
|
|
245
|
+
#[cfg(feature = "ocr")]
|
|
246
|
+
#[test]
|
|
247
|
+
fn test_ocr_multipage_efficiency() {
|
|
248
|
+
use helpers::{get_test_file_path, skip_if_missing};
|
|
249
|
+
|
|
250
|
+
if skip_if_missing("images/ocr_image.jpg") {
|
|
251
|
+
tracing::debug!("Skipping OCR multi-page test: test file not available");
|
|
252
|
+
return;
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
let config = ExtractionConfig {
|
|
256
|
+
ocr: Some(OcrConfig {
|
|
257
|
+
backend: "tesseract".to_string(),
|
|
258
|
+
language: "eng".to_string(),
|
|
259
|
+
tesseract_config: None,
|
|
260
|
+
}),
|
|
261
|
+
force_ocr: false,
|
|
262
|
+
use_cache: true,
|
|
263
|
+
..Default::default()
|
|
264
|
+
};
|
|
265
|
+
|
|
266
|
+
let file_path = get_test_file_path("images/ocr_image.jpg");
|
|
267
|
+
|
|
268
|
+
let start = Instant::now();
|
|
269
|
+
let result1 = extract_file_sync(&file_path, None, &config);
|
|
270
|
+
let first_duration = start.elapsed();
|
|
271
|
+
|
|
272
|
+
assert!(result1.is_ok(), "First OCR should succeed");
|
|
273
|
+
|
|
274
|
+
let start = Instant::now();
|
|
275
|
+
let result2 = extract_file_sync(&file_path, None, &config);
|
|
276
|
+
let second_duration = start.elapsed();
|
|
277
|
+
|
|
278
|
+
assert!(result2.is_ok(), "Second OCR should succeed");
|
|
279
|
+
|
|
280
|
+
println!(
|
|
281
|
+
"OCR timing: first={:?}, cached={:?}, speedup={:.1}x",
|
|
282
|
+
first_duration,
|
|
283
|
+
second_duration,
|
|
284
|
+
first_duration.as_secs_f64() / second_duration.as_secs_f64().max(0.001)
|
|
285
|
+
);
|
|
286
|
+
|
|
287
|
+
assert!(
|
|
288
|
+
second_duration < first_duration / 2,
|
|
289
|
+
"Cached OCR should be at least 2x faster. First: {:?}, Second: {:?}",
|
|
290
|
+
first_duration,
|
|
291
|
+
second_duration
|
|
292
|
+
);
|
|
293
|
+
}
|
|
294
|
+
|
|
295
|
+
/// Test parallel processing of byte arrays.
|
|
296
|
+
///
|
|
297
|
+
/// Validates that batch_extract_bytes processes data in parallel.
|
|
298
|
+
#[tokio::test]
|
|
299
|
+
async fn test_batch_bytes_parallel_processing() {
|
|
300
|
+
let config = ExtractionConfig::default();
|
|
301
|
+
|
|
302
|
+
let contents: Vec<(Vec<u8>, &str)> = (0..30)
|
|
303
|
+
.map(|i| {
|
|
304
|
+
let content = format!("Test content number {}", i);
|
|
305
|
+
(content.into_bytes(), "text/plain")
|
|
306
|
+
})
|
|
307
|
+
.collect();
|
|
308
|
+
|
|
309
|
+
let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();
|
|
310
|
+
|
|
311
|
+
let start = Instant::now();
|
|
312
|
+
let results = batch_extract_bytes(contents_ref, &config).await;
|
|
313
|
+
let duration = start.elapsed();
|
|
314
|
+
|
|
315
|
+
assert!(results.is_ok());
|
|
316
|
+
let results = results.unwrap();
|
|
317
|
+
assert_eq!(results.len(), 30);
|
|
318
|
+
|
|
319
|
+
for (i, result) in results.iter().enumerate() {
|
|
320
|
+
let expected = format!("Test content number {}", i);
|
|
321
|
+
assert_text_content(&result.content, &expected);
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
println!("Batch processed 30 byte arrays in {:?}", duration);
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
/// Test error handling in batch bytes processing.
|
|
328
|
+
#[tokio::test]
|
|
329
|
+
async fn test_batch_bytes_mixed_valid_invalid() {
|
|
330
|
+
let config = ExtractionConfig::default();
|
|
331
|
+
|
|
332
|
+
let contents = vec![
|
|
333
|
+
(b"valid content 1".as_slice(), "text/plain"),
|
|
334
|
+
(b"invalid content".as_slice(), "invalid/mime"),
|
|
335
|
+
(b"valid content 2".as_slice(), "text/plain"),
|
|
336
|
+
(b"more invalid".as_slice(), "bad/type"),
|
|
337
|
+
(b"valid content 3".as_slice(), "text/plain"),
|
|
338
|
+
];
|
|
339
|
+
|
|
340
|
+
let results = batch_extract_bytes(contents, &config).await;
|
|
341
|
+
|
|
342
|
+
assert!(results.is_ok());
|
|
343
|
+
let results = results.unwrap();
|
|
344
|
+
assert_eq!(results.len(), 5);
|
|
345
|
+
|
|
346
|
+
assert_text_content(&results[0].content, "valid content 1");
|
|
347
|
+
assert_text_content(&results[2].content, "valid content 2");
|
|
348
|
+
assert_text_content(&results[4].content, "valid content 3");
|
|
349
|
+
|
|
350
|
+
assert!(results[1].metadata.error.is_some());
|
|
351
|
+
assert!(results[3].metadata.error.is_some());
|
|
352
|
+
}
|
|
353
|
+
|
|
354
|
+
/// Test that batch processing utilizes multiple CPU cores.
///
/// Validates that parallel extraction actually runs in parallel,
/// not just sequentially with fancy task management.
#[tokio::test]
async fn test_batch_utilizes_multiple_cores() {
    let config = ExtractionConfig {
        max_concurrent_extractions: Some(num_cpus::get()),
        ..Default::default()
    };

    let mut contents = Vec::new();
    for i in 0..20 {
        let json = format!(
            r#"{{"id": {}, "data": "{}", "nested": {{"value": "{}"}}}}"#,
            i,
            "x".repeat(100),
            "y".repeat(100)
        );
        contents.push((json.into_bytes(), "application/json"));
    }

    let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();

    let start = Instant::now();
    let results = batch_extract_bytes(contents_ref, &config).await;
    let duration = start.elapsed();

    assert!(results.is_ok());
    let results = results.unwrap();
    assert_eq!(results.len(), 20);

    println!(
        "Processed 20 JSON documents in {:?} with {} cores",
        duration,
        num_cpus::get()
    );

    assert!(
        duration < Duration::from_secs(2),
        "Batch processing should leverage parallelism, took: {:?}",
        duration
    );
}

/// Test batch processing under memory pressure.
///
/// Validates that the semaphore prevents resource exhaustion.
#[tokio::test]
async fn test_batch_memory_pressure_handling() {
    let config = ExtractionConfig {
        max_concurrent_extractions: Some(4),
        ..Default::default()
    };

    let mut contents = Vec::new();
    for i in 0..50 {
        let json = format!(r#"{{"id": {}, "large_data": "{}"}}"#, i, "x".repeat(10000));
        contents.push((json.into_bytes(), "application/json"));
    }

    let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();

    let start = Instant::now();
    let results = batch_extract_bytes(contents_ref, &config).await;
    let duration = start.elapsed();

    assert!(results.is_ok());
    let results = results.unwrap();
    assert_eq!(results.len(), 50);

    println!("Processed 50 large documents with concurrency limit in {:?}", duration);

    for result in &results {
        assert!(!result.content.is_empty());
    }
}

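// Illustrative sketch (an assumption, not the crate's implementation): the
// concurrency cap exercised above matches the classic tokio Semaphore
// pattern, where each task holds a permit for the duration of its work so
// at most `limit` bodies run at once.
#[allow(dead_code)]
async fn bounded_concurrency_demo(inputs: Vec<Vec<u8>>, limit: usize) -> Vec<usize> {
    use std::sync::Arc;
    use tokio::sync::Semaphore;

    let semaphore = Arc::new(Semaphore::new(limit));
    let mut handles = Vec::new();
    for input in inputs {
        let semaphore = Arc::clone(&semaphore);
        handles.push(tokio::spawn(async move {
            // Holding the permit bounds how many tasks do work concurrently.
            let _permit = semaphore.acquire_owned().await.expect("semaphore closed");
            input.len() // stand-in for the real extraction work
        }));
    }

    let mut lengths = Vec::new();
    for handle in handles {
        lengths.push(handle.await.expect("task panicked"));
    }
    lengths
}
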
/// Test that batch processing scales with CPU count.
#[tokio::test]
async fn test_batch_scales_with_cpu_count() {
    let cpu_count = num_cpus::get();

    let contents: Vec<(Vec<u8>, &str)> = (0..30)
        .map(|i| (format!("Content {}", i).into_bytes(), "text/plain"))
        .collect();

    let config_1 = ExtractionConfig {
        max_concurrent_extractions: Some(1),
        ..Default::default()
    };

    let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();

    let start = Instant::now();
    let _ = batch_extract_bytes(contents_ref.clone(), &config_1).await.unwrap();
    let duration_1 = start.elapsed();

    let config_full = ExtractionConfig {
        max_concurrent_extractions: Some(cpu_count),
        ..Default::default()
    };

    let start = Instant::now();
    let _ = batch_extract_bytes(contents_ref, &config_full).await.unwrap();
    let duration_full = start.elapsed();

    println!(
        "Concurrency=1: {:?}, Concurrency={}: {:?}, Speedup: {:.2}x",
        duration_1,
        cpu_count,
        duration_full,
        duration_1.as_secs_f64() / duration_full.as_secs_f64()
    );

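    // NOTE (editorial comment, inferred from the assertion message below):
    // the check is deliberately loose. With inputs this small, parallel
    // extraction is not guaranteed to beat concurrency=1, so the test only
    // guards against a pathological slowdown from scheduling overhead.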
    if cpu_count > 1 {
        let slowdown_ratio = duration_full.as_secs_f64() / duration_1.as_secs_f64();
        assert!(
            slowdown_ratio <= 5.0,
            "Parallel execution should not be excessively slower (got {:.2}x slowdown)",
            slowdown_ratio
        );
    }
}

/// End-to-end test: batch process mixed document types.
#[cfg(feature = "xml")]
#[tokio::test]
async fn test_batch_mixed_document_types() {
    use helpers::get_test_file_path;

    let config = ExtractionConfig::default();

    let paths = vec![
        get_test_file_path("text/contract.txt"),
        get_test_file_path("json/sample_document.json"),
        get_test_file_path("xml/simple_note.xml"),
        get_test_file_path("text/readme.md"),
    ];

    let results = batch_extract_file(paths, &config).await;

    assert!(results.is_ok());
    let results = results.unwrap();
    assert_eq!(results.len(), 4);

    for (i, result) in results.iter().enumerate() {
        assert!(
            !result.content.is_empty(),
            "Document {} should have extracted content",
            i
        );
    }

    assert!(
        results[0].content.contains("contract"),
        "First result should be from contract.txt, got: '{}'",
        results[0].content
    );
    assert!(
        results[1].content.contains("Sample") || results[1].content.contains("author"),
        "Second result should be from JSON document, got: '{}'",
        results[1].content
    );
    assert!(
        results[2].content.contains("Tove") || results[2].content.contains("note"),
        "Third result should be from XML, got: '{}'",
        results[2].content
    );
    assert!(
        !results[3].content.is_empty(),
        "Fourth result should be from markdown, got: '{}'",
        results[3].content
    );
}

/// Test that batch processing maintains high accuracy under load.
#[tokio::test]
async fn test_batch_accuracy_under_load() {
    let config = ExtractionConfig::default();

    let mut contents = Vec::new();
    for i in 0..100 {
        let content = format!("Document number {} with unique content", i);
        contents.push((content.into_bytes(), "text/plain"));
    }

    let contents_ref: Vec<(&[u8], &str)> = contents.iter().map(|(bytes, mime)| (bytes.as_slice(), *mime)).collect();

    let results = batch_extract_bytes(contents_ref, &config).await.unwrap();

    assert_eq!(results.len(), 100);

    for (i, result) in results.iter().enumerate() {
        let expected = format!("Document number {} with unique content", i);
        assert_eq!(
            trim_trailing_newlines(&result.content),
            expected,
            "Document {} content mismatch - possible cross-contamination",
            i
        );
    }
}