kreuzberg 4.0.0.pre.rc.11 → 4.0.0.pre.rc.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -14
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -1
- data/.rubocop.yml +538 -538
- data/Gemfile +8 -8
- data/Gemfile.lock +2 -105
- data/README.md +454 -454
- data/Rakefile +25 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -341
- data/ext/kreuzberg_rb/extconf.rb +45 -45
- data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
- data/ext/kreuzberg_rb/native/Cargo.lock +6941 -6941
- data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -15
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +214 -214
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +81 -81
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +724 -724
- data/lib/kreuzberg/error_context.rb +80 -80
- data/lib/kreuzberg/errors.rb +118 -118
- data/lib/kreuzberg/extraction_api.rb +340 -340
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +279 -279
- data/lib/kreuzberg/setup_lib_path.rb +80 -80
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +109 -109
- data/lib/{libpdfium.dylib → pdfium.dll} +0 -0
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +546 -546
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -85
- data/spec/binding/cli_spec.rb +55 -55
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -41
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/Cargo.toml +2 -1
- data/vendor/kreuzberg/Cargo.toml +2 -2
- data/vendor/kreuzberg/README.md +230 -230
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
- data/vendor/kreuzberg/build.rs +843 -843
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
- data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
- data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
- data/vendor/kreuzberg/src/core/config.rs +1080 -1080
- data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
- data/vendor/kreuzberg/src/core/io.rs +329 -329
- data/vendor/kreuzberg/src/core/mime.rs +605 -605
- data/vendor/kreuzberg/src/core/mod.rs +47 -47
- data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
- data/vendor/kreuzberg/src/embeddings.rs +500 -500
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +601 -601
- data/vendor/kreuzberg/src/extraction/image.rs +491 -491
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -562
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
- data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
- data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
- data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
- data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
- data/vendor/kreuzberg/src/extractors/email.rs +157 -157
- data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
- data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
- data/vendor/kreuzberg/src/extractors/html.rs +407 -407
- data/vendor/kreuzberg/src/extractors/image.rs +219 -219
- data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
- data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
- data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
- data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
- data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
- data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
- data/vendor/kreuzberg/src/extractors/pdf.rs +749 -722
- data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
- data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
- data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
- data/vendor/kreuzberg/src/extractors/security.rs +484 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
- data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
- data/vendor/kreuzberg/src/extractors/text.rs +265 -265
- data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
- data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
- data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
- data/vendor/kreuzberg/src/lib.rs +113 -113
- data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
- data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -154
- data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
- data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
- data/vendor/kreuzberg/src/pdf/error.rs +130 -130
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
- data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
- data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
- data/vendor/kreuzberg/src/pdf/table.rs +420 -420
- data/vendor/kreuzberg/src/pdf/text.rs +240 -240
- data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
- data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
- data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
- data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
- data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +25 -25
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +1055 -1055
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
- data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
- data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
- data/vendor/kreuzberg/tests/config_features.rs +612 -612
- data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
- data/vendor/kreuzberg/tests/core_integration.rs +510 -510
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
- data/vendor/kreuzberg/tests/email_integration.rs +327 -327
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
- data/vendor/kreuzberg/tests/error_handling.rs +402 -402
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
- data/vendor/kreuzberg/tests/format_integration.rs +164 -164
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
- data/vendor/kreuzberg/tests/image_integration.rs +255 -255
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
- data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
- data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
- data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
- data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
- data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
- data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
- data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
- data/vendor/kreuzberg/tests/security_validation.rs +416 -416
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/kreuzberg-ffi/Cargo.toml +3 -3
- data/vendor/kreuzberg-ffi/README.md +851 -851
- data/vendor/kreuzberg-ffi/build.rs +176 -176
- data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
- data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +12 -12
- data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
- data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
- data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
- data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
- data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
- data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
- data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
- data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
- data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
- data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
- data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
- data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
- data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
- data/vendor/kreuzberg-tesseract/LICENSE +22 -22
- data/vendor/kreuzberg-tesseract/README.md +399 -399
- data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
- data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
- data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
- data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
- data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
- data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
- data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
- data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
- data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
- data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
- data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
- data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
- data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
- data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
- data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
- data/vendor/rb-sys/Cargo.lock +393 -393
- data/vendor/rb-sys/Cargo.toml +70 -70
- data/vendor/rb-sys/Cargo.toml.orig +57 -57
- data/vendor/rb-sys/LICENSE-APACHE +190 -190
- data/vendor/rb-sys/LICENSE-MIT +21 -21
- data/vendor/rb-sys/build/features.rs +111 -111
- data/vendor/rb-sys/build/main.rs +286 -286
- data/vendor/rb-sys/build/stable_api_config.rs +155 -155
- data/vendor/rb-sys/build/version.rs +50 -50
- data/vendor/rb-sys/readme.md +36 -36
- data/vendor/rb-sys/src/bindings.rs +21 -21
- data/vendor/rb-sys/src/hidden.rs +11 -11
- data/vendor/rb-sys/src/lib.rs +35 -35
- data/vendor/rb-sys/src/macros.rs +371 -371
- data/vendor/rb-sys/src/memory.rs +53 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
- data/vendor/rb-sys/src/special_consts.rs +31 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
- data/vendor/rb-sys/src/stable_api.rs +260 -260
- data/vendor/rb-sys/src/symbol.rs +31 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
- data/vendor/rb-sys/src/utils.rs +89 -89
- data/vendor/rb-sys/src/value_type.rs +7 -7
- metadata +7 -80
data/vendor/kreuzberg/tests/concurrency_stress.rs
@@ -1,533 +1,533 @@
(the hunk removes and re-adds all 533 lines with identical text as rendered here, a whole-file rewrite; the file content is shown once below)

//! Comprehensive concurrency and parallelism stress tests.
//!
//! Validates that the Kreuzberg core handles concurrent operations correctly:
//! - Parallel extractions don't interfere with each other
//! - OCR processing is thread-safe and efficient
//! - Pipeline processing works correctly under concurrent load
//! - Cache access is safe with multiple readers/writers
//! - Registry access is thread-safe
//!
//! These tests ensure production workloads with high concurrency work correctly.

use async_trait::async_trait;
use kreuzberg::Result;
use kreuzberg::core::config::{ExtractionConfig, PostProcessorConfig};
use kreuzberg::core::extractor::{batch_extract_bytes, extract_bytes};
use kreuzberg::core::pipeline::run_pipeline;
use kreuzberg::plugins::registry::{get_document_extractor_registry, get_post_processor_registry};
use kreuzberg::plugins::{Plugin, PostProcessor, ProcessingStage};
use kreuzberg::types::{ExtractionResult, Metadata};
use std::sync::Arc;

#[cfg(feature = "ocr")]
use kreuzberg::core::config::OcrConfig;

#[cfg(feature = "ocr")]
use kreuzberg::core::extractor::extract_file_sync;
use std::time::Duration;
use tokio::time::timeout;

mod helpers;

fn trim_trailing_newlines(value: &str) -> &str {
    value.trim_end_matches(['\n', '\r'])
}

fn assert_text_content(actual: &str, expected: &str) {
    assert_eq!(
        trim_trailing_newlines(actual),
        expected,
        "Content mismatch after trimming trailing newlines"
    );
}

/// Test many concurrent extractions of different MIME types.
///
/// Validates that:
/// - Registry lookups don't block each other unnecessarily
/// - Different extractors can run in parallel
/// - No data races or corruption
#[tokio::test]
async fn test_concurrent_extractions_mixed_formats() {
    let config = ExtractionConfig::default();

    #[allow(unused_mut)]
    let mut test_cases = vec![
        (b"Plain text content" as &[u8], "text/plain"),
        (b"{\"key\": \"value\"}", "application/json"),
        (b"# Markdown\n\nContent here", "text/markdown"),
    ];

    #[cfg(feature = "xml")]
    test_cases.push((b"<root><item>XML content</item></root>" as &[u8], "application/xml"));

    let mut handles = vec![];
    for _ in 0..10 {
        for (data, mime_type) in &test_cases {
            let config = config.clone();
            let data = data.to_vec();
            let mime_type = mime_type.to_string();

            handles.push(tokio::spawn(
                async move { extract_bytes(&data, &mime_type, &config).await },
            ));
        }
    }

    let results = timeout(Duration::from_secs(30), async {
        let mut results = vec![];
        for handle in handles {
            results.push(handle.await.expect("Task should not panic"));
        }
        results
    })
    .await
    .expect("All extractions should complete within 30s");

    for result in results {
        assert!(
            result.is_ok(),
            "Concurrent extraction should succeed: {:?}",
            result.err()
        );
    }
}

/// Test concurrent batch extractions.
///
/// Validates that batch processing correctly handles parallelism internally.
#[tokio::test]
async fn test_concurrent_batch_extractions() {
    let config = ExtractionConfig::default();

    let contents: Vec<Vec<u8>> = (0..20).map(|i| format!("Content {}", i).into_bytes()).collect();

    let mut handles = vec![];
    for _ in 0..5 {
        let config = config.clone();
        let contents_clone = contents.clone();

        handles.push(tokio::spawn(async move {
            let data: Vec<(&[u8], &str)> = contents_clone.iter().map(|c| (c.as_slice(), "text/plain")).collect();
            batch_extract_bytes(data, &config).await
        }));
    }

    for handle in handles {
        let results = handle.await.expect("Task should not panic");
        assert!(results.is_ok(), "Batch extraction should succeed");
        let results = results.unwrap();
        assert_eq!(results.len(), 20, "Should return all results");
    }
}

/// Test concurrent extractions with caching enabled.
///
/// Validates that:
/// - Cache reads/writes are thread-safe
/// - No cache corruption under concurrent access
/// - Cache hits work correctly across threads
#[tokio::test]
async fn test_concurrent_extractions_with_cache() {
    let config = ExtractionConfig {
        use_cache: true,
        postprocessor: Some(PostProcessorConfig {
            enabled: false,
            enabled_processors: None,
            disabled_processors: None,
        }),
        ..Default::default()
    };

    let test_data = b"Cached content for concurrent access test";

    let _ = extract_bytes(test_data, "text/plain", &config).await.unwrap();

    let mut handles = vec![];
    for _ in 0..100 {
        let config = config.clone();
        let data = test_data.to_vec();

        handles.push(tokio::spawn(async move {
            extract_bytes(&data, "text/plain", &config).await
        }));
    }

    let expected_content = "Cached content for concurrent access test";
    for handle in handles {
        let result = handle.await.expect("Task should not panic");
        assert!(result.is_ok(), "Cache read should succeed");
        let extraction = result.unwrap();
        assert_text_content(&extraction.content, expected_content);
    }
}

/// Test concurrent OCR processing of different images.
///
/// Validates that:
/// - OCR backend is thread-safe
/// - Multiple OCR operations don't interfere
/// - OCR cache handles concurrent access correctly
#[cfg(feature = "ocr")]
#[tokio::test]
async fn test_concurrent_ocr_processing() {
    use helpers::{get_test_file_path, skip_if_missing};

    if cfg!(windows) {
        return;
    }

    if skip_if_missing("images/ocr_image.jpg") {
        tracing::debug!("Skipping concurrent OCR test: test file not available");
        return;
    }

    let config = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: None,
        }),
        force_ocr: false,
        use_cache: true,
        ..Default::default()
    };

    let file_path = get_test_file_path("images/ocr_image.jpg");

    let mut handles = vec![];
    for _ in 0..20 {
        let file_path = file_path.clone();
        let config = config.clone();

        handles.push(tokio::task::spawn_blocking(move || {
            extract_file_sync(&file_path, None, &config)
        }));
    }

    let results = timeout(Duration::from_secs(60), async {
        let mut results = vec![];
        for handle in handles {
            results.push(handle.await.expect("Task should not panic"));
        }
        results
    })
    .await
    .expect("All OCR operations should complete within 60s");

    let mut extracted_texts = vec![];
    for result in results {
        assert!(result.is_ok(), "OCR should succeed: {:?}", result.err());
        let extraction = result.unwrap();
        assert!(!extraction.content.is_empty(), "OCR should extract text");
        extracted_texts.push(extraction.content);
    }

    let first_text = &extracted_texts[0];
    for text in &extracted_texts[1..] {
        assert_eq!(text, first_text, "Concurrent OCR should produce identical results");
    }
}

/// Test concurrent OCR with cache warming.
///
/// Validates cache performance under concurrent load.
///
/// Note: This test is simplified to avoid runtime nesting issues.
/// It validates that concurrent OCR extractions work correctly with caching.
///
/// WARNING: This test uses timing heuristics (<500ms = cache hit) which are unreliable
/// in CI environments where even cached operations may exceed the threshold on slow runners.
/// Ignored to prevent flaky failures - cache hit rates vary significantly across platforms.
#[cfg(feature = "ocr")]
#[ignore = "flaky timing-based cache heuristic - cache hit rates vary significantly across platforms"]
#[test]
fn test_concurrent_ocr_cache_stress() {
    use helpers::{get_test_file_path, skip_if_missing};
    use std::sync::atomic::{AtomicUsize, Ordering};

    if skip_if_missing("images/ocr_image.jpg") {
        tracing::debug!("Skipping OCR cache stress test: test file not available");
        return;
    }

    let config = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: None,
        }),
        force_ocr: false,
        use_cache: true,
        ..Default::default()
    };

    let file_path = get_test_file_path("images/ocr_image.jpg");

    let first_result = extract_file_sync(&file_path, None, &config);
    assert!(first_result.is_ok(), "Initial OCR should succeed");

    let cache_hit_count = Arc::new(AtomicUsize::new(0));

    let mut handles = vec![];
    for _ in 0..50 {
        let file_path = file_path.clone();
        let config = config.clone();
        let hit_count = Arc::clone(&cache_hit_count);

        handles.push(std::thread::spawn(move || {
            let start = std::time::Instant::now();
            let result = extract_file_sync(&file_path, None, &config);
            let duration = start.elapsed();

            if duration < Duration::from_millis(500) {
                hit_count.fetch_add(1, Ordering::Relaxed);
            }

            result
        }));
    }

    for handle in handles {
        let result = handle.join().expect("Thread should not panic");
        assert!(result.is_ok(), "Cached OCR should succeed");
    }

    let hits = cache_hit_count.load(Ordering::Relaxed);
    assert!(
        hits >= 20,
        "At least 20/50 requests should hit cache, got {} hits",
        hits
    );
}

/// Test concurrent pipeline processing.
///
/// Validates that:
/// - Pipeline can process multiple results in parallel
/// - Processors don't interfere with each other
/// - Registry reads are thread-safe
#[tokio::test]
async fn test_concurrent_pipeline_processing() {
    struct ConcurrentTestProcessor;

    impl Plugin for ConcurrentTestProcessor {
        fn name(&self) -> &str {
            "concurrent-test"
        }
        fn version(&self) -> String {
            "1.0.0".to_string()
        }
        fn initialize(&self) -> Result<()> {
            Ok(())
        }
        fn shutdown(&self) -> Result<()> {
            Ok(())
        }
    }

    #[async_trait]
    impl PostProcessor for ConcurrentTestProcessor {
        async fn process(&self, result: &mut ExtractionResult, _: &ExtractionConfig) -> Result<()> {
            tokio::time::sleep(Duration::from_millis(10)).await;
            result.content.push_str("[processed]");
            Ok(())
        }

        fn processing_stage(&self) -> ProcessingStage {
            ProcessingStage::Early
        }
    }

    let registry = get_post_processor_registry();
    {
        let mut reg = registry.write().expect("Should acquire write lock");
        let processor = Arc::new(ConcurrentTestProcessor);
        let _ = reg.remove("concurrent-test");
        reg.register(processor, 50).expect("Should register processor");
    }

    let config = ExtractionConfig {
        postprocessor: Some(PostProcessorConfig {
            enabled: true,
            enabled_processors: Some(vec!["concurrent-test".to_string()]),
            disabled_processors: None,
        }),
        ..Default::default()
    };

    let mut handles = vec![];
    for i in 0..50 {
        let config = config.clone();

        handles.push(tokio::spawn(async move {
            let result = ExtractionResult {
                content: format!("Content {}", i),
                mime_type: "text/plain".to_string(),
                metadata: Metadata::default(),
                tables: vec![],
                detected_languages: None,
                chunks: None,
                images: None,
                pages: None,
            };

            run_pipeline(result, &config).await
        }));
    }

    for handle in handles {
        let result = handle.await.expect("Task should not panic");
        assert!(result.is_ok(), "Pipeline should succeed");
        let processed = result.unwrap();
        assert!(processed.content.contains("[processed]"), "Processor should run");
    }

    {
        let mut reg = registry.write().expect("Should acquire write lock");
        let _ = reg.remove("concurrent-test");
    }
}

/// Test concurrent registry reads don't block unnecessarily.
///
/// Validates that:
/// - Multiple readers can access registry simultaneously
/// - Registry lookups are fast under concurrent load
#[tokio::test]
async fn test_concurrent_registry_reads() {
    let registry = get_document_extractor_registry();

    let mut handles = vec![];
    for _ in 0..200 {
        let registry_clone = Arc::clone(&registry);
        handles.push(tokio::spawn(async move {
            let start = std::time::Instant::now();

            let reg = registry_clone.read().expect("Should acquire read lock");
            let _extractor = reg.get("text/plain");

            start.elapsed()
        }));
    }

    let mut max_duration = Duration::from_secs(0);
    for handle in handles {
        let duration = handle.await.expect("Task should not panic");
        if duration > max_duration {
            max_duration = duration;
        }
    }

    assert!(
        max_duration < Duration::from_millis(10),
        "Registry reads should be fast, max duration: {:?}",
        max_duration
    );
}

/// Test that extraction throughput scales with concurrency.
///
/// Validates that:
/// - Parallel extractions are actually running in parallel
/// - No global bottlenecks limiting throughput
///
/// Note: This is a performance benchmark that can be flaky based on system load,
/// CPU availability, and other factors. Marked as #[ignore] to run only on demand.
#[tokio::test]
#[ignore]
async fn test_extraction_throughput_scales() {
    let config = ExtractionConfig::default();
    let test_data = b"Throughput test content";

    let sequential_start = std::time::Instant::now();
    for _ in 0..20 {
        let _ = extract_bytes(test_data, "text/plain", &config).await.unwrap();
    }
    let sequential_duration = sequential_start.elapsed();

    let parallel_start = std::time::Instant::now();
    let mut handles = vec![];
    for _ in 0..20 {
        let config = config.clone();
        let data = test_data.to_vec();

        handles.push(tokio::spawn(async move {
            extract_bytes(&data, "text/plain", &config).await
        }));
    }

    for handle in handles {
        let _ = handle.await.expect("Task should not panic");
    }
    let parallel_duration = parallel_start.elapsed();

    println!(
        "Sequential: {:?}, Parallel: {:?}, Speedup: {:.2}x",
        sequential_duration,
        parallel_duration,
        sequential_duration.as_secs_f64() / parallel_duration.as_secs_f64()
    );

    let speedup = sequential_duration.as_secs_f64() / parallel_duration.as_secs_f64();

    assert!(
        speedup > 0.5,
        "Parallel execution should not be significantly slower than sequential. Sequential: {:?}, Parallel: {:?}, Speedup: {:.2}x",
        sequential_duration,
        parallel_duration,
        speedup
    );
}

/// High-load stress test with many concurrent operations.
///
/// Validates system stability under sustained concurrent load.
#[tokio::test]
async fn test_high_concurrency_stress() {
    let config = ExtractionConfig {
        use_cache: true,
        ..Default::default()
    };

    #[allow(unused_mut)]
    let mut formats = vec![
        (b"Text content" as &[u8], "text/plain"),
        (b"{\"json\": true}", "application/json"),
        (b"# Markdown\n\nContent", "text/markdown"),
    ];

    #[cfg(feature = "xml")]
    formats.push((b"<xml><item>content</item></xml>" as &[u8], "application/xml"));

    let mut handles = vec![];
    for _ in 0..100 {
        for (data, mime_type) in &formats {
            let config = config.clone();
            let data = data.to_vec();
            let mime_type = mime_type.to_string();

            handles.push(tokio::spawn(
                async move { extract_bytes(&data, &mime_type, &config).await },
            ));
        }
    }

    let results = timeout(Duration::from_secs(60), async {
        let mut results = vec![];
        for handle in handles {
            results.push(handle.await.expect("Task should not panic"));
        }
        results
    })
    .await
    .expect("High-load stress test should complete within 60s");

    let expected_successes = 100 * formats.len();
    let success_count = results.iter().filter(|r| r.is_ok()).count();
    assert_eq!(
        success_count, expected_successes,
        "All extractions should succeed under stress, got {} successes",
        success_count
    );
}
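
The tests above share one fan-out shape: clone the config and inputs into each task, spawn with tokio::spawn, then drain the JoinHandles under tokio::time::timeout so a deadlock fails the run instead of hanging it. Below is a minimal, self-contained sketch of that shape, assuming the tokio crate with the rt-multi-thread, macros, and time features; fake_extract is a hypothetical stand-in for kreuzberg's extract_bytes, not part of the gem.

use std::time::Duration;
use tokio::time::timeout;

// Hypothetical stand-in for `extract_bytes`: any fallible async unit of work.
async fn fake_extract(data: Vec<u8>) -> Result<usize, String> {
    tokio::time::sleep(Duration::from_millis(5)).await;
    Ok(data.len())
}

#[tokio::main]
async fn main() {
    // Fan out: one task per input, each owning its own copies of the data.
    let mut handles = vec![];
    for i in 0..40 {
        let data = format!("payload {}", i).into_bytes();
        handles.push(tokio::spawn(async move { fake_extract(data).await }));
    }

    // Collect under a deadline so a stuck task fails the run instead of hanging it.
    let results = timeout(Duration::from_secs(5), async {
        let mut out = vec![];
        for handle in handles {
            out.push(handle.await.expect("task should not panic"));
        }
        out
    })
    .await
    .expect("all tasks should finish before the deadline");

    assert!(results.iter().all(|r| r.is_ok()));
    println!("{} tasks completed", results.len());
}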