kreuzberg 4.0.0.rc1 → 4.0.0.rc2
This diff compares the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -8
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -534
- data/.rubocop.yml +538 -0
- data/Gemfile +8 -9
- data/Gemfile.lock +9 -109
- data/README.md +426 -421
- data/Rakefile +25 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -340
- data/ext/kreuzberg_rb/extconf.rb +45 -35
- data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
- data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -17
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +148 -105
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +46 -45
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +691 -684
- data/lib/kreuzberg/error_context.rb +32 -0
- data/lib/kreuzberg/errors.rb +118 -50
- data/lib/kreuzberg/extraction_api.rb +85 -84
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +216 -216
- data/lib/kreuzberg/setup_lib_path.rb +80 -79
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +103 -82
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +520 -468
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -87
- data/spec/binding/cli_spec.rb +55 -54
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -42
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/kreuzberg/Cargo.toml +204 -134
- data/vendor/kreuzberg/README.md +175 -175
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
- data/vendor/kreuzberg/build.rs +474 -460
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
- data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
- data/vendor/kreuzberg/src/core/config.rs +1032 -1032
- data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
- data/vendor/kreuzberg/src/core/io.rs +329 -327
- data/vendor/kreuzberg/src/core/mime.rs +605 -615
- data/vendor/kreuzberg/src/core/mod.rs +45 -42
- data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
- data/vendor/kreuzberg/src/embeddings.rs +432 -323
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +553 -553
- data/vendor/kreuzberg/src/extraction/image.rs +368 -368
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
- data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
- data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
- data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
- data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
- data/vendor/kreuzberg/src/extractors/email.rs +143 -129
- data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
- data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
- data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
- data/vendor/kreuzberg/src/extractors/html.rs +393 -410
- data/vendor/kreuzberg/src/extractors/image.rs +198 -195
- data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
- data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
- data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
- data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
- data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
- data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
- data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
- data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
- data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
- data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
- data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
- data/vendor/kreuzberg/src/extractors/security.rs +484 -0
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
- data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
- data/vendor/kreuzberg/src/extractors/text.rs +260 -242
- data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
- data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
- data/vendor/kreuzberg/src/lib.rs +105 -102
- data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
- data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -0
- data/vendor/kreuzberg/src/pdf/error.rs +122 -122
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
- data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
- data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
- data/vendor/kreuzberg/src/pdf/table.rs +393 -420
- data/vendor/kreuzberg/src/pdf/text.rs +158 -161
- data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
- data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
- data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
- data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
- data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +19 -19
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +903 -873
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
- data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
- data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
- data/vendor/kreuzberg/tests/config_features.rs +598 -580
- data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
- data/vendor/kreuzberg/tests/core_integration.rs +510 -493
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
- data/vendor/kreuzberg/tests/email_integration.rs +325 -325
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
- data/vendor/kreuzberg/tests/error_handling.rs +393 -393
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
- data/vendor/kreuzberg/tests/format_integration.rs +159 -159
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
- data/vendor/kreuzberg/tests/image_integration.rs +253 -253
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
- data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
- data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
- data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
- data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
- data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
- data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
- data/vendor/kreuzberg/tests/security_validation.rs +415 -404
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/rb-sys/.cargo-ok +1 -0
- data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
- data/vendor/rb-sys/Cargo.lock +393 -0
- data/vendor/rb-sys/Cargo.toml +70 -0
- data/vendor/rb-sys/Cargo.toml.orig +57 -0
- data/vendor/rb-sys/LICENSE-APACHE +190 -0
- data/vendor/rb-sys/LICENSE-MIT +21 -0
- data/vendor/rb-sys/bin/release.sh +21 -0
- data/vendor/rb-sys/build/features.rs +108 -0
- data/vendor/rb-sys/build/main.rs +246 -0
- data/vendor/rb-sys/build/stable_api_config.rs +153 -0
- data/vendor/rb-sys/build/version.rs +48 -0
- data/vendor/rb-sys/readme.md +36 -0
- data/vendor/rb-sys/src/bindings.rs +21 -0
- data/vendor/rb-sys/src/hidden.rs +11 -0
- data/vendor/rb-sys/src/lib.rs +34 -0
- data/vendor/rb-sys/src/macros.rs +371 -0
- data/vendor/rb-sys/src/memory.rs +53 -0
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
- data/vendor/rb-sys/src/special_consts.rs +31 -0
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
- data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
- data/vendor/rb-sys/src/stable_api.rs +261 -0
- data/vendor/rb-sys/src/symbol.rs +31 -0
- data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
- data/vendor/rb-sys/src/utils.rs +89 -0
- data/vendor/rb-sys/src/value_type.rs +7 -0
- metadata +90 -95
- data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
- data/spec/examples.txt +0 -104
- data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
- data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
- data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
- data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
- data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
- data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
- data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
- data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
- data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
- data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
data/vendor/kreuzberg/tests/ocr_quality.rs

@@ -1,627 +1,627 @@
-//! OCR quality assessment tests.
-//!
-//! This module tests OCR quality by comparing against ground truth (native PDF text layers).
-//! Adopts techniques from scripts/ocr_quality_report.py:
-//! - Token-based precision/recall/F1 scoring
-//! - Numeric accuracy tracking (critical for tables, data)
-//! - Layout fidelity (line count preservation)
-//! - Markdown structure preservation
-//!
-//! Test philosophy:
-//! - Compare OCR output against searchable PDF text (ground truth)
-//! - Measure accuracy with precision, recall, F1 metrics
-//! - Track numeric token accuracy separately (higher importance)
-//! - Verify layout preservation (line counts, structure)
-//! - Assert minimum quality thresholds
-
-mod helpers;
-
-use helpers::*;
-use kreuzberg::core::config::{ExtractionConfig, OcrConfig};
-use kreuzberg::extract_file_sync;
-use std::collections::HashMap;
-
-#[derive(Debug, Clone)]
-struct TokenScores {
-    precision: f64,
-    recall: f64,
-    f1: f64,
-}
-
-impl TokenScores {
-    fn new(precision: f64, recall: f64) -> Self {
-        let f1 = if precision + recall == 0.0 {
-            0.0
-        } else {
-            2.0 * precision * recall / (precision + recall)
-        };
-        Self { precision, recall, f1 }
-    }
-}
-
-/// Tokenize and normalize text for comparison.
-/// Matches Python implementation: lowercase, normalize dashes, remove punctuation.
-fn tokenize_text(text: &str) -> HashMap<String, usize> {
-    let normalized = text
-        .to_lowercase()
-        .replace(['\u{2013}', '\u{2014}'], "-")
-        .chars()
-        .map(|ch| {
-            if ch >= ' ' || ch == '\n' || ch == '\r' || ch == '\t' {
-                ch
-            } else {
-                ' '
-            }
-        })
-        .collect::<String>();
-
-    let normalized = normalized
-        .chars()
-        .map(|ch| if "()[],.;:+`".contains(ch) { ' ' } else { ch })
-        .collect::<String>();
-
-    let mut tokens: HashMap<String, usize> = HashMap::new();
-    for token in normalized.split_whitespace() {
-        *tokens.entry(token.to_string()).or_insert(0) += 1;
-    }
-    tokens
-}
-
-/// Extract numeric tokens from token map.
-/// Critical for measuring accuracy on tables, data, figures.
-fn extract_numeric_tokens(tokens: &HashMap<String, usize>) -> HashMap<String, usize> {
-    let mut numeric_tokens: HashMap<String, usize> = HashMap::new();
-
-    for (token, count) in tokens {
-        let stripped = token.trim_matches(|c: char| "()[]{}".contains(c));
-
-        if !stripped.chars().any(|ch| ch.is_ascii_digit()) {
-            continue;
-        }
-
-        if stripped.chars().any(|ch| ch.is_ascii_alphabetic()) {
-            continue;
-        }
-
-        *numeric_tokens.entry(stripped.to_string()).or_insert(0) += count;
-    }
-
-    numeric_tokens
-}
-
-/// Calculate precision, recall, F1 for token sets.
-fn calculate_token_scores(
-    truth_tokens: &HashMap<String, usize>,
-    candidate_tokens: &HashMap<String, usize>,
-) -> TokenScores {
-    let truth_total: usize = truth_tokens.values().sum();
-    let candidate_total: usize = candidate_tokens.values().sum();
-
-    if truth_total == 0 && candidate_total == 0 {
-        return TokenScores::new(1.0, 1.0);
-    }
-
-    let overlap: usize = truth_tokens
-        .keys()
-        .map(|token| {
-            let truth_count = truth_tokens.get(token).unwrap_or(&0);
-            let candidate_count = candidate_tokens.get(token).unwrap_or(&0);
-            truth_count.min(candidate_count)
-        })
-        .sum();
-
-    let precision = if candidate_total > 0 {
-        overlap as f64 / candidate_total as f64
-    } else {
-        0.0
-    };
-
-    let recall = if truth_total > 0 {
-        overlap as f64 / truth_total as f64
-    } else {
-        0.0
-    };
-
-    TokenScores::new(precision, recall)
-}
-
-/// Count non-empty lines in text (layout fidelity metric).
-fn count_lines(text: &str) -> usize {
-    text.lines().filter(|line| !line.trim().is_empty()).count()
-}
-
-/// Calculate relative layout delta (0.0 = perfect, 1.0 = worst).
-fn layout_delta(truth_lines: usize, ocr_lines: usize) -> f64 {
-    if truth_lines == 0 {
-        return if ocr_lines == 0 { 0.0 } else { 1.0 };
-    }
-
-    let delta = (ocr_lines as f64 - truth_lines as f64).abs() / truth_lines as f64;
-    delta.min(1.0)
-}
-
-#[test]
-fn test_ocr_quality_simple_text_high_accuracy() {
-    if skip_if_missing("pdfs/fake_memo.pdf") {
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/fake_memo.pdf");
-
-    let truth_result =
-        extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
-
-    assert!(
-        truth_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        truth_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let ocr_config = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: None,
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
-
-    assert!(
-        ocr_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        ocr_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    println!("Truth content length: {}", truth_result.content.len());
-    println!("OCR content length: {}", ocr_result.content.len());
-    println!(
-        "Truth first 100 chars: {:?}",
-        &truth_result.content.chars().take(100).collect::<String>()
-    );
-    println!(
-        "OCR first 100 chars: {:?}",
-        &ocr_result.content.chars().take(100).collect::<String>()
-    );
-
-    let truth_tokens = tokenize_text(&truth_result.content);
-    let ocr_tokens = tokenize_text(&ocr_result.content);
-
-    println!("Truth token count: {}", truth_tokens.len());
-    println!("OCR token count: {}", ocr_tokens.len());
-
-    let scores = calculate_token_scores(&truth_tokens, &ocr_tokens);
-
-    println!("Simple text OCR quality:");
-    println!("  Precision: {:.3}", scores.precision);
-    println!("  Recall: {:.3}", scores.recall);
-    println!("  F1: {:.3}", scores.f1);
-
-    assert!(
-        scores.f1 >= 0.70,
-        "OCR F1 score too low: {:.3} (expected >= 0.70). Precision: {:.3}, Recall: {:.3}",
-        scores.f1,
-        scores.precision,
-        scores.recall
-    );
-}
-
-#[test]
-fn test_ocr_quality_numeric_accuracy() {
-    if skip_if_missing("pdfs/embedded_images_tables.pdf") {
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/embedded_images_tables.pdf");
-
-    let truth_result =
-        extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
-
-    assert!(
-        truth_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        truth_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let ocr_config = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: None,
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
-
-    assert!(
-        ocr_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        ocr_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let truth_tokens = tokenize_text(&truth_result.content);
-    let ocr_tokens = tokenize_text(&ocr_result.content);
-
-    let truth_numeric = extract_numeric_tokens(&truth_tokens);
-    let ocr_numeric = extract_numeric_tokens(&ocr_tokens);
-
-    if !truth_numeric.is_empty() {
-        let numeric_scores = calculate_token_scores(&truth_numeric, &ocr_numeric);
-
-        println!("Numeric token OCR quality:");
-        println!("  Precision: {:.3}", numeric_scores.precision);
-        println!("  Recall: {:.3}", numeric_scores.recall);
-        println!("  F1: {:.3}", numeric_scores.f1);
-        println!("  Numeric tokens in truth: {}", truth_numeric.len());
-        println!("  Numeric tokens in OCR: {}", ocr_numeric.len());
-
-        assert!(
-            numeric_scores.f1 >= 0.75,
-            "Numeric F1 score too low: {:.3} (expected >= 0.75). Numbers must be accurate!",
-            numeric_scores.f1
-        );
-    }
-}
-
-#[test]
-fn test_ocr_quality_layout_preservation() {
-    if skip_if_missing("pdfs/fake_memo.pdf") {
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/fake_memo.pdf");
-
-    let truth_result =
-        extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
-
-    assert!(
-        truth_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        truth_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let ocr_config = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: None,
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
-
-    assert!(
-        ocr_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        ocr_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let truth_lines = count_lines(&truth_result.content);
-    let ocr_lines = count_lines(&ocr_result.content);
-    let delta = layout_delta(truth_lines, ocr_lines);
-
-    println!("Layout preservation:");
-    println!("  Truth lines: {}", truth_lines);
-    println!("  OCR lines: {}", ocr_lines);
-    println!("  Layout delta: {:.3}", delta);
-
-    assert!(
-        delta <= 0.40,
-        "Layout delta too high: {:.3} (expected <= 0.40). Truth: {} lines, OCR: {} lines",
-        delta,
-        truth_lines,
-        ocr_lines
-    );
-}
-
-#[test]
-fn test_ocr_quality_technical_document() {
-    if skip_if_missing("pdfs/code_and_formula.pdf") {
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/code_and_formula.pdf");
-
-    let truth_result =
-        extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
-
-    assert!(
-        truth_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        truth_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let ocr_config = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: None,
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
-
-    assert!(
-        ocr_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        ocr_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let truth_tokens = tokenize_text(&truth_result.content);
-    let ocr_tokens = tokenize_text(&ocr_result.content);
-    let scores = calculate_token_scores(&truth_tokens, &ocr_tokens);
-
-    println!("Technical document OCR quality:");
-    println!("  Precision: {:.3}", scores.precision);
-    println!("  Recall: {:.3}", scores.recall);
-    println!("  F1: {:.3}", scores.f1);
-
-    assert!(
-        scores.f1 >= 0.60,
-        "Technical document F1 score too low: {:.3} (expected >= 0.60)",
-        scores.f1
-    );
-}
-
-#[test]
-fn test_ocr_consistency_across_runs() {
-    if skip_if_missing("pdfs/fake_memo.pdf") {
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/fake_memo.pdf");
-    let ocr_config = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: None,
-        }),
-        force_ocr: true,
-        use_cache: false,
-        ..Default::default()
-    };
-
-    let result1 = extract_file_sync(&file_path, None, &ocr_config).expect("First OCR run should succeed");
-    let result2 = extract_file_sync(&file_path, None, &ocr_config).expect("Second OCR run should succeed");
-    let result3 = extract_file_sync(&file_path, None, &ocr_config).expect("Third OCR run should succeed");
-
-    assert!(
-        result1.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(result1.detected_languages.is_none(), "Language detection not enabled");
-    assert!(
-        result2.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(result2.detected_languages.is_none(), "Language detection not enabled");
-    assert!(
-        result3.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(result3.detected_languages.is_none(), "Language detection not enabled");
-
-    let tokens1 = tokenize_text(&result1.content);
-    let tokens2 = tokenize_text(&result2.content);
-    let tokens3 = tokenize_text(&result3.content);
-
-    let scores_1_2 = calculate_token_scores(&tokens1, &tokens2);
-    let scores_1_3 = calculate_token_scores(&tokens1, &tokens3);
-
-    println!("OCR consistency across runs:");
-    println!("  Run1 vs Run2 F1: {:.3}", scores_1_2.f1);
-    println!("  Run1 vs Run3 F1: {:.3}", scores_1_3.f1);
-
-    assert!(
-        scores_1_2.f1 >= 0.98,
-        "OCR inconsistent between runs: F1 {:.3} (expected >= 0.98)",
-        scores_1_2.f1
-    );
-    assert!(
-        scores_1_3.f1 >= 0.98,
-        "OCR inconsistent between runs: F1 {:.3} (expected >= 0.98)",
-        scores_1_3.f1
-    );
-}
-
-#[test]
-fn test_ocr_consistency_with_different_psm() {
-    if skip_if_missing("pdfs/fake_memo.pdf") {
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/fake_memo.pdf");
-
-    let config_psm3 = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: Some(kreuzberg::types::TesseractConfig {
-                psm: 3,
-                ..Default::default()
-            }),
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let config_psm6 = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: Some(kreuzberg::types::TesseractConfig {
-                psm: 6,
-                ..Default::default()
-            }),
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let result_psm3 = extract_file_sync(&file_path, None, &config_psm3).expect("PSM 3 extraction should succeed");
-    let result_psm6 = extract_file_sync(&file_path, None, &config_psm6).expect("PSM 6 extraction should succeed");
-
-    assert!(
-        result_psm3.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        result_psm3.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-    assert!(
-        result_psm6.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        result_psm6.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let tokens_psm3 = tokenize_text(&result_psm3.content);
-    let tokens_psm6 = tokenize_text(&result_psm6.content);
-
-    let scores = calculate_token_scores(&tokens_psm3, &tokens_psm6);
-
-    println!("OCR consistency across PSM modes:");
-    println!("  PSM 3 vs PSM 6 F1: {:.3}", scores.f1);
-
-    assert!(
-        scores.f1 >= 0.85,
-        "PSM modes produce too different results: F1 {:.3} (expected >= 0.85)",
-        scores.f1
-    );
-}
-
-#[test]
-fn test_ocr_quality_multi_page_consistency() {
-    if skip_if_missing("pdfs/a_course_in_machine_learning_ciml_v0_9_all.pdf") {
-        return;
-    }
-
-    if std::env::var_os("KREUZBERG_RUN_FULL_OCR").is_none() {
-        println!("Skipping test_ocr_quality_multi_page_consistency: set KREUZBERG_RUN_FULL_OCR=1 to enable");
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/a_course_in_machine_learning_ciml_v0_9_all.pdf");
-
-    let truth_result =
-        extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
-
-    assert!(
-        truth_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        truth_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let ocr_config = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: None,
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
-
-    assert!(
-        ocr_result.chunks.is_none(),
-        "Chunks should be None without chunking config"
-    );
-    assert!(
-        ocr_result.detected_languages.is_none(),
-        "Language detection not enabled"
-    );
-
-    let truth_tokens = tokenize_text(&truth_result.content);
-    let ocr_tokens = tokenize_text(&ocr_result.content);
-
-    let truth_count: usize = truth_tokens.values().sum();
-    let ocr_count: usize = ocr_tokens.values().sum();
-
-    println!("Multi-page document quality:");
-    println!("  Truth token count: {}", truth_count);
-    println!("  OCR token count: {}", ocr_count);
-
-    assert!(
-        ocr_count >= (truth_count * 50 / 100),
-        "OCR extracted too few tokens: {} (expected >= 50% of {})",
-        ocr_count,
-        truth_count
-    );
-}
-
-#[test]
-fn test_ocr_quality_with_tables() {
-    if skip_if_missing("pdfs/embedded_images_tables.pdf") {
-        return;
-    }
-
-    let file_path = get_test_file_path("pdfs/embedded_images_tables.pdf");
-
-    let ocr_config = ExtractionConfig {
-        ocr: Some(OcrConfig {
-            backend: "tesseract".to_string(),
-            language: "eng".to_string(),
-            tesseract_config: Some(kreuzberg::types::TesseractConfig {
-                enable_table_detection: true,
-                table_min_confidence: 0.5,
-                ..Default::default()
-            }),
-        }),
-        force_ocr: true,
-        ..Default::default()
-    };
-
-    let result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with table detection");
-
-    assert!(result.chunks.is_none(), "Chunks should be None without chunking config");
-    assert!(result.detected_languages.is_none(), "Language detection not enabled");
-
-    println!("Table extraction quality:");
-    println!("  Tables found: {}", result.tables.len());
-    println!("  Content length: {}", result.content.len());
-
-    assert!(
-        !result.content.trim().is_empty(),
-        "OCR with tables should produce content"
-    );
-}
1
|
+
//! OCR quality assessment tests.
|
|
2
|
+
//!
|
|
3
|
+
//! This module tests OCR quality by comparing against ground truth (native PDF text layers).
|
|
4
|
+
//! Adopts techniques from scripts/ocr_quality_report.py:
|
|
5
|
+
//! - Token-based precision/recall/F1 scoring
|
|
6
|
+
//! - Numeric accuracy tracking (critical for tables, data)
|
|
7
|
+
//! - Layout fidelity (line count preservation)
|
|
8
|
+
//! - Markdown structure preservation
|
|
9
|
+
//!
|
|
10
|
+
//! Test philosophy:
|
|
11
|
+
//! - Compare OCR output against searchable PDF text (ground truth)
|
|
12
|
+
//! - Measure accuracy with precision, recall, F1 metrics
|
|
13
|
+
//! - Track numeric token accuracy separately (higher importance)
|
|
14
|
+
//! - Verify layout preservation (line counts, structure)
|
|
15
|
+
//! - Assert minimum quality thresholds
|
|
16
|
+
|
|
17
|
+
mod helpers;
|
|
18
|
+
|
|
19
|
+
use helpers::*;
|
|
20
|
+
use kreuzberg::core::config::{ExtractionConfig, OcrConfig};
|
|
21
|
+
use kreuzberg::extract_file_sync;
|
|
22
|
+
use std::collections::HashMap;
|
|
23
|
+
|
|
24
|
+
#[derive(Debug, Clone)]
|
|
25
|
+
struct TokenScores {
|
|
26
|
+
precision: f64,
|
|
27
|
+
recall: f64,
|
|
28
|
+
f1: f64,
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
impl TokenScores {
|
|
32
|
+
fn new(precision: f64, recall: f64) -> Self {
|
|
33
|
+
let f1 = if precision + recall == 0.0 {
|
|
34
|
+
0.0
|
|
35
|
+
} else {
|
|
36
|
+
2.0 * precision * recall / (precision + recall)
|
|
37
|
+
};
|
|
38
|
+
Self { precision, recall, f1 }
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
/// Tokenize and normalize text for comparison.
|
|
43
|
+
/// Matches Python implementation: lowercase, normalize dashes, remove punctuation.
|
|
44
|
+
fn tokenize_text(text: &str) -> HashMap<String, usize> {
|
|
45
|
+
let normalized = text
|
|
46
|
+
.to_lowercase()
|
|
47
|
+
.replace(['\u{2013}', '\u{2014}'], "-")
|
|
48
|
+
.chars()
|
|
49
|
+
.map(|ch| {
|
|
50
|
+
if ch >= ' ' || ch == '\n' || ch == '\r' || ch == '\t' {
|
|
51
|
+
ch
|
|
52
|
+
} else {
|
|
53
|
+
' '
|
|
54
|
+
}
|
|
55
|
+
})
|
|
56
|
+
.collect::<String>();
|
|
57
|
+
|
|
58
|
+
let normalized = normalized
|
|
59
|
+
.chars()
|
|
60
|
+
.map(|ch| if "()[],.;:+`".contains(ch) { ' ' } else { ch })
|
|
61
|
+
.collect::<String>();
|
|
62
|
+
|
|
63
|
+
let mut tokens: HashMap<String, usize> = HashMap::new();
|
|
64
|
+
for token in normalized.split_whitespace() {
|
|
65
|
+
*tokens.entry(token.to_string()).or_insert(0) += 1;
|
|
66
|
+
}
|
|
67
|
+
tokens
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
/// Extract numeric tokens from token map.
|
|
71
|
+
/// Critical for measuring accuracy on tables, data, figures.
|
|
72
|
+
fn extract_numeric_tokens(tokens: &HashMap<String, usize>) -> HashMap<String, usize> {
|
|
73
|
+
let mut numeric_tokens: HashMap<String, usize> = HashMap::new();
|
|
74
|
+
|
|
75
|
+
for (token, count) in tokens {
|
|
76
|
+
let stripped = token.trim_matches(|c: char| "()[]{}".contains(c));
|
|
77
|
+
|
|
78
|
+
if !stripped.chars().any(|ch| ch.is_ascii_digit()) {
|
|
79
|
+
continue;
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
if stripped.chars().any(|ch| ch.is_ascii_alphabetic()) {
|
|
83
|
+
continue;
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
*numeric_tokens.entry(stripped.to_string()).or_insert(0) += count;
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
numeric_tokens
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/// Calculate precision, recall, F1 for token sets.
|
|
93
|
+
fn calculate_token_scores(
|
|
94
|
+
truth_tokens: &HashMap<String, usize>,
|
|
95
|
+
candidate_tokens: &HashMap<String, usize>,
|
|
96
|
+
) -> TokenScores {
|
|
97
|
+
let truth_total: usize = truth_tokens.values().sum();
|
|
98
|
+
let candidate_total: usize = candidate_tokens.values().sum();
|
|
99
|
+
|
|
100
|
+
if truth_total == 0 && candidate_total == 0 {
|
|
101
|
+
return TokenScores::new(1.0, 1.0);
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
let overlap: usize = truth_tokens
|
|
105
|
+
.keys()
|
|
106
|
+
.map(|token| {
|
|
107
|
+
let truth_count = truth_tokens.get(token).unwrap_or(&0);
|
|
108
|
+
let candidate_count = candidate_tokens.get(token).unwrap_or(&0);
|
|
109
|
+
truth_count.min(candidate_count)
|
|
110
|
+
})
|
|
111
|
+
.sum();
|
|
112
|
+
|
|
113
|
+
let precision = if candidate_total > 0 {
|
|
114
|
+
overlap as f64 / candidate_total as f64
|
|
115
|
+
} else {
|
|
116
|
+
0.0
|
|
117
|
+
};
|
|
118
|
+
|
|
119
|
+
let recall = if truth_total > 0 {
|
|
120
|
+
overlap as f64 / truth_total as f64
|
|
121
|
+
} else {
|
|
122
|
+
0.0
|
|
123
|
+
};
|
|
124
|
+
|
|
125
|
+
TokenScores::new(precision, recall)
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
/// Count non-empty lines in text (layout fidelity metric).
|
|
129
|
+
fn count_lines(text: &str) -> usize {
|
|
130
|
+
text.lines().filter(|line| !line.trim().is_empty()).count()
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
/// Calculate relative layout delta (0.0 = perfect, 1.0 = worst).
|
|
134
|
+
fn layout_delta(truth_lines: usize, ocr_lines: usize) -> f64 {
|
|
135
|
+
if truth_lines == 0 {
|
|
136
|
+
return if ocr_lines == 0 { 0.0 } else { 1.0 };
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
let delta = (ocr_lines as f64 - truth_lines as f64).abs() / truth_lines as f64;
|
|
140
|
+
delta.min(1.0)
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
#[test]
|
|
144
|
+
fn test_ocr_quality_simple_text_high_accuracy() {
|
|
145
|
+
if skip_if_missing("pdfs/fake_memo.pdf") {
|
|
146
|
+
return;
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
let file_path = get_test_file_path("pdfs/fake_memo.pdf");
|
|
150
|
+
|
|
151
|
+
let truth_result =
|
|
152
|
+
extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
|
|
153
|
+
|
|
154
|
+
assert!(
|
|
155
|
+
truth_result.chunks.is_none(),
|
|
156
|
+
"Chunks should be None without chunking config"
|
|
157
|
+
);
|
|
158
|
+
assert!(
|
|
159
|
+
truth_result.detected_languages.is_none(),
|
|
160
|
+
"Language detection not enabled"
|
|
161
|
+
);
|
|
162
|
+
|
|
163
|
+
let ocr_config = ExtractionConfig {
|
|
164
|
+
ocr: Some(OcrConfig {
|
|
165
|
+
backend: "tesseract".to_string(),
|
|
166
|
+
language: "eng".to_string(),
|
|
167
|
+
tesseract_config: None,
|
|
168
|
+
}),
|
|
169
|
+
force_ocr: true,
|
|
170
|
+
..Default::default()
|
|
171
|
+
};
|
|
172
|
+
|
|
173
|
+
let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
|
|
174
|
+
|
|
175
|
+
assert!(
|
|
176
|
+
ocr_result.chunks.is_none(),
|
|
177
|
+
"Chunks should be None without chunking config"
|
|
178
|
+
);
|
|
179
|
+
assert!(
|
|
180
|
+
ocr_result.detected_languages.is_none(),
|
|
181
|
+
"Language detection not enabled"
|
|
182
|
+
);
|
|
183
|
+
|
|
184
|
+
println!("Truth content length: {}", truth_result.content.len());
|
|
185
|
+
println!("OCR content length: {}", ocr_result.content.len());
|
|
186
|
+
println!(
|
|
187
|
+
"Truth first 100 chars: {:?}",
|
|
188
|
+
&truth_result.content.chars().take(100).collect::<String>()
|
|
189
|
+
);
|
|
190
|
+
println!(
|
|
191
|
+
"OCR first 100 chars: {:?}",
|
|
192
|
+
&ocr_result.content.chars().take(100).collect::<String>()
|
|
193
|
+
);
|
|
194
|
+
|
|
195
|
+
let truth_tokens = tokenize_text(&truth_result.content);
|
|
196
|
+
let ocr_tokens = tokenize_text(&ocr_result.content);
|
|
197
|
+
|
|
198
|
+
println!("Truth token count: {}", truth_tokens.len());
|
|
199
|
+
println!("OCR token count: {}", ocr_tokens.len());
|
|
200
|
+
|
|
201
|
+
let scores = calculate_token_scores(&truth_tokens, &ocr_tokens);
|
|
202
|
+
|
|
203
|
+
println!("Simple text OCR quality:");
|
|
204
|
+
println!(" Precision: {:.3}", scores.precision);
|
|
205
|
+
println!(" Recall: {:.3}", scores.recall);
|
|
206
|
+
println!(" F1: {:.3}", scores.f1);
|
|
207
|
+
|
|
208
|
+
assert!(
|
|
209
|
+
scores.f1 >= 0.70,
|
|
210
|
+
"OCR F1 score too low: {:.3} (expected >= 0.70). Precision: {:.3}, Recall: {:.3}",
|
|
211
|
+
scores.f1,
|
|
212
|
+
scores.precision,
|
|
213
|
+
scores.recall
|
|
214
|
+
);
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
#[test]
|
|
218
|
+
fn test_ocr_quality_numeric_accuracy() {
|
|
219
|
+
if skip_if_missing("pdfs/embedded_images_tables.pdf") {
|
|
220
|
+
return;
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
let file_path = get_test_file_path("pdfs/embedded_images_tables.pdf");
|
|
224
|
+
|
|
225
|
+
let truth_result =
|
|
226
|
+
extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
|
|
227
|
+
|
|
228
|
+
assert!(
|
|
229
|
+
truth_result.chunks.is_none(),
|
|
230
|
+
"Chunks should be None without chunking config"
|
|
231
|
+
);
|
|
232
|
+
assert!(
|
|
233
|
+
truth_result.detected_languages.is_none(),
|
|
234
|
+
"Language detection not enabled"
|
|
235
|
+
);
|
|
236
|
+
|
|
237
|
+
let ocr_config = ExtractionConfig {
|
|
238
|
+
ocr: Some(OcrConfig {
|
|
239
|
+
backend: "tesseract".to_string(),
|
|
240
|
+
language: "eng".to_string(),
|
|
241
|
+
tesseract_config: None,
|
|
242
|
+
}),
|
|
243
|
+
force_ocr: true,
|
|
244
|
+
..Default::default()
|
|
245
|
+
};
|
|
246
|
+
|
|
247
|
+
let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
|
|
248
|
+
|
|
249
|
+
assert!(
|
|
250
|
+
ocr_result.chunks.is_none(),
|
|
251
|
+
"Chunks should be None without chunking config"
|
|
252
|
+
);
|
|
253
|
+
assert!(
|
|
254
|
+
ocr_result.detected_languages.is_none(),
|
|
255
|
+
"Language detection not enabled"
|
|
256
|
+
);
|
|
257
|
+
|
|
258
|
+
let truth_tokens = tokenize_text(&truth_result.content);
|
|
259
|
+
let ocr_tokens = tokenize_text(&ocr_result.content);
|
|
260
|
+
|
|
261
|
+
let truth_numeric = extract_numeric_tokens(&truth_tokens);
|
|
262
|
+
let ocr_numeric = extract_numeric_tokens(&ocr_tokens);
|
|
263
|
+
|
|
264
|
+
if !truth_numeric.is_empty() {
|
|
265
|
+
let numeric_scores = calculate_token_scores(&truth_numeric, &ocr_numeric);
|
|
266
|
+
|
|
267
|
+
println!("Numeric token OCR quality:");
|
|
268
|
+
println!(" Precision: {:.3}", numeric_scores.precision);
|
|
269
|
+
println!(" Recall: {:.3}", numeric_scores.recall);
|
|
270
|
+
println!(" F1: {:.3}", numeric_scores.f1);
|
|
271
|
+
println!(" Numeric tokens in truth: {}", truth_numeric.len());
|
|
272
|
+
println!(" Numeric tokens in OCR: {}", ocr_numeric.len());
|
|
273
|
+
|
|
274
|
+
assert!(
|
|
275
|
+
numeric_scores.f1 >= 0.75,
|
|
276
|
+
"Numeric F1 score too low: {:.3} (expected >= 0.75). Numbers must be accurate!",
|
|
277
|
+
numeric_scores.f1
|
|
278
|
+
);
|
|
279
|
+
}
|
|
280
|
+
}
|
|
281
|
+
|
|
282
|
+
#[test]
|
|
283
|
+
fn test_ocr_quality_layout_preservation() {
|
|
284
|
+
if skip_if_missing("pdfs/fake_memo.pdf") {
|
|
285
|
+
return;
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
let file_path = get_test_file_path("pdfs/fake_memo.pdf");
|
|
289
|
+
|
|
290
|
+
let truth_result =
|
|
291
|
+
extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");
|
|
292
|
+
|
|
293
|
+
assert!(
|
|
294
|
+
truth_result.chunks.is_none(),
|
|
295
|
+
"Chunks should be None without chunking config"
|
|
296
|
+
);
|
|
297
|
+
assert!(
|
|
298
|
+
truth_result.detected_languages.is_none(),
|
|
299
|
+
"Language detection not enabled"
|
|
300
|
+
);
|
|
301
|
+
|
|
302
|
+
let ocr_config = ExtractionConfig {
|
|
303
|
+
ocr: Some(OcrConfig {
|
|
304
|
+
backend: "tesseract".to_string(),
|
|
305
|
+
language: "eng".to_string(),
|
|
306
|
+
tesseract_config: None,
|
|
307
|
+
}),
|
|
308
|
+
force_ocr: true,
|
|
309
|
+
..Default::default()
|
|
310
|
+
};
|
|
311
|
+
|
|
312
|
+
let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");
|
|
313
|
+
|
|
314
|
+
assert!(
|
|
315
|
+
ocr_result.chunks.is_none(),
|
|
316
|
+
"Chunks should be None without chunking config"
|
|
317
|
+
);
|
|
318
|
+
assert!(
|
|
319
|
+
ocr_result.detected_languages.is_none(),
|
|
320
|
+
"Language detection not enabled"
|
|
321
|
+
);
|
|
322
|
+
|
|
323
|
+
let truth_lines = count_lines(&truth_result.content);
|
|
324
|
+
let ocr_lines = count_lines(&ocr_result.content);
|
|
325
|
+
let delta = layout_delta(truth_lines, ocr_lines);
|
|
326
|
+
|
|
327
|
+
println!("Layout preservation:");
|
|
328
|
+
println!(" Truth lines: {}", truth_lines);
|
|
329
|
+
println!(" OCR lines: {}", ocr_lines);
|
|
330
|
+
println!(" Layout delta: {:.3}", delta);
|
|
331
|
+
|
|
332
|
+
assert!(
|
|
333
|
+
delta <= 0.40,
|
|
334
|
+
"Layout delta too high: {:.3} (expected <= 0.40). Truth: {} lines, OCR: {} lines",
|
|
335
|
+
delta,
|
|
336
|
+
truth_lines,
|
|
337
|
+
ocr_lines
|
|
338
|
+
);
|
|
339
|
+
}
|
|
340
|
+
|
|
341
|
+
#[test]
fn test_ocr_quality_technical_document() {
    if skip_if_missing("pdfs/code_and_formula.pdf") {
        return;
    }

    let file_path = get_test_file_path("pdfs/code_and_formula.pdf");

    let truth_result =
        extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");

    assert!(
        truth_result.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(
        truth_result.detected_languages.is_none(),
        "Language detection not enabled"
    );

    let ocr_config = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: None,
        }),
        force_ocr: true,
        ..Default::default()
    };

    let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");

    assert!(
        ocr_result.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(
        ocr_result.detected_languages.is_none(),
        "Language detection not enabled"
    );

    let truth_tokens = tokenize_text(&truth_result.content);
    let ocr_tokens = tokenize_text(&ocr_result.content);
    let scores = calculate_token_scores(&truth_tokens, &ocr_tokens);

    println!("Technical document OCR quality:");
    println!(" Precision: {:.3}", scores.precision);
    println!(" Recall: {:.3}", scores.recall);
    println!(" F1: {:.3}", scores.f1);

    assert!(
        scores.f1 >= 0.60,
        "Technical document F1 score too low: {:.3} (expected >= 0.60)",
        scores.f1
    );
}
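
// NOTE (illustrative): the precision/recall/F1 assertions above treat token
// maps as multisets; overlap is the per-token minimum count. The real
// `calculate_token_scores` is defined earlier in this file; this sketch only
// records the assumed math behind the thresholds.
#[allow(dead_code)]
fn token_f1_sketch(
    truth: &std::collections::HashMap<String, usize>,
    ocr: &std::collections::HashMap<String, usize>,
) -> f64 {
    let overlap: usize = ocr
        .iter()
        .map(|(tok, &n)| n.min(truth.get(tok).copied().unwrap_or(0)))
        .sum();
    let (truth_total, ocr_total): (usize, usize) = (truth.values().sum(), ocr.values().sum());
    if truth_total == 0 || ocr_total == 0 {
        return 0.0;
    }
    let precision = overlap as f64 / ocr_total as f64; // matched / emitted
    let recall = overlap as f64 / truth_total as f64; // matched / expected
    if precision + recall == 0.0 {
        0.0
    } else {
        2.0 * precision * recall / (precision + recall)
    }
}
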
#[test]
fn test_ocr_consistency_across_runs() {
    if skip_if_missing("pdfs/fake_memo.pdf") {
        return;
    }

    let file_path = get_test_file_path("pdfs/fake_memo.pdf");
    let ocr_config = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: None,
        }),
        force_ocr: true,
        use_cache: false,
        ..Default::default()
    };

    let result1 = extract_file_sync(&file_path, None, &ocr_config).expect("First OCR run should succeed");
    let result2 = extract_file_sync(&file_path, None, &ocr_config).expect("Second OCR run should succeed");
    let result3 = extract_file_sync(&file_path, None, &ocr_config).expect("Third OCR run should succeed");

    assert!(
        result1.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(result1.detected_languages.is_none(), "Language detection not enabled");
    assert!(
        result2.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(result2.detected_languages.is_none(), "Language detection not enabled");
    assert!(
        result3.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(result3.detected_languages.is_none(), "Language detection not enabled");

    let tokens1 = tokenize_text(&result1.content);
    let tokens2 = tokenize_text(&result2.content);
    let tokens3 = tokenize_text(&result3.content);

    let scores_1_2 = calculate_token_scores(&tokens1, &tokens2);
    let scores_1_3 = calculate_token_scores(&tokens1, &tokens3);

    println!("OCR consistency across runs:");
    println!(" Run1 vs Run2 F1: {:.3}", scores_1_2.f1);
    println!(" Run1 vs Run3 F1: {:.3}", scores_1_3.f1);

    assert!(
        scores_1_2.f1 >= 0.98,
        "OCR inconsistent between runs: F1 {:.3} (expected >= 0.98)",
        scores_1_2.f1
    );
    assert!(
        scores_1_3.f1 >= 0.98,
        "OCR inconsistent between runs: F1 {:.3} (expected >= 0.98)",
        scores_1_3.f1
    );
}
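
// NOTE: the consistency test sets `use_cache: false` so every call re-runs
// Tesseract instead of replaying a cached result; with caching on, all three
// runs would trivially be identical. A small builder like this (illustrative,
// not part of the crate) would also cut the repeated config literals:
#[allow(dead_code)]
fn eng_ocr_config_sketch(use_cache: bool) -> ExtractionConfig {
    ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: None,
        }),
        force_ocr: true,
        use_cache,
        ..Default::default()
    }
}
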
#[test]
fn test_ocr_consistency_with_different_psm() {
    if skip_if_missing("pdfs/fake_memo.pdf") {
        return;
    }

    let file_path = get_test_file_path("pdfs/fake_memo.pdf");

    let config_psm3 = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: Some(kreuzberg::types::TesseractConfig {
                psm: 3,
                ..Default::default()
            }),
        }),
        force_ocr: true,
        ..Default::default()
    };

    let config_psm6 = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: Some(kreuzberg::types::TesseractConfig {
                psm: 6,
                ..Default::default()
            }),
        }),
        force_ocr: true,
        ..Default::default()
    };

    let result_psm3 = extract_file_sync(&file_path, None, &config_psm3).expect("PSM 3 extraction should succeed");
    let result_psm6 = extract_file_sync(&file_path, None, &config_psm6).expect("PSM 6 extraction should succeed");

    assert!(
        result_psm3.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(
        result_psm3.detected_languages.is_none(),
        "Language detection not enabled"
    );
    assert!(
        result_psm6.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(
        result_psm6.detected_languages.is_none(),
        "Language detection not enabled"
    );

    let tokens_psm3 = tokenize_text(&result_psm3.content);
    let tokens_psm6 = tokenize_text(&result_psm6.content);

    let scores = calculate_token_scores(&tokens_psm3, &tokens_psm6);

    println!("OCR consistency across PSM modes:");
    println!(" PSM 3 vs PSM 6 F1: {:.3}", scores.f1);

    assert!(
        scores.f1 >= 0.85,
        "PSM modes produce too different results: F1 {:.3} (expected >= 0.85)",
        scores.f1
    );
}
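
// NOTE: Tesseract PSM (page segmentation mode) 3 is fully automatic layout
// analysis (the engine default); PSM 6 assumes one uniform block of text. On
// a simple memo both should agree closely, hence the 0.85 floor above. An
// illustrative builder (the `psm` field type is assumed here):
#[allow(dead_code)]
fn psm_config_sketch(psm: u8) -> ExtractionConfig {
    ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: Some(kreuzberg::types::TesseractConfig {
                psm,
                ..Default::default()
            }),
        }),
        force_ocr: true,
        ..Default::default()
    }
}
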
#[test]
fn test_ocr_quality_multi_page_consistency() {
    if skip_if_missing("pdfs/a_course_in_machine_learning_ciml_v0_9_all.pdf") {
        return;
    }

    if std::env::var_os("KREUZBERG_RUN_FULL_OCR").is_none() {
        println!("Skipping test_ocr_quality_multi_page_consistency: set KREUZBERG_RUN_FULL_OCR=1 to enable");
        return;
    }

    let file_path = get_test_file_path("pdfs/a_course_in_machine_learning_ciml_v0_9_all.pdf");

    let truth_result =
        extract_file_sync(&file_path, None, &ExtractionConfig::default()).expect("Should extract ground truth text");

    assert!(
        truth_result.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(
        truth_result.detected_languages.is_none(),
        "Language detection not enabled"
    );

    let ocr_config = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: None,
        }),
        force_ocr: true,
        ..Default::default()
    };

    let ocr_result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with OCR");

    assert!(
        ocr_result.chunks.is_none(),
        "Chunks should be None without chunking config"
    );
    assert!(
        ocr_result.detected_languages.is_none(),
        "Language detection not enabled"
    );

    let truth_tokens = tokenize_text(&truth_result.content);
    let ocr_tokens = tokenize_text(&ocr_result.content);

    let truth_count: usize = truth_tokens.values().sum();
    let ocr_count: usize = ocr_tokens.values().sum();

    println!("Multi-page document quality:");
    println!(" Truth token count: {}", truth_count);
    println!(" OCR token count: {}", ocr_count);

    assert!(
        ocr_count >= (truth_count * 50 / 100),
        "OCR extracted too few tokens: {} (expected >= 50% of {})",
        ocr_count,
        truth_count
    );
}
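
// NOTE: the multi-page test is opt-in because OCR over an entire textbook is
// slow; the comparison `truth_count * 50 / 100` keeps the 50% floor in pure
// integer math. A reusable guard in the same spirit (illustrative):
#[allow(dead_code)]
fn skip_unless_full_ocr_sketch(test_name: &str) -> bool {
    if std::env::var_os("KREUZBERG_RUN_FULL_OCR").is_none() {
        println!("Skipping {}: set KREUZBERG_RUN_FULL_OCR=1 to enable", test_name);
        return true;
    }
    false
}
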
#[test]
fn test_ocr_quality_with_tables() {
    if skip_if_missing("pdfs/embedded_images_tables.pdf") {
        return;
    }

    let file_path = get_test_file_path("pdfs/embedded_images_tables.pdf");

    let ocr_config = ExtractionConfig {
        ocr: Some(OcrConfig {
            backend: "tesseract".to_string(),
            language: "eng".to_string(),
            tesseract_config: Some(kreuzberg::types::TesseractConfig {
                enable_table_detection: true,
                table_min_confidence: 0.5,
                ..Default::default()
            }),
        }),
        force_ocr: true,
        ..Default::default()
    };

    let result = extract_file_sync(&file_path, None, &ocr_config).expect("Should extract with table detection");

    assert!(result.chunks.is_none(), "Chunks should be None without chunking config");
    assert!(result.detected_languages.is_none(), "Language detection not enabled");

    println!("Table extraction quality:");
    println!(" Tables found: {}", result.tables.len());
    println!(" Content length: {}", result.content.len());

    assert!(
        !result.content.trim().is_empty(),
        "OCR with tables should produce content"
    );
}
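
// NOTE: the table test deliberately asserts only that non-empty content came
// back. `result.tables` may legitimately be empty when nothing clears the
// 0.5 confidence floor, so a hard `!result.tables.is_empty()` would make the
// fixture flaky; the table count is printed for inspection instead.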