kreuzberg 4.0.0.pre.rc.13 → 4.0.0.pre.rc.14
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -14
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -1
- data/.rubocop.yml +538 -538
- data/Gemfile +8 -8
- data/Gemfile.lock +105 -2
- data/README.md +454 -454
- data/Rakefile +33 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -341
- data/ext/kreuzberg_rb/extconf.rb +45 -45
- data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
- data/ext/kreuzberg_rb/native/Cargo.lock +6940 -6941
- data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -15
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +214 -214
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +81 -81
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +724 -724
- data/lib/kreuzberg/error_context.rb +80 -80
- data/lib/kreuzberg/errors.rb +118 -118
- data/lib/kreuzberg/extraction_api.rb +340 -340
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +279 -279
- data/lib/kreuzberg/setup_lib_path.rb +80 -80
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +109 -109
- data/lib/{pdfium.dll → libpdfium.dylib} +0 -0
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +546 -546
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -85
- data/spec/binding/cli_spec.rb +55 -55
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -41
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/Cargo.toml +1 -1
- data/vendor/kreuzberg/Cargo.toml +5 -5
- data/vendor/kreuzberg/README.md +230 -230
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
- data/vendor/kreuzberg/build.rs +843 -843
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
- data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
- data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
- data/vendor/kreuzberg/src/core/config.rs +1080 -1080
- data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
- data/vendor/kreuzberg/src/core/io.rs +329 -329
- data/vendor/kreuzberg/src/core/mime.rs +605 -605
- data/vendor/kreuzberg/src/core/mod.rs +47 -47
- data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
- data/vendor/kreuzberg/src/embeddings.rs +500 -500
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +601 -601
- data/vendor/kreuzberg/src/extraction/image.rs +491 -491
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -574
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
- data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
- data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
- data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
- data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
- data/vendor/kreuzberg/src/extractors/email.rs +157 -157
- data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
- data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
- data/vendor/kreuzberg/src/extractors/html.rs +407 -407
- data/vendor/kreuzberg/src/extractors/image.rs +219 -219
- data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
- data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
- data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
- data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
- data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
- data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
- data/vendor/kreuzberg/src/extractors/pdf.rs +749 -749
- data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
- data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
- data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
- data/vendor/kreuzberg/src/extractors/security.rs +484 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
- data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
- data/vendor/kreuzberg/src/extractors/text.rs +265 -265
- data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
- data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
- data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
- data/vendor/kreuzberg/src/lib.rs +113 -113
- data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
- data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -154
- data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
- data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
- data/vendor/kreuzberg/src/pdf/error.rs +130 -130
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
- data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
- data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
- data/vendor/kreuzberg/src/pdf/table.rs +420 -420
- data/vendor/kreuzberg/src/pdf/text.rs +240 -240
- data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
- data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
- data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
- data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
- data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +25 -25
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +1055 -1055
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
- data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
- data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
- data/vendor/kreuzberg/tests/config_features.rs +612 -612
- data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
- data/vendor/kreuzberg/tests/core_integration.rs +510 -510
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
- data/vendor/kreuzberg/tests/email_integration.rs +327 -327
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
- data/vendor/kreuzberg/tests/error_handling.rs +402 -402
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
- data/vendor/kreuzberg/tests/format_integration.rs +164 -164
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
- data/vendor/kreuzberg/tests/image_integration.rs +255 -255
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
- data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
- data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
- data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
- data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
- data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
- data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
- data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
- data/vendor/kreuzberg/tests/security_validation.rs +416 -416
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/kreuzberg-ffi/Cargo.toml +1 -1
- data/vendor/kreuzberg-ffi/README.md +851 -851
- data/vendor/kreuzberg-ffi/build.rs +176 -176
- data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
- data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
- data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
- data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
- data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
- data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
- data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
- data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
- data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
- data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
- data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
- data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
- data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
- data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
- data/vendor/kreuzberg-tesseract/LICENSE +22 -22
- data/vendor/kreuzberg-tesseract/README.md +399 -399
- data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
- data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
- data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
- data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
- data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
- data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
- data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
- data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
- data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
- data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
- data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
- data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
- data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
- data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
- data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
- data/vendor/rb-sys/Cargo.lock +393 -393
- data/vendor/rb-sys/Cargo.toml +70 -70
- data/vendor/rb-sys/Cargo.toml.orig +57 -57
- data/vendor/rb-sys/LICENSE-APACHE +190 -190
- data/vendor/rb-sys/LICENSE-MIT +21 -21
- data/vendor/rb-sys/build/features.rs +111 -111
- data/vendor/rb-sys/build/main.rs +286 -286
- data/vendor/rb-sys/build/stable_api_config.rs +155 -155
- data/vendor/rb-sys/build/version.rs +50 -50
- data/vendor/rb-sys/readme.md +36 -36
- data/vendor/rb-sys/src/bindings.rs +21 -21
- data/vendor/rb-sys/src/hidden.rs +11 -11
- data/vendor/rb-sys/src/lib.rs +35 -35
- data/vendor/rb-sys/src/macros.rs +371 -371
- data/vendor/rb-sys/src/memory.rs +53 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
- data/vendor/rb-sys/src/special_consts.rs +31 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
- data/vendor/rb-sys/src/stable_api.rs +260 -260
- data/vendor/rb-sys/src/symbol.rs +31 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
- data/vendor/rb-sys/src/utils.rs +89 -89
- data/vendor/rb-sys/src/value_type.rs +7 -7
- metadata +73 -4
- data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +0 -12

@@ -1,489 +1,489 @@

The removed and re-added sides of this 489-line hunk carry identical content; the file reads as follows:

```rust
use super::bindings::bind_pdfium;
use super::error::{PdfError, Result};
use crate::types::{PageBoundary, PageInfo, PageStructure, PageUnitType};
use pdfium_render::prelude::*;
use serde::{Deserialize, Serialize};

/// PDF-specific metadata.
///
/// Contains metadata fields specific to PDF documents that are not in the common
/// `Metadata` structure. Common fields like title, authors, keywords, and dates
/// are now at the `Metadata` level.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PdfMetadata {
    /// PDF version (e.g., "1.7", "2.0")
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pdf_version: Option<String>,

    /// PDF producer (application that created the PDF)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub producer: Option<String>,

    /// Whether the PDF is encrypted/password-protected
    #[serde(skip_serializing_if = "Option::is_none")]
    pub is_encrypted: Option<bool>,

    /// First page width in points (1/72 inch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub width: Option<i64>,

    /// First page height in points (1/72 inch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub height: Option<i64>,
}

/// Complete PDF extraction metadata including common and PDF-specific fields.
///
/// This struct combines common document fields (title, authors, dates) with
/// PDF-specific metadata and optional page structure information. It is returned
/// by `extract_metadata_from_document()` when page boundaries are provided.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PdfExtractionMetadata {
    /// Document title
    #[serde(skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,

    /// Document subject or description
    #[serde(skip_serializing_if = "Option::is_none")]
    pub subject: Option<String>,

    /// Document authors (parsed from PDF Author field)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub authors: Option<Vec<String>>,

    /// Document keywords (parsed from PDF Keywords field)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keywords: Option<Vec<String>>,

    /// Creation timestamp (ISO 8601 format)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,

    /// Last modification timestamp (ISO 8601 format)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub modified_at: Option<String>,

    /// Application or user that created the document
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,

    /// PDF-specific metadata
    pub pdf_specific: PdfMetadata,

    /// Page structure with boundaries and optional per-page metadata
    #[serde(skip_serializing_if = "Option::is_none")]
    pub page_structure: Option<PageStructure>,
}

/// Extract PDF-specific metadata from raw bytes.
///
/// Returns only PDF-specific metadata (version, producer, encryption status, dimensions).
pub fn extract_metadata(pdf_bytes: &[u8]) -> Result<PdfMetadata> {
    extract_metadata_with_password(pdf_bytes, None)
}

/// Extract PDF-specific metadata from raw bytes with optional password.
///
/// Returns only PDF-specific metadata (version, producer, encryption status, dimensions).
pub fn extract_metadata_with_password(pdf_bytes: &[u8], password: Option<&str>) -> Result<PdfMetadata> {
    let bindings = bind_pdfium(PdfError::MetadataExtractionFailed, "metadata extraction")?;

    let pdfium = Pdfium::new(bindings);

    let document = pdfium.load_pdf_from_byte_slice(pdf_bytes, password).map_err(|e| {
        let err_msg = e.to_string();
        if (err_msg.contains("password") || err_msg.contains("Password")) && password.is_some() {
            PdfError::InvalidPassword
        } else if err_msg.contains("password") || err_msg.contains("Password") {
            PdfError::PasswordRequired
        } else {
            PdfError::MetadataExtractionFailed(err_msg)
        }
    })?;

    extract_pdf_specific_metadata(&document)
}

pub fn extract_metadata_with_passwords(pdf_bytes: &[u8], passwords: &[&str]) -> Result<PdfMetadata> {
    let mut last_error = None;

    for password in passwords {
        match extract_metadata_with_password(pdf_bytes, Some(password)) {
            Ok(metadata) => return Ok(metadata),
            Err(err) => {
                last_error = Some(err);
                continue;
            }
        }
    }

    if let Some(err) = last_error {
        return Err(err);
    }

    extract_metadata(pdf_bytes)
}

/// Extract complete PDF metadata from a document.
///
/// Extracts common fields (title, subject, authors, keywords, dates, creator),
/// PDF-specific metadata, and optionally builds a PageStructure with boundaries.
///
/// # Arguments
///
/// * `document` - The PDF document to extract metadata from
/// * `page_boundaries` - Optional vector of PageBoundary entries for building PageStructure.
///   If provided, a PageStructure will be built with these boundaries.
///
/// # Returns
///
/// Returns a `PdfExtractionMetadata` struct containing all extracted metadata,
/// including page structure if boundaries were provided.
pub fn extract_metadata_from_document(
    document: &PdfDocument<'_>,
    page_boundaries: Option<&[PageBoundary]>,
) -> Result<PdfExtractionMetadata> {
    let pdf_specific = extract_pdf_specific_metadata(document)?;

    let common = extract_common_metadata_from_document(document)?;

    let page_structure = if let Some(boundaries) = page_boundaries {
        Some(build_page_structure(document, boundaries)?)
    } else {
        None
    };

    Ok(PdfExtractionMetadata {
        title: common.title,
        subject: common.subject,
        authors: common.authors,
        keywords: common.keywords,
        created_at: common.created_at,
        modified_at: common.modified_at,
        created_by: common.created_by,
        pdf_specific,
        page_structure,
    })
}

/// Extract PDF-specific metadata from a document.
///
/// Returns only PDF-specific metadata (version, producer, encryption status, dimensions).
fn extract_pdf_specific_metadata(document: &PdfDocument<'_>) -> Result<PdfMetadata> {
    let pdf_metadata = document.metadata();

    let mut metadata = PdfMetadata {
        pdf_version: format_pdf_version(document.version()),
        ..Default::default()
    };

    metadata.is_encrypted = document
        .permissions()
        .security_handler_revision()
        .ok()
        .map(|revision| revision != PdfSecurityHandlerRevision::Unprotected);

    metadata.producer = pdf_metadata
        .get(PdfDocumentMetadataTagType::Producer)
        .map(|tag| tag.value().to_string());

    if !document.pages().is_empty()
        && let Ok(page_rect) = document.pages().page_size(0)
    {
        metadata.width = Some(page_rect.width().value.round() as i64);
        metadata.height = Some(page_rect.height().value.round() as i64);
    }

    Ok(metadata)
}

/// Build a PageStructure from a document and page boundaries.
///
/// Constructs a complete PageStructure including:
/// - Total page count
/// - Unit type (Page)
/// - Character offset boundaries for each page
/// - Optional per-page metadata with dimensions
///
/// # Validation
///
/// - Boundaries must not be empty
/// - Boundary count must match the document's page count
fn build_page_structure(document: &PdfDocument<'_>, boundaries: &[PageBoundary]) -> Result<PageStructure> {
    let total_count = document.pages().len() as usize;

    if boundaries.is_empty() {
        return Err(PdfError::MetadataExtractionFailed(
            "No page boundaries provided for PageStructure".to_string(),
        ));
    }

    if boundaries.len() != total_count {
        return Err(PdfError::MetadataExtractionFailed(format!(
            "Boundary count {} doesn't match page count {}",
            boundaries.len(),
            total_count
        )));
    }

    let mut pages = Vec::new();
    for (index, boundary) in boundaries.iter().enumerate() {
        let page_number = boundary.page_number;

        let dimensions = if let Ok(page_rect) = document.pages().page_size(index as u16) {
            Some((page_rect.width().value as f64, page_rect.height().value as f64))
        } else {
            None
        };

        pages.push(PageInfo {
            number: page_number,
            title: None,
            dimensions,
            image_count: None,
            table_count: None,
            hidden: None,
        });
    }

    Ok(PageStructure {
        total_count,
        unit_type: PageUnitType::Page,
        boundaries: Some(boundaries.to_vec()),
        pages: if pages.is_empty() { None } else { Some(pages) },
    })
}

/// Extract common metadata from a PDF document.
///
/// Returns common fields (title, authors, keywords, dates) that are now stored
/// in the base `Metadata` struct instead of format-specific metadata.
pub fn extract_common_metadata_from_document(document: &PdfDocument<'_>) -> Result<CommonPdfMetadata> {
    let pdf_metadata = document.metadata();

    let title = pdf_metadata
        .get(PdfDocumentMetadataTagType::Title)
        .map(|tag| tag.value().to_string());

    let subject = pdf_metadata
        .get(PdfDocumentMetadataTagType::Subject)
        .map(|tag| tag.value().to_string());

    let authors = if let Some(author_tag) = pdf_metadata.get(PdfDocumentMetadataTagType::Author) {
        let parsed = parse_authors(author_tag.value());
        if !parsed.is_empty() { Some(parsed) } else { None }
    } else {
        None
    };

    let keywords = if let Some(keywords_tag) = pdf_metadata.get(PdfDocumentMetadataTagType::Keywords) {
        let parsed = parse_keywords(keywords_tag.value());
        if !parsed.is_empty() { Some(parsed) } else { None }
    } else {
        None
    };

    let created_at = pdf_metadata
        .get(PdfDocumentMetadataTagType::CreationDate)
        .map(|tag| parse_pdf_date(tag.value()));

    let modified_at = pdf_metadata
        .get(PdfDocumentMetadataTagType::ModificationDate)
        .map(|tag| parse_pdf_date(tag.value()));

    let created_by = pdf_metadata
        .get(PdfDocumentMetadataTagType::Creator)
        .map(|tag| tag.value().to_string());

    Ok(CommonPdfMetadata {
        title,
        subject,
        authors,
        keywords,
        created_at,
        modified_at,
        created_by,
    })
}

/// Common metadata fields extracted from a PDF.
pub struct CommonPdfMetadata {
    pub title: Option<String>,
    pub subject: Option<String>,
    pub authors: Option<Vec<String>>,
    pub keywords: Option<Vec<String>>,
    pub created_at: Option<String>,
    pub modified_at: Option<String>,
    pub created_by: Option<String>,
}

fn parse_authors(author_str: &str) -> Vec<String> {
    let author_str = author_str.replace(" and ", ", ");
    let mut authors = Vec::new();

    for segment in author_str.split(';') {
        for author in segment.split(',') {
            let trimmed = author.trim();
            if !trimmed.is_empty() {
                authors.push(trimmed.to_string());
            }
        }
    }

    authors
}

fn parse_keywords(keywords_str: &str) -> Vec<String> {
    keywords_str
        .replace(';', ",")
        .split(',')
        .filter_map(|k| {
            let trimmed = k.trim();
            if trimmed.is_empty() {
                None
            } else {
                Some(trimmed.to_string())
            }
        })
        .collect()
}

fn parse_pdf_date(date_str: &str) -> String {
    let cleaned = date_str.trim();

    if cleaned.starts_with("D:") && cleaned.len() >= 10 {
        let year = &cleaned[2..6];
        let month = &cleaned[6..8];
        let day = &cleaned[8..10];

        if cleaned.len() >= 16 {
            let hour = &cleaned[10..12];
            let minute = &cleaned[12..14];
            let second = &cleaned[14..16];
            format!("{}-{}-{}T{}:{}:{}Z", year, month, day, hour, minute, second)
        } else if cleaned.len() >= 14 {
            let hour = &cleaned[10..12];
            let minute = &cleaned[12..14];
            format!("{}-{}-{}T{}:{}:00Z", year, month, day, hour, minute)
        } else {
            format!("{}-{}-{}T00:00:00Z", year, month, day)
        }
    } else if cleaned.len() >= 8 {
        let year = &cleaned[0..4];
        let month = &cleaned[4..6];
        let day = &cleaned[6..8];
        format!("{}-{}-{}T00:00:00Z", year, month, day)
    } else {
        date_str.to_string()
    }
}

fn format_pdf_version(version: PdfDocumentVersion) -> Option<String> {
    match version {
        PdfDocumentVersion::Unset => None,
        PdfDocumentVersion::Pdf1_0 => Some("1.0".to_string()),
        PdfDocumentVersion::Pdf1_1 => Some("1.1".to_string()),
        PdfDocumentVersion::Pdf1_2 => Some("1.2".to_string()),
        PdfDocumentVersion::Pdf1_3 => Some("1.3".to_string()),
        PdfDocumentVersion::Pdf1_4 => Some("1.4".to_string()),
        PdfDocumentVersion::Pdf1_5 => Some("1.5".to_string()),
        PdfDocumentVersion::Pdf1_6 => Some("1.6".to_string()),
        PdfDocumentVersion::Pdf1_7 => Some("1.7".to_string()),
        PdfDocumentVersion::Pdf2_0 => Some("2.0".to_string()),
        PdfDocumentVersion::Other(value) => {
            if value >= 10 {
                Some(format!("{}.{}", value / 10, value % 10))
            } else {
                Some(value.to_string())
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_authors_single() {
        let authors = parse_authors("John Doe");
        assert_eq!(authors, vec!["John Doe"]);
    }

    #[test]
    fn test_parse_authors_multiple_comma() {
        let authors = parse_authors("John Doe, Jane Smith");
        assert_eq!(authors, vec!["John Doe", "Jane Smith"]);
    }

    #[test]
    fn test_parse_authors_multiple_and() {
        let authors = parse_authors("John Doe and Jane Smith");
        assert_eq!(authors, vec!["John Doe", "Jane Smith"]);
    }

    #[test]
    fn test_parse_authors_semicolon() {
        let authors = parse_authors("John Doe;Jane Smith");
        assert_eq!(authors, vec!["John Doe", "Jane Smith"]);
    }

    #[test]
    fn test_parse_keywords() {
        let keywords = parse_keywords("pdf, document, test");
        assert_eq!(keywords, vec!["pdf", "document", "test"]);
    }

    #[test]
    fn test_parse_keywords_semicolon() {
        let keywords = parse_keywords("pdf;document;test");
        assert_eq!(keywords, vec!["pdf", "document", "test"]);
    }

    #[test]
    fn test_parse_keywords_empty() {
        let keywords = parse_keywords("");
        assert!(keywords.is_empty());
    }

    #[test]
    fn test_parse_pdf_date_full() {
        let date = parse_pdf_date("D:20230115123045");
        assert_eq!(date, "2023-01-15T12:30:45Z");
    }

    #[test]
    fn test_parse_pdf_date_no_time() {
        let date = parse_pdf_date("D:20230115");
        assert_eq!(date, "2023-01-15T00:00:00Z");
    }

    #[test]
    fn test_parse_pdf_date_no_prefix() {
        let date = parse_pdf_date("20230115");
        assert_eq!(date, "2023-01-15T00:00:00Z");
    }

    #[test]
    fn test_extract_metadata_invalid_pdf() {
        let result = extract_metadata(b"not a pdf");
        assert!(result.is_err());
    }

    #[test]
    fn test_build_page_structure_empty_boundaries() {
        let result_msg = "No page boundaries provided for PageStructure".to_string();
        assert!(!result_msg.is_empty());
    }

    #[test]
    fn test_build_page_structure_boundary_mismatch_message() {
        let boundaries_count = 3;
        let page_count = 5;
        let error_msg = format!(
            "Boundary count {} doesn't match page count {}",
            boundaries_count, page_count
        );
        assert_eq!(error_msg, "Boundary count 3 doesn't match page count 5");
    }
}
```
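
As a rough orientation for readers of this hunk, the sketch below shows how the byte-level entry point above might be called. Only `extract_metadata` and the `PdfMetadata` fields are taken from the code in the diff; the `kreuzberg::pdf::metadata` import path is an assumption about how the vendored crate re-exports them and may not match the actual module layout.

```rust
// Hypothetical usage sketch: the function and struct fields come from the hunk
// above, but the import path is assumed and may differ in the released crate.
use kreuzberg::pdf::metadata::extract_metadata;

fn describe_pdf(bytes: &[u8]) -> String {
    match extract_metadata(bytes) {
        // On success, report the PDF-specific fields extracted via pdfium.
        Ok(meta) => format!(
            "version={:?} producer={:?} encrypted={:?} first_page={:?}x{:?} pt",
            meta.pdf_version, meta.producer, meta.is_encrypted, meta.width, meta.height
        ),
        // Password-protected or malformed input surfaces as a `PdfError`.
        Err(_) => "metadata extraction failed".to_string(),
    }
}
```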