kreuzberg 4.0.0.rc1 → 4.0.0.rc2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -8
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -534
- data/.rubocop.yml +538 -0
- data/Gemfile +8 -9
- data/Gemfile.lock +9 -109
- data/README.md +426 -421
- data/Rakefile +25 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -340
- data/ext/kreuzberg_rb/extconf.rb +45 -35
- data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
- data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -17
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +148 -105
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +46 -45
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +691 -684
- data/lib/kreuzberg/error_context.rb +32 -0
- data/lib/kreuzberg/errors.rb +118 -50
- data/lib/kreuzberg/extraction_api.rb +85 -84
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +216 -216
- data/lib/kreuzberg/setup_lib_path.rb +80 -79
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +103 -82
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +520 -468
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -87
- data/spec/binding/cli_spec.rb +55 -54
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -42
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/kreuzberg/Cargo.toml +204 -134
- data/vendor/kreuzberg/README.md +175 -175
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
- data/vendor/kreuzberg/build.rs +474 -460
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
- data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
- data/vendor/kreuzberg/src/core/config.rs +1032 -1032
- data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
- data/vendor/kreuzberg/src/core/io.rs +329 -327
- data/vendor/kreuzberg/src/core/mime.rs +605 -615
- data/vendor/kreuzberg/src/core/mod.rs +45 -42
- data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
- data/vendor/kreuzberg/src/embeddings.rs +432 -323
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +553 -553
- data/vendor/kreuzberg/src/extraction/image.rs +368 -368
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
- data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
- data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
- data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
- data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
- data/vendor/kreuzberg/src/extractors/email.rs +143 -129
- data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
- data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
- data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
- data/vendor/kreuzberg/src/extractors/html.rs +393 -410
- data/vendor/kreuzberg/src/extractors/image.rs +198 -195
- data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
- data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
- data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
- data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
- data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
- data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
- data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
- data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
- data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
- data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
- data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
- data/vendor/kreuzberg/src/extractors/security.rs +484 -0
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
- data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
- data/vendor/kreuzberg/src/extractors/text.rs +260 -242
- data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
- data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
- data/vendor/kreuzberg/src/lib.rs +105 -102
- data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
- data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -0
- data/vendor/kreuzberg/src/pdf/error.rs +122 -122
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
- data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
- data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
- data/vendor/kreuzberg/src/pdf/table.rs +393 -420
- data/vendor/kreuzberg/src/pdf/text.rs +158 -161
- data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
- data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
- data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
- data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
- data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +19 -19
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +903 -873
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
- data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
- data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
- data/vendor/kreuzberg/tests/config_features.rs +598 -580
- data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
- data/vendor/kreuzberg/tests/core_integration.rs +510 -493
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
- data/vendor/kreuzberg/tests/email_integration.rs +325 -325
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
- data/vendor/kreuzberg/tests/error_handling.rs +393 -393
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
- data/vendor/kreuzberg/tests/format_integration.rs +159 -159
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
- data/vendor/kreuzberg/tests/image_integration.rs +253 -253
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
- data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
- data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
- data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
- data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
- data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
- data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
- data/vendor/kreuzberg/tests/security_validation.rs +415 -404
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/rb-sys/.cargo-ok +1 -0
- data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
- data/vendor/rb-sys/Cargo.lock +393 -0
- data/vendor/rb-sys/Cargo.toml +70 -0
- data/vendor/rb-sys/Cargo.toml.orig +57 -0
- data/vendor/rb-sys/LICENSE-APACHE +190 -0
- data/vendor/rb-sys/LICENSE-MIT +21 -0
- data/vendor/rb-sys/bin/release.sh +21 -0
- data/vendor/rb-sys/build/features.rs +108 -0
- data/vendor/rb-sys/build/main.rs +246 -0
- data/vendor/rb-sys/build/stable_api_config.rs +153 -0
- data/vendor/rb-sys/build/version.rs +48 -0
- data/vendor/rb-sys/readme.md +36 -0
- data/vendor/rb-sys/src/bindings.rs +21 -0
- data/vendor/rb-sys/src/hidden.rs +11 -0
- data/vendor/rb-sys/src/lib.rs +34 -0
- data/vendor/rb-sys/src/macros.rs +371 -0
- data/vendor/rb-sys/src/memory.rs +53 -0
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
- data/vendor/rb-sys/src/special_consts.rs +31 -0
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
- data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
- data/vendor/rb-sys/src/stable_api.rs +261 -0
- data/vendor/rb-sys/src/symbol.rs +31 -0
- data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
- data/vendor/rb-sys/src/utils.rs +89 -0
- data/vendor/rb-sys/src/value_type.rs +7 -0
- metadata +90 -95
- data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
- data/spec/examples.txt +0 -104
- data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
- data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
- data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
- data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
- data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
- data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
- data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
- data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
- data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
- data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
|
@@ -1,1188 +0,0 @@
|
|
|
1
|
-
use crate::error::{KreuzbergError, Result};
|
|
2
|
-
#[cfg(feature = "quality")]
|
|
3
|
-
use crate::text::normalize_spaces;
|
|
4
|
-
use serde_json::Value;
|
|
5
|
-
use std::collections::HashMap;
|
|
6
|
-
use std::path::{Path, PathBuf};
|
|
7
|
-
use tokio::fs;
|
|
8
|
-
use tokio::process::Command;
|
|
9
|
-
use tokio::time::{Duration, timeout};
|
|
10
|
-
|
|
11
|
-
/// Default timeout for Pandoc operations (120 seconds)
|
|
12
|
-
const PANDOC_TIMEOUT_SECONDS: u64 = 120;
|
|
13
|
-
|
|
14
|
-
/// RAII guard for automatic temporary file cleanup
|
|
15
|
-
struct TempFile {
|
|
16
|
-
path: PathBuf,
|
|
17
|
-
}
|
|
18
|
-
|
|
19
|
-
impl TempFile {
|
|
20
|
-
fn new(path: PathBuf) -> Self {
|
|
21
|
-
Self { path }
|
|
22
|
-
}
|
|
23
|
-
}
|
|
24
|
-
|
|
25
|
-
impl Drop for TempFile {
|
|
26
|
-
fn drop(&mut self) {
|
|
27
|
-
let path = self.path.clone();
|
|
28
|
-
tokio::spawn(async move {
|
|
29
|
-
let _ = fs::remove_file(&path).await;
|
|
30
|
-
});
|
|
31
|
-
}
|
|
32
|
-
}
|
|
33
|
-
|
|
34
|
-
/// Extract content from file using Pandoc (convert to markdown)
|
|
35
|
-
#[allow(dead_code)]
|
|
36
|
-
pub async fn extract_content(path: &Path, from_format: &str) -> Result<String> {
|
|
37
|
-
let child = Command::new("pandoc")
|
|
38
|
-
.arg(path)
|
|
39
|
-
.arg(format!("--from={}", from_format))
|
|
40
|
-
.arg("--to=markdown")
|
|
41
|
-
.arg("--standalone")
|
|
42
|
-
.arg("--wrap=preserve")
|
|
43
|
-
.arg("--quiet")
|
|
44
|
-
.stdout(std::process::Stdio::piped())
|
|
45
|
-
.stderr(std::process::Stdio::piped())
|
|
46
|
-
.spawn()
|
|
47
|
-
.map_err(|e| {
|
|
48
|
-
// Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
|
|
49
|
-
std::io::Error::other(format!("Failed to execute pandoc: {}", e))
|
|
50
|
-
})?;
|
|
51
|
-
|
|
52
|
-
let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
|
|
53
|
-
Ok(Ok(output)) => output,
|
|
54
|
-
Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
|
|
55
|
-
Err(_) => {
|
|
56
|
-
// Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
|
|
57
|
-
return Err(KreuzbergError::parsing(format!(
|
|
58
|
-
"Pandoc content extraction timed out after {} seconds",
|
|
59
|
-
PANDOC_TIMEOUT_SECONDS
|
|
60
|
-
)));
|
|
61
|
-
}
|
|
62
|
-
};
|
|
63
|
-
|
|
64
|
-
if !output.status.success() {
|
|
65
|
-
let stderr = String::from_utf8_lossy(&output.stderr);
|
|
66
|
-
|
|
67
|
-
// Subprocess error analysis - wrap only if format/parsing error detected ~keep
|
|
68
|
-
let stderr_lower = stderr.to_lowercase();
|
|
69
|
-
if stderr_lower.contains("format")
|
|
70
|
-
|| stderr_lower.contains("unsupported")
|
|
71
|
-
|| stderr_lower.contains("error:")
|
|
72
|
-
|| stderr_lower.contains("failed")
|
|
73
|
-
{
|
|
74
|
-
return Err(KreuzbergError::parsing(format!(
|
|
75
|
-
"Pandoc format/parsing error: {}",
|
|
76
|
-
stderr
|
|
77
|
-
)));
|
|
78
|
-
}
|
|
79
|
-
|
|
80
|
-
// True system error - bubble up as IO error ~keep
|
|
81
|
-
return Err(std::io::Error::other(format!("Pandoc system error: {}", stderr)).into());
|
|
82
|
-
}
|
|
83
|
-
|
|
84
|
-
let content = String::from_utf8(output.stdout)
|
|
85
|
-
.map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc output: {}", e)))?;
|
|
86
|
-
|
|
87
|
-
#[cfg(feature = "quality")]
|
|
88
|
-
{
|
|
89
|
-
Ok(normalize_spaces(&content))
|
|
90
|
-
}
|
|
91
|
-
#[cfg(not(feature = "quality"))]
|
|
92
|
-
{
|
|
93
|
-
Ok(content)
|
|
94
|
-
}
|
|
95
|
-
}
|
|
96
|
-
|
|
97
|
-
/// Extract metadata from file using Pandoc JSON output
|
|
98
|
-
#[allow(dead_code)]
|
|
99
|
-
pub async fn extract_metadata(path: &Path, from_format: &str) -> Result<HashMap<String, Value>> {
|
|
100
|
-
let child = Command::new("pandoc")
|
|
101
|
-
.arg(path)
|
|
102
|
-
.arg(format!("--from={}", from_format))
|
|
103
|
-
.arg("--to=json")
|
|
104
|
-
.arg("--standalone")
|
|
105
|
-
.arg("--quiet")
|
|
106
|
-
.stdout(std::process::Stdio::piped())
|
|
107
|
-
.stderr(std::process::Stdio::piped())
|
|
108
|
-
.spawn()
|
|
109
|
-
.map_err(|e| {
|
|
110
|
-
// Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
|
|
111
|
-
std::io::Error::other(format!("Failed to execute pandoc: {}", e))
|
|
112
|
-
})?;
|
|
113
|
-
|
|
114
|
-
let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
|
|
115
|
-
Ok(Ok(output)) => output,
|
|
116
|
-
Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
|
|
117
|
-
Err(_) => {
|
|
118
|
-
// Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
|
|
119
|
-
return Err(KreuzbergError::parsing(format!(
|
|
120
|
-
"Pandoc metadata extraction timed out after {} seconds",
|
|
121
|
-
PANDOC_TIMEOUT_SECONDS
|
|
122
|
-
)));
|
|
123
|
-
}
|
|
124
|
-
};
|
|
125
|
-
|
|
126
|
-
if !output.status.success() {
|
|
127
|
-
let stderr = String::from_utf8_lossy(&output.stderr);
|
|
128
|
-
|
|
129
|
-
// Subprocess error analysis - wrap only if format/parsing error detected ~keep
|
|
130
|
-
let stderr_lower = stderr.to_lowercase();
|
|
131
|
-
if stderr_lower.contains("format")
|
|
132
|
-
|| stderr_lower.contains("unsupported")
|
|
133
|
-
|| stderr_lower.contains("error:")
|
|
134
|
-
|| stderr_lower.contains("failed")
|
|
135
|
-
{
|
|
136
|
-
return Err(KreuzbergError::parsing(format!(
|
|
137
|
-
"Pandoc metadata extraction format/parsing error: {}",
|
|
138
|
-
stderr
|
|
139
|
-
)));
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
// True system error - bubble up as IO error ~keep
|
|
143
|
-
return Err(std::io::Error::other(format!("Pandoc metadata extraction system error: {}", stderr)).into());
|
|
144
|
-
}
|
|
145
|
-
|
|
146
|
-
let json_content = String::from_utf8(output.stdout)
|
|
147
|
-
.map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc JSON output: {}", e)))?;
|
|
148
|
-
|
|
149
|
-
let json_data: Value = serde_json::from_str(&json_content)
|
|
150
|
-
.map_err(|e| KreuzbergError::parsing(format!("Failed to parse pandoc JSON: {}", e)))?;
|
|
151
|
-
|
|
152
|
-
extract_metadata_from_json(&json_data)
|
|
153
|
-
}
|
|
154
|
-
|
|
155
|
-
/// Valid metadata field names (must match Python's _VALID_METADATA_KEYS)
|
|
156
|
-
const VALID_METADATA_KEYS: &[&str] = &[
|
|
157
|
-
"abstract",
|
|
158
|
-
"authors",
|
|
159
|
-
"categories",
|
|
160
|
-
"character_count",
|
|
161
|
-
"citations",
|
|
162
|
-
"code_blocks",
|
|
163
|
-
"comments",
|
|
164
|
-
"content",
|
|
165
|
-
"copyright",
|
|
166
|
-
"created_at",
|
|
167
|
-
"created_by",
|
|
168
|
-
"description",
|
|
169
|
-
"fonts",
|
|
170
|
-
"headers",
|
|
171
|
-
"height",
|
|
172
|
-
"identifier",
|
|
173
|
-
"keywords",
|
|
174
|
-
"languages",
|
|
175
|
-
"license",
|
|
176
|
-
"line_count",
|
|
177
|
-
"links",
|
|
178
|
-
"modified_at",
|
|
179
|
-
"modified_by",
|
|
180
|
-
"organization",
|
|
181
|
-
"parse_error",
|
|
182
|
-
"publisher",
|
|
183
|
-
"references",
|
|
184
|
-
"sheet_count",
|
|
185
|
-
"sheet_names",
|
|
186
|
-
"status",
|
|
187
|
-
"subject",
|
|
188
|
-
"subtitle",
|
|
189
|
-
"summary",
|
|
190
|
-
"title",
|
|
191
|
-
"total_cells",
|
|
192
|
-
"version",
|
|
193
|
-
"warning",
|
|
194
|
-
"width",
|
|
195
|
-
"word_count",
|
|
196
|
-
"email_from",
|
|
197
|
-
"email_to",
|
|
198
|
-
"email_cc",
|
|
199
|
-
"email_bcc",
|
|
200
|
-
"date",
|
|
201
|
-
"attachments",
|
|
202
|
-
"table_count",
|
|
203
|
-
"tables_summary",
|
|
204
|
-
"quality_score",
|
|
205
|
-
"image_preprocessing",
|
|
206
|
-
"source_format",
|
|
207
|
-
"converted_via",
|
|
208
|
-
"error",
|
|
209
|
-
"error_context",
|
|
210
|
-
"json_schema",
|
|
211
|
-
"notes",
|
|
212
|
-
"note",
|
|
213
|
-
"name",
|
|
214
|
-
"body",
|
|
215
|
-
"text",
|
|
216
|
-
"message",
|
|
217
|
-
"attributes",
|
|
218
|
-
"token_reduction",
|
|
219
|
-
"processing_errors",
|
|
220
|
-
"extraction_error",
|
|
221
|
-
"element_count",
|
|
222
|
-
"unique_elements",
|
|
223
|
-
];
|
|
224
|
-
|
|
225
|
-
/// Extract metadata from Pandoc JSON AST
|
|
226
|
-
pub(crate) fn extract_metadata_from_json(json: &Value) -> Result<HashMap<String, Value>> {
|
|
227
|
-
let mut metadata = HashMap::new();
|
|
228
|
-
|
|
229
|
-
if let Some(meta) = json.get("meta").and_then(|m| m.as_object()) {
|
|
230
|
-
for (key, value) in meta {
|
|
231
|
-
let pandoc_key = get_pandoc_key(key);
|
|
232
|
-
if !VALID_METADATA_KEYS.contains(&pandoc_key.as_str()) {
|
|
233
|
-
continue;
|
|
234
|
-
}
|
|
235
|
-
if let Some(extracted) = extract_meta_value(value) {
|
|
236
|
-
metadata.insert(pandoc_key, extracted);
|
|
237
|
-
}
|
|
238
|
-
}
|
|
239
|
-
}
|
|
240
|
-
|
|
241
|
-
if let Some(blocks) = json.get("blocks").and_then(|b| b.as_array()) {
|
|
242
|
-
let mut citations = Vec::new();
|
|
243
|
-
extract_citations_from_blocks(blocks, &mut citations);
|
|
244
|
-
|
|
245
|
-
if !citations.is_empty() {
|
|
246
|
-
if let Some(existing) = metadata.get_mut("citations") {
|
|
247
|
-
if let Some(arr) = existing.as_array_mut() {
|
|
248
|
-
for cite in citations {
|
|
249
|
-
if !arr.contains(&Value::String(cite.clone())) {
|
|
250
|
-
arr.push(Value::String(cite));
|
|
251
|
-
}
|
|
252
|
-
}
|
|
253
|
-
}
|
|
254
|
-
} else {
|
|
255
|
-
metadata.insert(
|
|
256
|
-
"citations".to_string(),
|
|
257
|
-
Value::Array(citations.into_iter().map(Value::String).collect()),
|
|
258
|
-
);
|
|
259
|
-
}
|
|
260
|
-
}
|
|
261
|
-
}
|
|
262
|
-
|
|
263
|
-
if let Some(citations) = json.get("citations").and_then(|c| c.as_array()) {
|
|
264
|
-
let cite_ids: Vec<String> = citations
|
|
265
|
-
.iter()
|
|
266
|
-
.filter_map(|c| c.get("citationId").and_then(|id| id.as_str()).map(String::from))
|
|
267
|
-
.collect();
|
|
268
|
-
|
|
269
|
-
if !cite_ids.is_empty() {
|
|
270
|
-
metadata.insert(
|
|
271
|
-
"citations".to_string(),
|
|
272
|
-
Value::Array(cite_ids.into_iter().map(Value::String).collect()),
|
|
273
|
-
);
|
|
274
|
-
}
|
|
275
|
-
}
|
|
276
|
-
|
|
277
|
-
Ok(metadata)
|
|
278
|
-
}
|
|
279
|
-
|
|
280
|
-
/// Extract markdown content from Pandoc JSON AST
|
|
281
|
-
///
|
|
282
|
-
/// Converts the JSON AST blocks back to markdown format, similar to what
|
|
283
|
-
/// `pandoc --to=markdown` would produce. This allows us to extract both
|
|
284
|
-
/// content and metadata from a single JSON extraction.
|
|
285
|
-
pub(crate) fn extract_content_from_json(json: &Value) -> Result<String> {
|
|
286
|
-
let mut content = String::new();
|
|
287
|
-
|
|
288
|
-
if let Some(meta) = json.get("meta").and_then(|m| m.as_object())
|
|
289
|
-
&& let Some(title_node) = meta.get("title")
|
|
290
|
-
&& let Some(title_value) = extract_meta_value(title_node)
|
|
291
|
-
&& let Some(title_str) = title_value.as_str()
|
|
292
|
-
{
|
|
293
|
-
content.push_str(&format!("# {}\n\n", title_str));
|
|
294
|
-
}
|
|
295
|
-
|
|
296
|
-
if let Some(blocks) = json.get("blocks").and_then(|b| b.as_array()) {
|
|
297
|
-
for block in blocks {
|
|
298
|
-
if let Some(text) = extract_block_text(block) {
|
|
299
|
-
if !content.is_empty() && !content.ends_with("\n\n") {
|
|
300
|
-
content.push_str("\n\n");
|
|
301
|
-
}
|
|
302
|
-
content.push_str(&text);
|
|
303
|
-
}
|
|
304
|
-
}
|
|
305
|
-
}
|
|
306
|
-
|
|
307
|
-
Ok(content)
|
|
308
|
-
}
|
|
309
|
-
|
|
310
|
-
/// Extract text from a Pandoc JSON AST block
///
/// Renders one block node (`{"t": <type>, "c": <payload>}`) to a
/// Markdown-flavored plain-text string. Returns `None` for block types
/// that are not handled here, or when the payload does not have the
/// expected shape for its type.
fn extract_block_text(block: &Value) -> Option<String> {
    let obj = block.as_object()?;
    let block_type = obj.get("t")?.as_str()?;
    // Payload is optional: some block types (e.g. HorizontalRule) have no "c".
    let content = obj.get("c");

    match block_type {
        // Paragraphs: payload is the list of inline nodes.
        "Para" | "Plain" => {
            if let Some(inlines) = content.and_then(|c| c.as_array()) {
                return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
            }
        }
        // Header payload is [level, attrs, inlines]; render as "#..# text".
        "Header" => {
            if let Some(arr) = content.and_then(|c| c.as_array())
                && arr.len() >= 3
                && let Some(level) = arr[0].as_u64()
                && let Some(inlines) = arr[2].as_array()
            {
                let header_text = extract_inlines(inlines).and_then(|v| v.as_str().map(String::from))?;
                let prefix = "#".repeat(level as usize);
                return Some(format!("{} {}", prefix, header_text));
            }
        }
        // CodeBlock payload is [attrs, code-string]; wrap in a fenced block.
        "CodeBlock" => {
            if let Some(arr) = content.and_then(|c| c.as_array())
                && arr.len() >= 2
                && let Some(code) = arr[1].as_str()
            {
                return Some(format!("```\n{}\n```", code));
            }
        }
        // BlockQuote payload is a list of nested blocks; recurse and prefix
        // each rendered block with "> ".
        "BlockQuote" => {
            if let Some(blocks) = content.and_then(|c| c.as_array()) {
                let mut quote_text = String::new();
                for inner_block in blocks {
                    if let Some(text) = extract_block_text(inner_block) {
                        quote_text.push_str("> ");
                        quote_text.push_str(&text);
                        quote_text.push('\n');
                    }
                }
                return Some(quote_text.trim_end().to_string());
            }
        }
        // BulletList payload is a list of items, each item a list of blocks.
        // Every rendered block gets its own "- " line.
        "BulletList" => {
            if let Some(items) = content.and_then(|c| c.as_array()) {
                let mut list_text = String::new();
                for item in items {
                    if let Some(item_blocks) = item.as_array() {
                        for block in item_blocks {
                            if let Some(text) = extract_block_text(block) {
                                list_text.push_str("- ");
                                list_text.push_str(&text);
                                list_text.push('\n');
                            }
                        }
                    }
                }
                return Some(list_text.trim_end().to_string());
            }
        }
        // OrderedList payload is [list-attrs, items]; numbering restarts at 1
        // regardless of the start attribute in the AST.
        "OrderedList" => {
            if let Some(arr) = content.and_then(|c| c.as_array())
                && arr.len() >= 2
                && let Some(items) = arr[1].as_array()
            {
                let mut list_text = String::new();
                for (idx, item) in items.iter().enumerate() {
                    if let Some(item_blocks) = item.as_array() {
                        for block in item_blocks {
                            if let Some(text) = extract_block_text(block) {
                                list_text.push_str(&format!("{}. {}\n", idx + 1, text));
                            }
                        }
                    }
                }
                return Some(list_text.trim_end().to_string());
            }
        }
        "HorizontalRule" => {
            return Some("---".to_string());
        }
        // Unhandled block types (tables, raw blocks, ...) yield no text.
        _ => {}
    }

    None
}
|
|
397
|
-
|
|
398
|
-
/// Map Pandoc metadata keys to the canonical keys used by the extraction
/// layer; any key without a known mapping passes through unchanged.
fn get_pandoc_key(key: &str) -> String {
    let mapped = match key {
        "abstract" => "summary",
        "date" => "created_at",
        "contributors" | "author" => "authors",
        "institute" => "organization",
        other => other,
    };
    mapped.to_string()
}
|
|
408
|
-
|
|
409
|
-
/// Extract value from Pandoc metadata node
|
|
410
|
-
fn extract_meta_value(node: &Value) -> Option<Value> {
|
|
411
|
-
if let Some(obj) = node.as_object() {
|
|
412
|
-
let node_type = obj.get("t")?.as_str()?;
|
|
413
|
-
let content = obj.get("c");
|
|
414
|
-
|
|
415
|
-
match node_type {
|
|
416
|
-
"MetaString" => {
|
|
417
|
-
if let Some(s) = content.and_then(|c| c.as_str()) {
|
|
418
|
-
return Some(Value::String(s.to_string()));
|
|
419
|
-
}
|
|
420
|
-
}
|
|
421
|
-
"MetaInlines" => {
|
|
422
|
-
if let Some(inlines) = content.and_then(|c| c.as_array()) {
|
|
423
|
-
return extract_inlines(inlines);
|
|
424
|
-
}
|
|
425
|
-
}
|
|
426
|
-
"MetaList" => {
|
|
427
|
-
if let Some(list) = content.and_then(|c| c.as_array()) {
|
|
428
|
-
let mut values = Vec::new();
|
|
429
|
-
for item in list {
|
|
430
|
-
if let Some(val) = extract_meta_value(item) {
|
|
431
|
-
if let Some(arr) = val.as_array() {
|
|
432
|
-
values.extend_from_slice(arr);
|
|
433
|
-
} else {
|
|
434
|
-
values.push(val);
|
|
435
|
-
}
|
|
436
|
-
}
|
|
437
|
-
}
|
|
438
|
-
if !values.is_empty() {
|
|
439
|
-
return Some(Value::Array(values));
|
|
440
|
-
}
|
|
441
|
-
}
|
|
442
|
-
}
|
|
443
|
-
"MetaBlocks" => {
|
|
444
|
-
if let Some(blocks) = content.and_then(|c| c.as_array()) {
|
|
445
|
-
let mut texts = Vec::new();
|
|
446
|
-
for block in blocks {
|
|
447
|
-
if let Some(block_obj) = block.as_object()
|
|
448
|
-
&& block_obj.get("t")?.as_str()? == "Para"
|
|
449
|
-
&& let Some(para_content) = block_obj.get("c").and_then(|c| c.as_array())
|
|
450
|
-
&& let Some(text) = extract_inlines(para_content)
|
|
451
|
-
&& let Some(s) = text.as_str()
|
|
452
|
-
{
|
|
453
|
-
texts.push(s.to_string());
|
|
454
|
-
}
|
|
455
|
-
}
|
|
456
|
-
if !texts.is_empty() {
|
|
457
|
-
return Some(Value::String(texts.join(" ")));
|
|
458
|
-
}
|
|
459
|
-
}
|
|
460
|
-
}
|
|
461
|
-
"MetaMap" => {
|
|
462
|
-
if let Some(map) = content.and_then(|c| c.as_object()) {
|
|
463
|
-
let mut result = serde_json::Map::new();
|
|
464
|
-
for (k, v) in map {
|
|
465
|
-
if let Some(val) = extract_meta_value(v) {
|
|
466
|
-
result.insert(k.clone(), val);
|
|
467
|
-
}
|
|
468
|
-
}
|
|
469
|
-
if !result.is_empty() {
|
|
470
|
-
return Some(Value::Object(result));
|
|
471
|
-
}
|
|
472
|
-
}
|
|
473
|
-
}
|
|
474
|
-
_ => {}
|
|
475
|
-
}
|
|
476
|
-
}
|
|
477
|
-
|
|
478
|
-
None
|
|
479
|
-
}
|
|
480
|
-
|
|
481
|
-
/// Extract inline text from Pandoc inline nodes
|
|
482
|
-
fn extract_inlines(inlines: &[Value]) -> Option<Value> {
|
|
483
|
-
let mut texts = Vec::new();
|
|
484
|
-
|
|
485
|
-
for inline in inlines {
|
|
486
|
-
if let Some(text) = extract_inline_text(inline) {
|
|
487
|
-
texts.push(text);
|
|
488
|
-
}
|
|
489
|
-
}
|
|
490
|
-
|
|
491
|
-
let result = texts.join("");
|
|
492
|
-
if result.is_empty() {
|
|
493
|
-
None
|
|
494
|
-
} else {
|
|
495
|
-
Some(Value::String(result))
|
|
496
|
-
}
|
|
497
|
-
}
|
|
498
|
-
|
|
499
|
-
/// Extract text from a single inline node
///
/// Returns the plain-text rendering of one Pandoc inline AST node, or
/// `None` for node types that are not handled (raw inlines, notes, ...)
/// or payloads that do not match the expected shape.
fn extract_inline_text(node: &Value) -> Option<String> {
    if let Some(obj) = node.as_object() {
        let node_type = obj.get("t")?.as_str()?;

        match node_type {
            "Str" => {
                return obj.get("c")?.as_str().map(String::from);
            }
            "Space" => {
                return Some(" ".to_string());
            }
            // Formatting wrappers: payload is a list of child inlines; the
            // formatting itself is dropped and only the text is kept.
            "Emph" | "Strong" | "Strikeout" | "Superscript" | "Subscript" | "SmallCaps" => {
                if let Some(content) = obj.get("c").and_then(|c| c.as_array()) {
                    return extract_inlines(content).and_then(|v| v.as_str().map(String::from));
                }
            }
            // Inline code: payload is [attrs, code-string].
            "Code" => {
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                {
                    return arr[1].as_str().map(String::from);
                }
            }
            // Links/images: payload is [attrs, label-inlines, target]; only
            // the label text is kept, the URL/path is dropped.
            "Link" | "Image" => {
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 3
                    && let Some(inlines) = arr[1].as_array()
                {
                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
                }
            }
            // Quoted: payload is [quote-type, inlines]; quote marks are not
            // re-added to the output.
            "Quoted" => {
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                    && let Some(inlines) = arr[1].as_array()
                {
                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
                }
            }
            // Cite: payload is [citations, rendered-inlines]; the rendered
            // text is kept (citation ids are collected elsewhere).
            "Cite" => {
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                    && let Some(inlines) = arr[1].as_array()
                {
                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
                }
            }
            // Math: payload is [math-type, TeX source]; the raw TeX is kept.
            "Math" => {
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                {
                    return arr[1].as_str().map(String::from);
                }
            }
            "LineBreak" | "SoftBreak" => {
                return Some("\n".to_string());
            }
            _ => {}
        }
    }

    None
}
|
|
563
|
-
|
|
564
|
-
/// Extract citations from block nodes
|
|
565
|
-
fn extract_citations_from_blocks(blocks: &[Value], citations: &mut Vec<String>) {
|
|
566
|
-
for block in blocks {
|
|
567
|
-
if let Some(obj) = block.as_object() {
|
|
568
|
-
let block_type = obj.get("t").and_then(|t| t.as_str());
|
|
569
|
-
|
|
570
|
-
if block_type == Some("Cite")
|
|
571
|
-
&& let Some(arr) = obj.get("c").and_then(|c| c.as_array())
|
|
572
|
-
&& let Some(cite_list) = arr.first().and_then(|c| c.as_array())
|
|
573
|
-
{
|
|
574
|
-
for cite in cite_list {
|
|
575
|
-
if let Some(cite_id) = cite.get("citationId").and_then(|id| id.as_str()) {
|
|
576
|
-
citations.push(cite_id.to_string());
|
|
577
|
-
}
|
|
578
|
-
}
|
|
579
|
-
}
|
|
580
|
-
|
|
581
|
-
if let Some(content) = obj.get("c") {
|
|
582
|
-
if let Some(nested_blocks) = content.as_array() {
|
|
583
|
-
extract_citations_from_blocks(nested_blocks, citations);
|
|
584
|
-
} else if let Some(nested_obj) = content.as_object() {
|
|
585
|
-
for value in nested_obj.values() {
|
|
586
|
-
if let Some(arr) = value.as_array() {
|
|
587
|
-
extract_citations_from_blocks(arr, citations);
|
|
588
|
-
}
|
|
589
|
-
}
|
|
590
|
-
}
|
|
591
|
-
}
|
|
592
|
-
}
|
|
593
|
-
}
|
|
594
|
-
}
|
|
595
|
-
|
|
596
|
-
/// Wrapper functions for backwards compatibility
|
|
597
|
-
pub async fn extract_with_pandoc(path: &Path, from_format: &str) -> Result<(String, HashMap<String, Value>)> {
|
|
598
|
-
let child = Command::new("pandoc")
|
|
599
|
-
.arg(path)
|
|
600
|
-
.arg(format!("--from={}", from_format))
|
|
601
|
-
.arg("--to=json")
|
|
602
|
-
.arg("--standalone")
|
|
603
|
-
.arg("--quiet")
|
|
604
|
-
.stdout(std::process::Stdio::piped())
|
|
605
|
-
.stderr(std::process::Stdio::piped())
|
|
606
|
-
.spawn()
|
|
607
|
-
.map_err(|e| {
|
|
608
|
-
// Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
|
|
609
|
-
std::io::Error::other(format!("Failed to execute pandoc: {}", e))
|
|
610
|
-
})?;
|
|
611
|
-
|
|
612
|
-
let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
|
|
613
|
-
Ok(Ok(output)) => output,
|
|
614
|
-
Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
|
|
615
|
-
Err(_) => {
|
|
616
|
-
// Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
|
|
617
|
-
return Err(KreuzbergError::parsing(format!(
|
|
618
|
-
"Pandoc extraction timed out after {} seconds",
|
|
619
|
-
PANDOC_TIMEOUT_SECONDS
|
|
620
|
-
)));
|
|
621
|
-
}
|
|
622
|
-
};
|
|
623
|
-
|
|
624
|
-
if !output.status.success() {
|
|
625
|
-
let stderr = String::from_utf8_lossy(&output.stderr);
|
|
626
|
-
|
|
627
|
-
// Subprocess error analysis - wrap only if format/parsing error detected ~keep
|
|
628
|
-
let stderr_lower = stderr.to_lowercase();
|
|
629
|
-
if stderr_lower.contains("format")
|
|
630
|
-
|| stderr_lower.contains("unsupported")
|
|
631
|
-
|| stderr_lower.contains("error:")
|
|
632
|
-
|| stderr_lower.contains("failed")
|
|
633
|
-
{
|
|
634
|
-
return Err(KreuzbergError::parsing(format!(
|
|
635
|
-
"Pandoc format/parsing error: {}",
|
|
636
|
-
stderr
|
|
637
|
-
)));
|
|
638
|
-
}
|
|
639
|
-
|
|
640
|
-
// True system error - bubble up as IO error ~keep
|
|
641
|
-
return Err(std::io::Error::other(format!("Pandoc system error: {}", stderr)).into());
|
|
642
|
-
}
|
|
643
|
-
|
|
644
|
-
let json_content = String::from_utf8(output.stdout)
|
|
645
|
-
.map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc JSON output: {}", e)))?;
|
|
646
|
-
|
|
647
|
-
let json_data: Value = serde_json::from_str(&json_content)
|
|
648
|
-
.map_err(|e| KreuzbergError::parsing(format!("Failed to parse pandoc JSON: {}", e)))?;
|
|
649
|
-
|
|
650
|
-
let content = extract_content_from_json(&json_data)?;
|
|
651
|
-
let metadata = extract_metadata_from_json(&json_data)?;
|
|
652
|
-
|
|
653
|
-
#[cfg(feature = "quality")]
|
|
654
|
-
{
|
|
655
|
-
Ok((normalize_spaces(&content), metadata))
|
|
656
|
-
}
|
|
657
|
-
#[cfg(not(feature = "quality"))]
|
|
658
|
-
{
|
|
659
|
-
Ok((content, metadata))
|
|
660
|
-
}
|
|
661
|
-
}
|
|
662
|
-
|
|
663
|
-
/// Extract content and metadata from an in-memory document by spooling the
/// bytes to a temporary file and delegating to `extract_with_pandoc`.
///
/// `extension` is appended to the temp file name so pandoc sees the
/// expected suffix for the input format.
pub async fn extract_with_pandoc_from_bytes(
    bytes: &[u8],
    from_format: &str,
    extension: &str,
) -> Result<(String, HashMap<String, Value>)> {
    // Unique name: PID plus a fresh UUID avoids collisions both across
    // concurrent extractions and across processes sharing the temp dir.
    let temp_dir = std::env::temp_dir();
    let temp_file_path = temp_dir.join(format!(
        "pandoc_temp_{}_{}.{}",
        std::process::id(),
        uuid::Uuid::new_v4(),
        extension
    ));

    // RAII guard ensures cleanup on all paths including panic ~keep
    let _temp_guard = TempFile::new(temp_file_path.clone());

    fs::write(&temp_file_path, bytes).await?;

    extract_with_pandoc(&temp_file_path, from_format).await
}
|
|
683
|
-
|
|
684
|
-
// Unit tests for the Pandoc JSON AST extraction helpers. Tests that need a
// real pandoc binary probe for it first and silently skip when absent.
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    // --- Metadata key mapping ---

    #[test]
    fn test_get_pandoc_key() {
        assert_eq!(get_pandoc_key("abstract"), "summary");
        assert_eq!(get_pandoc_key("date"), "created_at");
        assert_eq!(get_pandoc_key("author"), "authors");
        assert_eq!(get_pandoc_key("contributors"), "authors");
        assert_eq!(get_pandoc_key("institute"), "organization");
        assert_eq!(get_pandoc_key("title"), "title");
    }

    // --- extract_meta_value: one test per Meta* node type ---

    #[test]
    fn test_extract_meta_value_string() {
        let node = json!({
            "t": "MetaString",
            "c": "Test Title"
        });

        let result = extract_meta_value(&node).unwrap();
        assert_eq!(result, Value::String("Test Title".to_string()));
    }

    #[test]
    fn test_extract_meta_value_inlines() {
        let node = json!({
            "t": "MetaInlines",
            "c": [
                {"t": "Str", "c": "Hello"},
                {"t": "Space"},
                {"t": "Str", "c": "World"}
            ]
        });

        let result = extract_meta_value(&node).unwrap();
        assert_eq!(result, Value::String("Hello World".to_string()));
    }

    #[test]
    fn test_extract_meta_value_list() {
        let node = json!({
            "t": "MetaList",
            "c": [
                {"t": "MetaString", "c": "Author1"},
                {"t": "MetaString", "c": "Author2"}
            ]
        });

        let result = extract_meta_value(&node).unwrap();
        assert_eq!(
            result,
            Value::Array(vec![
                Value::String("Author1".to_string()),
                Value::String("Author2".to_string())
            ])
        );
    }

    // --- extract_inline_text: one test per inline node type ---

    #[test]
    fn test_extract_inline_text_str() {
        let node = json!({"t": "Str", "c": "Hello"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "Hello");
    }

    #[test]
    fn test_extract_inline_text_space() {
        let node = json!({"t": "Space"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, " ");
    }

    #[test]
    fn test_extract_inline_text_emph() {
        let node = json!({
            "t": "Emph",
            "c": [
                {"t": "Str", "c": "emphasized"}
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "emphasized");
    }

    #[test]
    fn test_extract_inline_text_code() {
        let node = json!({
            "t": "Code",
            "c": [["", [], []], "code_snippet"]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "code_snippet");
    }

    #[test]
    fn test_extract_inlines() {
        let inlines = vec![
            json!({"t": "Str", "c": "Hello"}),
            json!({"t": "Space"}),
            json!({"t": "Emph", "c": [{"t": "Str", "c": "World"}]}),
        ];

        let result = extract_inlines(&inlines).unwrap();
        assert_eq!(result, Value::String("Hello World".to_string()));
    }

    // --- Citation harvesting ---

    #[test]
    fn test_extract_citations_from_blocks() {
        let blocks = vec![json!({
            "t": "Cite",
            "c": [
                [
                    {"citationId": "cite1"},
                    {"citationId": "cite2"}
                ],
                []
            ]
        })];

        let mut citations = Vec::new();
        extract_citations_from_blocks(&blocks, &mut citations);

        assert_eq!(citations, vec!["cite1", "cite2"]);
    }

    // --- Whole-document metadata extraction ---

    #[test]
    fn test_extract_metadata_from_json() {
        let json = json!({
            "meta": {
                "title": {"t": "MetaString", "c": "Test Document"},
                "author": {"t": "MetaList", "c": [
                    {"t": "MetaString", "c": "Author One"}
                ]},
                "date": {"t": "MetaString", "c": "2024-01-01"}
            },
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();

        assert_eq!(
            metadata.get("title").unwrap(),
            &Value::String("Test Document".to_string())
        );
        assert_eq!(
            metadata.get("authors").unwrap(),
            &Value::Array(vec![Value::String("Author One".to_string())])
        );
        assert_eq!(
            metadata.get("created_at").unwrap(),
            &Value::String("2024-01-01".to_string())
        );
    }

    #[test]
    fn test_metadata_field_filtering() {
        let json = json!({
            "meta": {
                "title": {"t": "MetaString", "c": "Valid Title"},
                "invalid_field": {"t": "MetaString", "c": "Should be filtered"},
                "random_key": {"t": "MetaString", "c": "Not in valid keys"},
                "author": {"t": "MetaString", "c": "Valid Author"}
            },
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();

        assert!(metadata.contains_key("title"));
        assert!(metadata.contains_key("authors"));

        // Keys outside VALID_METADATA_KEYS must be dropped.
        assert!(!metadata.contains_key("invalid_field"));
        assert!(!metadata.contains_key("random_key"));
    }

    #[test]
    fn test_extract_meta_value_meta_blocks() {
        let node = json!({
            "t": "MetaBlocks",
            "c": [
                {
                    "t": "Para",
                    "c": [
                        {"t": "Str", "c": "First"},
                        {"t": "Space"},
                        {"t": "Str", "c": "paragraph"}
                    ]
                },
                {
                    "t": "Para",
                    "c": [
                        {"t": "Str", "c": "Second"},
                        {"t": "Space"},
                        {"t": "Str", "c": "paragraph"}
                    ]
                }
            ]
        });

        // Paragraphs are joined with a single space.
        let result = extract_meta_value(&node).unwrap();
        assert_eq!(result, Value::String("First paragraph Second paragraph".to_string()));
    }

    #[test]
    fn test_extract_meta_value_meta_map() {
        let node = json!({
            "t": "MetaMap",
            "c": {
                "key1": {"t": "MetaString", "c": "value1"},
                "key2": {"t": "MetaString", "c": "value2"}
            }
        });

        let result = extract_meta_value(&node).unwrap();
        let obj = result.as_object().unwrap();
        assert_eq!(obj.get("key1").unwrap(), &Value::String("value1".to_string()));
        assert_eq!(obj.get("key2").unwrap(), &Value::String("value2".to_string()));
    }

    #[test]
    fn test_extract_inline_text_strong() {
        let node = json!({
            "t": "Strong",
            "c": [
                {"t": "Str", "c": "bold"}
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "bold");
    }

    #[test]
    fn test_extract_inline_text_link() {
        // Only the label inlines are extracted; the URL is dropped.
        let node = json!({
            "t": "Link",
            "c": [
                ["", [], []],
                [{"t": "Str", "c": "link text"}],
                ["https://example.com", ""]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "link text");
    }

    #[test]
    fn test_extract_inline_text_image() {
        // Only the alt-text inlines are extracted; the path is dropped.
        let node = json!({
            "t": "Image",
            "c": [
                ["", [], []],
                [{"t": "Str", "c": "alt text"}],
                ["image.png", ""]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "alt text");
    }

    #[test]
    fn test_extract_inline_text_quoted() {
        let node = json!({
            "t": "Quoted",
            "c": [
                {"t": "DoubleQuote"},
                [{"t": "Str", "c": "quoted text"}]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "quoted text");
    }

    #[test]
    fn test_extract_inline_text_cite() {
        let node = json!({
            "t": "Cite",
            "c": [
                [{"citationId": "cite1"}],
                [{"t": "Str", "c": "citation text"}]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "citation text");
    }

    #[test]
    fn test_extract_inline_text_math() {
        let node = json!({
            "t": "Math",
            "c": [
                {"t": "InlineMath"},
                "x^2 + y^2"
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "x^2 + y^2");
    }

    #[test]
    fn test_extract_inline_text_linebreak() {
        let node = json!({"t": "LineBreak"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "\n");
    }

    #[test]
    fn test_extract_inline_text_softbreak() {
        let node = json!({"t": "SoftBreak"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "\n");
    }

    #[test]
    fn test_extract_inline_text_strikeout() {
        let node = json!({
            "t": "Strikeout",
            "c": [{"t": "Str", "c": "deleted"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "deleted");
    }

    #[test]
    fn test_extract_inline_text_superscript() {
        let node = json!({
            "t": "Superscript",
            "c": [{"t": "Str", "c": "2"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "2");
    }

    #[test]
    fn test_extract_inline_text_subscript() {
        let node = json!({
            "t": "Subscript",
            "c": [{"t": "Str", "c": "i"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "i");
    }

    #[test]
    fn test_extract_inline_text_smallcaps() {
        let node = json!({
            "t": "SmallCaps",
            "c": [{"t": "Str", "c": "small"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "small");
    }

    #[test]
    fn test_extract_inline_text_unknown_type() {
        let node = json!({
            "t": "UnknownType",
            "c": "should be ignored"
        });
        let result = extract_inline_text(&node);
        assert!(result.is_none());
    }

    #[test]
    fn test_extract_citations_from_nested_blocks() {
        // Nested structure with no Cite nodes: recursion must not invent
        // citations from ordinary text.
        let blocks = vec![json!({
            "t": "BulletList",
            "c": [
                [
                    {
                        "t": "Plain",
                        "c": [
                            {"t": "Str", "c": "text"}
                        ]
                    }
                ]
            ]
        })];

        let mut citations = Vec::new();
        extract_citations_from_blocks(&blocks, &mut citations);

        assert!(citations.is_empty());
    }

    #[test]
    fn test_extract_metadata_from_json_with_citations() {
        let json = json!({
            "meta": {
                "title": {"t": "MetaString", "c": "Paper"}
            },
            "citations": [
                {"citationId": "cite1"},
                {"citationId": "cite2"}
            ],
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();

        assert!(metadata.contains_key("citations"));
        let citations = metadata.get("citations").unwrap().as_array().unwrap();
        assert_eq!(citations.len(), 2);
        assert_eq!(citations[0], Value::String("cite1".to_string()));
        assert_eq!(citations[1], Value::String("cite2".to_string()));
    }

    #[test]
    fn test_extract_metadata_from_json_empty_meta() {
        let json = json!({
            "meta": {},
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();
        assert!(metadata.is_empty());
    }

    // Empty containers collapse to None rather than empty values.

    #[test]
    fn test_extract_meta_value_empty_list() {
        let node = json!({
            "t": "MetaList",
            "c": []
        });

        let result = extract_meta_value(&node);
        assert!(result.is_none());
    }

    #[test]
    fn test_extract_meta_value_empty_map() {
        let node = json!({
            "t": "MetaMap",
            "c": {}
        });

        let result = extract_meta_value(&node);
        assert!(result.is_none());
    }

    #[test]
    fn test_extract_inlines_empty() {
        let inlines = vec![];
        let result = extract_inlines(&inlines);
        assert!(result.is_none());
    }

    #[test]
    fn test_valid_metadata_keys_contains_standard_fields() {
        assert!(VALID_METADATA_KEYS.contains(&"title"));
        assert!(VALID_METADATA_KEYS.contains(&"authors"));
        assert!(VALID_METADATA_KEYS.contains(&"date"));
        assert!(VALID_METADATA_KEYS.contains(&"keywords"));
        assert!(VALID_METADATA_KEYS.contains(&"abstract"));
        assert!(VALID_METADATA_KEYS.contains(&"citations"));
    }

    #[test]
    fn test_get_pandoc_key_unmapped() {
        assert_eq!(get_pandoc_key("title"), "title");
        assert_eq!(get_pandoc_key("keywords"), "keywords");
        assert_eq!(get_pandoc_key("custom_field"), "custom_field");
    }

    // --- Integration tests: skipped when pandoc is not installed ---

    #[tokio::test]
    async fn test_tempfile_raii_cleanup() {
        use crate::extraction::pandoc::version::validate_pandoc_version;

        if validate_pandoc_version().await.is_err() {
            return;
        }

        let temp_path = std::env::temp_dir().join(format!("test_raii_{}.md", uuid::Uuid::new_v4()));

        // Guard dropped at end of this scope should delete the file.
        {
            let _guard = TempFile::new(temp_path.clone());
            fs::write(&temp_path, b"test content").await.unwrap();
            assert!(temp_path.exists());
        }

        // Small grace period for the async cleanup to run.
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        assert!(!temp_path.exists());
    }

    #[tokio::test]
    async fn test_extract_content_timeout_kills_process() {
        use crate::extraction::pandoc::version::validate_pandoc_version;

        if validate_pandoc_version().await.is_err() {
            return;
        }

        // NOTE(review): despite the name, this exercises the happy path of
        // extract_content (no timeout is forced); consider renaming or adding
        // a real timeout scenario.
        let temp_dir = std::env::temp_dir();
        let test_file = temp_dir.join(format!("test_timeout_{}.md", uuid::Uuid::new_v4()));
        fs::write(&test_file, b"# Test\n\nContent").await.unwrap();

        let result = extract_content(&test_file, "markdown").await;
        assert!(result.is_ok());

        let _ = fs::remove_file(&test_file).await;
    }
}
|