kreuzberg 4.0.0.pre.rc.13 → 4.0.0.pre.rc.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -14
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -1
- data/.rubocop.yml +538 -538
- data/Gemfile +8 -8
- data/Gemfile.lock +105 -2
- data/README.md +454 -454
- data/Rakefile +33 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -341
- data/ext/kreuzberg_rb/extconf.rb +45 -45
- data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
- data/ext/kreuzberg_rb/native/Cargo.lock +6940 -6941
- data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -15
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +214 -214
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +81 -81
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +724 -724
- data/lib/kreuzberg/error_context.rb +80 -80
- data/lib/kreuzberg/errors.rb +118 -118
- data/lib/kreuzberg/extraction_api.rb +340 -340
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +279 -279
- data/lib/kreuzberg/setup_lib_path.rb +80 -80
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +109 -109
- data/lib/{pdfium.dll → libpdfium.dylib} +0 -0
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +546 -546
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -85
- data/spec/binding/cli_spec.rb +55 -55
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -41
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/Cargo.toml +1 -1
- data/vendor/kreuzberg/Cargo.toml +5 -5
- data/vendor/kreuzberg/README.md +230 -230
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
- data/vendor/kreuzberg/build.rs +843 -843
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
- data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
- data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
- data/vendor/kreuzberg/src/core/config.rs +1080 -1080
- data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
- data/vendor/kreuzberg/src/core/io.rs +329 -329
- data/vendor/kreuzberg/src/core/mime.rs +605 -605
- data/vendor/kreuzberg/src/core/mod.rs +47 -47
- data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
- data/vendor/kreuzberg/src/embeddings.rs +500 -500
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +601 -601
- data/vendor/kreuzberg/src/extraction/image.rs +491 -491
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -574
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
- data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
- data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
- data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
- data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
- data/vendor/kreuzberg/src/extractors/email.rs +157 -157
- data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
- data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
- data/vendor/kreuzberg/src/extractors/html.rs +407 -407
- data/vendor/kreuzberg/src/extractors/image.rs +219 -219
- data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
- data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
- data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
- data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
- data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
- data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
- data/vendor/kreuzberg/src/extractors/pdf.rs +749 -749
- data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
- data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
- data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
- data/vendor/kreuzberg/src/extractors/security.rs +484 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
- data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
- data/vendor/kreuzberg/src/extractors/text.rs +265 -265
- data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
- data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
- data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
- data/vendor/kreuzberg/src/lib.rs +113 -113
- data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
- data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -154
- data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
- data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
- data/vendor/kreuzberg/src/pdf/error.rs +130 -130
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
- data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
- data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
- data/vendor/kreuzberg/src/pdf/table.rs +420 -420
- data/vendor/kreuzberg/src/pdf/text.rs +240 -240
- data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
- data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
- data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
- data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
- data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +25 -25
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +1055 -1055
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
- data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
- data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
- data/vendor/kreuzberg/tests/config_features.rs +612 -612
- data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
- data/vendor/kreuzberg/tests/core_integration.rs +510 -510
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
- data/vendor/kreuzberg/tests/email_integration.rs +327 -327
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
- data/vendor/kreuzberg/tests/error_handling.rs +402 -402
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
- data/vendor/kreuzberg/tests/format_integration.rs +164 -164
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
- data/vendor/kreuzberg/tests/image_integration.rs +255 -255
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
- data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
- data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
- data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
- data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
- data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
- data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
- data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
- data/vendor/kreuzberg/tests/security_validation.rs +416 -416
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/kreuzberg-ffi/Cargo.toml +1 -1
- data/vendor/kreuzberg-ffi/README.md +851 -851
- data/vendor/kreuzberg-ffi/build.rs +176 -176
- data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
- data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
- data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
- data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
- data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
- data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
- data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
- data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
- data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
- data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
- data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
- data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
- data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
- data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
- data/vendor/kreuzberg-tesseract/LICENSE +22 -22
- data/vendor/kreuzberg-tesseract/README.md +399 -399
- data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
- data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
- data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
- data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
- data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
- data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
- data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
- data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
- data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
- data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
- data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
- data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
- data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
- data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
- data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
- data/vendor/rb-sys/Cargo.lock +393 -393
- data/vendor/rb-sys/Cargo.toml +70 -70
- data/vendor/rb-sys/Cargo.toml.orig +57 -57
- data/vendor/rb-sys/LICENSE-APACHE +190 -190
- data/vendor/rb-sys/LICENSE-MIT +21 -21
- data/vendor/rb-sys/build/features.rs +111 -111
- data/vendor/rb-sys/build/main.rs +286 -286
- data/vendor/rb-sys/build/stable_api_config.rs +155 -155
- data/vendor/rb-sys/build/version.rs +50 -50
- data/vendor/rb-sys/readme.md +36 -36
- data/vendor/rb-sys/src/bindings.rs +21 -21
- data/vendor/rb-sys/src/hidden.rs +11 -11
- data/vendor/rb-sys/src/lib.rs +35 -35
- data/vendor/rb-sys/src/macros.rs +371 -371
- data/vendor/rb-sys/src/memory.rs +53 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
- data/vendor/rb-sys/src/special_consts.rs +31 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
- data/vendor/rb-sys/src/stable_api.rs +260 -260
- data/vendor/rb-sys/src/symbol.rs +31 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
- data/vendor/rb-sys/src/utils.rs +89 -89
- data/vendor/rb-sys/src/value_type.rs +7 -7
- metadata +73 -4
- data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +0 -12
|
@@ -1,708 +1,708 @@
|
|
|
1
|
-
//! Native EPUB extractor using permissive-licensed dependencies.
|
|
2
|
-
//!
|
|
3
|
-
//! This extractor provides native Rust-based EPUB extraction without GPL-licensed
|
|
4
|
-
//! dependencies, extracting:
|
|
5
|
-
//! - Metadata from OPF (Open Packaging Format) using Dublin Core standards
|
|
6
|
-
//! - Content from XHTML files in spine order
|
|
7
|
-
//! - Proper handling of EPUB2 and EPUB3 formats
|
|
8
|
-
//!
|
|
9
|
-
//! Uses only permissive-licensed crates:
|
|
10
|
-
//! - `zip` (MIT/Apache) - for reading EPUB container
|
|
11
|
-
//! - `roxmltree` (MIT) - for parsing XML
|
|
12
|
-
//! - `html-to-markdown-rs` (MIT) - for converting XHTML to plain text
|
|
13
|
-
|
|
14
|
-
use crate::Result;
|
|
15
|
-
use crate::core::config::ExtractionConfig;
|
|
16
|
-
use crate::plugins::{DocumentExtractor, Plugin};
|
|
17
|
-
use crate::types::{ExtractionResult, Metadata};
|
|
18
|
-
use async_trait::async_trait;
|
|
19
|
-
use roxmltree;
|
|
20
|
-
use std::collections::BTreeMap;
|
|
21
|
-
use std::io::Cursor;
|
|
22
|
-
use zip::ZipArchive;
|
|
23
|
-
|
|
24
|
-
/// EPUB format extractor using permissive-licensed dependencies.
|
|
25
|
-
///
|
|
26
|
-
/// Extracts content and metadata from EPUB files (both EPUB2 and EPUB3)
|
|
27
|
-
/// using native Rust parsing without GPL-licensed dependencies.
|
|
28
|
-
pub struct EpubExtractor;
|
|
29
|
-
|
|
30
|
-
impl EpubExtractor {
|
|
31
|
-
/// Create a new EPUB extractor.
|
|
32
|
-
pub fn new() -> Self {
|
|
33
|
-
Self
|
|
34
|
-
}
|
|
35
|
-
|
|
36
|
-
/// Extract text content from an EPUB document by reading in spine order
|
|
37
|
-
fn extract_content(
|
|
38
|
-
archive: &mut ZipArchive<Cursor<Vec<u8>>>,
|
|
39
|
-
opf_path: &str,
|
|
40
|
-
manifest_dir: &str,
|
|
41
|
-
) -> Result<String> {
|
|
42
|
-
let opf_xml = Self::read_file_from_zip(archive, opf_path)?;
|
|
43
|
-
let (_, spine_hrefs) = Self::parse_opf(&opf_xml)?;
|
|
44
|
-
|
|
45
|
-
let mut content = String::new();
|
|
46
|
-
|
|
47
|
-
for (index, href) in spine_hrefs.iter().enumerate() {
|
|
48
|
-
let file_path = Self::resolve_path(manifest_dir, href);
|
|
49
|
-
|
|
50
|
-
match Self::read_file_from_zip(archive, &file_path) {
|
|
51
|
-
Ok(xhtml_content) => {
|
|
52
|
-
let text = Self::extract_text_from_xhtml(&xhtml_content);
|
|
53
|
-
if !text.is_empty() {
|
|
54
|
-
if index > 0 && !content.ends_with('\n') {
|
|
55
|
-
content.push('\n');
|
|
56
|
-
}
|
|
57
|
-
content.push_str(&text);
|
|
58
|
-
content.push('\n');
|
|
59
|
-
}
|
|
60
|
-
}
|
|
61
|
-
Err(_) => {
|
|
62
|
-
continue;
|
|
63
|
-
}
|
|
64
|
-
}
|
|
65
|
-
}
|
|
66
|
-
|
|
67
|
-
Ok(content.trim().to_string())
|
|
68
|
-
}
|
|
69
|
-
|
|
70
|
-
/// Extract text from XHTML content using html-to-markdown-rs
|
|
71
|
-
fn extract_text_from_xhtml(xhtml: &str) -> String {
|
|
72
|
-
match crate::extraction::html::convert_html_to_markdown(xhtml, None) {
|
|
73
|
-
Ok(markdown) => {
|
|
74
|
-
let text = Self::markdown_to_plain_text(&markdown);
|
|
75
|
-
Self::remove_html_comments(&text)
|
|
76
|
-
}
|
|
77
|
-
Err(_) => Self::strip_html_tags(xhtml),
|
|
78
|
-
}
|
|
79
|
-
}
|
|
80
|
-
|
|
81
|
-
/// Remove HTML comments from text
|
|
82
|
-
fn remove_html_comments(text: &str) -> String {
|
|
83
|
-
let mut result = String::new();
|
|
84
|
-
let mut in_comment = false;
|
|
85
|
-
let mut chars = text.chars().peekable();
|
|
86
|
-
|
|
87
|
-
while let Some(ch) = chars.next() {
|
|
88
|
-
if !in_comment && ch == '<' {
|
|
89
|
-
if chars.peek() == Some(&'!') {
|
|
90
|
-
chars.next();
|
|
91
|
-
if chars.peek() == Some(&'-') {
|
|
92
|
-
chars.next();
|
|
93
|
-
if chars.peek() == Some(&'-') {
|
|
94
|
-
chars.next();
|
|
95
|
-
in_comment = true;
|
|
96
|
-
continue;
|
|
97
|
-
} else {
|
|
98
|
-
result.push('<');
|
|
99
|
-
result.push('!');
|
|
100
|
-
result.push('-');
|
|
101
|
-
continue;
|
|
102
|
-
}
|
|
103
|
-
} else {
|
|
104
|
-
result.push('<');
|
|
105
|
-
result.push('!');
|
|
106
|
-
continue;
|
|
107
|
-
}
|
|
108
|
-
} else {
|
|
109
|
-
result.push(ch);
|
|
110
|
-
}
|
|
111
|
-
} else if in_comment {
|
|
112
|
-
if ch == '-' && chars.peek() == Some(&'-') {
|
|
113
|
-
chars.next();
|
|
114
|
-
if chars.peek() == Some(&'>') {
|
|
115
|
-
chars.next();
|
|
116
|
-
in_comment = false;
|
|
117
|
-
result.push('\n');
|
|
118
|
-
}
|
|
119
|
-
}
|
|
120
|
-
} else {
|
|
121
|
-
result.push(ch);
|
|
122
|
-
}
|
|
123
|
-
}
|
|
124
|
-
|
|
125
|
-
result
|
|
126
|
-
}
|
|
127
|
-
|
|
128
|
-
/// Convert markdown output to plain text by removing markdown syntax
|
|
129
|
-
fn markdown_to_plain_text(markdown: &str) -> String {
|
|
130
|
-
let mut text = String::new();
|
|
131
|
-
let mut in_code_block = false;
|
|
132
|
-
|
|
133
|
-
for line in markdown.lines() {
|
|
134
|
-
let trimmed = line.trim();
|
|
135
|
-
|
|
136
|
-
if trimmed.is_empty() {
|
|
137
|
-
if !text.is_empty() && !text.ends_with('\n') {
|
|
138
|
-
text.push('\n');
|
|
139
|
-
}
|
|
140
|
-
continue;
|
|
141
|
-
}
|
|
142
|
-
|
|
143
|
-
if trimmed.starts_with("```") {
|
|
144
|
-
in_code_block = !in_code_block;
|
|
145
|
-
continue;
|
|
146
|
-
}
|
|
147
|
-
|
|
148
|
-
if in_code_block {
|
|
149
|
-
text.push_str(trimmed);
|
|
150
|
-
text.push('\n');
|
|
151
|
-
continue;
|
|
152
|
-
}
|
|
153
|
-
|
|
154
|
-
let cleaned = if let Some(stripped) = trimmed.strip_prefix("- ").or_else(|| trimmed.strip_prefix("* ")) {
|
|
155
|
-
stripped
|
|
156
|
-
} else if let Some(stripped) = trimmed.strip_prefix(|c: char| c.is_ascii_digit()) {
|
|
157
|
-
if let Some(rest) = stripped.strip_prefix(". ") {
|
|
158
|
-
rest
|
|
159
|
-
} else {
|
|
160
|
-
trimmed
|
|
161
|
-
}
|
|
162
|
-
} else {
|
|
163
|
-
trimmed
|
|
164
|
-
};
|
|
165
|
-
|
|
166
|
-
let cleaned = cleaned.trim_start_matches('#').trim();
|
|
167
|
-
|
|
168
|
-
let cleaned = cleaned
|
|
169
|
-
.replace("**", "")
|
|
170
|
-
.replace("__", "")
|
|
171
|
-
.replace("*", "")
|
|
172
|
-
.replace("_", "");
|
|
173
|
-
|
|
174
|
-
let cleaned = Self::remove_markdown_links(&cleaned);
|
|
175
|
-
|
|
176
|
-
if !cleaned.is_empty() {
|
|
177
|
-
text.push_str(&cleaned);
|
|
178
|
-
text.push('\n');
|
|
179
|
-
}
|
|
180
|
-
}
|
|
181
|
-
|
|
182
|
-
text.trim().to_string()
|
|
183
|
-
}
|
|
184
|
-
|
|
185
|
-
/// Remove markdown links [text](url) -> text
|
|
186
|
-
fn remove_markdown_links(text: &str) -> String {
|
|
187
|
-
let mut result = String::new();
|
|
188
|
-
let mut chars = text.chars().peekable();
|
|
189
|
-
|
|
190
|
-
while let Some(ch) = chars.next() {
|
|
191
|
-
if ch == '[' {
|
|
192
|
-
let mut link_text = String::new();
|
|
193
|
-
let mut depth = 1;
|
|
194
|
-
|
|
195
|
-
while let Some(&next_ch) = chars.peek() {
|
|
196
|
-
chars.next();
|
|
197
|
-
if next_ch == '[' {
|
|
198
|
-
depth += 1;
|
|
199
|
-
link_text.push(next_ch);
|
|
200
|
-
} else if next_ch == ']' {
|
|
201
|
-
depth -= 1;
|
|
202
|
-
if depth == 0 {
|
|
203
|
-
break;
|
|
204
|
-
}
|
|
205
|
-
link_text.push(next_ch);
|
|
206
|
-
} else {
|
|
207
|
-
link_text.push(next_ch);
|
|
208
|
-
}
|
|
209
|
-
}
|
|
210
|
-
|
|
211
|
-
if let Some(&'(') = chars.peek() {
|
|
212
|
-
chars.next();
|
|
213
|
-
let mut paren_depth = 1;
|
|
214
|
-
while let Some(&next_ch) = chars.peek() {
|
|
215
|
-
chars.next();
|
|
216
|
-
if next_ch == '(' {
|
|
217
|
-
paren_depth += 1;
|
|
218
|
-
} else if next_ch == ')' {
|
|
219
|
-
paren_depth -= 1;
|
|
220
|
-
if paren_depth == 0 {
|
|
221
|
-
break;
|
|
222
|
-
}
|
|
223
|
-
}
|
|
224
|
-
}
|
|
225
|
-
}
|
|
226
|
-
|
|
227
|
-
result.push_str(&link_text);
|
|
228
|
-
} else {
|
|
229
|
-
result.push(ch);
|
|
230
|
-
}
|
|
231
|
-
}
|
|
232
|
-
|
|
233
|
-
result
|
|
234
|
-
}
|
|
235
|
-
|
|
236
|
-
/// Fallback: strip HTML tags without using specialized libraries
|
|
237
|
-
fn strip_html_tags(html: &str) -> String {
|
|
238
|
-
let mut text = String::new();
|
|
239
|
-
let mut in_tag = false;
|
|
240
|
-
let mut in_script_style = false;
|
|
241
|
-
let mut tag_name = String::new();
|
|
242
|
-
|
|
243
|
-
for ch in html.chars() {
|
|
244
|
-
if ch == '<' {
|
|
245
|
-
in_tag = true;
|
|
246
|
-
tag_name.clear();
|
|
247
|
-
continue;
|
|
248
|
-
}
|
|
249
|
-
|
|
250
|
-
if ch == '>' {
|
|
251
|
-
in_tag = false;
|
|
252
|
-
|
|
253
|
-
let tag_lower = tag_name.to_lowercase();
|
|
254
|
-
if tag_lower.contains("script") || tag_lower.contains("style") {
|
|
255
|
-
in_script_style = !tag_name.starts_with('/');
|
|
256
|
-
}
|
|
257
|
-
continue;
|
|
258
|
-
}
|
|
259
|
-
|
|
260
|
-
if in_tag {
|
|
261
|
-
tag_name.push(ch);
|
|
262
|
-
continue;
|
|
263
|
-
}
|
|
264
|
-
|
|
265
|
-
if in_script_style {
|
|
266
|
-
continue;
|
|
267
|
-
}
|
|
268
|
-
|
|
269
|
-
if ch == '\n' || ch == '\r' || ch == '\t' || ch == ' ' {
|
|
270
|
-
if !text.is_empty() && !text.ends_with(' ') {
|
|
271
|
-
text.push(' ');
|
|
272
|
-
}
|
|
273
|
-
} else {
|
|
274
|
-
text.push(ch);
|
|
275
|
-
}
|
|
276
|
-
}
|
|
277
|
-
|
|
278
|
-
let mut result = String::new();
|
|
279
|
-
let mut prev_space = false;
|
|
280
|
-
for ch in text.chars() {
|
|
281
|
-
if ch == ' ' {
|
|
282
|
-
if !prev_space {
|
|
283
|
-
result.push(ch);
|
|
284
|
-
}
|
|
285
|
-
prev_space = true;
|
|
286
|
-
} else {
|
|
287
|
-
result.push(ch);
|
|
288
|
-
prev_space = false;
|
|
289
|
-
}
|
|
290
|
-
}
|
|
291
|
-
|
|
292
|
-
result.trim().to_string()
|
|
293
|
-
}
|
|
294
|
-
|
|
295
|
-
/// Extract metadata from EPUB OPF file
|
|
296
|
-
fn extract_metadata(opf_xml: &str) -> Result<BTreeMap<String, serde_json::Value>> {
|
|
297
|
-
let mut metadata = BTreeMap::new();
|
|
298
|
-
|
|
299
|
-
let (epub_metadata, _) = Self::parse_opf(opf_xml)?;
|
|
300
|
-
|
|
301
|
-
if let Some(title) = epub_metadata.title {
|
|
302
|
-
metadata.insert("title".to_string(), serde_json::json!(title));
|
|
303
|
-
}
|
|
304
|
-
|
|
305
|
-
if let Some(creator) = epub_metadata.creator {
|
|
306
|
-
metadata.insert("creator".to_string(), serde_json::json!(creator.clone()));
|
|
307
|
-
metadata.insert("authors".to_string(), serde_json::json!(vec![creator]));
|
|
308
|
-
}
|
|
309
|
-
|
|
310
|
-
if let Some(date) = epub_metadata.date {
|
|
311
|
-
metadata.insert("date".to_string(), serde_json::json!(date));
|
|
312
|
-
}
|
|
313
|
-
|
|
314
|
-
if let Some(language) = epub_metadata.language {
|
|
315
|
-
metadata.insert("language".to_string(), serde_json::json!(language));
|
|
316
|
-
}
|
|
317
|
-
|
|
318
|
-
if let Some(identifier) = epub_metadata.identifier {
|
|
319
|
-
metadata.insert("identifier".to_string(), serde_json::json!(identifier));
|
|
320
|
-
}
|
|
321
|
-
|
|
322
|
-
if let Some(publisher) = epub_metadata.publisher {
|
|
323
|
-
metadata.insert("publisher".to_string(), serde_json::json!(publisher));
|
|
324
|
-
}
|
|
325
|
-
|
|
326
|
-
if let Some(subject) = epub_metadata.subject {
|
|
327
|
-
metadata.insert("subject".to_string(), serde_json::json!(subject));
|
|
328
|
-
}
|
|
329
|
-
|
|
330
|
-
if let Some(description) = epub_metadata.description {
|
|
331
|
-
metadata.insert("description".to_string(), serde_json::json!(description));
|
|
332
|
-
}
|
|
333
|
-
|
|
334
|
-
if let Some(rights) = epub_metadata.rights {
|
|
335
|
-
metadata.insert("rights".to_string(), serde_json::json!(rights));
|
|
336
|
-
}
|
|
337
|
-
|
|
338
|
-
Ok(metadata)
|
|
339
|
-
}
|
|
340
|
-
|
|
341
|
-
/// Parse container.xml to find the OPF file path
|
|
342
|
-
fn parse_container_xml(xml: &str) -> Result<String> {
|
|
343
|
-
match roxmltree::Document::parse(xml) {
|
|
344
|
-
Ok(doc) => {
|
|
345
|
-
for node in doc.descendants() {
|
|
346
|
-
if node.tag_name().name() == "rootfile"
|
|
347
|
-
&& let Some(full_path) = node.attribute("full-path")
|
|
348
|
-
{
|
|
349
|
-
return Ok(full_path.to_string());
|
|
350
|
-
}
|
|
351
|
-
}
|
|
352
|
-
Err(crate::KreuzbergError::Parsing {
|
|
353
|
-
message: "No rootfile found in container.xml".to_string(),
|
|
354
|
-
source: None,
|
|
355
|
-
})
|
|
356
|
-
}
|
|
357
|
-
Err(e) => Err(crate::KreuzbergError::Parsing {
|
|
358
|
-
message: format!("Failed to parse container.xml: {}", e),
|
|
359
|
-
source: None,
|
|
360
|
-
}),
|
|
361
|
-
}
|
|
362
|
-
}
|
|
363
|
-
|
|
364
|
-
/// Parse OPF file and extract metadata and spine order
|
|
365
|
-
fn parse_opf(xml: &str) -> Result<(OepbMetadata, Vec<String>)> {
|
|
366
|
-
match roxmltree::Document::parse(xml) {
|
|
367
|
-
Ok(doc) => {
|
|
368
|
-
let root = doc.root();
|
|
369
|
-
|
|
370
|
-
let mut metadata = OepbMetadata::default();
|
|
371
|
-
let mut manifest: BTreeMap<String, String> = BTreeMap::new();
|
|
372
|
-
let mut spine_order: Vec<String> = Vec::new();
|
|
373
|
-
|
|
374
|
-
for node in root.descendants() {
|
|
375
|
-
match node.tag_name().name() {
|
|
376
|
-
"title" => {
|
|
377
|
-
if let Some(text) = node.text() {
|
|
378
|
-
metadata.title = Some(text.trim().to_string());
|
|
379
|
-
}
|
|
380
|
-
}
|
|
381
|
-
"creator" => {
|
|
382
|
-
if let Some(text) = node.text() {
|
|
383
|
-
metadata.creator = Some(text.trim().to_string());
|
|
384
|
-
}
|
|
385
|
-
}
|
|
386
|
-
"date" => {
|
|
387
|
-
if let Some(text) = node.text() {
|
|
388
|
-
metadata.date = Some(text.trim().to_string());
|
|
389
|
-
}
|
|
390
|
-
}
|
|
391
|
-
"language" => {
|
|
392
|
-
if let Some(text) = node.text() {
|
|
393
|
-
metadata.language = Some(text.trim().to_string());
|
|
394
|
-
}
|
|
395
|
-
}
|
|
396
|
-
"identifier" => {
|
|
397
|
-
if let Some(text) = node.text() {
|
|
398
|
-
metadata.identifier = Some(text.trim().to_string());
|
|
399
|
-
}
|
|
400
|
-
}
|
|
401
|
-
"publisher" => {
|
|
402
|
-
if let Some(text) = node.text() {
|
|
403
|
-
metadata.publisher = Some(text.trim().to_string());
|
|
404
|
-
}
|
|
405
|
-
}
|
|
406
|
-
"subject" => {
|
|
407
|
-
if let Some(text) = node.text() {
|
|
408
|
-
metadata.subject = Some(text.trim().to_string());
|
|
409
|
-
}
|
|
410
|
-
}
|
|
411
|
-
"description" => {
|
|
412
|
-
if let Some(text) = node.text() {
|
|
413
|
-
metadata.description = Some(text.trim().to_string());
|
|
414
|
-
}
|
|
415
|
-
}
|
|
416
|
-
"rights" => {
|
|
417
|
-
if let Some(text) = node.text() {
|
|
418
|
-
metadata.rights = Some(text.trim().to_string());
|
|
419
|
-
}
|
|
420
|
-
}
|
|
421
|
-
"item" => {
|
|
422
|
-
if let Some(id) = node.attribute("id")
|
|
423
|
-
&& let Some(href) = node.attribute("href")
|
|
424
|
-
{
|
|
425
|
-
manifest.insert(id.to_string(), href.to_string());
|
|
426
|
-
}
|
|
427
|
-
}
|
|
428
|
-
_ => {}
|
|
429
|
-
}
|
|
430
|
-
}
|
|
431
|
-
|
|
432
|
-
for node in root.descendants() {
|
|
433
|
-
if node.tag_name().name() == "itemref"
|
|
434
|
-
&& let Some(idref) = node.attribute("idref")
|
|
435
|
-
&& let Some(href) = manifest.get(idref)
|
|
436
|
-
{
|
|
437
|
-
spine_order.push(href.clone());
|
|
438
|
-
}
|
|
439
|
-
}
|
|
440
|
-
|
|
441
|
-
Ok((metadata, spine_order))
|
|
442
|
-
}
|
|
443
|
-
Err(e) => Err(crate::KreuzbergError::Parsing {
|
|
444
|
-
message: format!("Failed to parse OPF file: {}", e),
|
|
445
|
-
source: None,
|
|
446
|
-
}),
|
|
447
|
-
}
|
|
448
|
-
}
|
|
449
|
-
|
|
450
|
-
/// Read a file from the ZIP archive
|
|
451
|
-
fn read_file_from_zip(archive: &mut ZipArchive<Cursor<Vec<u8>>>, path: &str) -> Result<String> {
|
|
452
|
-
match archive.by_name(path) {
|
|
453
|
-
Ok(mut file) => {
|
|
454
|
-
let mut content = String::new();
|
|
455
|
-
match std::io::Read::read_to_string(&mut file, &mut content) {
|
|
456
|
-
Ok(_) => Ok(content),
|
|
457
|
-
Err(e) => Err(crate::KreuzbergError::Parsing {
|
|
458
|
-
message: format!("Failed to read file from EPUB: {}", e),
|
|
459
|
-
source: None,
|
|
460
|
-
}),
|
|
461
|
-
}
|
|
462
|
-
}
|
|
463
|
-
Err(e) => Err(crate::KreuzbergError::Parsing {
|
|
464
|
-
message: format!("File not found in EPUB: {} ({})", path, e),
|
|
465
|
-
source: None,
|
|
466
|
-
}),
|
|
467
|
-
}
|
|
468
|
-
}
|
|
469
|
-
|
|
470
|
-
/// Resolve a relative path within the manifest directory
|
|
471
|
-
fn resolve_path(base_dir: &str, relative_path: &str) -> String {
|
|
472
|
-
if relative_path.starts_with('/') {
|
|
473
|
-
relative_path.trim_start_matches('/').to_string()
|
|
474
|
-
} else if base_dir.is_empty() || base_dir == "." {
|
|
475
|
-
relative_path.to_string()
|
|
476
|
-
} else {
|
|
477
|
-
format!("{}/{}", base_dir.trim_end_matches('/'), relative_path)
|
|
478
|
-
}
|
|
479
|
-
}
|
|
480
|
-
}
|
|
481
|
-
|
|
482
|
-
/// Metadata extracted from OPF (Open Packaging Format) file
|
|
483
|
-
#[derive(Debug, Default, Clone)]
|
|
484
|
-
struct OepbMetadata {
|
|
485
|
-
title: Option<String>,
|
|
486
|
-
creator: Option<String>,
|
|
487
|
-
date: Option<String>,
|
|
488
|
-
language: Option<String>,
|
|
489
|
-
identifier: Option<String>,
|
|
490
|
-
publisher: Option<String>,
|
|
491
|
-
subject: Option<String>,
|
|
492
|
-
description: Option<String>,
|
|
493
|
-
rights: Option<String>,
|
|
494
|
-
}
|
|
495
|
-
|
|
496
|
-
impl Default for EpubExtractor {
|
|
497
|
-
fn default() -> Self {
|
|
498
|
-
Self::new()
|
|
499
|
-
}
|
|
500
|
-
}
|
|
501
|
-
|
|
502
|
-
impl Plugin for EpubExtractor {
|
|
503
|
-
fn name(&self) -> &str {
|
|
504
|
-
"epub-extractor"
|
|
505
|
-
}
|
|
506
|
-
|
|
507
|
-
fn version(&self) -> String {
|
|
508
|
-
env!("CARGO_PKG_VERSION").to_string()
|
|
509
|
-
}
|
|
510
|
-
|
|
511
|
-
fn initialize(&self) -> Result<()> {
|
|
512
|
-
Ok(())
|
|
513
|
-
}
|
|
514
|
-
|
|
515
|
-
fn shutdown(&self) -> Result<()> {
|
|
516
|
-
Ok(())
|
|
517
|
-
}
|
|
518
|
-
|
|
519
|
-
fn description(&self) -> &str {
|
|
520
|
-
"Extracts content and metadata from EPUB documents (native Rust implementation with permissive licenses)"
|
|
521
|
-
}
|
|
522
|
-
|
|
523
|
-
fn author(&self) -> &str {
|
|
524
|
-
"Kreuzberg Team"
|
|
525
|
-
}
|
|
526
|
-
}
|
|
527
|
-
|
|
528
|
-
#[cfg(feature = "office")]
|
|
529
|
-
#[async_trait]
|
|
530
|
-
impl DocumentExtractor for EpubExtractor {
|
|
531
|
-
#[cfg_attr(
|
|
532
|
-
feature = "otel",
|
|
533
|
-
tracing::instrument(
|
|
534
|
-
skip(self, content, _config),
|
|
535
|
-
fields(
|
|
536
|
-
extractor.name = self.name(),
|
|
537
|
-
content.size_bytes = content.len(),
|
|
538
|
-
)
|
|
539
|
-
)
|
|
540
|
-
)]
|
|
541
|
-
async fn extract_bytes(
|
|
542
|
-
&self,
|
|
543
|
-
content: &[u8],
|
|
544
|
-
mime_type: &str,
|
|
545
|
-
_config: &ExtractionConfig,
|
|
546
|
-
) -> Result<ExtractionResult> {
|
|
547
|
-
let cursor = Cursor::new(content.to_vec());
|
|
548
|
-
|
|
549
|
-
let mut archive = ZipArchive::new(cursor).map_err(|e| crate::KreuzbergError::Parsing {
|
|
550
|
-
message: format!("Failed to open EPUB as ZIP: {}", e),
|
|
551
|
-
source: None,
|
|
552
|
-
})?;
|
|
553
|
-
|
|
554
|
-
let container_xml = Self::read_file_from_zip(&mut archive, "META-INF/container.xml")?;
|
|
555
|
-
let opf_path = Self::parse_container_xml(&container_xml)?;
|
|
556
|
-
|
|
557
|
-
let manifest_dir = if let Some(last_slash) = opf_path.rfind('/') {
|
|
558
|
-
opf_path[..last_slash].to_string()
|
|
559
|
-
} else {
|
|
560
|
-
String::new()
|
|
561
|
-
};
|
|
562
|
-
|
|
563
|
-
let opf_xml = Self::read_file_from_zip(&mut archive, &opf_path)?;
|
|
564
|
-
|
|
565
|
-
let extracted_content = Self::extract_content(&mut archive, &opf_path, &manifest_dir)?;
|
|
566
|
-
|
|
567
|
-
let metadata_btree = Self::extract_metadata(&opf_xml)?;
|
|
568
|
-
let metadata_map: std::collections::HashMap<String, serde_json::Value> = metadata_btree.into_iter().collect();
|
|
569
|
-
|
|
570
|
-
Ok(ExtractionResult {
|
|
571
|
-
content: extracted_content,
|
|
572
|
-
mime_type: mime_type.to_string(),
|
|
573
|
-
metadata: Metadata {
|
|
574
|
-
additional: metadata_map,
|
|
575
|
-
..Default::default()
|
|
576
|
-
},
|
|
577
|
-
pages: None,
|
|
578
|
-
tables: vec![],
|
|
579
|
-
detected_languages: None,
|
|
580
|
-
chunks: None,
|
|
581
|
-
images: None,
|
|
582
|
-
})
|
|
583
|
-
}
|
|
584
|
-
|
|
585
|
-
fn supported_mime_types(&self) -> &[&str] {
|
|
586
|
-
&[
|
|
587
|
-
"application/epub+zip",
|
|
588
|
-
"application/x-epub+zip",
|
|
589
|
-
"application/vnd.epub+zip",
|
|
590
|
-
]
|
|
591
|
-
}
|
|
592
|
-
|
|
593
|
-
fn priority(&self) -> i32 {
|
|
594
|
-
60
|
|
595
|
-
}
|
|
596
|
-
}
|
|
597
|
-
|
|
598
|
-
#[cfg(all(test, feature = "office"))]
|
|
599
|
-
mod tests {
|
|
600
|
-
use super::*;
|
|
601
|
-
|
|
602
|
-
#[test]
|
|
603
|
-
fn test_epub_extractor_plugin_interface() {
|
|
604
|
-
let extractor = EpubExtractor::new();
|
|
605
|
-
assert_eq!(extractor.name(), "epub-extractor");
|
|
606
|
-
assert_eq!(extractor.version(), env!("CARGO_PKG_VERSION"));
|
|
607
|
-
assert_eq!(extractor.priority(), 60);
|
|
608
|
-
assert!(!extractor.supported_mime_types().is_empty());
|
|
609
|
-
}
|
|
610
|
-
|
|
611
|
-
#[test]
|
|
612
|
-
fn test_epub_extractor_default() {
|
|
613
|
-
let extractor = EpubExtractor;
|
|
614
|
-
assert_eq!(extractor.name(), "epub-extractor");
|
|
615
|
-
}
|
|
616
|
-
|
|
617
|
-
#[tokio::test]
|
|
618
|
-
async fn test_epub_extractor_initialize_shutdown() {
|
|
619
|
-
let extractor = EpubExtractor::new();
|
|
620
|
-
assert!(extractor.initialize().is_ok());
|
|
621
|
-
assert!(extractor.shutdown().is_ok());
|
|
622
|
-
}
|
|
623
|
-
|
|
624
|
-
#[test]
|
|
625
|
-
fn test_strip_html_tags_simple() {
|
|
626
|
-
let html = "<html><body><p>Hello World</p></body></html>";
|
|
627
|
-
let text = EpubExtractor::strip_html_tags(html);
|
|
628
|
-
assert!(text.contains("Hello World"));
|
|
629
|
-
}
|
|
630
|
-
|
|
631
|
-
#[test]
|
|
632
|
-
fn test_strip_html_tags_with_scripts() {
|
|
633
|
-
let html = "<body><p>Text</p><script>alert('bad');</script><p>More</p></body>";
|
|
634
|
-
let text = EpubExtractor::strip_html_tags(html);
|
|
635
|
-
assert!(!text.contains("bad"));
|
|
636
|
-
assert!(text.contains("Text"));
|
|
637
|
-
assert!(text.contains("More"));
|
|
638
|
-
}
|
|
639
|
-
|
|
640
|
-
#[test]
|
|
641
|
-
fn test_strip_html_tags_with_styles() {
|
|
642
|
-
let html = "<body><p>Text</p><style>.class { color: red; }</style><p>More</p></body>";
|
|
643
|
-
let text = EpubExtractor::strip_html_tags(html);
|
|
644
|
-
assert!(!text.to_lowercase().contains("color"));
|
|
645
|
-
assert!(text.contains("Text"));
|
|
646
|
-
assert!(text.contains("More"));
|
|
647
|
-
}
|
|
648
|
-
|
|
649
|
-
#[test]
|
|
650
|
-
fn test_strip_html_tags_normalizes_whitespace() {
|
|
651
|
-
let html = "<p>Hello \n\t World</p>";
|
|
652
|
-
let text = EpubExtractor::strip_html_tags(html);
|
|
653
|
-
assert!(text.contains("Hello") && text.contains("World"));
|
|
654
|
-
}
|
|
655
|
-
|
|
656
|
-
#[test]
|
|
657
|
-
fn test_remove_markdown_links() {
|
|
658
|
-
let text = "This is a [link](http://example.com) in text";
|
|
659
|
-
let result = EpubExtractor::remove_markdown_links(text);
|
|
660
|
-
assert!(result.contains("link"));
|
|
661
|
-
assert!(!result.contains("http://"));
|
|
662
|
-
}
|
|
663
|
-
|
|
664
|
-
#[test]
|
|
665
|
-
fn test_resolve_path_with_base_dir() {
|
|
666
|
-
let result = EpubExtractor::resolve_path("OEBPS", "chapter.xhtml");
|
|
667
|
-
assert_eq!(result, "OEBPS/chapter.xhtml");
|
|
668
|
-
}
|
|
669
|
-
|
|
670
|
-
#[test]
|
|
671
|
-
fn test_resolve_path_absolute() {
|
|
672
|
-
let result = EpubExtractor::resolve_path("OEBPS", "/chapter.xhtml");
|
|
673
|
-
assert_eq!(result, "chapter.xhtml");
|
|
674
|
-
}
|
|
675
|
-
|
|
676
|
-
#[test]
|
|
677
|
-
fn test_resolve_path_empty_base() {
|
|
678
|
-
let result = EpubExtractor::resolve_path("", "chapter.xhtml");
|
|
679
|
-
assert_eq!(result, "chapter.xhtml");
|
|
680
|
-
}
|
|
681
|
-
|
|
682
|
-
#[test]
|
|
683
|
-
fn test_epub_extractor_supported_mime_types() {
|
|
684
|
-
let extractor = EpubExtractor::new();
|
|
685
|
-
let supported = extractor.supported_mime_types();
|
|
686
|
-
assert!(supported.contains(&"application/epub+zip"));
|
|
687
|
-
assert!(supported.contains(&"application/x-epub+zip"));
|
|
688
|
-
assert!(supported.contains(&"application/vnd.epub+zip"));
|
|
689
|
-
}
|
|
690
|
-
|
|
691
|
-
#[test]
|
|
692
|
-
fn test_markdown_to_plain_text_removes_formatting() {
|
|
693
|
-
let markdown = "# Heading\n\nThis is **bold** text with _italic_ emphasis.";
|
|
694
|
-
let result = EpubExtractor::markdown_to_plain_text(markdown);
|
|
695
|
-
assert!(result.contains("Heading"));
|
|
696
|
-
assert!(result.contains("bold"));
|
|
697
|
-
assert!(!result.contains("**"));
|
|
698
|
-
}
|
|
699
|
-
|
|
700
|
-
#[test]
|
|
701
|
-
fn test_markdown_to_plain_text_removes_list_markers() {
|
|
702
|
-
let markdown = "- Item 1\n- Item 2\n* Item 3";
|
|
703
|
-
let result = EpubExtractor::markdown_to_plain_text(markdown);
|
|
704
|
-
assert!(result.contains("Item 1"));
|
|
705
|
-
assert!(result.contains("Item 2"));
|
|
706
|
-
assert!(result.contains("Item 3"));
|
|
707
|
-
}
|
|
708
|
-
}
|
|
1
|
+
//! Native EPUB extractor using permissive-licensed dependencies.
|
|
2
|
+
//!
|
|
3
|
+
//! This extractor provides native Rust-based EPUB extraction without GPL-licensed
|
|
4
|
+
//! dependencies, extracting:
|
|
5
|
+
//! - Metadata from OPF (Open Packaging Format) using Dublin Core standards
|
|
6
|
+
//! - Content from XHTML files in spine order
|
|
7
|
+
//! - Proper handling of EPUB2 and EPUB3 formats
|
|
8
|
+
//!
|
|
9
|
+
//! Uses only permissive-licensed crates:
|
|
10
|
+
//! - `zip` (MIT/Apache) - for reading EPUB container
|
|
11
|
+
//! - `roxmltree` (MIT) - for parsing XML
|
|
12
|
+
//! - `html-to-markdown-rs` (MIT) - for converting XHTML to plain text
|
|
13
|
+
|
|
14
|
+
use crate::Result;
|
|
15
|
+
use crate::core::config::ExtractionConfig;
|
|
16
|
+
use crate::plugins::{DocumentExtractor, Plugin};
|
|
17
|
+
use crate::types::{ExtractionResult, Metadata};
|
|
18
|
+
use async_trait::async_trait;
|
|
19
|
+
use roxmltree;
|
|
20
|
+
use std::collections::BTreeMap;
|
|
21
|
+
use std::io::Cursor;
|
|
22
|
+
use zip::ZipArchive;
|
|
23
|
+
|
|
24
|
+
/// EPUB format extractor using permissive-licensed dependencies.
|
|
25
|
+
///
|
|
26
|
+
/// Extracts content and metadata from EPUB files (both EPUB2 and EPUB3)
|
|
27
|
+
/// using native Rust parsing without GPL-licensed dependencies.
|
|
28
|
+
pub struct EpubExtractor;
|
|
29
|
+
|
|
30
|
+
impl EpubExtractor {
|
|
31
|
+
/// Create a new EPUB extractor.
|
|
32
|
+
pub fn new() -> Self {
|
|
33
|
+
Self
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
/// Extract text content from an EPUB document by reading in spine order
|
|
37
|
+
fn extract_content(
|
|
38
|
+
archive: &mut ZipArchive<Cursor<Vec<u8>>>,
|
|
39
|
+
opf_path: &str,
|
|
40
|
+
manifest_dir: &str,
|
|
41
|
+
) -> Result<String> {
|
|
42
|
+
let opf_xml = Self::read_file_from_zip(archive, opf_path)?;
|
|
43
|
+
let (_, spine_hrefs) = Self::parse_opf(&opf_xml)?;
|
|
44
|
+
|
|
45
|
+
let mut content = String::new();
|
|
46
|
+
|
|
47
|
+
for (index, href) in spine_hrefs.iter().enumerate() {
|
|
48
|
+
let file_path = Self::resolve_path(manifest_dir, href);
|
|
49
|
+
|
|
50
|
+
match Self::read_file_from_zip(archive, &file_path) {
|
|
51
|
+
Ok(xhtml_content) => {
|
|
52
|
+
let text = Self::extract_text_from_xhtml(&xhtml_content);
|
|
53
|
+
if !text.is_empty() {
|
|
54
|
+
if index > 0 && !content.ends_with('\n') {
|
|
55
|
+
content.push('\n');
|
|
56
|
+
}
|
|
57
|
+
content.push_str(&text);
|
|
58
|
+
content.push('\n');
|
|
59
|
+
}
|
|
60
|
+
}
|
|
61
|
+
Err(_) => {
|
|
62
|
+
continue;
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
Ok(content.trim().to_string())
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
/// Extract text from XHTML content using html-to-markdown-rs
|
|
71
|
+
fn extract_text_from_xhtml(xhtml: &str) -> String {
|
|
72
|
+
match crate::extraction::html::convert_html_to_markdown(xhtml, None) {
|
|
73
|
+
Ok(markdown) => {
|
|
74
|
+
let text = Self::markdown_to_plain_text(&markdown);
|
|
75
|
+
Self::remove_html_comments(&text)
|
|
76
|
+
}
|
|
77
|
+
Err(_) => Self::strip_html_tags(xhtml),
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
/// Remove HTML comments from text
|
|
82
|
+
fn remove_html_comments(text: &str) -> String {
|
|
83
|
+
let mut result = String::new();
|
|
84
|
+
let mut in_comment = false;
|
|
85
|
+
let mut chars = text.chars().peekable();
|
|
86
|
+
|
|
87
|
+
while let Some(ch) = chars.next() {
|
|
88
|
+
if !in_comment && ch == '<' {
|
|
89
|
+
if chars.peek() == Some(&'!') {
|
|
90
|
+
chars.next();
|
|
91
|
+
if chars.peek() == Some(&'-') {
|
|
92
|
+
chars.next();
|
|
93
|
+
if chars.peek() == Some(&'-') {
|
|
94
|
+
chars.next();
|
|
95
|
+
in_comment = true;
|
|
96
|
+
continue;
|
|
97
|
+
} else {
|
|
98
|
+
result.push('<');
|
|
99
|
+
result.push('!');
|
|
100
|
+
result.push('-');
|
|
101
|
+
continue;
|
|
102
|
+
}
|
|
103
|
+
} else {
|
|
104
|
+
result.push('<');
|
|
105
|
+
result.push('!');
|
|
106
|
+
continue;
|
|
107
|
+
}
|
|
108
|
+
} else {
|
|
109
|
+
result.push(ch);
|
|
110
|
+
}
|
|
111
|
+
} else if in_comment {
|
|
112
|
+
if ch == '-' && chars.peek() == Some(&'-') {
|
|
113
|
+
chars.next();
|
|
114
|
+
if chars.peek() == Some(&'>') {
|
|
115
|
+
chars.next();
|
|
116
|
+
in_comment = false;
|
|
117
|
+
result.push('\n');
|
|
118
|
+
}
|
|
119
|
+
}
|
|
120
|
+
} else {
|
|
121
|
+
result.push(ch);
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
result
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
/// Convert markdown output to plain text by removing markdown syntax
|
|
129
|
+
fn markdown_to_plain_text(markdown: &str) -> String {
|
|
130
|
+
let mut text = String::new();
|
|
131
|
+
let mut in_code_block = false;
|
|
132
|
+
|
|
133
|
+
for line in markdown.lines() {
|
|
134
|
+
let trimmed = line.trim();
|
|
135
|
+
|
|
136
|
+
if trimmed.is_empty() {
|
|
137
|
+
if !text.is_empty() && !text.ends_with('\n') {
|
|
138
|
+
text.push('\n');
|
|
139
|
+
}
|
|
140
|
+
continue;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
if trimmed.starts_with("```") {
|
|
144
|
+
in_code_block = !in_code_block;
|
|
145
|
+
continue;
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
if in_code_block {
|
|
149
|
+
text.push_str(trimmed);
|
|
150
|
+
text.push('\n');
|
|
151
|
+
continue;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
let cleaned = if let Some(stripped) = trimmed.strip_prefix("- ").or_else(|| trimmed.strip_prefix("* ")) {
|
|
155
|
+
stripped
|
|
156
|
+
} else if let Some(stripped) = trimmed.strip_prefix(|c: char| c.is_ascii_digit()) {
|
|
157
|
+
if let Some(rest) = stripped.strip_prefix(". ") {
|
|
158
|
+
rest
|
|
159
|
+
} else {
|
|
160
|
+
trimmed
|
|
161
|
+
}
|
|
162
|
+
} else {
|
|
163
|
+
trimmed
|
|
164
|
+
};
|
|
165
|
+
|
|
166
|
+
let cleaned = cleaned.trim_start_matches('#').trim();
|
|
167
|
+
|
|
168
|
+
let cleaned = cleaned
|
|
169
|
+
.replace("**", "")
|
|
170
|
+
.replace("__", "")
|
|
171
|
+
.replace("*", "")
|
|
172
|
+
.replace("_", "");
|
|
173
|
+
|
|
174
|
+
let cleaned = Self::remove_markdown_links(&cleaned);
|
|
175
|
+
|
|
176
|
+
if !cleaned.is_empty() {
|
|
177
|
+
text.push_str(&cleaned);
|
|
178
|
+
text.push('\n');
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
|
|
182
|
+
text.trim().to_string()
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
/// Remove markdown links [text](url) -> text
|
|
186
|
+
fn remove_markdown_links(text: &str) -> String {
|
|
187
|
+
let mut result = String::new();
|
|
188
|
+
let mut chars = text.chars().peekable();
|
|
189
|
+
|
|
190
|
+
while let Some(ch) = chars.next() {
|
|
191
|
+
if ch == '[' {
|
|
192
|
+
let mut link_text = String::new();
|
|
193
|
+
let mut depth = 1;
|
|
194
|
+
|
|
195
|
+
while let Some(&next_ch) = chars.peek() {
|
|
196
|
+
chars.next();
|
|
197
|
+
if next_ch == '[' {
|
|
198
|
+
depth += 1;
|
|
199
|
+
link_text.push(next_ch);
|
|
200
|
+
} else if next_ch == ']' {
|
|
201
|
+
depth -= 1;
|
|
202
|
+
if depth == 0 {
|
|
203
|
+
break;
|
|
204
|
+
}
|
|
205
|
+
link_text.push(next_ch);
|
|
206
|
+
} else {
|
|
207
|
+
link_text.push(next_ch);
|
|
208
|
+
}
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
if let Some(&'(') = chars.peek() {
|
|
212
|
+
chars.next();
|
|
213
|
+
let mut paren_depth = 1;
|
|
214
|
+
while let Some(&next_ch) = chars.peek() {
|
|
215
|
+
chars.next();
|
|
216
|
+
if next_ch == '(' {
|
|
217
|
+
paren_depth += 1;
|
|
218
|
+
} else if next_ch == ')' {
|
|
219
|
+
paren_depth -= 1;
|
|
220
|
+
if paren_depth == 0 {
|
|
221
|
+
break;
|
|
222
|
+
}
|
|
223
|
+
}
|
|
224
|
+
}
|
|
225
|
+
}
|
|
226
|
+
|
|
227
|
+
result.push_str(&link_text);
|
|
228
|
+
} else {
|
|
229
|
+
result.push(ch);
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
result
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
/// Fallback: strip HTML tags without using specialized libraries
|
|
237
|
+
fn strip_html_tags(html: &str) -> String {
|
|
238
|
+
let mut text = String::new();
|
|
239
|
+
let mut in_tag = false;
|
|
240
|
+
let mut in_script_style = false;
|
|
241
|
+
let mut tag_name = String::new();
|
|
242
|
+
|
|
243
|
+
for ch in html.chars() {
|
|
244
|
+
if ch == '<' {
|
|
245
|
+
in_tag = true;
|
|
246
|
+
tag_name.clear();
|
|
247
|
+
continue;
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
if ch == '>' {
|
|
251
|
+
in_tag = false;
|
|
252
|
+
|
|
253
|
+
let tag_lower = tag_name.to_lowercase();
|
|
254
|
+
if tag_lower.contains("script") || tag_lower.contains("style") {
|
|
255
|
+
in_script_style = !tag_name.starts_with('/');
|
|
256
|
+
}
|
|
257
|
+
continue;
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
if in_tag {
|
|
261
|
+
tag_name.push(ch);
|
|
262
|
+
continue;
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
if in_script_style {
|
|
266
|
+
continue;
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
if ch == '\n' || ch == '\r' || ch == '\t' || ch == ' ' {
|
|
270
|
+
if !text.is_empty() && !text.ends_with(' ') {
|
|
271
|
+
text.push(' ');
|
|
272
|
+
}
|
|
273
|
+
} else {
|
|
274
|
+
text.push(ch);
|
|
275
|
+
}
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
let mut result = String::new();
|
|
279
|
+
let mut prev_space = false;
|
|
280
|
+
for ch in text.chars() {
|
|
281
|
+
if ch == ' ' {
|
|
282
|
+
if !prev_space {
|
|
283
|
+
result.push(ch);
|
|
284
|
+
}
|
|
285
|
+
prev_space = true;
|
|
286
|
+
} else {
|
|
287
|
+
result.push(ch);
|
|
288
|
+
prev_space = false;
|
|
289
|
+
}
|
|
290
|
+
}
|
|
291
|
+
|
|
292
|
+
result.trim().to_string()
|
|
293
|
+
}
|
|
294
|
+
|
    /// Extract metadata from EPUB OPF file
    fn extract_metadata(opf_xml: &str) -> Result<BTreeMap<String, serde_json::Value>> {
        let mut metadata = BTreeMap::new();

        let (epub_metadata, _) = Self::parse_opf(opf_xml)?;

        if let Some(title) = epub_metadata.title {
            metadata.insert("title".to_string(), serde_json::json!(title));
        }

        if let Some(creator) = epub_metadata.creator {
            metadata.insert("creator".to_string(), serde_json::json!(creator.clone()));
            metadata.insert("authors".to_string(), serde_json::json!(vec![creator]));
        }

        if let Some(date) = epub_metadata.date {
            metadata.insert("date".to_string(), serde_json::json!(date));
        }

        if let Some(language) = epub_metadata.language {
            metadata.insert("language".to_string(), serde_json::json!(language));
        }

        if let Some(identifier) = epub_metadata.identifier {
            metadata.insert("identifier".to_string(), serde_json::json!(identifier));
        }

        if let Some(publisher) = epub_metadata.publisher {
            metadata.insert("publisher".to_string(), serde_json::json!(publisher));
        }

        if let Some(subject) = epub_metadata.subject {
            metadata.insert("subject".to_string(), serde_json::json!(subject));
        }

        if let Some(description) = epub_metadata.description {
            metadata.insert("description".to_string(), serde_json::json!(description));
        }

        if let Some(rights) = epub_metadata.rights {
            metadata.insert("rights".to_string(), serde_json::json!(rights));
        }

        Ok(metadata)
    }
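
    // Mapping sketch (illustrative values): an OPF whose Dublin Core block contains
    // <dc:title>Example</dc:title> and <dc:creator>Jane Doe</dc:creator> produces
    // metadata along the lines of
    //
    //     { "title": "Example", "creator": "Jane Doe", "authors": ["Jane Doe"] }
    //
    // i.e. the single creator is mirrored into a one-element "authors" array.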
    /// Parse container.xml to find the OPF file path
    fn parse_container_xml(xml: &str) -> Result<String> {
        match roxmltree::Document::parse(xml) {
            Ok(doc) => {
                for node in doc.descendants() {
                    if node.tag_name().name() == "rootfile"
                        && let Some(full_path) = node.attribute("full-path")
                    {
                        return Ok(full_path.to_string());
                    }
                }
                Err(crate::KreuzbergError::Parsing {
                    message: "No rootfile found in container.xml".to_string(),
                    source: None,
                })
            }
            Err(e) => Err(crate::KreuzbergError::Parsing {
                message: format!("Failed to parse container.xml: {}", e),
                source: None,
            }),
        }
    }
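
    // Input sketch (illustrative): for a typical META-INF/container.xml such as
    //
    //     <container version="1.0"
    //                xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
    //       <rootfiles>
    //         <rootfile full-path="OEBPS/content.opf"
    //                   media-type="application/oebps-package+xml"/>
    //       </rootfiles>
    //     </container>
    //
    // this returns Ok("OEBPS/content.opf"); only the full-path attribute of the
    // first <rootfile> element is consulted.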
    /// Parse OPF file and extract metadata and spine order
    fn parse_opf(xml: &str) -> Result<(OepbMetadata, Vec<String>)> {
        match roxmltree::Document::parse(xml) {
            Ok(doc) => {
                let root = doc.root();

                let mut metadata = OepbMetadata::default();
                let mut manifest: BTreeMap<String, String> = BTreeMap::new();
                let mut spine_order: Vec<String> = Vec::new();

                for node in root.descendants() {
                    match node.tag_name().name() {
                        "title" => {
                            if let Some(text) = node.text() {
                                metadata.title = Some(text.trim().to_string());
                            }
                        }
                        "creator" => {
                            if let Some(text) = node.text() {
                                metadata.creator = Some(text.trim().to_string());
                            }
                        }
                        "date" => {
                            if let Some(text) = node.text() {
                                metadata.date = Some(text.trim().to_string());
                            }
                        }
                        "language" => {
                            if let Some(text) = node.text() {
                                metadata.language = Some(text.trim().to_string());
                            }
                        }
                        "identifier" => {
                            if let Some(text) = node.text() {
                                metadata.identifier = Some(text.trim().to_string());
                            }
                        }
                        "publisher" => {
                            if let Some(text) = node.text() {
                                metadata.publisher = Some(text.trim().to_string());
                            }
                        }
                        "subject" => {
                            if let Some(text) = node.text() {
                                metadata.subject = Some(text.trim().to_string());
                            }
                        }
                        "description" => {
                            if let Some(text) = node.text() {
                                metadata.description = Some(text.trim().to_string());
                            }
                        }
                        "rights" => {
                            if let Some(text) = node.text() {
                                metadata.rights = Some(text.trim().to_string());
                            }
                        }
                        "item" => {
                            if let Some(id) = node.attribute("id")
                                && let Some(href) = node.attribute("href")
                            {
                                manifest.insert(id.to_string(), href.to_string());
                            }
                        }
                        _ => {}
                    }
                }

                for node in root.descendants() {
                    if node.tag_name().name() == "itemref"
                        && let Some(idref) = node.attribute("idref")
                        && let Some(href) = manifest.get(idref)
                    {
                        spine_order.push(href.clone());
                    }
                }

                Ok((metadata, spine_order))
            }
            Err(e) => Err(crate::KreuzbergError::Parsing {
                message: format!("Failed to parse OPF file: {}", e),
                source: None,
            }),
        }
    }
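
    // Resolution sketch (illustrative): given a package document along the lines of
    //
    //     <manifest>
    //       <item id="ch1" href="chapter1.xhtml" media-type="application/xhtml+xml"/>
    //       <item id="ch2" href="chapter2.xhtml" media-type="application/xhtml+xml"/>
    //     </manifest>
    //     <spine>
    //       <itemref idref="ch2"/>
    //       <itemref idref="ch1"/>
    //     </spine>
    //
    // the returned spine order is ["chapter2.xhtml", "chapter1.xhtml"]: manifest
    // id/href pairs are collected first, then each <itemref> is resolved through
    // that map in document order.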
    /// Read a file from the ZIP archive
    fn read_file_from_zip(archive: &mut ZipArchive<Cursor<Vec<u8>>>, path: &str) -> Result<String> {
        match archive.by_name(path) {
            Ok(mut file) => {
                let mut content = String::new();
                match std::io::Read::read_to_string(&mut file, &mut content) {
                    Ok(_) => Ok(content),
                    Err(e) => Err(crate::KreuzbergError::Parsing {
                        message: format!("Failed to read file from EPUB: {}", e),
                        source: None,
                    }),
                }
            }
            Err(e) => Err(crate::KreuzbergError::Parsing {
                message: format!("File not found in EPUB: {} ({})", path, e),
                source: None,
            }),
        }
    }

    /// Resolve a relative path within the manifest directory
    fn resolve_path(base_dir: &str, relative_path: &str) -> String {
        if relative_path.starts_with('/') {
            relative_path.trim_start_matches('/').to_string()
        } else if base_dir.is_empty() || base_dir == "." {
            relative_path.to_string()
        } else {
            format!("{}/{}", base_dir.trim_end_matches('/'), relative_path)
        }
    }
}

/// Metadata extracted from OPF (Open Packaging Format) file
#[derive(Debug, Default, Clone)]
struct OepbMetadata {
    title: Option<String>,
    creator: Option<String>,
    date: Option<String>,
    language: Option<String>,
    identifier: Option<String>,
    publisher: Option<String>,
    subject: Option<String>,
    description: Option<String>,
    rights: Option<String>,
}

impl Default for EpubExtractor {
    fn default() -> Self {
        Self::new()
    }
}

impl Plugin for EpubExtractor {
    fn name(&self) -> &str {
        "epub-extractor"
    }

    fn version(&self) -> String {
        env!("CARGO_PKG_VERSION").to_string()
    }

    fn initialize(&self) -> Result<()> {
        Ok(())
    }

    fn shutdown(&self) -> Result<()> {
        Ok(())
    }

    fn description(&self) -> &str {
        "Extracts content and metadata from EPUB documents (native Rust implementation with permissive licenses)"
    }

    fn author(&self) -> &str {
        "Kreuzberg Team"
    }
}

#[cfg(feature = "office")]
#[async_trait]
impl DocumentExtractor for EpubExtractor {
    #[cfg_attr(
        feature = "otel",
        tracing::instrument(
            skip(self, content, _config),
            fields(
                extractor.name = self.name(),
                content.size_bytes = content.len(),
            )
        )
    )]
    async fn extract_bytes(
        &self,
        content: &[u8],
        mime_type: &str,
        _config: &ExtractionConfig,
    ) -> Result<ExtractionResult> {
        let cursor = Cursor::new(content.to_vec());

        let mut archive = ZipArchive::new(cursor).map_err(|e| crate::KreuzbergError::Parsing {
            message: format!("Failed to open EPUB as ZIP: {}", e),
            source: None,
        })?;

        let container_xml = Self::read_file_from_zip(&mut archive, "META-INF/container.xml")?;
        let opf_path = Self::parse_container_xml(&container_xml)?;

        let manifest_dir = if let Some(last_slash) = opf_path.rfind('/') {
            opf_path[..last_slash].to_string()
        } else {
            String::new()
        };

        let opf_xml = Self::read_file_from_zip(&mut archive, &opf_path)?;

        let extracted_content = Self::extract_content(&mut archive, &opf_path, &manifest_dir)?;

        let metadata_btree = Self::extract_metadata(&opf_xml)?;
        let metadata_map: std::collections::HashMap<String, serde_json::Value> = metadata_btree.into_iter().collect();

        Ok(ExtractionResult {
            content: extracted_content,
            mime_type: mime_type.to_string(),
            metadata: Metadata {
                additional: metadata_map,
                ..Default::default()
            },
            pages: None,
            tables: vec![],
            detected_languages: None,
            chunks: None,
            images: None,
        })
    }

    fn supported_mime_types(&self) -> &[&str] {
        &[
            "application/epub+zip",
            "application/x-epub+zip",
            "application/vnd.epub+zip",
        ]
    }

    fn priority(&self) -> i32 {
        60
    }
}
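
// Usage sketch (illustrative; assumes an async caller on a Tokio runtime and an
// `ExtractionConfig` value already in hand — this file does not show how that
// config is constructed):
//
//     let extractor = EpubExtractor::new();
//     let bytes = std::fs::read("book.epub")?;
//     let result = extractor
//         .extract_bytes(&bytes, "application/epub+zip", &config)
//         .await?;
//     println!("{}", result.content);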
#[cfg(all(test, feature = "office"))]
mod tests {
    use super::*;

    #[test]
    fn test_epub_extractor_plugin_interface() {
        let extractor = EpubExtractor::new();
        assert_eq!(extractor.name(), "epub-extractor");
        assert_eq!(extractor.version(), env!("CARGO_PKG_VERSION"));
        assert_eq!(extractor.priority(), 60);
        assert!(!extractor.supported_mime_types().is_empty());
    }

    #[test]
    fn test_epub_extractor_default() {
        let extractor = EpubExtractor;
        assert_eq!(extractor.name(), "epub-extractor");
    }

    #[tokio::test]
    async fn test_epub_extractor_initialize_shutdown() {
        let extractor = EpubExtractor::new();
        assert!(extractor.initialize().is_ok());
        assert!(extractor.shutdown().is_ok());
    }

    #[test]
    fn test_strip_html_tags_simple() {
        let html = "<html><body><p>Hello World</p></body></html>";
        let text = EpubExtractor::strip_html_tags(html);
        assert!(text.contains("Hello World"));
    }

    #[test]
    fn test_strip_html_tags_with_scripts() {
        let html = "<body><p>Text</p><script>alert('bad');</script><p>More</p></body>";
        let text = EpubExtractor::strip_html_tags(html);
        assert!(!text.contains("bad"));
        assert!(text.contains("Text"));
        assert!(text.contains("More"));
    }

    #[test]
    fn test_strip_html_tags_with_styles() {
        let html = "<body><p>Text</p><style>.class { color: red; }</style><p>More</p></body>";
        let text = EpubExtractor::strip_html_tags(html);
        assert!(!text.to_lowercase().contains("color"));
        assert!(text.contains("Text"));
        assert!(text.contains("More"));
    }

    #[test]
    fn test_strip_html_tags_normalizes_whitespace() {
        let html = "<p>Hello \n\t World</p>";
        let text = EpubExtractor::strip_html_tags(html);
        assert!(text.contains("Hello") && text.contains("World"));
    }

    #[test]
    fn test_remove_markdown_links() {
        let text = "This is a [link](http://example.com) in text";
        let result = EpubExtractor::remove_markdown_links(text);
        assert!(result.contains("link"));
        assert!(!result.contains("http://"));
    }

    #[test]
    fn test_resolve_path_with_base_dir() {
        let result = EpubExtractor::resolve_path("OEBPS", "chapter.xhtml");
        assert_eq!(result, "OEBPS/chapter.xhtml");
    }

    #[test]
    fn test_resolve_path_absolute() {
        let result = EpubExtractor::resolve_path("OEBPS", "/chapter.xhtml");
        assert_eq!(result, "chapter.xhtml");
    }

    #[test]
    fn test_resolve_path_empty_base() {
        let result = EpubExtractor::resolve_path("", "chapter.xhtml");
        assert_eq!(result, "chapter.xhtml");
    }

    #[test]
    fn test_epub_extractor_supported_mime_types() {
        let extractor = EpubExtractor::new();
        let supported = extractor.supported_mime_types();
        assert!(supported.contains(&"application/epub+zip"));
        assert!(supported.contains(&"application/x-epub+zip"));
        assert!(supported.contains(&"application/vnd.epub+zip"));
    }

    #[test]
    fn test_markdown_to_plain_text_removes_formatting() {
        let markdown = "# Heading\n\nThis is **bold** text with _italic_ emphasis.";
        let result = EpubExtractor::markdown_to_plain_text(markdown);
        assert!(result.contains("Heading"));
        assert!(result.contains("bold"));
        assert!(!result.contains("**"));
    }

    #[test]
    fn test_markdown_to_plain_text_removes_list_markers() {
        let markdown = "- Item 1\n- Item 2\n* Item 3";
        let result = EpubExtractor::markdown_to_plain_text(markdown);
        assert!(result.contains("Item 1"));
        assert!(result.contains("Item 2"));
        assert!(result.contains("Item 3"));
    }
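
    // Illustrative additional cases in the same style as the tests above; the
    // expected values follow directly from strip_html_tags and resolve_path as
    // written, and are sketches rather than part of the original suite.
    #[test]
    fn test_strip_html_tags_collapses_internal_whitespace_sketch() {
        let text = EpubExtractor::strip_html_tags("<p>Hello \n\t  World</p>");
        assert_eq!(text, "Hello World");
    }

    #[test]
    fn test_resolve_path_trailing_slash_sketch() {
        let result = EpubExtractor::resolve_path("OEBPS/", "chapter.xhtml");
        assert_eq!(result, "OEBPS/chapter.xhtml");
    }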
}