kreuzberg 4.0.0.rc1
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +8 -0
- data/.rspec +3 -0
- data/.rubocop.yaml +534 -0
- data/Gemfile +9 -0
- data/Gemfile.lock +157 -0
- data/README.md +421 -0
- data/Rakefile +25 -0
- data/Steepfile +47 -0
- data/examples/async_patterns.rb +340 -0
- data/ext/kreuzberg_rb/extconf.rb +35 -0
- data/ext/kreuzberg_rb/native/Cargo.toml +36 -0
- data/ext/kreuzberg_rb/native/README.md +425 -0
- data/ext/kreuzberg_rb/native/build.rs +17 -0
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -0
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -0
- data/ext/kreuzberg_rb/native/include/strings.h +20 -0
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -0
- data/ext/kreuzberg_rb/native/src/lib.rs +2939 -0
- data/extconf.rb +28 -0
- data/kreuzberg.gemspec +105 -0
- data/lib/kreuzberg/api_proxy.rb +142 -0
- data/lib/kreuzberg/cache_api.rb +45 -0
- data/lib/kreuzberg/cli.rb +55 -0
- data/lib/kreuzberg/cli_proxy.rb +127 -0
- data/lib/kreuzberg/config.rb +684 -0
- data/lib/kreuzberg/errors.rb +50 -0
- data/lib/kreuzberg/extraction_api.rb +84 -0
- data/lib/kreuzberg/mcp_proxy.rb +186 -0
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -0
- data/lib/kreuzberg/post_processor_protocol.rb +86 -0
- data/lib/kreuzberg/result.rb +216 -0
- data/lib/kreuzberg/setup_lib_path.rb +79 -0
- data/lib/kreuzberg/validator_protocol.rb +89 -0
- data/lib/kreuzberg/version.rb +5 -0
- data/lib/kreuzberg.rb +82 -0
- data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
- data/sig/kreuzberg/internal.rbs +184 -0
- data/sig/kreuzberg.rbs +468 -0
- data/spec/binding/cache_spec.rb +227 -0
- data/spec/binding/cli_proxy_spec.rb +87 -0
- data/spec/binding/cli_spec.rb +54 -0
- data/spec/binding/config_spec.rb +345 -0
- data/spec/binding/config_validation_spec.rb +283 -0
- data/spec/binding/error_handling_spec.rb +213 -0
- data/spec/binding/errors_spec.rb +66 -0
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -0
- data/spec/binding/plugins/postprocessor_spec.rb +269 -0
- data/spec/binding/plugins/validator_spec.rb +274 -0
- data/spec/examples.txt +104 -0
- data/spec/fixtures/config.toml +39 -0
- data/spec/fixtures/config.yaml +42 -0
- data/spec/fixtures/invalid_config.toml +4 -0
- data/spec/smoke/package_spec.rb +178 -0
- data/spec/spec_helper.rb +42 -0
- data/vendor/kreuzberg/Cargo.toml +134 -0
- data/vendor/kreuzberg/README.md +175 -0
- data/vendor/kreuzberg/build.rs +460 -0
- data/vendor/kreuzberg/src/api/error.rs +81 -0
- data/vendor/kreuzberg/src/api/handlers.rs +199 -0
- data/vendor/kreuzberg/src/api/mod.rs +79 -0
- data/vendor/kreuzberg/src/api/server.rs +353 -0
- data/vendor/kreuzberg/src/api/types.rs +170 -0
- data/vendor/kreuzberg/src/bin/profile_extract.rs +455 -0
- data/vendor/kreuzberg/src/cache/mod.rs +1143 -0
- data/vendor/kreuzberg/src/chunking/mod.rs +677 -0
- data/vendor/kreuzberg/src/core/batch_mode.rs +35 -0
- data/vendor/kreuzberg/src/core/config.rs +1032 -0
- data/vendor/kreuzberg/src/core/extractor.rs +903 -0
- data/vendor/kreuzberg/src/core/io.rs +327 -0
- data/vendor/kreuzberg/src/core/mime.rs +615 -0
- data/vendor/kreuzberg/src/core/mod.rs +42 -0
- data/vendor/kreuzberg/src/core/pipeline.rs +906 -0
- data/vendor/kreuzberg/src/embeddings.rs +323 -0
- data/vendor/kreuzberg/src/error.rs +431 -0
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -0
- data/vendor/kreuzberg/src/extraction/docx.rs +40 -0
- data/vendor/kreuzberg/src/extraction/email.rs +854 -0
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -0
- data/vendor/kreuzberg/src/extraction/html.rs +553 -0
- data/vendor/kreuzberg/src/extraction/image.rs +368 -0
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +564 -0
- data/vendor/kreuzberg/src/extraction/mod.rs +77 -0
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -0
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -0
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -0
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +128 -0
- data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +275 -0
- data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +178 -0
- data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +491 -0
- data/vendor/kreuzberg/src/extraction/pandoc/server.rs +496 -0
- data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +1188 -0
- data/vendor/kreuzberg/src/extraction/pandoc/version.rs +162 -0
- data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -0
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -0
- data/vendor/kreuzberg/src/extraction/table.rs +328 -0
- data/vendor/kreuzberg/src/extraction/text.rs +269 -0
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -0
- data/vendor/kreuzberg/src/extractors/archive.rs +425 -0
- data/vendor/kreuzberg/src/extractors/docx.rs +479 -0
- data/vendor/kreuzberg/src/extractors/email.rs +129 -0
- data/vendor/kreuzberg/src/extractors/excel.rs +344 -0
- data/vendor/kreuzberg/src/extractors/html.rs +410 -0
- data/vendor/kreuzberg/src/extractors/image.rs +195 -0
- data/vendor/kreuzberg/src/extractors/mod.rs +268 -0
- data/vendor/kreuzberg/src/extractors/pandoc.rs +201 -0
- data/vendor/kreuzberg/src/extractors/pdf.rs +496 -0
- data/vendor/kreuzberg/src/extractors/pptx.rs +234 -0
- data/vendor/kreuzberg/src/extractors/structured.rs +126 -0
- data/vendor/kreuzberg/src/extractors/text.rs +242 -0
- data/vendor/kreuzberg/src/extractors/xml.rs +128 -0
- data/vendor/kreuzberg/src/image/dpi.rs +164 -0
- data/vendor/kreuzberg/src/image/mod.rs +6 -0
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -0
- data/vendor/kreuzberg/src/image/resize.rs +89 -0
- data/vendor/kreuzberg/src/keywords/config.rs +154 -0
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -0
- data/vendor/kreuzberg/src/keywords/processor.rs +267 -0
- data/vendor/kreuzberg/src/keywords/rake.rs +294 -0
- data/vendor/kreuzberg/src/keywords/types.rs +68 -0
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -0
- data/vendor/kreuzberg/src/language_detection/mod.rs +942 -0
- data/vendor/kreuzberg/src/lib.rs +102 -0
- data/vendor/kreuzberg/src/mcp/mod.rs +32 -0
- data/vendor/kreuzberg/src/mcp/server.rs +1966 -0
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -0
- data/vendor/kreuzberg/src/ocr/error.rs +37 -0
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -0
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -0
- data/vendor/kreuzberg/src/ocr/processor.rs +847 -0
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -0
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -0
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -0
- data/vendor/kreuzberg/src/ocr/types.rs +393 -0
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -0
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -0
- data/vendor/kreuzberg/src/pdf/error.rs +122 -0
- data/vendor/kreuzberg/src/pdf/images.rs +139 -0
- data/vendor/kreuzberg/src/pdf/metadata.rs +346 -0
- data/vendor/kreuzberg/src/pdf/mod.rs +50 -0
- data/vendor/kreuzberg/src/pdf/rendering.rs +369 -0
- data/vendor/kreuzberg/src/pdf/table.rs +420 -0
- data/vendor/kreuzberg/src/pdf/text.rs +161 -0
- data/vendor/kreuzberg/src/plugins/extractor.rs +1010 -0
- data/vendor/kreuzberg/src/plugins/mod.rs +209 -0
- data/vendor/kreuzberg/src/plugins/ocr.rs +629 -0
- data/vendor/kreuzberg/src/plugins/processor.rs +641 -0
- data/vendor/kreuzberg/src/plugins/registry.rs +1324 -0
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -0
- data/vendor/kreuzberg/src/plugins/validator.rs +955 -0
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -0
- data/vendor/kreuzberg/src/text/mod.rs +19 -0
- data/vendor/kreuzberg/src/text/quality.rs +697 -0
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -0
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -0
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -0
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -0
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -0
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -0
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -0
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -0
- data/vendor/kreuzberg/src/types.rs +873 -0
- data/vendor/kreuzberg/src/utils/mod.rs +17 -0
- data/vendor/kreuzberg/src/utils/quality.rs +959 -0
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -0
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -0
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -0
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -0
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -0
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -0
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -0
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -0
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -0
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -0
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -0
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -0
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -0
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -0
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -0
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -0
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -0
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -0
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -0
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -0
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -0
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -0
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -0
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -0
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -0
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -0
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -0
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -0
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -0
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -0
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -0
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -0
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -0
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -0
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -0
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -0
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -0
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -0
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -0
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -0
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -0
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -0
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -0
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -0
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -0
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -0
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -0
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -0
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -0
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -0
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -0
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -0
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -0
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -0
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -0
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -0
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -0
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -0
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -0
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -0
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -0
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -0
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -0
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -0
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -0
- data/vendor/kreuzberg/tests/api_tests.rs +966 -0
- data/vendor/kreuzberg/tests/archive_integration.rs +543 -0
- data/vendor/kreuzberg/tests/batch_orchestration.rs +542 -0
- data/vendor/kreuzberg/tests/batch_processing.rs +304 -0
- data/vendor/kreuzberg/tests/chunking_offset_demo.rs +92 -0
- data/vendor/kreuzberg/tests/concurrency_stress.rs +509 -0
- data/vendor/kreuzberg/tests/config_features.rs +580 -0
- data/vendor/kreuzberg/tests/config_loading_tests.rs +439 -0
- data/vendor/kreuzberg/tests/core_integration.rs +493 -0
- data/vendor/kreuzberg/tests/csv_integration.rs +424 -0
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +124 -0
- data/vendor/kreuzberg/tests/email_integration.rs +325 -0
- data/vendor/kreuzberg/tests/error_handling.rs +393 -0
- data/vendor/kreuzberg/tests/format_integration.rs +159 -0
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -0
- data/vendor/kreuzberg/tests/image_integration.rs +253 -0
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -0
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -0
- data/vendor/kreuzberg/tests/mime_detection.rs +428 -0
- data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -0
- data/vendor/kreuzberg/tests/ocr_errors.rs +676 -0
- data/vendor/kreuzberg/tests/ocr_quality.rs +627 -0
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -0
- data/vendor/kreuzberg/tests/pandoc_integration.rs +503 -0
- data/vendor/kreuzberg/tests/pdf_integration.rs +43 -0
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1412 -0
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -0
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +561 -0
- data/vendor/kreuzberg/tests/plugin_system.rs +921 -0
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -0
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +607 -0
- data/vendor/kreuzberg/tests/security_validation.rs +404 -0
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -0
- data/vendor/kreuzberg/tests/test_fastembed.rs +609 -0
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -0
- metadata +471 -0

data/vendor/kreuzberg/tests/batch_processing.rs
@@ -0,0 +1,304 @@
+//! Batch processing integration tests.
+//!
+//! Tests for `batch_extract_file` and `batch_extract_bytes` functions.
+//! Validates concurrent processing, error handling, and performance.
+
+use kreuzberg::core::config::ExtractionConfig;
+use kreuzberg::core::extractor::{
+    batch_extract_bytes, batch_extract_bytes_sync, batch_extract_file, batch_extract_file_sync,
+};
+use std::path::PathBuf;
+
+mod helpers;
+use helpers::{get_test_documents_dir, get_test_file_path, skip_if_missing, test_documents_available};
+
+/// Test batch extraction with multiple file formats (PDF, DOCX, TXT).
+#[tokio::test]
+async fn test_batch_extract_file_multiple_formats() {
+    if !test_documents_available() {
+        println!("Skipping test: test_documents/ directory not found");
+        return;
+    }
+
+    if skip_if_missing("pdfs/fake_memo.pdf")
+        || skip_if_missing("documents/fake.docx")
+        || skip_if_missing("text/fake_text.txt")
+    {
+        return;
+    }
+
+    let config = ExtractionConfig::default();
+
+    let paths = vec![
+        get_test_file_path("pdfs/fake_memo.pdf"),
+        get_test_file_path("documents/fake.docx"),
+        get_test_file_path("text/fake_text.txt"),
+    ];
+
+    let results = batch_extract_file(paths, &config).await;
+
+    assert!(results.is_ok(), "Batch extraction should succeed");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 3);
+
+    assert!(!results[0].content.is_empty(), "PDF content should not be empty");
+    assert_eq!(results[0].mime_type, "application/pdf");
+
+    assert!(!results[1].content.is_empty(), "DOCX content should not be empty");
+    assert_eq!(
+        results[1].mime_type,
+        "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+    );
+
+    assert!(!results[2].content.is_empty(), "TXT content should not be empty");
+    assert_eq!(results[2].mime_type, "text/plain");
+
+    assert!(results[0].metadata.error.is_none());
+    assert!(results[1].metadata.error.is_none());
+    assert!(results[2].metadata.error.is_none());
+}
+
+/// Test synchronous batch extraction variant.
+#[test]
+fn test_batch_extract_file_sync_variant() {
+    if !test_documents_available() {
+        println!("Skipping test: test_documents/ directory not found");
+        return;
+    }
+
+    if skip_if_missing("pdfs/fake_memo.pdf") || skip_if_missing("text/fake_text.txt") {
+        return;
+    }
+
+    let config = ExtractionConfig::default();
+
+    let paths = vec![
+        get_test_file_path("pdfs/fake_memo.pdf"),
+        get_test_file_path("text/fake_text.txt"),
+    ];
+
+    let results = batch_extract_file_sync(paths, &config);
+
+    assert!(results.is_ok(), "Sync batch extraction should succeed");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 2);
+
+    assert!(!results[0].content.is_empty(), "PDF content should not be empty");
+    assert_eq!(
+        results[0].mime_type, "application/pdf",
+        "PDF MIME type should be correct"
+    );
+    assert!(results[0].metadata.error.is_none(), "PDF should extract without errors");
+
+    assert!(!results[1].content.is_empty(), "Text content should not be empty");
+    assert_eq!(results[1].mime_type, "text/plain", "Text MIME type should be correct");
+    assert!(
+        results[1].metadata.error.is_none(),
+        "Text should extract without errors"
+    );
+}
+
+/// Test batch extraction from bytes.
+#[tokio::test]
+async fn test_batch_extract_bytes_multiple() {
+    let config = ExtractionConfig::default();
+
+    let text_bytes = b"This is plain text content";
+    let markdown_bytes = b"# Markdown Header\n\nThis is markdown content";
+    let json_bytes = b"{\"key\": \"value\", \"number\": 42}";
+
+    let contents = vec![
+        (text_bytes.as_slice(), "text/plain"),
+        (markdown_bytes.as_slice(), "text/markdown"),
+        (json_bytes.as_slice(), "application/json"),
+    ];
+
+    let results = batch_extract_bytes(contents, &config).await;
+
+    assert!(results.is_ok(), "Batch bytes extraction should succeed");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 3);
+
+    assert_eq!(results[0].content, "This is plain text content");
+    assert_eq!(results[0].mime_type, "text/plain");
+
+    assert!(results[1].content.contains("Markdown Header"));
+    assert_eq!(results[1].mime_type, "text/markdown");
+
+    assert!(results[2].content.contains("key"));
+    assert!(results[2].content.contains("value"));
+    assert_eq!(results[2].mime_type, "application/json");
+}
+
+/// Test batch extraction with empty file list.
+#[tokio::test]
+async fn test_batch_extract_empty_list() {
+    let config = ExtractionConfig::default();
+
+    let paths: Vec<PathBuf> = vec![];
+    let results = batch_extract_file(paths, &config).await;
+
+    assert!(results.is_ok(), "Empty batch should succeed");
+    assert_eq!(results.unwrap().len(), 0, "Should return empty vector");
+}
+
+/// Test batch extraction when one file fails (others should succeed).
+#[tokio::test]
+async fn test_batch_extract_one_file_fails() {
+    if !test_documents_available() {
+        println!("Skipping test: test_documents/ directory not found");
+        return;
+    }
+
+    if skip_if_missing("text/fake_text.txt") {
+        return;
+    }
+
+    let config = ExtractionConfig::default();
+
+    let paths = vec![
+        get_test_file_path("text/fake_text.txt"),
+        get_test_documents_dir().join("nonexistent_file.txt"),
+        get_test_file_path("text/contract.txt"),
+    ];
+
+    let results = batch_extract_file(paths, &config).await;
+
+    assert!(results.is_ok(), "Batch should succeed even with one failure");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 3);
+
+    assert!(!results[0].content.is_empty());
+    assert!(results[0].metadata.error.is_none());
+
+    assert!(results[1].metadata.error.is_some());
+    assert!(results[1].content.contains("Error:"));
+
+    assert!(!results[2].content.is_empty());
+    assert!(results[2].metadata.error.is_none());
+}
+
+/// Test batch extraction when all files fail.
+#[tokio::test]
+async fn test_batch_extract_all_fail() {
+    let config = ExtractionConfig::default();
+
+    let test_dir = get_test_documents_dir();
+    let paths = vec![
+        test_dir.join("nonexistent1.txt"),
+        test_dir.join("nonexistent2.pdf"),
+        test_dir.join("nonexistent3.docx"),
+    ];
+
+    let results = batch_extract_file(paths, &config).await;
+
+    assert!(results.is_ok(), "Batch should succeed (errors in metadata)");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 3);
+
+    assert!(results[0].metadata.error.is_some());
+    assert!(results[1].metadata.error.is_some());
+    assert!(results[2].metadata.error.is_some());
+
+    assert!(results[0].content.contains("Error:"));
+    assert!(results[1].content.contains("Error:"));
+    assert!(results[2].content.contains("Error:"));
+}
+
+/// Test concurrent batch processing (verify parallelism).
+#[tokio::test]
+async fn test_batch_extract_concurrent() {
+    if !test_documents_available() {
+        println!("Skipping test: test_documents/ directory not found");
+        return;
+    }
+
+    if skip_if_missing("text/fake_text.txt") {
+        return;
+    }
+
+    let config = ExtractionConfig::default();
+
+    let base_path = get_test_file_path("text/fake_text.txt");
+    let paths: Vec<PathBuf> = (0..20).map(|_| base_path.clone()).collect();
+
+    let start = std::time::Instant::now();
+    let results = batch_extract_file(paths, &config).await;
+    let duration = start.elapsed();
+
+    assert!(results.is_ok(), "Concurrent batch should succeed");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 20);
+
+    for result in &results {
+        assert!(result.metadata.error.is_none(), "Result should not have errors");
+        assert!(!result.content.is_empty(), "Result content should not be empty");
+        assert_eq!(result.mime_type, "text/plain", "MIME type should be text/plain");
+    }
+
+    assert!(
+        !results[0].content.is_empty(),
+        "Should have extracted actual text content"
+    );
+
+    assert!(duration.as_secs() < 5, "Batch processing took too long: {:?}", duration);
+}
+
+/// Test large batch (50+ files).
+#[tokio::test]
+async fn test_batch_extract_large_batch() {
+    if !test_documents_available() {
+        println!("Skipping test: test_documents/ directory not found");
+        return;
+    }
+
+    if skip_if_missing("text/fake_text.txt") {
+        return;
+    }
+
+    let config = ExtractionConfig::default();
+
+    let base_path = get_test_file_path("text/fake_text.txt");
+    let paths: Vec<PathBuf> = (0..50).map(|_| base_path.clone()).collect();
+
+    let results = batch_extract_file(paths, &config).await;
+
+    assert!(results.is_ok(), "Large batch should succeed");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 50);
+
+    for result in &results {
+        assert!(result.metadata.error.is_none());
+        assert!(!result.content.is_empty());
+        assert_eq!(result.mime_type, "text/plain");
+    }
+}
+
+/// Test sync variant with bytes.
+#[test]
+fn test_batch_extract_bytes_sync_variant() {
+    let config = ExtractionConfig::default();
+
+    let contents = vec![
+        (b"content 1".as_slice(), "text/plain"),
+        (b"content 2".as_slice(), "text/plain"),
+        (b"# content 3".as_slice(), "text/markdown"),
+    ];
+
+    let results = batch_extract_bytes_sync(contents, &config);
+
+    assert!(results.is_ok(), "Sync batch bytes extraction should succeed");
+    let results = results.unwrap();
+
+    assert_eq!(results.len(), 3);
+    assert_eq!(results[0].content, "content 1");
+    assert_eq!(results[1].content, "content 2");
+    assert!(results[2].content.contains("content 3"));
+}
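
The hunk above is the crate's batch-extraction test suite; the same public functions it exercises can also be driven outside a test harness. The following is a minimal, hypothetical sketch (not part of the diff) that assumes the vendored `kreuzberg` crate plus a Tokio runtime with the `macros` and `rt-multi-thread` features; the input paths are placeholders.

```rust
use kreuzberg::core::config::ExtractionConfig;
use kreuzberg::core::extractor::batch_extract_file;
use std::path::PathBuf;

#[tokio::main]
async fn main() {
    let config = ExtractionConfig::default();

    // Placeholder inputs; the tests above read real fixtures from test_documents/.
    let paths: Vec<PathBuf> = vec![PathBuf::from("report.pdf"), PathBuf::from("notes.txt")];

    // As the tests show, the batch call returns Ok even when individual files fail;
    // per-file failures surface through `metadata.error` on each result.
    let results = batch_extract_file(paths, &config)
        .await
        .expect("batch extraction failed");

    for result in &results {
        if result.metadata.error.is_some() {
            eprintln!("one input could not be extracted");
        } else {
            println!("{}: {} characters", result.mime_type, result.content.len());
        }
    }
}
```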

data/vendor/kreuzberg/tests/chunking_offset_demo.rs
@@ -0,0 +1,92 @@
+#[cfg(feature = "chunking")]
+#[test]
+fn demonstrate_correct_offset_calculation() {
+    use kreuzberg::chunking::{ChunkerType, ChunkingConfig, chunk_text};
+
+    println!("\n=== Demonstrating Correct Chunking Offset Calculation ===\n");
+
+    let config_with_overlap = ChunkingConfig {
+        max_characters: 20,
+        overlap: 5,
+        trim: false,
+        chunker_type: ChunkerType::Text,
+    };
+
+    let text = "AAAAA BBBBB CCCCC DDDDD EEEEE FFFFF";
+    println!("Text: \"{}\"", text);
+    println!(
+        "Max characters: {}, Overlap: {}\n",
+        config_with_overlap.max_characters, config_with_overlap.overlap
+    );
+
+    let result = chunk_text(text, &config_with_overlap).unwrap();
+
+    println!("WITH OVERLAP (5 chars):");
+    for (i, chunk) in result.chunks.iter().enumerate() {
+        println!(
+            " Chunk {}: [{:3} - {:3}] = \"{}\"",
+            i,
+            chunk.metadata.char_start,
+            chunk.metadata.char_end,
+            chunk.content.replace('\n', "\\n")
+        );
+    }
+
+    println!("\nOverlap verification:");
+    for i in 0..result.chunks.len() - 1 {
+        let current = &result.chunks[i];
+        let next = &result.chunks[i + 1];
+        let overlap_size = current.metadata.char_end - next.metadata.char_start;
+        println!(
+            " Chunks {} and {}: overlap = {} chars (next starts at {} while current ends at {})",
+            i,
+            i + 1,
+            overlap_size,
+            next.metadata.char_start,
+            current.metadata.char_end
+        );
+        assert!(
+            overlap_size > 0 && overlap_size <= config_with_overlap.overlap + 10,
+            "Overlap should exist and be reasonable"
+        );
+    }
+
+    println!("\n\n=== Without Overlap ===\n");
+    let config_no_overlap = ChunkingConfig {
+        max_characters: 20,
+        overlap: 0,
+        trim: false,
+        chunker_type: ChunkerType::Text,
+    };
+
+    let result_no_overlap = chunk_text(text, &config_no_overlap).unwrap();
+
+    println!("WITHOUT OVERLAP:");
+    for (i, chunk) in result_no_overlap.chunks.iter().enumerate() {
+        println!(
+            " Chunk {}: [{:3} - {:3}] = \"{}\"",
+            i,
+            chunk.metadata.char_start,
+            chunk.metadata.char_end,
+            chunk.content.replace('\n', "\\n")
+        );
+    }
+
+    println!("\nAdjacency verification:");
+    for i in 0..result_no_overlap.chunks.len() - 1 {
+        let current = &result_no_overlap.chunks[i];
+        let next = &result_no_overlap.chunks[i + 1];
+        let gap = next.metadata.char_start as i32 - current.metadata.char_end as i32;
+        println!(
+            " Chunks {} and {}: gap = {} (next starts at {}, current ends at {})",
+            i,
+            i + 1,
+            gap,
+            next.metadata.char_start,
+            current.metadata.char_end
+        );
+        assert!(gap >= 0, "Should have no overlap (gap >= 0)");
+    }
+
+    println!("\n✓ All offset calculations are correct!");
+}
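
The demo above is effectively executable documentation for the chunk-offset contract: with overlap, consecutive chunks share characters; without it, they are adjacent or separated. A shorter, hypothetical sketch of the same API under the `chunking` feature, using an arbitrary input string and only the types and fields exercised by the demo:

```rust
use kreuzberg::chunking::{ChunkerType, ChunkingConfig, chunk_text};

fn main() {
    // Arbitrary example text; the demo above uses a fixed "AAAAA BBBBB ..." string.
    let text = "The quick brown fox jumps over the lazy dog.";

    let config = ChunkingConfig {
        max_characters: 16,
        overlap: 4,
        trim: false,
        chunker_type: ChunkerType::Text,
    };

    let result = chunk_text(text, &config).expect("chunking failed");

    // char_start/char_end are character offsets into the original text,
    // so every chunk spans a non-empty, forward range.
    for chunk in &result.chunks {
        assert!(chunk.metadata.char_start < chunk.metadata.char_end);
        println!(
            "[{} - {}] {:?}",
            chunk.metadata.char_start, chunk.metadata.char_end, chunk.content
        );
    }
}
```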