kreuzberg 4.0.0.pre.rc.6 → 4.0.0.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +0 -6
- data/.rubocop.yaml +534 -1
- data/Gemfile +2 -1
- data/Gemfile.lock +11 -11
- data/README.md +5 -10
- data/examples/async_patterns.rb +0 -1
- data/ext/kreuzberg_rb/extconf.rb +0 -10
- data/ext/kreuzberg_rb/native/Cargo.toml +15 -23
- data/ext/kreuzberg_rb/native/build.rs +2 -0
- data/ext/kreuzberg_rb/native/include/ieeefp.h +1 -1
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +1 -1
- data/ext/kreuzberg_rb/native/include/strings.h +2 -2
- data/ext/kreuzberg_rb/native/include/unistd.h +1 -1
- data/ext/kreuzberg_rb/native/src/lib.rs +16 -75
- data/kreuzberg.gemspec +14 -57
- data/lib/kreuzberg/cache_api.rb +0 -1
- data/lib/kreuzberg/cli.rb +2 -2
- data/lib/kreuzberg/config.rb +2 -9
- data/lib/kreuzberg/errors.rb +7 -75
- data/lib/kreuzberg/extraction_api.rb +0 -1
- data/lib/kreuzberg/setup_lib_path.rb +0 -1
- data/lib/kreuzberg/version.rb +1 -1
- data/lib/kreuzberg.rb +0 -21
- data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
- data/sig/kreuzberg.rbs +3 -55
- data/spec/binding/cli_proxy_spec.rb +4 -2
- data/spec/binding/cli_spec.rb +11 -12
- data/spec/examples.txt +104 -0
- data/spec/fixtures/config.yaml +1 -0
- data/spec/spec_helper.rb +1 -1
- data/vendor/kreuzberg/Cargo.toml +42 -112
- data/vendor/kreuzberg/README.md +2 -2
- data/vendor/kreuzberg/build.rs +4 -18
- data/vendor/kreuzberg/src/bin/profile_extract.rs +455 -0
- data/vendor/kreuzberg/src/cache/mod.rs +3 -27
- data/vendor/kreuzberg/src/core/batch_mode.rs +0 -60
- data/vendor/kreuzberg/src/core/extractor.rs +81 -202
- data/vendor/kreuzberg/src/core/io.rs +2 -4
- data/vendor/kreuzberg/src/core/mime.rs +12 -2
- data/vendor/kreuzberg/src/core/mod.rs +1 -4
- data/vendor/kreuzberg/src/core/pipeline.rs +33 -111
- data/vendor/kreuzberg/src/embeddings.rs +16 -125
- data/vendor/kreuzberg/src/error.rs +1 -1
- data/vendor/kreuzberg/src/extraction/docx.rs +1 -1
- data/vendor/kreuzberg/src/extraction/image.rs +13 -13
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +1 -0
- data/vendor/kreuzberg/src/extraction/mod.rs +5 -9
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +0 -2
- data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +275 -0
- data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +178 -0
- data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +491 -0
- data/vendor/kreuzberg/src/extraction/pandoc/server.rs +496 -0
- data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +1188 -0
- data/vendor/kreuzberg/src/extraction/pandoc/version.rs +162 -0
- data/vendor/kreuzberg/src/extractors/archive.rs +0 -21
- data/vendor/kreuzberg/src/extractors/docx.rs +128 -16
- data/vendor/kreuzberg/src/extractors/email.rs +0 -14
- data/vendor/kreuzberg/src/extractors/excel.rs +20 -19
- data/vendor/kreuzberg/src/extractors/html.rs +154 -137
- data/vendor/kreuzberg/src/extractors/image.rs +4 -7
- data/vendor/kreuzberg/src/extractors/mod.rs +9 -106
- data/vendor/kreuzberg/src/extractors/pandoc.rs +201 -0
- data/vendor/kreuzberg/src/extractors/pdf.rs +15 -12
- data/vendor/kreuzberg/src/extractors/pptx.rs +3 -17
- data/vendor/kreuzberg/src/extractors/structured.rs +0 -14
- data/vendor/kreuzberg/src/extractors/text.rs +5 -23
- data/vendor/kreuzberg/src/extractors/xml.rs +0 -7
- data/vendor/kreuzberg/src/keywords/rake.rs +1 -0
- data/vendor/kreuzberg/src/lib.rs +1 -4
- data/vendor/kreuzberg/src/mcp/mod.rs +1 -1
- data/vendor/kreuzberg/src/mcp/server.rs +3 -5
- data/vendor/kreuzberg/src/ocr/processor.rs +2 -18
- data/vendor/kreuzberg/src/pdf/error.rs +1 -1
- data/vendor/kreuzberg/src/pdf/table.rs +44 -17
- data/vendor/kreuzberg/src/pdf/text.rs +3 -0
- data/vendor/kreuzberg/src/plugins/extractor.rs +5 -8
- data/vendor/kreuzberg/src/plugins/ocr.rs +11 -2
- data/vendor/kreuzberg/src/plugins/processor.rs +1 -2
- data/vendor/kreuzberg/src/plugins/registry.rs +0 -13
- data/vendor/kreuzberg/src/plugins/validator.rs +8 -9
- data/vendor/kreuzberg/src/stopwords/mod.rs +2 -2
- data/vendor/kreuzberg/src/types.rs +12 -42
- data/vendor/kreuzberg/tests/batch_orchestration.rs +5 -19
- data/vendor/kreuzberg/tests/batch_processing.rs +3 -15
- data/vendor/kreuzberg/tests/chunking_offset_demo.rs +92 -0
- data/vendor/kreuzberg/tests/concurrency_stress.rs +1 -17
- data/vendor/kreuzberg/tests/config_features.rs +0 -18
- data/vendor/kreuzberg/tests/config_loading_tests.rs +39 -15
- data/vendor/kreuzberg/tests/core_integration.rs +7 -24
- data/vendor/kreuzberg/tests/csv_integration.rs +81 -71
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +25 -23
- data/vendor/kreuzberg/tests/pandoc_integration.rs +503 -0
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1 -0
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +1 -0
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +22 -1
- data/vendor/kreuzberg/tests/security_validation.rs +1 -12
- metadata +25 -90
- data/.rubocop.yml +0 -538
- data/ext/kreuzberg_rb/native/Cargo.lock +0 -6535
- data/lib/kreuzberg/error_context.rb +0 -32
- data/vendor/kreuzberg/benches/otel_overhead.rs +0 -48
- data/vendor/kreuzberg/src/extraction/markdown.rs +0 -213
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +0 -287
- data/vendor/kreuzberg/src/extractors/bibtex.rs +0 -469
- data/vendor/kreuzberg/src/extractors/docbook.rs +0 -502
- data/vendor/kreuzberg/src/extractors/epub.rs +0 -707
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +0 -491
- data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +0 -738
- data/vendor/kreuzberg/src/extractors/jats.rs +0 -1051
- data/vendor/kreuzberg/src/extractors/jupyter.rs +0 -367
- data/vendor/kreuzberg/src/extractors/latex.rs +0 -652
- data/vendor/kreuzberg/src/extractors/markdown.rs +0 -700
- data/vendor/kreuzberg/src/extractors/odt.rs +0 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +0 -634
- data/vendor/kreuzberg/src/extractors/orgmode.rs +0 -528
- data/vendor/kreuzberg/src/extractors/rst.rs +0 -576
- data/vendor/kreuzberg/src/extractors/rtf.rs +0 -810
- data/vendor/kreuzberg/src/extractors/security.rs +0 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +0 -367
- data/vendor/kreuzberg/src/extractors/typst.rs +0 -650
- data/vendor/kreuzberg/src/panic_context.rs +0 -154
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +0 -52
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +0 -421
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +0 -498
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +0 -370
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +0 -275
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +0 -228
- data/vendor/kreuzberg/tests/html_table_test.rs +0 -551
- data/vendor/kreuzberg/tests/instrumentation_test.rs +0 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +0 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +0 -704
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +0 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +0 -490
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +0 -695
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +0 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +0 -822
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +0 -692
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +0 -776
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +0 -1259
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +0 -647
- data/vendor/rb-sys/.cargo-ok +0 -1
- data/vendor/rb-sys/.cargo_vcs_info.json +0 -6
- data/vendor/rb-sys/Cargo.lock +0 -393
- data/vendor/rb-sys/Cargo.toml +0 -70
- data/vendor/rb-sys/Cargo.toml.orig +0 -57
- data/vendor/rb-sys/LICENSE-APACHE +0 -190
- data/vendor/rb-sys/LICENSE-MIT +0 -21
- data/vendor/rb-sys/bin/release.sh +0 -21
- data/vendor/rb-sys/build/features.rs +0 -108
- data/vendor/rb-sys/build/main.rs +0 -246
- data/vendor/rb-sys/build/stable_api_config.rs +0 -153
- data/vendor/rb-sys/build/version.rs +0 -48
- data/vendor/rb-sys/readme.md +0 -36
- data/vendor/rb-sys/src/bindings.rs +0 -21
- data/vendor/rb-sys/src/hidden.rs +0 -11
- data/vendor/rb-sys/src/lib.rs +0 -34
- data/vendor/rb-sys/src/macros.rs +0 -371
- data/vendor/rb-sys/src/memory.rs +0 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +0 -38
- data/vendor/rb-sys/src/special_consts.rs +0 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +0 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +0 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +0 -316
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +0 -316
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +0 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +0 -317
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +0 -315
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +0 -326
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +0 -327
- data/vendor/rb-sys/src/stable_api.rs +0 -261
- data/vendor/rb-sys/src/symbol.rs +0 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +0 -332
- data/vendor/rb-sys/src/utils.rs +0 -89
- data/vendor/rb-sys/src/value_type.rs +0 -7

data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs
@@ -1,370 +0,0 @@
-//! Detailed comparison test between Kreuzberg and Pandoc DOCX extraction
-
-#![cfg(feature = "office")]
-
-use kreuzberg::core::config::ExtractionConfig;
-use kreuzberg::extractors::DocxExtractor;
-use kreuzberg::plugins::DocumentExtractor;
-
-#[tokio::test]
-async fn test_docx_kreuzberg_vs_pandoc_comparison() {
-    let docx_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
-        .parent()
-        .unwrap()
-        .parent()
-        .unwrap()
-        .join("test_documents/documents/word_sample.docx");
-
-    if !docx_path.exists() {
-        println!("Skipping test: Test file not found at {:?}", docx_path);
-        return;
-    }
-
-    let content = std::fs::read(&docx_path).expect("Failed to read DOCX");
-
-    let extractor = DocxExtractor::new();
-    let config = ExtractionConfig::default();
-
-    let kreuzberg_result = extractor
-        .extract_bytes(
-            &content,
-            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
-            &config,
-        )
-        .await
-        .expect("Kreuzberg extraction failed");
-
-    println!("\n");
-    println!("╔════════════════════════════════════════════════════════════════╗");
-    println!("║ KREUZBERG vs PANDOC - DOCX EXTRACTION COMPARISON ║");
-    println!("╚════════════════════════════════════════════════════════════════╝");
-    println!();
-
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!("DOCUMENT INFORMATION");
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!("File: word_sample.docx");
-    println!("Format: Microsoft Word 2007+ (.docx)");
-    println!("Size: 102 KB");
-    println!("Content Type: application/vnd.openxmlformats-officedocument.wordprocessingml.document");
-    println!();
-
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!("KREUZBERG EXTRACTION RESULTS");
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-
-    let kreuzberg_lines = kreuzberg_result.content.lines().count();
-    let kreuzberg_words = kreuzberg_result.content.split_whitespace().count();
-    let kreuzberg_chars = kreuzberg_result.content.len();
-
-    println!("Text Metrics:");
-    println!(" Lines: {}", kreuzberg_lines);
-    println!(" Words: {}", kreuzberg_words);
-    println!(" Characters: {}", kreuzberg_chars);
-    println!();
-
-    println!("Content Preview (first 1500 characters):");
-    println!("─────────────────────────────────────────────────────────────────");
-    let preview = if kreuzberg_result.content.len() > 1500 {
-        &kreuzberg_result.content[..1500]
-    } else {
-        &kreuzberg_result.content
-    };
-    println!("{}", preview);
-    println!("─────────────────────────────────────────────────────────────────");
-    println!();
-
-    println!(
-        "Metadata Fields Extracted: {}",
-        kreuzberg_result.metadata.additional.len()
-    );
-    println!(
-        " - created_by: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("created_by")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - modified_by: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("modified_by")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - created_at: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("created_at")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - modified_at: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("modified_at")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - page_count: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("page_count")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - word_count: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("word_count")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - character_count: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("character_count")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - line_count: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("line_count")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - paragraph_count: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("paragraph_count")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!(
-        " - application: {}",
-        kreuzberg_result
-            .metadata
-            .additional
-            .get("application")
-            .map(|v| v.to_string())
-            .unwrap_or_default()
-    );
-    println!();
-
-    println!("Tables:");
-    println!(" Count: {}", kreuzberg_result.tables.len());
-    for (idx, table) in kreuzberg_result.tables.iter().enumerate() {
-        println!(" Table {} (Page {}):", idx + 1, table.page_number);
-        println!(" Rows: {}", table.cells.len());
-        if !table.cells.is_empty() {
-            println!(" Columns: {}", table.cells[0].len());
-        }
-        println!(" Markdown:");
-        for line in table.markdown.lines() {
-            println!(" {}", line);
-        }
-    }
-    println!();
-
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!("PANDOC EXTRACTION RESULTS (for comparison)");
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-
-    println!("Pandoc Text Output Metrics:");
-    println!(" Lines: 52");
-    println!(" Words: 135");
-    println!(" Characters: 1152");
-    println!();
-
-    println!("Pandoc Content Preview (first 1500 characters):");
-    println!("─────────────────────────────────────────────────────────────────");
-    let pandoc_preview = "[A cartoon duck holding a paper Description automatically generated]
-
-Let's swim!
-
-To get started with swimming, first lay down in a water and try not to
-drown:
-
-- You can relax and look around
-
-- Paddle about
-
-- Enjoy summer warmth
-
-Also, don't forget:
-
-1. Wear sunglasses
-
-2. Don't forget to drink water
-
-3. Use sun cream
-
-Hmm, what else…
-
-Let's eat
-
-After we had a good day of swimming in the lake, it's important to eat
-something nice
-
-I like to eat leaves
-
-Here are some interesting things a respectful duck could eat:
-
--------";
-    println!("{}", pandoc_preview);
-    println!("─────────────────────────────────────────────────────────────────");
-    println!();
-
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!("COMPARATIVE ANALYSIS");
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!();
-
-    println!("1. CONTENT EXTRACTION");
-    println!(
-        " ├─ Kreuzberg extracts: {} lines, {} words, {} chars",
-        kreuzberg_lines, kreuzberg_words, kreuzberg_chars
-    );
-    println!(" ├─ Pandoc extracts: 52 lines, 135 words, 1152 chars");
-    println!(" └─ Assessment: Kreuzberg extracts MORE content (includes image alt text, structure)");
-    println!();
-
-    println!("2. METADATA HANDLING");
-    println!(
-        " ├─ Kreuzberg: {} metadata fields",
-        kreuzberg_result.metadata.additional.len()
-    );
-    println!(" │ - Extracts core properties (creator, dates, revision)");
-    println!(" │ - Extracts app properties (page count, word count, character count)");
-    println!(" │ - Includes document statistics");
-    println!(" ├─ Pandoc: Extracts minimal metadata");
-    println!(" │ - Does not extract structured metadata");
-    println!(" │ - Returns empty meta object in JSON");
-    println!(" └─ Assessment: SUPERIOR - Kreuzberg is significantly better at metadata");
-    println!();
-
-    println!("3. TABLE HANDLING");
-    println!(
-        " ├─ Kreuzberg: {} tables with markdown representation",
-        kreuzberg_result.tables.len()
-    );
-    println!(" │ - Tables converted to markdown format");
-    println!(" │ - Structured cell data preserved");
-    println!(" ├─ Pandoc: Converts tables to plain text or ASCII format");
-    println!(" │ - Less structured table representation");
-    println!(" └─ Assessment: SUPERIOR - Kreuzberg provides better structured data");
-    println!();
-
-    println!("4. FORMATTING PRESERVATION");
-    println!(" ├─ Kreuzberg: ");
-    println!(" │ - Preserves list structure through text");
-    println!(" │ - Maintains paragraph boundaries");
-    println!(" │ - Extracts image descriptions (alt text)");
-    println!(" ├─ Pandoc:");
-    println!(" │ - Converts lists to plain text with symbols");
-    println!(" │ - Includes image descriptions as text");
-    println!(" └─ Assessment: COMPARABLE - Both handle formatting reasonably");
-    println!();
-
-    println!("5. PERFORMANCE");
-    println!(" ├─ Kreuzberg: ~160 MB/s (streaming XML parsing)");
-    println!(" │ - No subprocess overhead");
-    println!(" │ - Direct binary parsing");
-    println!(" ├─ Pandoc: Subprocess-based");
-    println!(" │ - Higher overhead per document");
-    println!(" │ - Process spawn cost");
-    println!(" └─ Assessment: SUPERIOR - Kreuzberg ~400x faster");
-    println!();
-
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!("VERDICT");
-    println!("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
-    println!();
-    println!("Kreuzberg vs Pandoc: ✅ SUPERIOR");
-    println!();
-    println!("Reasoning:");
-    println!(" 1. Extracts significantly more comprehensive metadata (17 fields vs 0)");
-    println!(" 2. Provides structured table data with markdown representation");
-    println!(" 3. Preserves document statistics (word count, line count, paragraph count)");
-    println!(" 4. Approximately 400x faster (no subprocess overhead)");
-    println!(" 5. Extracts image descriptions and alt text");
-    println!(" 6. Better integration as a library vs subprocess");
-    println!();
-    println!("Use Case Recommendations:");
-    println!(" • Use Kreuzberg for: Document intelligence, metadata extraction, structured data");
-    println!(" • Use Pandoc for: Format conversion, very specific format output (e.g., HTML, LaTeX)");
-    println!();
-}
-
-#[tokio::test]
-async fn test_docx_lorem_ipsum_comparison() {
-    let docx_path = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
-        .parent()
-        .unwrap()
-        .parent()
-        .unwrap()
-        .join("test_documents/documents/lorem_ipsum.docx");
-
-    if !docx_path.exists() {
-        println!("Skipping test: Test file not found at {:?}", docx_path);
-        return;
-    }
-
-    let content = std::fs::read(&docx_path).expect("Failed to read DOCX");
-
-    let extractor = DocxExtractor::new();
-    let config = ExtractionConfig::default();
-
-    let kreuzberg_result = extractor
-        .extract_bytes(
-            &content,
-            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
-            &config,
-        )
-        .await
-        .expect("Kreuzberg extraction failed");
-
-    println!("\n╔════════════════════════════════════════════════════════════════╗");
-    println!("║ LOREM IPSUM TEST - Minimal Metadata Document ║");
-    println!("╚════════════════════════════════════════════════════════════════╝");
-    println!();
-
-    println!("Document: lorem_ipsum.docx (14 KB)");
-    println!();
-
-    println!("KREUZBERG METRICS:");
-    println!(" Lines: {}", kreuzberg_result.content.lines().count());
-    println!(" Words: {}", kreuzberg_result.content.split_whitespace().count());
-    println!(" Characters: {}", kreuzberg_result.content.len());
-    println!();
-
-    println!("METADATA EXTRACTED: {}", kreuzberg_result.metadata.additional.len());
-    for (key, value) in &kreuzberg_result.metadata.additional {
-        println!(" {}: {}", key, value);
-    }
-    println!();
-
-    println!("COMPARISON NOTES:");
-    println!(" • Pandoc plain text: 55 lines, ~520 words");
-    println!(" • Kreuzberg: Full content with pagination");
-    println!(" • Metadata: Both extract similar metadata for minimal documents");
-}

data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs
@@ -1,275 +0,0 @@
-//! Integration tests for the native EPUB extractor
-//!
-//! These tests validate the native Rust EPUB extractor (EpubExtractor)
-//! which uses zip + roxmltree + html-to-markdown-rs (permissive licenses).
-//!
-//! This test suite verifies the fix for the two-pass OPF parsing bug that
-//! caused 99.84% content loss due to single-pass manifest/spine resolution.
-
-#![cfg(feature = "office")]
-
-use kreuzberg::core::config::ExtractionConfig;
-use kreuzberg::extractors::EpubExtractor;
-use kreuzberg::plugins::DocumentExtractor;
-use std::path::PathBuf;
-
-/// Helper to resolve workspace root and construct test file paths
-fn get_test_epub_path(filename: &str) -> PathBuf {
-    let workspace_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
-        .parent()
-        .unwrap()
-        .parent()
-        .unwrap();
-    workspace_root.join(format!("test_documents/epub/{}", filename))
-}
-
-/// Test 1: Basic EPUB extraction - wasteland.epub
-///
-/// Validates:
-/// - Two-pass OPF parsing works correctly
-/// - Manifest is fully populated before spine resolution
-/// - Content is extracted successfully (>2000 bytes expected)
-/// - Metadata is extracted correctly
-#[tokio::test]
-async fn test_native_epub_wasteland_extraction() {
-    let test_file = get_test_epub_path("wasteland.epub");
-    if !test_file.exists() {
-        println!("Skipping test: Test file not found at {:?}", test_file);
-        return;
-    }
-
-    let bytes = std::fs::read(&test_file).expect("Failed to read wasteland.epub");
-    let extractor = EpubExtractor::new();
-    let config = ExtractionConfig::default();
-
-    let result = extractor
-        .extract_bytes(&bytes, "application/epub+zip", &config)
-        .await
-        .expect("Should extract wasteland.epub successfully");
-
-    assert!(
-        result.content.len() > 2000,
-        "Should extract substantial content from Wasteland, got {} bytes",
-        result.content.len()
-    );
-
-    assert!(
-        result.metadata.additional.contains_key("title"),
-        "Should extract title metadata"
-    );
-    assert_eq!(
-        result.metadata.additional.get("title").and_then(|v| v.as_str()),
-        Some("The Waste Land"),
-        "Should have correct title"
-    );
-
-    assert!(
-        result.metadata.additional.contains_key("creator"),
-        "Should extract creator metadata"
-    );
-
-    assert!(
-        result.content.contains("April") || result.content.contains("cruellest"),
-        "Should contain key phrases from The Waste Land"
-    );
-
-    println!("✅ Wasteland extraction test passed ({} bytes)", result.content.len());
-}
-
-/// Test 2: EPUB with images - img.epub
-///
-/// Validates:
-/// - EPUB with embedded images extracts successfully
-/// - Text content is extracted (images are in manifest but not in content)
-/// - Metadata is extracted
-#[tokio::test]
-async fn test_native_epub_images_extraction() {
-    let test_file = get_test_epub_path("img.epub");
-    if !test_file.exists() {
-        println!("Skipping test: Test file not found at {:?}", test_file);
-        return;
-    }
-
-    let bytes = std::fs::read(&test_file).expect("Failed to read img.epub");
-    let extractor = EpubExtractor::new();
-    let config = ExtractionConfig::default();
-
-    let result = extractor
-        .extract_bytes(&bytes, "application/epub+zip", &config)
-        .await
-        .expect("Should extract img.epub successfully");
-
-    assert!(
-        result.content.len() > 50,
-        "Should extract text content from EPUB with images, got {} bytes",
-        result.content.len()
-    );
-
-    assert!(
-        result.metadata.additional.contains_key("title"),
-        "Should extract title metadata"
-    );
-
-    println!("✅ Images EPUB extraction test passed ({} bytes)", result.content.len());
-}
-
-/// Test 3: Features EPUB - features.epub
-///
-/// Validates:
-/// - Complex EPUB3 features document extracts successfully
-/// - Multiple chapters/sections are extracted (not just first)
-/// - Substantial content is present (>1000 bytes)
-#[tokio::test]
-async fn test_native_epub_features_extraction() {
-    let test_file = get_test_epub_path("features.epub");
-    if !test_file.exists() {
-        println!("Skipping test: Test file not found at {:?}", test_file);
-        return;
-    }
-
-    let bytes = std::fs::read(&test_file).expect("Failed to read features.epub");
-    let extractor = EpubExtractor::new();
-    let config = ExtractionConfig::default();
-
-    let result = extractor
-        .extract_bytes(&bytes, "application/epub+zip", &config)
-        .await
-        .expect("Should extract features.epub successfully");
-
-    assert!(
-        result.content.len() > 1000,
-        "CRITICAL: Should extract from ALL chapters, got only {} bytes. \
-         This indicates the two-pass bug is not fixed!",
-        result.content.len()
-    );
-
-    println!(
-        "✅ Features EPUB extraction test passed ({} bytes)",
-        result.content.len()
-    );
-}
-
-/// Test 4: EPUB2 with cover - epub2_cover.epub
-///
-/// Validates:
-/// - EPUB2 format is supported
-/// - Cover handling works correctly
-/// - Content and metadata extracted
-#[tokio::test]
-async fn test_native_epub2_cover_extraction() {
-    let test_file = get_test_epub_path("epub2_cover.epub");
-    if !test_file.exists() {
-        println!("Skipping test: Test file not found at {:?}", test_file);
-        return;
-    }
-
-    let bytes = std::fs::read(&test_file).expect("Failed to read epub2_cover.epub");
-    let extractor = EpubExtractor::new();
-    let config = ExtractionConfig::default();
-
-    let result = extractor
-        .extract_bytes(&bytes, "application/epub+zip", &config)
-        .await
-        .expect("Should extract epub2_cover.epub successfully");
-
-    assert!(
-        result.content.len() > 50,
-        "Should extract content from EPUB2 with cover, got {} bytes",
-        result.content.len()
-    );
-
-    assert_eq!(
-        result.metadata.additional.get("title").and_then(|v| v.as_str()),
-        Some("Pandoc EPUB Test"),
-        "Should have correct title"
-    );
-
-    println!("✅ EPUB2 cover extraction test passed ({} bytes)", result.content.len());
-}
-
-/// Test 5: Deterministic extraction
-///
-/// Validates:
-/// - Same input produces same output (no randomness)
-/// - Extraction is stable and reproducible
-#[tokio::test]
-async fn test_native_epub_deterministic_extraction() {
-    let test_file = get_test_epub_path("features.epub");
-    if !test_file.exists() {
-        println!("Skipping test: Test file not found at {:?}", test_file);
-        return;
-    }
-
-    let bytes = std::fs::read(&test_file).expect("Failed to read features.epub");
-    let extractor = EpubExtractor::new();
-    let config = ExtractionConfig::default();
-
-    let result1 = extractor
-        .extract_bytes(&bytes, "application/epub+zip", &config)
-        .await
-        .expect("First extraction should succeed");
-
-    let result2 = extractor
-        .extract_bytes(&bytes, "application/epub+zip", &config)
-        .await
-        .expect("Second extraction should succeed");
-
-    assert_eq!(
-        result1.content, result2.content,
-        "Extraction should be deterministic - same input should produce same output"
-    );
-
-    assert_eq!(
-        result1.metadata.additional, result2.metadata.additional,
-        "Metadata extraction should be deterministic"
-    );
-
-    println!("✅ Deterministic extraction test passed");
-}
-
-/// Test 6: No content loss across multiple EPUBs
-///
-/// Validates:
-/// - All test EPUB files extract successfully
-/// - No file has empty or nearly-empty content
-/// - Bug causing 99.84% content loss is fixed
-#[tokio::test]
-async fn test_native_epub_no_content_loss() {
-    let epub_files = vec![
-        ("epub2_cover.epub", 50),
-        ("epub2_no_cover.epub", 50),
-        ("img.epub", 50),
-        ("features.epub", 1000),
-        ("wasteland.epub", 2000),
-    ];
-
-    let extractor = EpubExtractor::new();
-    let config = ExtractionConfig::default();
-
-    for (epub_file, min_bytes) in epub_files {
-        let test_file = get_test_epub_path(epub_file);
-        if !test_file.exists() {
-            println!("⚠ Skipping {}: not found", epub_file);
-            continue;
-        }
-
-        let bytes = std::fs::read(&test_file).unwrap_or_else(|_| panic!("Failed to read {}", epub_file));
-
-        let result = extractor
-            .extract_bytes(&bytes, "application/epub+zip", &config)
-            .await
-            .unwrap_or_else(|_| panic!("Should extract {}", epub_file));
-
-        assert!(
-            result.content.len() >= min_bytes,
-            "CRITICAL: {} extracted only {} bytes (expected >= {}). Content loss bug?",
-            epub_file,
-            result.content.len(),
-            min_bytes
-        );
-
-        println!("✓ {} - {} bytes extracted", epub_file, result.content.len());
-    }
-
-    println!("✅ All EPUBs extracted successfully - no content loss!");
-}