kreuzberg 4.0.0.pre.rc.6 → 4.0.0.rc1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +0 -6
- data/.rubocop.yaml +534 -1
- data/Gemfile +2 -1
- data/Gemfile.lock +11 -11
- data/README.md +5 -10
- data/examples/async_patterns.rb +0 -1
- data/ext/kreuzberg_rb/extconf.rb +0 -10
- data/ext/kreuzberg_rb/native/Cargo.toml +15 -23
- data/ext/kreuzberg_rb/native/build.rs +2 -0
- data/ext/kreuzberg_rb/native/include/ieeefp.h +1 -1
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +1 -1
- data/ext/kreuzberg_rb/native/include/strings.h +2 -2
- data/ext/kreuzberg_rb/native/include/unistd.h +1 -1
- data/ext/kreuzberg_rb/native/src/lib.rs +16 -75
- data/kreuzberg.gemspec +14 -57
- data/lib/kreuzberg/cache_api.rb +0 -1
- data/lib/kreuzberg/cli.rb +2 -2
- data/lib/kreuzberg/config.rb +2 -9
- data/lib/kreuzberg/errors.rb +7 -75
- data/lib/kreuzberg/extraction_api.rb +0 -1
- data/lib/kreuzberg/setup_lib_path.rb +0 -1
- data/lib/kreuzberg/version.rb +1 -1
- data/lib/kreuzberg.rb +0 -21
- data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
- data/sig/kreuzberg.rbs +3 -55
- data/spec/binding/cli_proxy_spec.rb +4 -2
- data/spec/binding/cli_spec.rb +11 -12
- data/spec/examples.txt +104 -0
- data/spec/fixtures/config.yaml +1 -0
- data/spec/spec_helper.rb +1 -1
- data/vendor/kreuzberg/Cargo.toml +42 -112
- data/vendor/kreuzberg/README.md +2 -2
- data/vendor/kreuzberg/build.rs +4 -18
- data/vendor/kreuzberg/src/bin/profile_extract.rs +455 -0
- data/vendor/kreuzberg/src/cache/mod.rs +3 -27
- data/vendor/kreuzberg/src/core/batch_mode.rs +0 -60
- data/vendor/kreuzberg/src/core/extractor.rs +81 -202
- data/vendor/kreuzberg/src/core/io.rs +2 -4
- data/vendor/kreuzberg/src/core/mime.rs +12 -2
- data/vendor/kreuzberg/src/core/mod.rs +1 -4
- data/vendor/kreuzberg/src/core/pipeline.rs +33 -111
- data/vendor/kreuzberg/src/embeddings.rs +16 -125
- data/vendor/kreuzberg/src/error.rs +1 -1
- data/vendor/kreuzberg/src/extraction/docx.rs +1 -1
- data/vendor/kreuzberg/src/extraction/image.rs +13 -13
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +1 -0
- data/vendor/kreuzberg/src/extraction/mod.rs +5 -9
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +0 -2
- data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +275 -0
- data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +178 -0
- data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +491 -0
- data/vendor/kreuzberg/src/extraction/pandoc/server.rs +496 -0
- data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +1188 -0
- data/vendor/kreuzberg/src/extraction/pandoc/version.rs +162 -0
- data/vendor/kreuzberg/src/extractors/archive.rs +0 -21
- data/vendor/kreuzberg/src/extractors/docx.rs +128 -16
- data/vendor/kreuzberg/src/extractors/email.rs +0 -14
- data/vendor/kreuzberg/src/extractors/excel.rs +20 -19
- data/vendor/kreuzberg/src/extractors/html.rs +154 -137
- data/vendor/kreuzberg/src/extractors/image.rs +4 -7
- data/vendor/kreuzberg/src/extractors/mod.rs +9 -106
- data/vendor/kreuzberg/src/extractors/pandoc.rs +201 -0
- data/vendor/kreuzberg/src/extractors/pdf.rs +15 -12
- data/vendor/kreuzberg/src/extractors/pptx.rs +3 -17
- data/vendor/kreuzberg/src/extractors/structured.rs +0 -14
- data/vendor/kreuzberg/src/extractors/text.rs +5 -23
- data/vendor/kreuzberg/src/extractors/xml.rs +0 -7
- data/vendor/kreuzberg/src/keywords/rake.rs +1 -0
- data/vendor/kreuzberg/src/lib.rs +1 -4
- data/vendor/kreuzberg/src/mcp/mod.rs +1 -1
- data/vendor/kreuzberg/src/mcp/server.rs +3 -5
- data/vendor/kreuzberg/src/ocr/processor.rs +2 -18
- data/vendor/kreuzberg/src/pdf/error.rs +1 -1
- data/vendor/kreuzberg/src/pdf/table.rs +44 -17
- data/vendor/kreuzberg/src/pdf/text.rs +3 -0
- data/vendor/kreuzberg/src/plugins/extractor.rs +5 -8
- data/vendor/kreuzberg/src/plugins/ocr.rs +11 -2
- data/vendor/kreuzberg/src/plugins/processor.rs +1 -2
- data/vendor/kreuzberg/src/plugins/registry.rs +0 -13
- data/vendor/kreuzberg/src/plugins/validator.rs +8 -9
- data/vendor/kreuzberg/src/stopwords/mod.rs +2 -2
- data/vendor/kreuzberg/src/types.rs +12 -42
- data/vendor/kreuzberg/tests/batch_orchestration.rs +5 -19
- data/vendor/kreuzberg/tests/batch_processing.rs +3 -15
- data/vendor/kreuzberg/tests/chunking_offset_demo.rs +92 -0
- data/vendor/kreuzberg/tests/concurrency_stress.rs +1 -17
- data/vendor/kreuzberg/tests/config_features.rs +0 -18
- data/vendor/kreuzberg/tests/config_loading_tests.rs +39 -15
- data/vendor/kreuzberg/tests/core_integration.rs +7 -24
- data/vendor/kreuzberg/tests/csv_integration.rs +81 -71
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +25 -23
- data/vendor/kreuzberg/tests/pandoc_integration.rs +503 -0
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1 -0
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +1 -0
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +22 -1
- data/vendor/kreuzberg/tests/security_validation.rs +1 -12
- metadata +25 -90
- data/.rubocop.yml +0 -538
- data/ext/kreuzberg_rb/native/Cargo.lock +0 -6535
- data/lib/kreuzberg/error_context.rb +0 -32
- data/vendor/kreuzberg/benches/otel_overhead.rs +0 -48
- data/vendor/kreuzberg/src/extraction/markdown.rs +0 -213
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +0 -287
- data/vendor/kreuzberg/src/extractors/bibtex.rs +0 -469
- data/vendor/kreuzberg/src/extractors/docbook.rs +0 -502
- data/vendor/kreuzberg/src/extractors/epub.rs +0 -707
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +0 -491
- data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +0 -738
- data/vendor/kreuzberg/src/extractors/jats.rs +0 -1051
- data/vendor/kreuzberg/src/extractors/jupyter.rs +0 -367
- data/vendor/kreuzberg/src/extractors/latex.rs +0 -652
- data/vendor/kreuzberg/src/extractors/markdown.rs +0 -700
- data/vendor/kreuzberg/src/extractors/odt.rs +0 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +0 -634
- data/vendor/kreuzberg/src/extractors/orgmode.rs +0 -528
- data/vendor/kreuzberg/src/extractors/rst.rs +0 -576
- data/vendor/kreuzberg/src/extractors/rtf.rs +0 -810
- data/vendor/kreuzberg/src/extractors/security.rs +0 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +0 -367
- data/vendor/kreuzberg/src/extractors/typst.rs +0 -650
- data/vendor/kreuzberg/src/panic_context.rs +0 -154
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +0 -52
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +0 -421
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +0 -498
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +0 -370
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +0 -275
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +0 -228
- data/vendor/kreuzberg/tests/html_table_test.rs +0 -551
- data/vendor/kreuzberg/tests/instrumentation_test.rs +0 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +0 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +0 -704
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +0 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +0 -490
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +0 -695
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +0 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +0 -822
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +0 -692
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +0 -776
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +0 -1259
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +0 -647
- data/vendor/rb-sys/.cargo-ok +0 -1
- data/vendor/rb-sys/.cargo_vcs_info.json +0 -6
- data/vendor/rb-sys/Cargo.lock +0 -393
- data/vendor/rb-sys/Cargo.toml +0 -70
- data/vendor/rb-sys/Cargo.toml.orig +0 -57
- data/vendor/rb-sys/LICENSE-APACHE +0 -190
- data/vendor/rb-sys/LICENSE-MIT +0 -21
- data/vendor/rb-sys/bin/release.sh +0 -21
- data/vendor/rb-sys/build/features.rs +0 -108
- data/vendor/rb-sys/build/main.rs +0 -246
- data/vendor/rb-sys/build/stable_api_config.rs +0 -153
- data/vendor/rb-sys/build/version.rs +0 -48
- data/vendor/rb-sys/readme.md +0 -36
- data/vendor/rb-sys/src/bindings.rs +0 -21
- data/vendor/rb-sys/src/hidden.rs +0 -11
- data/vendor/rb-sys/src/lib.rs +0 -34
- data/vendor/rb-sys/src/macros.rs +0 -371
- data/vendor/rb-sys/src/memory.rs +0 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +0 -38
- data/vendor/rb-sys/src/special_consts.rs +0 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +0 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +0 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +0 -316
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +0 -316
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +0 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +0 -317
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +0 -315
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +0 -326
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +0 -327
- data/vendor/rb-sys/src/stable_api.rs +0 -261
- data/vendor/rb-sys/src/symbol.rs +0 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +0 -332
- data/vendor/rb-sys/src/utils.rs +0 -89
- data/vendor/rb-sys/src/value_type.rs +0 -7
|
@@ -0,0 +1,1188 @@
|
|
|
1
|
+
use crate::error::{KreuzbergError, Result};
|
|
2
|
+
#[cfg(feature = "quality")]
|
|
3
|
+
use crate::text::normalize_spaces;
|
|
4
|
+
use serde_json::Value;
|
|
5
|
+
use std::collections::HashMap;
|
|
6
|
+
use std::path::{Path, PathBuf};
|
|
7
|
+
use tokio::fs;
|
|
8
|
+
use tokio::process::Command;
|
|
9
|
+
use tokio::time::{Duration, timeout};
|
|
10
|
+
|
|
11
|
+
/// Default timeout for Pandoc operations (120 seconds)
///
/// Applied to every pandoc subprocess wait in this module; on expiry the
/// extraction is aborted and reported as a parsing error.
const PANDOC_TIMEOUT_SECONDS: u64 = 120;
|
|
13
|
+
|
|
14
|
+
/// RAII guard for automatic temporary file cleanup
|
|
15
|
+
struct TempFile {
|
|
16
|
+
path: PathBuf,
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
impl TempFile {
|
|
20
|
+
fn new(path: PathBuf) -> Self {
|
|
21
|
+
Self { path }
|
|
22
|
+
}
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
impl Drop for TempFile {
|
|
26
|
+
fn drop(&mut self) {
|
|
27
|
+
let path = self.path.clone();
|
|
28
|
+
tokio::spawn(async move {
|
|
29
|
+
let _ = fs::remove_file(&path).await;
|
|
30
|
+
});
|
|
31
|
+
}
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
/// Extract content from file using Pandoc (convert to markdown)
|
|
35
|
+
#[allow(dead_code)]
|
|
36
|
+
pub async fn extract_content(path: &Path, from_format: &str) -> Result<String> {
|
|
37
|
+
let child = Command::new("pandoc")
|
|
38
|
+
.arg(path)
|
|
39
|
+
.arg(format!("--from={}", from_format))
|
|
40
|
+
.arg("--to=markdown")
|
|
41
|
+
.arg("--standalone")
|
|
42
|
+
.arg("--wrap=preserve")
|
|
43
|
+
.arg("--quiet")
|
|
44
|
+
.stdout(std::process::Stdio::piped())
|
|
45
|
+
.stderr(std::process::Stdio::piped())
|
|
46
|
+
.spawn()
|
|
47
|
+
.map_err(|e| {
|
|
48
|
+
// Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
|
|
49
|
+
std::io::Error::other(format!("Failed to execute pandoc: {}", e))
|
|
50
|
+
})?;
|
|
51
|
+
|
|
52
|
+
let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
|
|
53
|
+
Ok(Ok(output)) => output,
|
|
54
|
+
Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
|
|
55
|
+
Err(_) => {
|
|
56
|
+
// Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
|
|
57
|
+
return Err(KreuzbergError::parsing(format!(
|
|
58
|
+
"Pandoc content extraction timed out after {} seconds",
|
|
59
|
+
PANDOC_TIMEOUT_SECONDS
|
|
60
|
+
)));
|
|
61
|
+
}
|
|
62
|
+
};
|
|
63
|
+
|
|
64
|
+
if !output.status.success() {
|
|
65
|
+
let stderr = String::from_utf8_lossy(&output.stderr);
|
|
66
|
+
|
|
67
|
+
// Subprocess error analysis - wrap only if format/parsing error detected ~keep
|
|
68
|
+
let stderr_lower = stderr.to_lowercase();
|
|
69
|
+
if stderr_lower.contains("format")
|
|
70
|
+
|| stderr_lower.contains("unsupported")
|
|
71
|
+
|| stderr_lower.contains("error:")
|
|
72
|
+
|| stderr_lower.contains("failed")
|
|
73
|
+
{
|
|
74
|
+
return Err(KreuzbergError::parsing(format!(
|
|
75
|
+
"Pandoc format/parsing error: {}",
|
|
76
|
+
stderr
|
|
77
|
+
)));
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
// True system error - bubble up as IO error ~keep
|
|
81
|
+
return Err(std::io::Error::other(format!("Pandoc system error: {}", stderr)).into());
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
let content = String::from_utf8(output.stdout)
|
|
85
|
+
.map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc output: {}", e)))?;
|
|
86
|
+
|
|
87
|
+
#[cfg(feature = "quality")]
|
|
88
|
+
{
|
|
89
|
+
Ok(normalize_spaces(&content))
|
|
90
|
+
}
|
|
91
|
+
#[cfg(not(feature = "quality"))]
|
|
92
|
+
{
|
|
93
|
+
Ok(content)
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
/// Extract metadata from file using Pandoc JSON output
|
|
98
|
+
#[allow(dead_code)]
|
|
99
|
+
pub async fn extract_metadata(path: &Path, from_format: &str) -> Result<HashMap<String, Value>> {
|
|
100
|
+
let child = Command::new("pandoc")
|
|
101
|
+
.arg(path)
|
|
102
|
+
.arg(format!("--from={}", from_format))
|
|
103
|
+
.arg("--to=json")
|
|
104
|
+
.arg("--standalone")
|
|
105
|
+
.arg("--quiet")
|
|
106
|
+
.stdout(std::process::Stdio::piped())
|
|
107
|
+
.stderr(std::process::Stdio::piped())
|
|
108
|
+
.spawn()
|
|
109
|
+
.map_err(|e| {
|
|
110
|
+
// Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
|
|
111
|
+
std::io::Error::other(format!("Failed to execute pandoc: {}", e))
|
|
112
|
+
})?;
|
|
113
|
+
|
|
114
|
+
let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
|
|
115
|
+
Ok(Ok(output)) => output,
|
|
116
|
+
Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
|
|
117
|
+
Err(_) => {
|
|
118
|
+
// Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
|
|
119
|
+
return Err(KreuzbergError::parsing(format!(
|
|
120
|
+
"Pandoc metadata extraction timed out after {} seconds",
|
|
121
|
+
PANDOC_TIMEOUT_SECONDS
|
|
122
|
+
)));
|
|
123
|
+
}
|
|
124
|
+
};
|
|
125
|
+
|
|
126
|
+
if !output.status.success() {
|
|
127
|
+
let stderr = String::from_utf8_lossy(&output.stderr);
|
|
128
|
+
|
|
129
|
+
// Subprocess error analysis - wrap only if format/parsing error detected ~keep
|
|
130
|
+
let stderr_lower = stderr.to_lowercase();
|
|
131
|
+
if stderr_lower.contains("format")
|
|
132
|
+
|| stderr_lower.contains("unsupported")
|
|
133
|
+
|| stderr_lower.contains("error:")
|
|
134
|
+
|| stderr_lower.contains("failed")
|
|
135
|
+
{
|
|
136
|
+
return Err(KreuzbergError::parsing(format!(
|
|
137
|
+
"Pandoc metadata extraction format/parsing error: {}",
|
|
138
|
+
stderr
|
|
139
|
+
)));
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
// True system error - bubble up as IO error ~keep
|
|
143
|
+
return Err(std::io::Error::other(format!("Pandoc metadata extraction system error: {}", stderr)).into());
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
let json_content = String::from_utf8(output.stdout)
|
|
147
|
+
.map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc JSON output: {}", e)))?;
|
|
148
|
+
|
|
149
|
+
let json_data: Value = serde_json::from_str(&json_content)
|
|
150
|
+
.map_err(|e| KreuzbergError::parsing(format!("Failed to parse pandoc JSON: {}", e)))?;
|
|
151
|
+
|
|
152
|
+
extract_metadata_from_json(&json_data)
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
/// Valid metadata field names (must match Python's _VALID_METADATA_KEYS)
///
/// Allowlist consulted by `extract_metadata_from_json`: any Pandoc metadata
/// key that (after `get_pandoc_key` mapping) is not in this list is dropped.
const VALID_METADATA_KEYS: &[&str] = &[
    "abstract",
    "authors",
    "categories",
    "character_count",
    "citations",
    "code_blocks",
    "comments",
    "content",
    "copyright",
    "created_at",
    "created_by",
    "description",
    "fonts",
    "headers",
    "height",
    "identifier",
    "keywords",
    "languages",
    "license",
    "line_count",
    "links",
    "modified_at",
    "modified_by",
    "organization",
    "parse_error",
    "publisher",
    "references",
    "sheet_count",
    "sheet_names",
    "status",
    "subject",
    "subtitle",
    "summary",
    "title",
    "total_cells",
    "version",
    "warning",
    "width",
    "word_count",
    "email_from",
    "email_to",
    "email_cc",
    "email_bcc",
    "date",
    "attachments",
    "table_count",
    "tables_summary",
    "quality_score",
    "image_preprocessing",
    "source_format",
    "converted_via",
    "error",
    "error_context",
    "json_schema",
    "notes",
    "note",
    "name",
    "body",
    "text",
    "message",
    "attributes",
    "token_reduction",
    "processing_errors",
    "extraction_error",
    "element_count",
    "unique_elements",
];
|
|
224
|
+
|
|
225
|
+
/// Extract metadata from Pandoc JSON AST
///
/// Walks the top-level `meta` object, normalising key names via
/// `get_pandoc_key` and keeping only keys present in `VALID_METADATA_KEYS`.
/// Citation ids discovered in `blocks` are merged (deduplicated) into the
/// `citations` entry; a top-level `citations` array, if present, is applied
/// last.
pub(crate) fn extract_metadata_from_json(json: &Value) -> Result<HashMap<String, Value>> {
    let mut metadata = HashMap::new();

    if let Some(meta) = json.get("meta").and_then(|m| m.as_object()) {
        for (key, value) in meta {
            let pandoc_key = get_pandoc_key(key);
            // Drop anything outside the canonical metadata vocabulary.
            if !VALID_METADATA_KEYS.contains(&pandoc_key.as_str()) {
                continue;
            }
            if let Some(extracted) = extract_meta_value(value) {
                metadata.insert(pandoc_key, extracted);
            }
        }
    }

    if let Some(blocks) = json.get("blocks").and_then(|b| b.as_array()) {
        let mut citations = Vec::new();
        extract_citations_from_blocks(blocks, &mut citations);

        if !citations.is_empty() {
            if let Some(existing) = metadata.get_mut("citations") {
                // Merge into an existing citations array, skipping duplicates.
                if let Some(arr) = existing.as_array_mut() {
                    for cite in citations {
                        if !arr.contains(&Value::String(cite.clone())) {
                            arr.push(Value::String(cite));
                        }
                    }
                }
            } else {
                metadata.insert(
                    "citations".to_string(),
                    Value::Array(citations.into_iter().map(Value::String).collect()),
                );
            }
        }
    }

    // NOTE(review): a non-empty top-level "citations" array REPLACES whatever
    // was gathered from blocks above rather than merging — presumably the
    // document-level list is considered authoritative; confirm with callers.
    if let Some(citations) = json.get("citations").and_then(|c| c.as_array()) {
        let cite_ids: Vec<String> = citations
            .iter()
            .filter_map(|c| c.get("citationId").and_then(|id| id.as_str()).map(String::from))
            .collect();

        if !cite_ids.is_empty() {
            metadata.insert(
                "citations".to_string(),
                Value::Array(cite_ids.into_iter().map(Value::String).collect()),
            );
        }
    }

    Ok(metadata)
}
|
|
279
|
+
|
|
280
|
+
/// Extract markdown content from Pandoc JSON AST
///
/// Converts the JSON AST blocks back to markdown format, similar to what
/// `pandoc --to=markdown` would produce. This allows us to extract both
/// content and metadata from a single JSON extraction.
pub(crate) fn extract_content_from_json(json: &Value) -> Result<String> {
    let mut content = String::new();

    // A title in the metadata becomes a leading level-1 heading.
    if let Some(meta) = json.get("meta").and_then(|m| m.as_object())
        && let Some(title_node) = meta.get("title")
        && let Some(title_value) = extract_meta_value(title_node)
        && let Some(title_str) = title_value.as_str()
    {
        content.push_str(&format!("# {}\n\n", title_str));
    }

    if let Some(blocks) = json.get("blocks").and_then(|b| b.as_array()) {
        for block in blocks {
            if let Some(text) = extract_block_text(block) {
                // Keep blocks separated by exactly one blank line.
                if !content.is_empty() && !content.ends_with("\n\n") {
                    content.push_str("\n\n");
                }
                content.push_str(&text);
            }
        }
    }

    Ok(content)
}
|
|
309
|
+
|
|
310
|
+
/// Extract text from a Pandoc JSON AST block
///
/// Renders one block node (`{"t": <type>, "c": <content>}`) to a markdown-ish
/// string. Unrecognised block types yield `None` and are skipped by callers.
fn extract_block_text(block: &Value) -> Option<String> {
    let obj = block.as_object()?;
    let block_type = obj.get("t")?.as_str()?;
    let content = obj.get("c");

    match block_type {
        "Para" | "Plain" => {
            if let Some(inlines) = content.and_then(|c| c.as_array()) {
                return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
            }
        }
        "Header" => {
            // Header content is [level, attrs, inlines].
            if let Some(arr) = content.and_then(|c| c.as_array())
                && arr.len() >= 3
                && let Some(level) = arr[0].as_u64()
                && let Some(inlines) = arr[2].as_array()
            {
                let header_text = extract_inlines(inlines).and_then(|v| v.as_str().map(String::from))?;
                let prefix = "#".repeat(level as usize);
                return Some(format!("{} {}", prefix, header_text));
            }
        }
        "CodeBlock" => {
            // CodeBlock content is [attrs, code-string].
            if let Some(arr) = content.and_then(|c| c.as_array())
                && arr.len() >= 2
                && let Some(code) = arr[1].as_str()
            {
                return Some(format!("```\n{}\n```", code));
            }
        }
        "BlockQuote" => {
            // Recursively render inner blocks, each prefixed with "> ".
            if let Some(blocks) = content.and_then(|c| c.as_array()) {
                let mut quote_text = String::new();
                for inner_block in blocks {
                    if let Some(text) = extract_block_text(inner_block) {
                        quote_text.push_str("> ");
                        quote_text.push_str(&text);
                        quote_text.push('\n');
                    }
                }
                return Some(quote_text.trim_end().to_string());
            }
        }
        "BulletList" => {
            // Each item is itself a list of blocks.
            if let Some(items) = content.and_then(|c| c.as_array()) {
                let mut list_text = String::new();
                for item in items {
                    if let Some(item_blocks) = item.as_array() {
                        for block in item_blocks {
                            if let Some(text) = extract_block_text(block) {
                                list_text.push_str("- ");
                                list_text.push_str(&text);
                                list_text.push('\n');
                            }
                        }
                    }
                }
                return Some(list_text.trim_end().to_string());
            }
        }
        "OrderedList" => {
            // OrderedList content is [list-attrs, items]; numbering restarts at 1.
            if let Some(arr) = content.and_then(|c| c.as_array())
                && arr.len() >= 2
                && let Some(items) = arr[1].as_array()
            {
                let mut list_text = String::new();
                for (idx, item) in items.iter().enumerate() {
                    if let Some(item_blocks) = item.as_array() {
                        for block in item_blocks {
                            if let Some(text) = extract_block_text(block) {
                                list_text.push_str(&format!("{}. {}\n", idx + 1, text));
                            }
                        }
                    }
                }
                return Some(list_text.trim_end().to_string());
            }
        }
        "HorizontalRule" => {
            return Some("---".to_string());
        }
        _ => {}
    }

    None
}
|
|
397
|
+
|
|
398
|
+
/// Map Pandoc metadata keys to standard keys
///
/// Pandoc uses its own names for a handful of metadata fields; translate
/// those to the canonical vocabulary used by `VALID_METADATA_KEYS`. Keys
/// without a special mapping pass through unchanged.
fn get_pandoc_key(key: &str) -> String {
    let canonical = match key {
        "abstract" => "summary",
        "date" => "created_at",
        "author" | "contributors" => "authors",
        "institute" => "organization",
        other => other,
    };
    canonical.to_string()
}
|
|
408
|
+
|
|
409
|
+
/// Extract value from Pandoc metadata node
///
/// Converts one `meta` entry (`{"t": Meta*, "c": ...}`) into a plain JSON
/// value: strings for MetaString/MetaInlines, arrays for MetaList (flattened
/// one level), a space-joined string for MetaBlocks (Para blocks only), and
/// an object for MetaMap. Returns `None` for empty or unrecognised nodes.
fn extract_meta_value(node: &Value) -> Option<Value> {
    if let Some(obj) = node.as_object() {
        let node_type = obj.get("t")?.as_str()?;
        let content = obj.get("c");

        match node_type {
            "MetaString" => {
                if let Some(s) = content.and_then(|c| c.as_str()) {
                    return Some(Value::String(s.to_string()));
                }
            }
            "MetaInlines" => {
                if let Some(inlines) = content.and_then(|c| c.as_array()) {
                    return extract_inlines(inlines);
                }
            }
            "MetaList" => {
                if let Some(list) = content.and_then(|c| c.as_array()) {
                    let mut values = Vec::new();
                    for item in list {
                        if let Some(val) = extract_meta_value(item) {
                            // Flatten nested arrays one level so a list of
                            // lists becomes a single flat list.
                            if let Some(arr) = val.as_array() {
                                values.extend_from_slice(arr);
                            } else {
                                values.push(val);
                            }
                        }
                    }
                    if !values.is_empty() {
                        return Some(Value::Array(values));
                    }
                }
            }
            "MetaBlocks" => {
                // Only Para blocks contribute; their texts are joined with a space.
                if let Some(blocks) = content.and_then(|c| c.as_array()) {
                    let mut texts = Vec::new();
                    for block in blocks {
                        if let Some(block_obj) = block.as_object()
                            && block_obj.get("t")?.as_str()? == "Para"
                            && let Some(para_content) = block_obj.get("c").and_then(|c| c.as_array())
                            && let Some(text) = extract_inlines(para_content)
                            && let Some(s) = text.as_str()
                        {
                            texts.push(s.to_string());
                        }
                    }
                    if !texts.is_empty() {
                        return Some(Value::String(texts.join(" ")));
                    }
                }
            }
            "MetaMap" => {
                // Recurse into each entry; empty results are dropped.
                if let Some(map) = content.and_then(|c| c.as_object()) {
                    let mut result = serde_json::Map::new();
                    for (k, v) in map {
                        if let Some(val) = extract_meta_value(v) {
                            result.insert(k.clone(), val);
                        }
                    }
                    if !result.is_empty() {
                        return Some(Value::Object(result));
                    }
                }
            }
            _ => {}
        }
    }

    None
}
|
|
480
|
+
|
|
481
|
+
/// Extract inline text from Pandoc inline nodes
|
|
482
|
+
fn extract_inlines(inlines: &[Value]) -> Option<Value> {
|
|
483
|
+
let mut texts = Vec::new();
|
|
484
|
+
|
|
485
|
+
for inline in inlines {
|
|
486
|
+
if let Some(text) = extract_inline_text(inline) {
|
|
487
|
+
texts.push(text);
|
|
488
|
+
}
|
|
489
|
+
}
|
|
490
|
+
|
|
491
|
+
let result = texts.join("");
|
|
492
|
+
if result.is_empty() {
|
|
493
|
+
None
|
|
494
|
+
} else {
|
|
495
|
+
Some(Value::String(result))
|
|
496
|
+
}
|
|
497
|
+
}
|
|
498
|
+
|
|
499
|
+
/// Extract text from a single inline node
///
/// Renders one inline node (`{"t": <type>, "c": ...}`) to plain text:
/// literal text for Str/Code/Math, a space for Space, a newline for
/// LineBreak/SoftBreak, and recursive extraction for wrapper nodes
/// (Emph, Strong, Link, Quoted, Cite, ...). Unrecognised types yield `None`.
fn extract_inline_text(node: &Value) -> Option<String> {
    if let Some(obj) = node.as_object() {
        let node_type = obj.get("t")?.as_str()?;

        match node_type {
            "Str" => {
                return obj.get("c")?.as_str().map(String::from);
            }
            "Space" => {
                return Some(" ".to_string());
            }
            // Formatting wrappers: content is the wrapped inline list.
            "Emph" | "Strong" | "Strikeout" | "Superscript" | "Subscript" | "SmallCaps" => {
                if let Some(content) = obj.get("c").and_then(|c| c.as_array()) {
                    return extract_inlines(content).and_then(|v| v.as_str().map(String::from));
                }
            }
            "Code" => {
                // Code content is [attrs, code-string].
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                {
                    return arr[1].as_str().map(String::from);
                }
            }
            "Link" | "Image" => {
                // Content is [attrs, inlines, target]; only the label text is kept.
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 3
                    && let Some(inlines) = arr[1].as_array()
                {
                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
                }
            }
            "Quoted" => {
                // Content is [quote-type, inlines]; quote marks are dropped.
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                    && let Some(inlines) = arr[1].as_array()
                {
                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
                }
            }
            "Cite" => {
                // Content is [citations, inlines]; only the rendered text is kept.
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                    && let Some(inlines) = arr[1].as_array()
                {
                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
                }
            }
            "Math" => {
                // Content is [math-type, tex-string]; the raw TeX is kept.
                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                    && arr.len() == 2
                {
                    return arr[1].as_str().map(String::from);
                }
            }
            "LineBreak" | "SoftBreak" => {
                return Some("\n".to_string());
            }
            _ => {}
        }
    }

    None
}
|
|
563
|
+
|
|
564
|
+
/// Extract citations from block nodes
///
/// Recursively walks the block list, appending every `citationId` found in a
/// `Cite` node to `citations`. Nested content (arrays, or object values that
/// are arrays) is descended into so citations inside quotes/lists are found.
/// No deduplication happens here — callers dedupe when merging.
fn extract_citations_from_blocks(blocks: &[Value], citations: &mut Vec<String>) {
    for block in blocks {
        if let Some(obj) = block.as_object() {
            let block_type = obj.get("t").and_then(|t| t.as_str());

            // Cite content is [citation-descriptors, inlines]; ids live in
            // the first element.
            if block_type == Some("Cite")
                && let Some(arr) = obj.get("c").and_then(|c| c.as_array())
                && let Some(cite_list) = arr.first().and_then(|c| c.as_array())
            {
                for cite in cite_list {
                    if let Some(cite_id) = cite.get("citationId").and_then(|id| id.as_str()) {
                        citations.push(cite_id.to_string());
                    }
                }
            }

            // Descend into nested structures to catch citations at any depth.
            if let Some(content) = obj.get("c") {
                if let Some(nested_blocks) = content.as_array() {
                    extract_citations_from_blocks(nested_blocks, citations);
                } else if let Some(nested_obj) = content.as_object() {
                    for value in nested_obj.values() {
                        if let Some(arr) = value.as_array() {
                            extract_citations_from_blocks(arr, citations);
                        }
                    }
                }
            }
        }
    }
}
|
|
595
|
+
|
|
596
|
+
/// Wrapper functions for backwards compatibility
|
|
597
|
+
pub async fn extract_with_pandoc(path: &Path, from_format: &str) -> Result<(String, HashMap<String, Value>)> {
|
|
598
|
+
let child = Command::new("pandoc")
|
|
599
|
+
.arg(path)
|
|
600
|
+
.arg(format!("--from={}", from_format))
|
|
601
|
+
.arg("--to=json")
|
|
602
|
+
.arg("--standalone")
|
|
603
|
+
.arg("--quiet")
|
|
604
|
+
.stdout(std::process::Stdio::piped())
|
|
605
|
+
.stderr(std::process::Stdio::piped())
|
|
606
|
+
.spawn()
|
|
607
|
+
.map_err(|e| {
|
|
608
|
+
// Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
|
|
609
|
+
std::io::Error::other(format!("Failed to execute pandoc: {}", e))
|
|
610
|
+
})?;
|
|
611
|
+
|
|
612
|
+
let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
|
|
613
|
+
Ok(Ok(output)) => output,
|
|
614
|
+
Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
|
|
615
|
+
Err(_) => {
|
|
616
|
+
// Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
|
|
617
|
+
return Err(KreuzbergError::parsing(format!(
|
|
618
|
+
"Pandoc extraction timed out after {} seconds",
|
|
619
|
+
PANDOC_TIMEOUT_SECONDS
|
|
620
|
+
)));
|
|
621
|
+
}
|
|
622
|
+
};
|
|
623
|
+
|
|
624
|
+
if !output.status.success() {
|
|
625
|
+
let stderr = String::from_utf8_lossy(&output.stderr);
|
|
626
|
+
|
|
627
|
+
// Subprocess error analysis - wrap only if format/parsing error detected ~keep
|
|
628
|
+
let stderr_lower = stderr.to_lowercase();
|
|
629
|
+
if stderr_lower.contains("format")
|
|
630
|
+
|| stderr_lower.contains("unsupported")
|
|
631
|
+
|| stderr_lower.contains("error:")
|
|
632
|
+
|| stderr_lower.contains("failed")
|
|
633
|
+
{
|
|
634
|
+
return Err(KreuzbergError::parsing(format!(
|
|
635
|
+
"Pandoc format/parsing error: {}",
|
|
636
|
+
stderr
|
|
637
|
+
)));
|
|
638
|
+
}
|
|
639
|
+
|
|
640
|
+
// True system error - bubble up as IO error ~keep
|
|
641
|
+
return Err(std::io::Error::other(format!("Pandoc system error: {}", stderr)).into());
|
|
642
|
+
}
|
|
643
|
+
|
|
644
|
+
let json_content = String::from_utf8(output.stdout)
|
|
645
|
+
.map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc JSON output: {}", e)))?;
|
|
646
|
+
|
|
647
|
+
let json_data: Value = serde_json::from_str(&json_content)
|
|
648
|
+
.map_err(|e| KreuzbergError::parsing(format!("Failed to parse pandoc JSON: {}", e)))?;
|
|
649
|
+
|
|
650
|
+
let content = extract_content_from_json(&json_data)?;
|
|
651
|
+
let metadata = extract_metadata_from_json(&json_data)?;
|
|
652
|
+
|
|
653
|
+
#[cfg(feature = "quality")]
|
|
654
|
+
{
|
|
655
|
+
Ok((normalize_spaces(&content), metadata))
|
|
656
|
+
}
|
|
657
|
+
#[cfg(not(feature = "quality"))]
|
|
658
|
+
{
|
|
659
|
+
Ok((content, metadata))
|
|
660
|
+
}
|
|
661
|
+
}
|
|
662
|
+
|
|
663
|
+
pub async fn extract_with_pandoc_from_bytes(
|
|
664
|
+
bytes: &[u8],
|
|
665
|
+
from_format: &str,
|
|
666
|
+
extension: &str,
|
|
667
|
+
) -> Result<(String, HashMap<String, Value>)> {
|
|
668
|
+
let temp_dir = std::env::temp_dir();
|
|
669
|
+
let temp_file_path = temp_dir.join(format!(
|
|
670
|
+
"pandoc_temp_{}_{}.{}",
|
|
671
|
+
std::process::id(),
|
|
672
|
+
uuid::Uuid::new_v4(),
|
|
673
|
+
extension
|
|
674
|
+
));
|
|
675
|
+
|
|
676
|
+
// RAII guard ensures cleanup on all paths including panic ~keep
|
|
677
|
+
let _temp_guard = TempFile::new(temp_file_path.clone());
|
|
678
|
+
|
|
679
|
+
fs::write(&temp_file_path, bytes).await?;
|
|
680
|
+
|
|
681
|
+
extract_with_pandoc(&temp_file_path, from_format).await
|
|
682
|
+
}
|
|
683
|
+
|
|
684
|
+
#[cfg(test)]
mod tests {
    // Unit tests for the pandoc JSON-AST helpers in this module. Most tests
    // build small Pandoc AST fragments with `serde_json::json!` (objects with
    // a type tag "t" and contents "c") and check the pure extraction helpers.
    // The last two are tokio integration tests that are skipped when a usable
    // pandoc binary is not installed.
    use super::*;
    use serde_json::json;

    // Pandoc meta keys are remapped onto Kreuzberg's canonical metadata names
    // (e.g. "abstract" -> "summary", "author"/"contributors" -> "authors").
    #[test]
    fn test_get_pandoc_key() {
        assert_eq!(get_pandoc_key("abstract"), "summary");
        assert_eq!(get_pandoc_key("date"), "created_at");
        assert_eq!(get_pandoc_key("author"), "authors");
        assert_eq!(get_pandoc_key("contributors"), "authors");
        assert_eq!(get_pandoc_key("institute"), "organization");
        assert_eq!(get_pandoc_key("title"), "title");
    }

    // MetaString nodes carry their value directly in "c".
    #[test]
    fn test_extract_meta_value_string() {
        let node = json!({
            "t": "MetaString",
            "c": "Test Title"
        });

        let result = extract_meta_value(&node).unwrap();
        assert_eq!(result, Value::String("Test Title".to_string()));
    }

    // MetaInlines are flattened to a single string; "Space" becomes " ".
    #[test]
    fn test_extract_meta_value_inlines() {
        let node = json!({
            "t": "MetaInlines",
            "c": [
                {"t": "Str", "c": "Hello"},
                {"t": "Space"},
                {"t": "Str", "c": "World"}
            ]
        });

        let result = extract_meta_value(&node).unwrap();
        assert_eq!(result, Value::String("Hello World".to_string()));
    }

    // MetaList maps each element through extract_meta_value into a JSON array.
    #[test]
    fn test_extract_meta_value_list() {
        let node = json!({
            "t": "MetaList",
            "c": [
                {"t": "MetaString", "c": "Author1"},
                {"t": "MetaString", "c": "Author2"}
            ]
        });

        let result = extract_meta_value(&node).unwrap();
        assert_eq!(
            result,
            Value::Array(vec![
                Value::String("Author1".to_string()),
                Value::String("Author2".to_string())
            ])
        );
    }

    // --- extract_inline_text: one test per supported Pandoc inline type ---

    #[test]
    fn test_extract_inline_text_str() {
        let node = json!({"t": "Str", "c": "Hello"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "Hello");
    }

    // "Space" has no contents; it renders as a single space character.
    #[test]
    fn test_extract_inline_text_space() {
        let node = json!({"t": "Space"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, " ");
    }

    // Emphasis wrappers are transparent: only the inner text is kept.
    #[test]
    fn test_extract_inline_text_emph() {
        let node = json!({
            "t": "Emph",
            "c": [
                {"t": "Str", "c": "emphasized"}
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "emphasized");
    }

    // Code inlines are [attrs, text]; the attrs triple is ignored.
    #[test]
    fn test_extract_inline_text_code() {
        let node = json!({
            "t": "Code",
            "c": [["", [], []], "code_snippet"]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "code_snippet");
    }

    // A mixed sequence of inlines concatenates into one string value.
    #[test]
    fn test_extract_inlines() {
        let inlines = vec![
            json!({"t": "Str", "c": "Hello"}),
            json!({"t": "Space"}),
            json!({"t": "Emph", "c": [{"t": "Str", "c": "World"}]}),
        ];

        let result = extract_inlines(&inlines).unwrap();
        assert_eq!(result, Value::String("Hello World".to_string()));
    }

    // Cite blocks store their citation list as the first element of "c";
    // each entry's citationId is collected in order.
    #[test]
    fn test_extract_citations_from_blocks() {
        let blocks = vec![json!({
            "t": "Cite",
            "c": [
                [
                    {"citationId": "cite1"},
                    {"citationId": "cite2"}
                ],
                []
            ]
        })];

        let mut citations = Vec::new();
        extract_citations_from_blocks(&blocks, &mut citations);

        assert_eq!(citations, vec!["cite1", "cite2"]);
    }

    // End-to-end over a "meta" object: values are extracted AND keys are
    // remapped (author -> authors, date -> created_at).
    #[test]
    fn test_extract_metadata_from_json() {
        let json = json!({
            "meta": {
                "title": {"t": "MetaString", "c": "Test Document"},
                "author": {"t": "MetaList", "c": [
                    {"t": "MetaString", "c": "Author One"}
                ]},
                "date": {"t": "MetaString", "c": "2024-01-01"}
            },
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();

        assert_eq!(
            metadata.get("title").unwrap(),
            &Value::String("Test Document".to_string())
        );
        assert_eq!(
            metadata.get("authors").unwrap(),
            &Value::Array(vec![Value::String("Author One".to_string())])
        );
        assert_eq!(
            metadata.get("created_at").unwrap(),
            &Value::String("2024-01-01".to_string())
        );
    }

    // Keys not present in VALID_METADATA_KEYS are dropped from the result.
    #[test]
    fn test_metadata_field_filtering() {
        let json = json!({
            "meta": {
                "title": {"t": "MetaString", "c": "Valid Title"},
                "invalid_field": {"t": "MetaString", "c": "Should be filtered"},
                "random_key": {"t": "MetaString", "c": "Not in valid keys"},
                "author": {"t": "MetaString", "c": "Valid Author"}
            },
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();

        assert!(metadata.contains_key("title"));
        assert!(metadata.contains_key("authors"));

        assert!(!metadata.contains_key("invalid_field"));
        assert!(!metadata.contains_key("random_key"));
    }

    // MetaBlocks: paragraphs are flattened and joined with single spaces.
    #[test]
    fn test_extract_meta_value_meta_blocks() {
        let node = json!({
            "t": "MetaBlocks",
            "c": [
                {
                    "t": "Para",
                    "c": [
                        {"t": "Str", "c": "First"},
                        {"t": "Space"},
                        {"t": "Str", "c": "paragraph"}
                    ]
                },
                {
                    "t": "Para",
                    "c": [
                        {"t": "Str", "c": "Second"},
                        {"t": "Space"},
                        {"t": "Str", "c": "paragraph"}
                    ]
                }
            ]
        });

        let result = extract_meta_value(&node).unwrap();
        assert_eq!(result, Value::String("First paragraph Second paragraph".to_string()));
    }

    // MetaMap: each entry is recursively extracted into a JSON object.
    #[test]
    fn test_extract_meta_value_meta_map() {
        let node = json!({
            "t": "MetaMap",
            "c": {
                "key1": {"t": "MetaString", "c": "value1"},
                "key2": {"t": "MetaString", "c": "value2"}
            }
        });

        let result = extract_meta_value(&node).unwrap();
        let obj = result.as_object().unwrap();
        assert_eq!(obj.get("key1").unwrap(), &Value::String("value1".to_string()));
        assert_eq!(obj.get("key2").unwrap(), &Value::String("value2".to_string()));
    }

    #[test]
    fn test_extract_inline_text_strong() {
        let node = json!({
            "t": "Strong",
            "c": [
                {"t": "Str", "c": "bold"}
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "bold");
    }

    // Link contents are [attrs, inlines, target]; only the link text is kept,
    // the URL is discarded.
    #[test]
    fn test_extract_inline_text_link() {
        let node = json!({
            "t": "Link",
            "c": [
                ["", [], []],
                [{"t": "Str", "c": "link text"}],
                ["https://example.com", ""]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "link text");
    }

    // Image contents mirror Link; the alt text is extracted, the path is not.
    #[test]
    fn test_extract_inline_text_image() {
        let node = json!({
            "t": "Image",
            "c": [
                ["", [], []],
                [{"t": "Str", "c": "alt text"}],
                ["image.png", ""]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "alt text");
    }

    // Quoted contents are [quote-type, inlines]; quote marks are not added.
    #[test]
    fn test_extract_inline_text_quoted() {
        let node = json!({
            "t": "Quoted",
            "c": [
                {"t": "DoubleQuote"},
                [{"t": "Str", "c": "quoted text"}]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "quoted text");
    }

    // Cite inlines yield their rendered text (second element), not the ids.
    #[test]
    fn test_extract_inline_text_cite() {
        let node = json!({
            "t": "Cite",
            "c": [
                [{"citationId": "cite1"}],
                [{"t": "Str", "c": "citation text"}]
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "citation text");
    }

    // Math contents are [math-type, tex-source]; the raw TeX is returned.
    #[test]
    fn test_extract_inline_text_math() {
        let node = json!({
            "t": "Math",
            "c": [
                {"t": "InlineMath"},
                "x^2 + y^2"
            ]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "x^2 + y^2");
    }

    // Both hard and soft breaks render as a newline.
    #[test]
    fn test_extract_inline_text_linebreak() {
        let node = json!({"t": "LineBreak"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "\n");
    }

    #[test]
    fn test_extract_inline_text_softbreak() {
        let node = json!({"t": "SoftBreak"});
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "\n");
    }

    // Strikeout/Superscript/Subscript/SmallCaps are transparent wrappers:
    // only the inner text survives, with no formatting markers.
    #[test]
    fn test_extract_inline_text_strikeout() {
        let node = json!({
            "t": "Strikeout",
            "c": [{"t": "Str", "c": "deleted"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "deleted");
    }

    #[test]
    fn test_extract_inline_text_superscript() {
        let node = json!({
            "t": "Superscript",
            "c": [{"t": "Str", "c": "2"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "2");
    }

    #[test]
    fn test_extract_inline_text_subscript() {
        let node = json!({
            "t": "Subscript",
            "c": [{"t": "Str", "c": "i"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "i");
    }

    #[test]
    fn test_extract_inline_text_smallcaps() {
        let node = json!({
            "t": "SmallCaps",
            "c": [{"t": "Str", "c": "small"}]
        });
        let result = extract_inline_text(&node).unwrap();
        assert_eq!(result, "small");
    }

    // Unrecognized inline types produce None rather than an error.
    #[test]
    fn test_extract_inline_text_unknown_type() {
        let node = json!({
            "t": "UnknownType",
            "c": "should be ignored"
        });
        let result = extract_inline_text(&node);
        assert!(result.is_none());
    }

    // Recursing through nested container blocks without any Cite nodes
    // must leave the citation list empty (and must not panic).
    #[test]
    fn test_extract_citations_from_nested_blocks() {
        let blocks = vec![json!({
            "t": "BulletList",
            "c": [
                [
                    {
                        "t": "Plain",
                        "c": [
                            {"t": "Str", "c": "text"}
                        ]
                    }
                ]
            ]
        })];

        let mut citations = Vec::new();
        extract_citations_from_blocks(&blocks, &mut citations);

        assert!(citations.is_empty());
    }

    // A top-level "citations" array is surfaced as a metadata field of
    // citation-id strings.
    #[test]
    fn test_extract_metadata_from_json_with_citations() {
        let json = json!({
            "meta": {
                "title": {"t": "MetaString", "c": "Paper"}
            },
            "citations": [
                {"citationId": "cite1"},
                {"citationId": "cite2"}
            ],
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();

        assert!(metadata.contains_key("citations"));
        let citations = metadata.get("citations").unwrap().as_array().unwrap();
        assert_eq!(citations.len(), 2);
        assert_eq!(citations[0], Value::String("cite1".to_string()));
        assert_eq!(citations[1], Value::String("cite2".to_string()));
    }

    // Empty meta -> empty metadata map, not an error.
    #[test]
    fn test_extract_metadata_from_json_empty_meta() {
        let json = json!({
            "meta": {},
            "blocks": []
        });

        let metadata = extract_metadata_from_json(&json).unwrap();
        assert!(metadata.is_empty());
    }

    // Empty MetaList/MetaMap collapse to None instead of empty containers.
    #[test]
    fn test_extract_meta_value_empty_list() {
        let node = json!({
            "t": "MetaList",
            "c": []
        });

        let result = extract_meta_value(&node);
        assert!(result.is_none());
    }

    #[test]
    fn test_extract_meta_value_empty_map() {
        let node = json!({
            "t": "MetaMap",
            "c": {}
        });

        let result = extract_meta_value(&node);
        assert!(result.is_none());
    }

    #[test]
    fn test_extract_inlines_empty() {
        let inlines = vec![];
        let result = extract_inlines(&inlines);
        assert!(result.is_none());
    }

    // Sanity check on the metadata whitelist used for field filtering.
    #[test]
    fn test_valid_metadata_keys_contains_standard_fields() {
        assert!(VALID_METADATA_KEYS.contains(&"title"));
        assert!(VALID_METADATA_KEYS.contains(&"authors"));
        assert!(VALID_METADATA_KEYS.contains(&"date"));
        assert!(VALID_METADATA_KEYS.contains(&"keywords"));
        assert!(VALID_METADATA_KEYS.contains(&"abstract"));
        assert!(VALID_METADATA_KEYS.contains(&"citations"));
    }

    // Keys without an explicit mapping pass through unchanged.
    #[test]
    fn test_get_pandoc_key_unmapped() {
        assert_eq!(get_pandoc_key("title"), "title");
        assert_eq!(get_pandoc_key("keywords"), "keywords");
        assert_eq!(get_pandoc_key("custom_field"), "custom_field");
    }

    // Integration test: the TempFile guard deletes its file when dropped.
    // Skipped (early return) when pandoc is not available/valid on this host.
    #[tokio::test]
    async fn test_tempfile_raii_cleanup() {
        use crate::extraction::pandoc::version::validate_pandoc_version;

        if validate_pandoc_version().await.is_err() {
            return;
        }

        let temp_path = std::env::temp_dir().join(format!("test_raii_{}.md", uuid::Uuid::new_v4()));

        {
            let _guard = TempFile::new(temp_path.clone());
            fs::write(&temp_path, b"test content").await.unwrap();
            assert!(temp_path.exists());
        }

        // Brief sleep: cleanup appears to happen asynchronously after drop —
        // the delay gives it time to complete before we assert.
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

        assert!(!temp_path.exists());
    }

    // NOTE(review): despite its name, this test only exercises the happy path
    // of extract_content on a small markdown file — it does not construct a
    // hanging pandoc process or verify kill-on-timeout behavior. Consider
    // renaming or extending it. Also skipped when pandoc is unavailable.
    #[tokio::test]
    async fn test_extract_content_timeout_kills_process() {
        use crate::extraction::pandoc::version::validate_pandoc_version;

        if validate_pandoc_version().await.is_err() {
            return;
        }

        let temp_dir = std::env::temp_dir();
        let test_file = temp_dir.join(format!("test_timeout_{}.md", uuid::Uuid::new_v4()));
        fs::write(&test_file, b"# Test\n\nContent").await.unwrap();

        let result = extract_content(&test_file, "markdown").await;
        assert!(result.is_ok());

        // Best-effort cleanup; failure to remove is intentionally ignored.
        let _ = fs::remove_file(&test_file).await;
    }
}
|