kreuzberg 4.0.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (265)
  1. checksums.yaml +7 -0
  2. data/.gitignore +8 -0
  3. data/.rspec +3 -0
  4. data/.rubocop.yaml +534 -0
  5. data/Gemfile +9 -0
  6. data/Gemfile.lock +157 -0
  7. data/README.md +421 -0
  8. data/Rakefile +25 -0
  9. data/Steepfile +47 -0
  10. data/examples/async_patterns.rb +340 -0
  11. data/ext/kreuzberg_rb/extconf.rb +35 -0
  12. data/ext/kreuzberg_rb/native/Cargo.toml +36 -0
  13. data/ext/kreuzberg_rb/native/README.md +425 -0
  14. data/ext/kreuzberg_rb/native/build.rs +17 -0
  15. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -0
  16. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -0
  17. data/ext/kreuzberg_rb/native/include/strings.h +20 -0
  18. data/ext/kreuzberg_rb/native/include/unistd.h +47 -0
  19. data/ext/kreuzberg_rb/native/src/lib.rs +2939 -0
  20. data/extconf.rb +28 -0
  21. data/kreuzberg.gemspec +105 -0
  22. data/lib/kreuzberg/api_proxy.rb +142 -0
  23. data/lib/kreuzberg/cache_api.rb +45 -0
  24. data/lib/kreuzberg/cli.rb +55 -0
  25. data/lib/kreuzberg/cli_proxy.rb +127 -0
  26. data/lib/kreuzberg/config.rb +684 -0
  27. data/lib/kreuzberg/errors.rb +50 -0
  28. data/lib/kreuzberg/extraction_api.rb +84 -0
  29. data/lib/kreuzberg/mcp_proxy.rb +186 -0
  30. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -0
  31. data/lib/kreuzberg/post_processor_protocol.rb +86 -0
  32. data/lib/kreuzberg/result.rb +216 -0
  33. data/lib/kreuzberg/setup_lib_path.rb +79 -0
  34. data/lib/kreuzberg/validator_protocol.rb +89 -0
  35. data/lib/kreuzberg/version.rb +5 -0
  36. data/lib/kreuzberg.rb +82 -0
  37. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  38. data/sig/kreuzberg/internal.rbs +184 -0
  39. data/sig/kreuzberg.rbs +468 -0
  40. data/spec/binding/cache_spec.rb +227 -0
  41. data/spec/binding/cli_proxy_spec.rb +87 -0
  42. data/spec/binding/cli_spec.rb +54 -0
  43. data/spec/binding/config_spec.rb +345 -0
  44. data/spec/binding/config_validation_spec.rb +283 -0
  45. data/spec/binding/error_handling_spec.rb +213 -0
  46. data/spec/binding/errors_spec.rb +66 -0
  47. data/spec/binding/plugins/ocr_backend_spec.rb +307 -0
  48. data/spec/binding/plugins/postprocessor_spec.rb +269 -0
  49. data/spec/binding/plugins/validator_spec.rb +274 -0
  50. data/spec/examples.txt +104 -0
  51. data/spec/fixtures/config.toml +39 -0
  52. data/spec/fixtures/config.yaml +42 -0
  53. data/spec/fixtures/invalid_config.toml +4 -0
  54. data/spec/smoke/package_spec.rb +178 -0
  55. data/spec/spec_helper.rb +42 -0
  56. data/vendor/kreuzberg/Cargo.toml +134 -0
  57. data/vendor/kreuzberg/README.md +175 -0
  58. data/vendor/kreuzberg/build.rs +460 -0
  59. data/vendor/kreuzberg/src/api/error.rs +81 -0
  60. data/vendor/kreuzberg/src/api/handlers.rs +199 -0
  61. data/vendor/kreuzberg/src/api/mod.rs +79 -0
  62. data/vendor/kreuzberg/src/api/server.rs +353 -0
  63. data/vendor/kreuzberg/src/api/types.rs +170 -0
  64. data/vendor/kreuzberg/src/bin/profile_extract.rs +455 -0
  65. data/vendor/kreuzberg/src/cache/mod.rs +1143 -0
  66. data/vendor/kreuzberg/src/chunking/mod.rs +677 -0
  67. data/vendor/kreuzberg/src/core/batch_mode.rs +35 -0
  68. data/vendor/kreuzberg/src/core/config.rs +1032 -0
  69. data/vendor/kreuzberg/src/core/extractor.rs +903 -0
  70. data/vendor/kreuzberg/src/core/io.rs +327 -0
  71. data/vendor/kreuzberg/src/core/mime.rs +615 -0
  72. data/vendor/kreuzberg/src/core/mod.rs +42 -0
  73. data/vendor/kreuzberg/src/core/pipeline.rs +906 -0
  74. data/vendor/kreuzberg/src/embeddings.rs +323 -0
  75. data/vendor/kreuzberg/src/error.rs +431 -0
  76. data/vendor/kreuzberg/src/extraction/archive.rs +954 -0
  77. data/vendor/kreuzberg/src/extraction/docx.rs +40 -0
  78. data/vendor/kreuzberg/src/extraction/email.rs +854 -0
  79. data/vendor/kreuzberg/src/extraction/excel.rs +688 -0
  80. data/vendor/kreuzberg/src/extraction/html.rs +553 -0
  81. data/vendor/kreuzberg/src/extraction/image.rs +368 -0
  82. data/vendor/kreuzberg/src/extraction/libreoffice.rs +564 -0
  83. data/vendor/kreuzberg/src/extraction/mod.rs +77 -0
  84. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -0
  85. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -0
  86. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -0
  87. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +128 -0
  88. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +275 -0
  89. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +178 -0
  90. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +491 -0
  91. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +496 -0
  92. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +1188 -0
  93. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +162 -0
  94. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -0
  95. data/vendor/kreuzberg/src/extraction/structured.rs +490 -0
  96. data/vendor/kreuzberg/src/extraction/table.rs +328 -0
  97. data/vendor/kreuzberg/src/extraction/text.rs +269 -0
  98. data/vendor/kreuzberg/src/extraction/xml.rs +333 -0
  99. data/vendor/kreuzberg/src/extractors/archive.rs +425 -0
  100. data/vendor/kreuzberg/src/extractors/docx.rs +479 -0
  101. data/vendor/kreuzberg/src/extractors/email.rs +129 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +344 -0
  103. data/vendor/kreuzberg/src/extractors/html.rs +410 -0
  104. data/vendor/kreuzberg/src/extractors/image.rs +195 -0
  105. data/vendor/kreuzberg/src/extractors/mod.rs +268 -0
  106. data/vendor/kreuzberg/src/extractors/pandoc.rs +201 -0
  107. data/vendor/kreuzberg/src/extractors/pdf.rs +496 -0
  108. data/vendor/kreuzberg/src/extractors/pptx.rs +234 -0
  109. data/vendor/kreuzberg/src/extractors/structured.rs +126 -0
  110. data/vendor/kreuzberg/src/extractors/text.rs +242 -0
  111. data/vendor/kreuzberg/src/extractors/xml.rs +128 -0
  112. data/vendor/kreuzberg/src/image/dpi.rs +164 -0
  113. data/vendor/kreuzberg/src/image/mod.rs +6 -0
  114. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -0
  115. data/vendor/kreuzberg/src/image/resize.rs +89 -0
  116. data/vendor/kreuzberg/src/keywords/config.rs +154 -0
  117. data/vendor/kreuzberg/src/keywords/mod.rs +237 -0
  118. data/vendor/kreuzberg/src/keywords/processor.rs +267 -0
  119. data/vendor/kreuzberg/src/keywords/rake.rs +294 -0
  120. data/vendor/kreuzberg/src/keywords/types.rs +68 -0
  121. data/vendor/kreuzberg/src/keywords/yake.rs +163 -0
  122. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -0
  123. data/vendor/kreuzberg/src/lib.rs +102 -0
  124. data/vendor/kreuzberg/src/mcp/mod.rs +32 -0
  125. data/vendor/kreuzberg/src/mcp/server.rs +1966 -0
  126. data/vendor/kreuzberg/src/ocr/cache.rs +469 -0
  127. data/vendor/kreuzberg/src/ocr/error.rs +37 -0
  128. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -0
  129. data/vendor/kreuzberg/src/ocr/mod.rs +58 -0
  130. data/vendor/kreuzberg/src/ocr/processor.rs +847 -0
  131. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -0
  132. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -0
  133. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -0
  134. data/vendor/kreuzberg/src/ocr/types.rs +393 -0
  135. data/vendor/kreuzberg/src/ocr/utils.rs +47 -0
  136. data/vendor/kreuzberg/src/ocr/validation.rs +206 -0
  137. data/vendor/kreuzberg/src/pdf/error.rs +122 -0
  138. data/vendor/kreuzberg/src/pdf/images.rs +139 -0
  139. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -0
  140. data/vendor/kreuzberg/src/pdf/mod.rs +50 -0
  141. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -0
  142. data/vendor/kreuzberg/src/pdf/table.rs +420 -0
  143. data/vendor/kreuzberg/src/pdf/text.rs +161 -0
  144. data/vendor/kreuzberg/src/plugins/extractor.rs +1010 -0
  145. data/vendor/kreuzberg/src/plugins/mod.rs +209 -0
  146. data/vendor/kreuzberg/src/plugins/ocr.rs +629 -0
  147. data/vendor/kreuzberg/src/plugins/processor.rs +641 -0
  148. data/vendor/kreuzberg/src/plugins/registry.rs +1324 -0
  149. data/vendor/kreuzberg/src/plugins/traits.rs +258 -0
  150. data/vendor/kreuzberg/src/plugins/validator.rs +955 -0
  151. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -0
  152. data/vendor/kreuzberg/src/text/mod.rs +19 -0
  153. data/vendor/kreuzberg/src/text/quality.rs +697 -0
  154. data/vendor/kreuzberg/src/text/string_utils.rs +217 -0
  155. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -0
  156. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -0
  157. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -0
  158. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -0
  159. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -0
  160. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -0
  161. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -0
  162. data/vendor/kreuzberg/src/types.rs +873 -0
  163. data/vendor/kreuzberg/src/utils/mod.rs +17 -0
  164. data/vendor/kreuzberg/src/utils/quality.rs +959 -0
  165. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -0
  166. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -0
  167. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -0
  168. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -0
  169. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -0
  170. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -0
  171. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -0
  172. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -0
  173. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -0
  174. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -0
  175. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -0
  176. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -0
  177. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -0
  178. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -0
  179. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -0
  180. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -0
  181. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -0
  182. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -0
  183. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -0
  184. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -0
  185. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -0
  186. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -0
  187. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -0
  188. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -0
  189. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -0
  190. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -0
  191. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -0
  192. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -0
  193. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -0
  194. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -0
  195. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -0
  196. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -0
  197. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -0
  198. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -0
  199. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -0
  200. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -0
  201. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -0
  202. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -0
  203. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -0
  204. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -0
  205. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -0
  206. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -0
  207. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -0
  208. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -0
  209. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -0
  210. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -0
  211. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -0
  212. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -0
  213. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -0
  214. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -0
  215. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -0
  216. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -0
  217. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -0
  218. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -0
  219. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -0
  220. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -0
  221. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -0
  222. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -0
  223. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -0
  224. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -0
  225. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -0
  226. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -0
  227. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -0
  228. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -0
  229. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -0
  230. data/vendor/kreuzberg/tests/api_tests.rs +966 -0
  231. data/vendor/kreuzberg/tests/archive_integration.rs +543 -0
  232. data/vendor/kreuzberg/tests/batch_orchestration.rs +542 -0
  233. data/vendor/kreuzberg/tests/batch_processing.rs +304 -0
  234. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +92 -0
  235. data/vendor/kreuzberg/tests/concurrency_stress.rs +509 -0
  236. data/vendor/kreuzberg/tests/config_features.rs +580 -0
  237. data/vendor/kreuzberg/tests/config_loading_tests.rs +439 -0
  238. data/vendor/kreuzberg/tests/core_integration.rs +493 -0
  239. data/vendor/kreuzberg/tests/csv_integration.rs +424 -0
  240. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +124 -0
  241. data/vendor/kreuzberg/tests/email_integration.rs +325 -0
  242. data/vendor/kreuzberg/tests/error_handling.rs +393 -0
  243. data/vendor/kreuzberg/tests/format_integration.rs +159 -0
  244. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -0
  245. data/vendor/kreuzberg/tests/image_integration.rs +253 -0
  246. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -0
  247. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -0
  248. data/vendor/kreuzberg/tests/mime_detection.rs +428 -0
  249. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -0
  250. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -0
  251. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -0
  252. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -0
  253. data/vendor/kreuzberg/tests/pandoc_integration.rs +503 -0
  254. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -0
  255. data/vendor/kreuzberg/tests/pipeline_integration.rs +1412 -0
  256. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -0
  257. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +561 -0
  258. data/vendor/kreuzberg/tests/plugin_system.rs +921 -0
  259. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -0
  260. data/vendor/kreuzberg/tests/registry_integration_tests.rs +607 -0
  261. data/vendor/kreuzberg/tests/security_validation.rs +404 -0
  262. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -0
  263. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -0
  264. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -0
  265. metadata +471 -0
data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs
@@ -0,0 +1,1188 @@
+use crate::error::{KreuzbergError, Result};
+#[cfg(feature = "quality")]
+use crate::text::normalize_spaces;
+use serde_json::Value;
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use tokio::fs;
+use tokio::process::Command;
+use tokio::time::{Duration, timeout};
+
+/// Default timeout for Pandoc operations (120 seconds)
+const PANDOC_TIMEOUT_SECONDS: u64 = 120;
+
+/// RAII guard for automatic temporary file cleanup
+struct TempFile {
+    path: PathBuf,
+}
+
+impl TempFile {
+    fn new(path: PathBuf) -> Self {
+        Self { path }
+    }
+}
+
+impl Drop for TempFile {
+    fn drop(&mut self) {
+        let path = self.path.clone();
+        tokio::spawn(async move {
+            let _ = fs::remove_file(&path).await;
+        });
+    }
+}
+
+/// Extract content from file using Pandoc (convert to markdown)
+#[allow(dead_code)]
+pub async fn extract_content(path: &Path, from_format: &str) -> Result<String> {
+    let child = Command::new("pandoc")
+        .arg(path)
+        .arg(format!("--from={}", from_format))
+        .arg("--to=markdown")
+        .arg("--standalone")
+        .arg("--wrap=preserve")
+        .arg("--quiet")
+        .stdout(std::process::Stdio::piped())
+        .stderr(std::process::Stdio::piped())
+        .spawn()
+        .map_err(|e| {
+            // Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
+            std::io::Error::other(format!("Failed to execute pandoc: {}", e))
+        })?;
+
+    let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
+        Ok(Ok(output)) => output,
+        Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
+        Err(_) => {
+            // Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
+            return Err(KreuzbergError::parsing(format!(
+                "Pandoc content extraction timed out after {} seconds",
+                PANDOC_TIMEOUT_SECONDS
+            )));
+        }
+    };
+
+    if !output.status.success() {
+        let stderr = String::from_utf8_lossy(&output.stderr);
+
+        // Subprocess error analysis - wrap only if format/parsing error detected ~keep
+        let stderr_lower = stderr.to_lowercase();
+        if stderr_lower.contains("format")
+            || stderr_lower.contains("unsupported")
+            || stderr_lower.contains("error:")
+            || stderr_lower.contains("failed")
+        {
+            return Err(KreuzbergError::parsing(format!(
+                "Pandoc format/parsing error: {}",
+                stderr
+            )));
+        }
+
+        // True system error - bubble up as IO error ~keep
+        return Err(std::io::Error::other(format!("Pandoc system error: {}", stderr)).into());
+    }
+
+    let content = String::from_utf8(output.stdout)
+        .map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc output: {}", e)))?;
+
+    #[cfg(feature = "quality")]
+    {
+        Ok(normalize_spaces(&content))
+    }
+    #[cfg(not(feature = "quality"))]
+    {
+        Ok(content)
+    }
+}
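A minimal calling sketch for the extract_content function above (an illustration, not part of this hunk): it assumes a Tokio runtime and a pandoc binary on PATH, and the wrapper name is hypothetical.

async fn docx_to_markdown(path: &std::path::Path) -> Result<String> {
    // "docx" is forwarded to pandoc's --from flag; stdout comes back as markdown text.
    extract_content(path, "docx").await
}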
+
+/// Extract metadata from file using Pandoc JSON output
+#[allow(dead_code)]
+pub async fn extract_metadata(path: &Path, from_format: &str) -> Result<HashMap<String, Value>> {
+    let child = Command::new("pandoc")
+        .arg(path)
+        .arg(format!("--from={}", from_format))
+        .arg("--to=json")
+        .arg("--standalone")
+        .arg("--quiet")
+        .stdout(std::process::Stdio::piped())
+        .stderr(std::process::Stdio::piped())
+        .spawn()
+        .map_err(|e| {
+            // Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
+            std::io::Error::other(format!("Failed to execute pandoc: {}", e))
+        })?;
+
+    let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
+        Ok(Ok(output)) => output,
+        Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
+        Err(_) => {
+            // Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
+            return Err(KreuzbergError::parsing(format!(
+                "Pandoc metadata extraction timed out after {} seconds",
+                PANDOC_TIMEOUT_SECONDS
+            )));
+        }
+    };
+
+    if !output.status.success() {
+        let stderr = String::from_utf8_lossy(&output.stderr);
+
+        // Subprocess error analysis - wrap only if format/parsing error detected ~keep
+        let stderr_lower = stderr.to_lowercase();
+        if stderr_lower.contains("format")
+            || stderr_lower.contains("unsupported")
+            || stderr_lower.contains("error:")
+            || stderr_lower.contains("failed")
+        {
+            return Err(KreuzbergError::parsing(format!(
+                "Pandoc metadata extraction format/parsing error: {}",
+                stderr
+            )));
+        }
+
+        // True system error - bubble up as IO error ~keep
+        return Err(std::io::Error::other(format!("Pandoc metadata extraction system error: {}", stderr)).into());
+    }
+
+    let json_content = String::from_utf8(output.stdout)
+        .map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc JSON output: {}", e)))?;
+
+    let json_data: Value = serde_json::from_str(&json_content)
+        .map_err(|e| KreuzbergError::parsing(format!("Failed to parse pandoc JSON: {}", e)))?;
+
+    extract_metadata_from_json(&json_data)
+}
+
+/// Valid metadata field names (must match Python's _VALID_METADATA_KEYS)
+const VALID_METADATA_KEYS: &[&str] = &[
+    "abstract",
+    "authors",
+    "categories",
+    "character_count",
+    "citations",
+    "code_blocks",
+    "comments",
+    "content",
+    "copyright",
+    "created_at",
+    "created_by",
+    "description",
+    "fonts",
+    "headers",
+    "height",
+    "identifier",
+    "keywords",
+    "languages",
+    "license",
+    "line_count",
+    "links",
+    "modified_at",
+    "modified_by",
+    "organization",
+    "parse_error",
+    "publisher",
+    "references",
+    "sheet_count",
+    "sheet_names",
+    "status",
+    "subject",
+    "subtitle",
+    "summary",
+    "title",
+    "total_cells",
+    "version",
+    "warning",
+    "width",
+    "word_count",
+    "email_from",
+    "email_to",
+    "email_cc",
+    "email_bcc",
+    "date",
+    "attachments",
+    "table_count",
+    "tables_summary",
+    "quality_score",
+    "image_preprocessing",
+    "source_format",
+    "converted_via",
+    "error",
+    "error_context",
+    "json_schema",
+    "notes",
+    "note",
+    "name",
+    "body",
+    "text",
+    "message",
+    "attributes",
+    "token_reduction",
+    "processing_errors",
+    "extraction_error",
+    "element_count",
+    "unique_elements",
+];
+
+/// Extract metadata from Pandoc JSON AST
+pub(crate) fn extract_metadata_from_json(json: &Value) -> Result<HashMap<String, Value>> {
+    let mut metadata = HashMap::new();
+
+    if let Some(meta) = json.get("meta").and_then(|m| m.as_object()) {
+        for (key, value) in meta {
+            let pandoc_key = get_pandoc_key(key);
+            if !VALID_METADATA_KEYS.contains(&pandoc_key.as_str()) {
+                continue;
+            }
+            if let Some(extracted) = extract_meta_value(value) {
+                metadata.insert(pandoc_key, extracted);
+            }
+        }
+    }
+
+    if let Some(blocks) = json.get("blocks").and_then(|b| b.as_array()) {
+        let mut citations = Vec::new();
+        extract_citations_from_blocks(blocks, &mut citations);
+
+        if !citations.is_empty() {
+            if let Some(existing) = metadata.get_mut("citations") {
+                if let Some(arr) = existing.as_array_mut() {
+                    for cite in citations {
+                        if !arr.contains(&Value::String(cite.clone())) {
+                            arr.push(Value::String(cite));
+                        }
+                    }
+                }
+            } else {
+                metadata.insert(
+                    "citations".to_string(),
+                    Value::Array(citations.into_iter().map(Value::String).collect()),
+                );
+            }
+        }
+    }
+
+    if let Some(citations) = json.get("citations").and_then(|c| c.as_array()) {
+        let cite_ids: Vec<String> = citations
+            .iter()
+            .filter_map(|c| c.get("citationId").and_then(|id| id.as_str()).map(String::from))
+            .collect();
+
+        if !cite_ids.is_empty() {
+            metadata.insert(
+                "citations".to_string(),
+                Value::Array(cite_ids.into_iter().map(Value::String).collect()),
+            );
+        }
+    }
+
+    Ok(metadata)
+}
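To make the mapping concrete, a small sketch of the tagged metadata shape Pandoc emits and what extract_metadata_from_json keeps after get_pandoc_key renaming and VALID_METADATA_KEYS filtering (illustrative values, not taken from the package):

let ast = serde_json::json!({
    "meta": {
        "title":  {"t": "MetaString", "c": "Report"},
        "author": {"t": "MetaInlines", "c": [{"t": "Str", "c": "Jane"}]},
        "custom": {"t": "MetaString", "c": "not a recognised key"}
    },
    "blocks": []
});
let meta = extract_metadata_from_json(&ast).unwrap();
// meta contains "title" and "authors" (renamed from "author"); "custom" is filtered out.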
+
+/// Extract markdown content from Pandoc JSON AST
+///
+/// Converts the JSON AST blocks back to markdown format, similar to what
+/// `pandoc --to=markdown` would produce. This allows us to extract both
+/// content and metadata from a single JSON extraction.
+pub(crate) fn extract_content_from_json(json: &Value) -> Result<String> {
+    let mut content = String::new();
+
+    if let Some(meta) = json.get("meta").and_then(|m| m.as_object())
+        && let Some(title_node) = meta.get("title")
+        && let Some(title_value) = extract_meta_value(title_node)
+        && let Some(title_str) = title_value.as_str()
+    {
+        content.push_str(&format!("# {}\n\n", title_str));
+    }
+
+    if let Some(blocks) = json.get("blocks").and_then(|b| b.as_array()) {
+        for block in blocks {
+            if let Some(text) = extract_block_text(block) {
+                if !content.is_empty() && !content.ends_with("\n\n") {
+                    content.push_str("\n\n");
+                }
+                content.push_str(&text);
+            }
+        }
+    }
+
+    Ok(content)
+}
+
+/// Extract text from a Pandoc JSON AST block
+fn extract_block_text(block: &Value) -> Option<String> {
+    let obj = block.as_object()?;
+    let block_type = obj.get("t")?.as_str()?;
+    let content = obj.get("c");
+
+    match block_type {
+        "Para" | "Plain" => {
+            if let Some(inlines) = content.and_then(|c| c.as_array()) {
+                return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
+            }
+        }
+        "Header" => {
+            if let Some(arr) = content.and_then(|c| c.as_array())
+                && arr.len() >= 3
+                && let Some(level) = arr[0].as_u64()
+                && let Some(inlines) = arr[2].as_array()
+            {
+                let header_text = extract_inlines(inlines).and_then(|v| v.as_str().map(String::from))?;
+                let prefix = "#".repeat(level as usize);
+                return Some(format!("{} {}", prefix, header_text));
+            }
+        }
+        "CodeBlock" => {
+            if let Some(arr) = content.and_then(|c| c.as_array())
+                && arr.len() >= 2
+                && let Some(code) = arr[1].as_str()
+            {
+                return Some(format!("```\n{}\n```", code));
+            }
+        }
+        "BlockQuote" => {
+            if let Some(blocks) = content.and_then(|c| c.as_array()) {
+                let mut quote_text = String::new();
+                for inner_block in blocks {
+                    if let Some(text) = extract_block_text(inner_block) {
+                        quote_text.push_str("> ");
+                        quote_text.push_str(&text);
+                        quote_text.push('\n');
+                    }
+                }
+                return Some(quote_text.trim_end().to_string());
+            }
+        }
+        "BulletList" => {
+            if let Some(items) = content.and_then(|c| c.as_array()) {
+                let mut list_text = String::new();
+                for item in items {
+                    if let Some(item_blocks) = item.as_array() {
+                        for block in item_blocks {
+                            if let Some(text) = extract_block_text(block) {
+                                list_text.push_str("- ");
+                                list_text.push_str(&text);
+                                list_text.push('\n');
+                            }
+                        }
+                    }
+                }
+                return Some(list_text.trim_end().to_string());
+            }
+        }
+        "OrderedList" => {
+            if let Some(arr) = content.and_then(|c| c.as_array())
+                && arr.len() >= 2
+                && let Some(items) = arr[1].as_array()
+            {
+                let mut list_text = String::new();
+                for (idx, item) in items.iter().enumerate() {
+                    if let Some(item_blocks) = item.as_array() {
+                        for block in item_blocks {
+                            if let Some(text) = extract_block_text(block) {
+                                list_text.push_str(&format!("{}. {}\n", idx + 1, text));
+                            }
+                        }
+                    }
+                }
+                return Some(list_text.trim_end().to_string());
+            }
+        }
+        "HorizontalRule" => {
+            return Some("---".to_string());
+        }
+        _ => {}
+    }
+
+    None
+}
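A worked example of the block conversion above, using the Header shape from Pandoc's AST (level, attributes, inlines); the assertion mirrors how the match arm builds the prefix and is illustrative only:

let block = serde_json::json!({
    "t": "Header",
    "c": [2, ["", [], []], [{"t": "Str", "c": "Results"}]]
});
assert_eq!(extract_block_text(&block), Some("## Results".to_string()));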
+
+/// Map Pandoc metadata keys to standard keys
+fn get_pandoc_key(key: &str) -> String {
+    match key {
+        "abstract" => "summary".to_string(),
+        "date" => "created_at".to_string(),
+        "contributors" | "author" => "authors".to_string(),
+        "institute" => "organization".to_string(),
+        _ => key.to_string(),
+    }
+}
+
+/// Extract value from Pandoc metadata node
+fn extract_meta_value(node: &Value) -> Option<Value> {
+    if let Some(obj) = node.as_object() {
+        let node_type = obj.get("t")?.as_str()?;
+        let content = obj.get("c");
+
+        match node_type {
+            "MetaString" => {
+                if let Some(s) = content.and_then(|c| c.as_str()) {
+                    return Some(Value::String(s.to_string()));
+                }
+            }
+            "MetaInlines" => {
+                if let Some(inlines) = content.and_then(|c| c.as_array()) {
+                    return extract_inlines(inlines);
+                }
+            }
+            "MetaList" => {
+                if let Some(list) = content.and_then(|c| c.as_array()) {
+                    let mut values = Vec::new();
+                    for item in list {
+                        if let Some(val) = extract_meta_value(item) {
+                            if let Some(arr) = val.as_array() {
+                                values.extend_from_slice(arr);
+                            } else {
+                                values.push(val);
+                            }
+                        }
+                    }
+                    if !values.is_empty() {
+                        return Some(Value::Array(values));
+                    }
+                }
+            }
+            "MetaBlocks" => {
+                if let Some(blocks) = content.and_then(|c| c.as_array()) {
+                    let mut texts = Vec::new();
+                    for block in blocks {
+                        if let Some(block_obj) = block.as_object()
+                            && block_obj.get("t")?.as_str()? == "Para"
+                            && let Some(para_content) = block_obj.get("c").and_then(|c| c.as_array())
+                            && let Some(text) = extract_inlines(para_content)
+                            && let Some(s) = text.as_str()
+                        {
+                            texts.push(s.to_string());
+                        }
+                    }
+                    if !texts.is_empty() {
+                        return Some(Value::String(texts.join(" ")));
+                    }
+                }
+            }
+            "MetaMap" => {
+                if let Some(map) = content.and_then(|c| c.as_object()) {
+                    let mut result = serde_json::Map::new();
+                    for (k, v) in map {
+                        if let Some(val) = extract_meta_value(v) {
+                            result.insert(k.clone(), val);
+                        }
+                    }
+                    if !result.is_empty() {
+                        return Some(Value::Object(result));
+                    }
+                }
+            }
+            _ => {}
+        }
+    }
+
+    None
+}
+
+/// Extract inline text from Pandoc inline nodes
+fn extract_inlines(inlines: &[Value]) -> Option<Value> {
+    let mut texts = Vec::new();
+
+    for inline in inlines {
+        if let Some(text) = extract_inline_text(inline) {
+            texts.push(text);
+        }
+    }
+
+    let result = texts.join("");
+    if result.is_empty() {
+        None
+    } else {
+        Some(Value::String(result))
+    }
+}
+
+/// Extract text from a single inline node
+fn extract_inline_text(node: &Value) -> Option<String> {
+    if let Some(obj) = node.as_object() {
+        let node_type = obj.get("t")?.as_str()?;
+
+        match node_type {
+            "Str" => {
+                return obj.get("c")?.as_str().map(String::from);
+            }
+            "Space" => {
+                return Some(" ".to_string());
+            }
+            "Emph" | "Strong" | "Strikeout" | "Superscript" | "Subscript" | "SmallCaps" => {
+                if let Some(content) = obj.get("c").and_then(|c| c.as_array()) {
+                    return extract_inlines(content).and_then(|v| v.as_str().map(String::from));
+                }
+            }
+            "Code" => {
+                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
+                    && arr.len() == 2
+                {
+                    return arr[1].as_str().map(String::from);
+                }
+            }
+            "Link" | "Image" => {
+                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
+                    && arr.len() == 3
+                    && let Some(inlines) = arr[1].as_array()
+                {
+                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
+                }
+            }
+            "Quoted" => {
+                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
+                    && arr.len() == 2
+                    && let Some(inlines) = arr[1].as_array()
+                {
+                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
+                }
+            }
+            "Cite" => {
+                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
+                    && arr.len() == 2
+                    && let Some(inlines) = arr[1].as_array()
+                {
+                    return extract_inlines(inlines).and_then(|v| v.as_str().map(String::from));
+                }
+            }
+            "Math" => {
+                if let Some(arr) = obj.get("c").and_then(|c| c.as_array())
+                    && arr.len() == 2
+                {
+                    return arr[1].as_str().map(String::from);
+                }
+            }
+            "LineBreak" | "SoftBreak" => {
+                return Some("\n".to_string());
+            }
+            _ => {}
+        }
+    }
+
+    None
+}
+
+/// Extract citations from block nodes
+fn extract_citations_from_blocks(blocks: &[Value], citations: &mut Vec<String>) {
+    for block in blocks {
+        if let Some(obj) = block.as_object() {
+            let block_type = obj.get("t").and_then(|t| t.as_str());
+
+            if block_type == Some("Cite")
+                && let Some(arr) = obj.get("c").and_then(|c| c.as_array())
+                && let Some(cite_list) = arr.first().and_then(|c| c.as_array())
+            {
+                for cite in cite_list {
+                    if let Some(cite_id) = cite.get("citationId").and_then(|id| id.as_str()) {
+                        citations.push(cite_id.to_string());
+                    }
+                }
+            }
+
+            if let Some(content) = obj.get("c") {
+                if let Some(nested_blocks) = content.as_array() {
+                    extract_citations_from_blocks(nested_blocks, citations);
+                } else if let Some(nested_obj) = content.as_object() {
+                    for value in nested_obj.values() {
+                        if let Some(arr) = value.as_array() {
+                            extract_citations_from_blocks(arr, citations);
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Wrapper functions for backwards compatibility
+pub async fn extract_with_pandoc(path: &Path, from_format: &str) -> Result<(String, HashMap<String, Value>)> {
+    let child = Command::new("pandoc")
+        .arg(path)
+        .arg(format!("--from={}", from_format))
+        .arg("--to=json")
+        .arg("--standalone")
+        .arg("--quiet")
+        .stdout(std::process::Stdio::piped())
+        .stderr(std::process::Stdio::piped())
+        .spawn()
+        .map_err(|e| {
+            // Failed to execute pandoc - this is an IO error (command not found, etc.) ~keep
+            std::io::Error::other(format!("Failed to execute pandoc: {}", e))
+        })?;
+
+    let output = match timeout(Duration::from_secs(PANDOC_TIMEOUT_SECONDS), child.wait_with_output()).await {
+        Ok(Ok(output)) => output,
+        Ok(Err(e)) => return Err(std::io::Error::other(format!("Failed to wait for pandoc: {}", e)).into()),
+        Err(_) => {
+            // Timeout - child was already consumed by wait_with_output(), process will be killed on drop ~keep
+            return Err(KreuzbergError::parsing(format!(
+                "Pandoc extraction timed out after {} seconds",
+                PANDOC_TIMEOUT_SECONDS
+            )));
+        }
+    };
+
+    if !output.status.success() {
+        let stderr = String::from_utf8_lossy(&output.stderr);
+
+        // Subprocess error analysis - wrap only if format/parsing error detected ~keep
+        let stderr_lower = stderr.to_lowercase();
+        if stderr_lower.contains("format")
+            || stderr_lower.contains("unsupported")
+            || stderr_lower.contains("error:")
+            || stderr_lower.contains("failed")
+        {
+            return Err(KreuzbergError::parsing(format!(
+                "Pandoc format/parsing error: {}",
+                stderr
+            )));
+        }
+
+        // True system error - bubble up as IO error ~keep
+        return Err(std::io::Error::other(format!("Pandoc system error: {}", stderr)).into());
+    }
+
+    let json_content = String::from_utf8(output.stdout)
+        .map_err(|e| KreuzbergError::parsing(format!("Failed to decode pandoc JSON output: {}", e)))?;
+
+    let json_data: Value = serde_json::from_str(&json_content)
+        .map_err(|e| KreuzbergError::parsing(format!("Failed to parse pandoc JSON: {}", e)))?;
+
+    let content = extract_content_from_json(&json_data)?;
+    let metadata = extract_metadata_from_json(&json_data)?;
+
+    #[cfg(feature = "quality")]
+    {
+        Ok((normalize_spaces(&content), metadata))
+    }
+    #[cfg(not(feature = "quality"))]
+    {
+        Ok((content, metadata))
+    }
+}
+
+pub async fn extract_with_pandoc_from_bytes(
+    bytes: &[u8],
+    from_format: &str,
+    extension: &str,
+) -> Result<(String, HashMap<String, Value>)> {
+    let temp_dir = std::env::temp_dir();
+    let temp_file_path = temp_dir.join(format!(
+        "pandoc_temp_{}_{}.{}",
+        std::process::id(),
+        uuid::Uuid::new_v4(),
+        extension
+    ));
+
+    // RAII guard ensures cleanup on all paths including panic ~keep
+    let _temp_guard = TempFile::new(temp_file_path.clone());
+
+    fs::write(&temp_file_path, bytes).await?;
+
+    extract_with_pandoc(&temp_file_path, from_format).await
+}
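A short usage sketch for the two public wrappers above (illustrative, not part of the hunk; it assumes a Tokio runtime, pandoc on PATH, and hypothetical input names):

async fn demo(org_path: &std::path::Path, epub_bytes: &[u8]) -> Result<()> {
    // Path-based wrapper: pandoc reads the file directly and returns content plus metadata.
    let (content, metadata) = extract_with_pandoc(org_path, "org").await?;
    println!("{} chars, {} metadata keys", content.len(), metadata.len());

    // Bytes-based wrapper: the input is written to a temp file that the TempFile guard cleans up.
    let (content, _metadata) = extract_with_pandoc_from_bytes(epub_bytes, "epub", "epub").await?;
    println!("{}", content);
    Ok(())
}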
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use serde_json::json;
+
+    #[test]
+    fn test_get_pandoc_key() {
+        assert_eq!(get_pandoc_key("abstract"), "summary");
+        assert_eq!(get_pandoc_key("date"), "created_at");
+        assert_eq!(get_pandoc_key("author"), "authors");
+        assert_eq!(get_pandoc_key("contributors"), "authors");
+        assert_eq!(get_pandoc_key("institute"), "organization");
+        assert_eq!(get_pandoc_key("title"), "title");
+    }
+
+    #[test]
+    fn test_extract_meta_value_string() {
+        let node = json!({
+            "t": "MetaString",
+            "c": "Test Title"
+        });
+
+        let result = extract_meta_value(&node).unwrap();
+        assert_eq!(result, Value::String("Test Title".to_string()));
+    }
+
+    #[test]
+    fn test_extract_meta_value_inlines() {
+        let node = json!({
+            "t": "MetaInlines",
+            "c": [
+                {"t": "Str", "c": "Hello"},
+                {"t": "Space"},
+                {"t": "Str", "c": "World"}
+            ]
+        });
+
+        let result = extract_meta_value(&node).unwrap();
+        assert_eq!(result, Value::String("Hello World".to_string()));
+    }
+
+    #[test]
+    fn test_extract_meta_value_list() {
+        let node = json!({
+            "t": "MetaList",
+            "c": [
+                {"t": "MetaString", "c": "Author1"},
+                {"t": "MetaString", "c": "Author2"}
+            ]
+        });
+
+        let result = extract_meta_value(&node).unwrap();
+        assert_eq!(
+            result,
+            Value::Array(vec![
+                Value::String("Author1".to_string()),
+                Value::String("Author2".to_string())
+            ])
+        );
+    }
+
+    #[test]
+    fn test_extract_inline_text_str() {
+        let node = json!({"t": "Str", "c": "Hello"});
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "Hello");
+    }
+
+    #[test]
+    fn test_extract_inline_text_space() {
+        let node = json!({"t": "Space"});
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, " ");
+    }
+
+    #[test]
+    fn test_extract_inline_text_emph() {
+        let node = json!({
+            "t": "Emph",
+            "c": [
+                {"t": "Str", "c": "emphasized"}
+            ]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "emphasized");
+    }
+
+    #[test]
+    fn test_extract_inline_text_code() {
+        let node = json!({
+            "t": "Code",
+            "c": [["", [], []], "code_snippet"]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "code_snippet");
+    }
+
+    #[test]
+    fn test_extract_inlines() {
+        let inlines = vec![
+            json!({"t": "Str", "c": "Hello"}),
+            json!({"t": "Space"}),
+            json!({"t": "Emph", "c": [{"t": "Str", "c": "World"}]}),
+        ];
+
+        let result = extract_inlines(&inlines).unwrap();
+        assert_eq!(result, Value::String("Hello World".to_string()));
+    }
+
+    #[test]
+    fn test_extract_citations_from_blocks() {
+        let blocks = vec![json!({
+            "t": "Cite",
+            "c": [
+                [
+                    {"citationId": "cite1"},
+                    {"citationId": "cite2"}
+                ],
+                []
+            ]
+        })];
+
+        let mut citations = Vec::new();
+        extract_citations_from_blocks(&blocks, &mut citations);
+
+        assert_eq!(citations, vec!["cite1", "cite2"]);
+    }
+
+    #[test]
+    fn test_extract_metadata_from_json() {
+        let json = json!({
+            "meta": {
+                "title": {"t": "MetaString", "c": "Test Document"},
+                "author": {"t": "MetaList", "c": [
+                    {"t": "MetaString", "c": "Author One"}
+                ]},
+                "date": {"t": "MetaString", "c": "2024-01-01"}
+            },
+            "blocks": []
+        });
+
+        let metadata = extract_metadata_from_json(&json).unwrap();
+
+        assert_eq!(
+            metadata.get("title").unwrap(),
+            &Value::String("Test Document".to_string())
+        );
+        assert_eq!(
+            metadata.get("authors").unwrap(),
+            &Value::Array(vec![Value::String("Author One".to_string())])
+        );
+        assert_eq!(
+            metadata.get("created_at").unwrap(),
+            &Value::String("2024-01-01".to_string())
+        );
+    }
+
+    #[test]
+    fn test_metadata_field_filtering() {
+        let json = json!({
+            "meta": {
+                "title": {"t": "MetaString", "c": "Valid Title"},
+                "invalid_field": {"t": "MetaString", "c": "Should be filtered"},
+                "random_key": {"t": "MetaString", "c": "Not in valid keys"},
+                "author": {"t": "MetaString", "c": "Valid Author"}
+            },
+            "blocks": []
+        });
+
+        let metadata = extract_metadata_from_json(&json).unwrap();
+
+        assert!(metadata.contains_key("title"));
+        assert!(metadata.contains_key("authors"));
+
+        assert!(!metadata.contains_key("invalid_field"));
+        assert!(!metadata.contains_key("random_key"));
+    }
+
+    #[test]
+    fn test_extract_meta_value_meta_blocks() {
+        let node = json!({
+            "t": "MetaBlocks",
+            "c": [
+                {
+                    "t": "Para",
+                    "c": [
+                        {"t": "Str", "c": "First"},
+                        {"t": "Space"},
+                        {"t": "Str", "c": "paragraph"}
+                    ]
+                },
+                {
+                    "t": "Para",
+                    "c": [
+                        {"t": "Str", "c": "Second"},
+                        {"t": "Space"},
+                        {"t": "Str", "c": "paragraph"}
+                    ]
+                }
+            ]
+        });
+
+        let result = extract_meta_value(&node).unwrap();
+        assert_eq!(result, Value::String("First paragraph Second paragraph".to_string()));
+    }
+
+    #[test]
+    fn test_extract_meta_value_meta_map() {
+        let node = json!({
+            "t": "MetaMap",
+            "c": {
+                "key1": {"t": "MetaString", "c": "value1"},
+                "key2": {"t": "MetaString", "c": "value2"}
+            }
+        });
+
+        let result = extract_meta_value(&node).unwrap();
+        let obj = result.as_object().unwrap();
+        assert_eq!(obj.get("key1").unwrap(), &Value::String("value1".to_string()));
+        assert_eq!(obj.get("key2").unwrap(), &Value::String("value2".to_string()));
+    }
+
+    #[test]
+    fn test_extract_inline_text_strong() {
+        let node = json!({
+            "t": "Strong",
+            "c": [
+                {"t": "Str", "c": "bold"}
+            ]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "bold");
+    }
+
+    #[test]
+    fn test_extract_inline_text_link() {
+        let node = json!({
+            "t": "Link",
+            "c": [
+                ["", [], []],
+                [{"t": "Str", "c": "link text"}],
+                ["https://example.com", ""]
+            ]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "link text");
+    }
+
+    #[test]
+    fn test_extract_inline_text_image() {
+        let node = json!({
+            "t": "Image",
+            "c": [
+                ["", [], []],
+                [{"t": "Str", "c": "alt text"}],
+                ["image.png", ""]
+            ]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "alt text");
+    }
+
+    #[test]
+    fn test_extract_inline_text_quoted() {
+        let node = json!({
+            "t": "Quoted",
+            "c": [
+                {"t": "DoubleQuote"},
+                [{"t": "Str", "c": "quoted text"}]
+            ]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "quoted text");
+    }
+
+    #[test]
+    fn test_extract_inline_text_cite() {
+        let node = json!({
+            "t": "Cite",
+            "c": [
+                [{"citationId": "cite1"}],
+                [{"t": "Str", "c": "citation text"}]
+            ]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "citation text");
+    }
+
+    #[test]
+    fn test_extract_inline_text_math() {
+        let node = json!({
+            "t": "Math",
+            "c": [
+                {"t": "InlineMath"},
+                "x^2 + y^2"
+            ]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "x^2 + y^2");
+    }
+
+    #[test]
+    fn test_extract_inline_text_linebreak() {
+        let node = json!({"t": "LineBreak"});
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "\n");
+    }
+
+    #[test]
+    fn test_extract_inline_text_softbreak() {
+        let node = json!({"t": "SoftBreak"});
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "\n");
+    }
+
+    #[test]
+    fn test_extract_inline_text_strikeout() {
+        let node = json!({
+            "t": "Strikeout",
+            "c": [{"t": "Str", "c": "deleted"}]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "deleted");
+    }
+
+    #[test]
+    fn test_extract_inline_text_superscript() {
+        let node = json!({
+            "t": "Superscript",
+            "c": [{"t": "Str", "c": "2"}]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "2");
+    }
+
+    #[test]
+    fn test_extract_inline_text_subscript() {
+        let node = json!({
+            "t": "Subscript",
+            "c": [{"t": "Str", "c": "i"}]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "i");
+    }
+
+    #[test]
+    fn test_extract_inline_text_smallcaps() {
+        let node = json!({
+            "t": "SmallCaps",
+            "c": [{"t": "Str", "c": "small"}]
+        });
+        let result = extract_inline_text(&node).unwrap();
+        assert_eq!(result, "small");
+    }
+
+    #[test]
+    fn test_extract_inline_text_unknown_type() {
+        let node = json!({
+            "t": "UnknownType",
+            "c": "should be ignored"
+        });
+        let result = extract_inline_text(&node);
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_extract_citations_from_nested_blocks() {
+        let blocks = vec![json!({
+            "t": "BulletList",
+            "c": [
+                [
+                    {
+                        "t": "Plain",
+                        "c": [
+                            {"t": "Str", "c": "text"}
+                        ]
+                    }
+                ]
+            ]
+        })];
+
+        let mut citations = Vec::new();
+        extract_citations_from_blocks(&blocks, &mut citations);
+
+        assert!(citations.is_empty());
+    }
+
+    #[test]
+    fn test_extract_metadata_from_json_with_citations() {
+        let json = json!({
+            "meta": {
+                "title": {"t": "MetaString", "c": "Paper"}
+            },
+            "citations": [
+                {"citationId": "cite1"},
+                {"citationId": "cite2"}
+            ],
+            "blocks": []
+        });
+
+        let metadata = extract_metadata_from_json(&json).unwrap();
+
+        assert!(metadata.contains_key("citations"));
+        let citations = metadata.get("citations").unwrap().as_array().unwrap();
+        assert_eq!(citations.len(), 2);
+        assert_eq!(citations[0], Value::String("cite1".to_string()));
+        assert_eq!(citations[1], Value::String("cite2".to_string()));
+    }
+
+    #[test]
+    fn test_extract_metadata_from_json_empty_meta() {
+        let json = json!({
+            "meta": {},
+            "blocks": []
+        });
+
+        let metadata = extract_metadata_from_json(&json).unwrap();
+        assert!(metadata.is_empty());
+    }
+
+    #[test]
+    fn test_extract_meta_value_empty_list() {
+        let node = json!({
+            "t": "MetaList",
+            "c": []
+        });
+
+        let result = extract_meta_value(&node);
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_extract_meta_value_empty_map() {
+        let node = json!({
+            "t": "MetaMap",
+            "c": {}
+        });
+
+        let result = extract_meta_value(&node);
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_extract_inlines_empty() {
+        let inlines = vec![];
+        let result = extract_inlines(&inlines);
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_valid_metadata_keys_contains_standard_fields() {
+        assert!(VALID_METADATA_KEYS.contains(&"title"));
+        assert!(VALID_METADATA_KEYS.contains(&"authors"));
+        assert!(VALID_METADATA_KEYS.contains(&"date"));
+        assert!(VALID_METADATA_KEYS.contains(&"keywords"));
+        assert!(VALID_METADATA_KEYS.contains(&"abstract"));
+        assert!(VALID_METADATA_KEYS.contains(&"citations"));
+    }
+
+    #[test]
+    fn test_get_pandoc_key_unmapped() {
+        assert_eq!(get_pandoc_key("title"), "title");
+        assert_eq!(get_pandoc_key("keywords"), "keywords");
+        assert_eq!(get_pandoc_key("custom_field"), "custom_field");
+    }
+
+    #[tokio::test]
+    async fn test_tempfile_raii_cleanup() {
+        use crate::extraction::pandoc::version::validate_pandoc_version;
+
+        if validate_pandoc_version().await.is_err() {
+            return;
+        }
+
+        let temp_path = std::env::temp_dir().join(format!("test_raii_{}.md", uuid::Uuid::new_v4()));
+
+        {
+            let _guard = TempFile::new(temp_path.clone());
+            fs::write(&temp_path, b"test content").await.unwrap();
+            assert!(temp_path.exists());
+        }
+
+        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
+
+        assert!(!temp_path.exists());
+    }
+
+    #[tokio::test]
+    async fn test_extract_content_timeout_kills_process() {
+        use crate::extraction::pandoc::version::validate_pandoc_version;
+
+        if validate_pandoc_version().await.is_err() {
+            return;
+        }
+
+        let temp_dir = std::env::temp_dir();
+        let test_file = temp_dir.join(format!("test_timeout_{}.md", uuid::Uuid::new_v4()));
+        fs::write(&test_file, b"# Test\n\nContent").await.unwrap();
+
+        let result = extract_content(&test_file, "markdown").await;
+        assert!(result.is_ok());
+
+        let _ = fs::remove_file(&test_file).await;
+    }
+}