kreuzberg 4.0.0.rc1 → 4.0.0.rc2

This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (342)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -8
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -534
  5. data/.rubocop.yml +538 -0
  6. data/Gemfile +8 -9
  7. data/Gemfile.lock +9 -109
  8. data/README.md +426 -421
  9. data/Rakefile +25 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -340
  12. data/ext/kreuzberg_rb/extconf.rb +45 -35
  13. data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
  14. data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
  15. data/ext/kreuzberg_rb/native/README.md +425 -425
  16. data/ext/kreuzberg_rb/native/build.rs +15 -17
  17. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  18. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  19. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  20. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  21. data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
  22. data/extconf.rb +28 -28
  23. data/kreuzberg.gemspec +148 -105
  24. data/lib/kreuzberg/api_proxy.rb +142 -142
  25. data/lib/kreuzberg/cache_api.rb +46 -45
  26. data/lib/kreuzberg/cli.rb +55 -55
  27. data/lib/kreuzberg/cli_proxy.rb +127 -127
  28. data/lib/kreuzberg/config.rb +691 -684
  29. data/lib/kreuzberg/error_context.rb +32 -0
  30. data/lib/kreuzberg/errors.rb +118 -50
  31. data/lib/kreuzberg/extraction_api.rb +85 -84
  32. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  33. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  34. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  35. data/lib/kreuzberg/result.rb +216 -216
  36. data/lib/kreuzberg/setup_lib_path.rb +80 -79
  37. data/lib/kreuzberg/validator_protocol.rb +89 -89
  38. data/lib/kreuzberg/version.rb +5 -5
  39. data/lib/kreuzberg.rb +103 -82
  40. data/sig/kreuzberg/internal.rbs +184 -184
  41. data/sig/kreuzberg.rbs +520 -468
  42. data/spec/binding/cache_spec.rb +227 -227
  43. data/spec/binding/cli_proxy_spec.rb +85 -87
  44. data/spec/binding/cli_spec.rb +55 -54
  45. data/spec/binding/config_spec.rb +345 -345
  46. data/spec/binding/config_validation_spec.rb +283 -283
  47. data/spec/binding/error_handling_spec.rb +213 -213
  48. data/spec/binding/errors_spec.rb +66 -66
  49. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  50. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  51. data/spec/binding/plugins/validator_spec.rb +274 -274
  52. data/spec/fixtures/config.toml +39 -39
  53. data/spec/fixtures/config.yaml +41 -42
  54. data/spec/fixtures/invalid_config.toml +4 -4
  55. data/spec/smoke/package_spec.rb +178 -178
  56. data/spec/spec_helper.rb +42 -42
  57. data/vendor/kreuzberg/Cargo.toml +204 -134
  58. data/vendor/kreuzberg/README.md +175 -175
  59. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
  60. data/vendor/kreuzberg/build.rs +474 -460
  61. data/vendor/kreuzberg/src/api/error.rs +81 -81
  62. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  63. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  64. data/vendor/kreuzberg/src/api/server.rs +353 -353
  65. data/vendor/kreuzberg/src/api/types.rs +170 -170
  66. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
  67. data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
  68. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
  69. data/vendor/kreuzberg/src/core/config.rs +1032 -1032
  70. data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
  71. data/vendor/kreuzberg/src/core/io.rs +329 -327
  72. data/vendor/kreuzberg/src/core/mime.rs +605 -615
  73. data/vendor/kreuzberg/src/core/mod.rs +45 -42
  74. data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
  75. data/vendor/kreuzberg/src/embeddings.rs +432 -323
  76. data/vendor/kreuzberg/src/error.rs +431 -431
  77. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  78. data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
  79. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  80. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  81. data/vendor/kreuzberg/src/extraction/html.rs +553 -553
  82. data/vendor/kreuzberg/src/extraction/image.rs +368 -368
  83. data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
  84. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
  85. data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
  86. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  87. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  88. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  89. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
  90. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
  91. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
  92. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  93. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  94. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  95. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  96. data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
  97. data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
  98. data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
  99. data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
  100. data/vendor/kreuzberg/src/extractors/email.rs +143 -129
  101. data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
  103. data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
  104. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
  105. data/vendor/kreuzberg/src/extractors/html.rs +393 -410
  106. data/vendor/kreuzberg/src/extractors/image.rs +198 -195
  107. data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
  108. data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
  109. data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
  110. data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
  111. data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
  112. data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
  113. data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
  114. data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
  115. data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
  116. data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
  117. data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
  118. data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
  119. data/vendor/kreuzberg/src/extractors/security.rs +484 -0
  120. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
  121. data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
  122. data/vendor/kreuzberg/src/extractors/text.rs +260 -242
  123. data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
  124. data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
  125. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  126. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  127. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  128. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  129. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  130. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  131. data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
  132. data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
  133. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  134. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  135. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
  136. data/vendor/kreuzberg/src/lib.rs +105 -102
  137. data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
  138. data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
  139. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  140. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  141. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  142. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  143. data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
  144. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  145. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  146. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
  147. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  148. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  149. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  150. data/vendor/kreuzberg/src/panic_context.rs +154 -0
  151. data/vendor/kreuzberg/src/pdf/error.rs +122 -122
  152. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  153. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
  154. data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
  155. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  156. data/vendor/kreuzberg/src/pdf/table.rs +393 -420
  157. data/vendor/kreuzberg/src/pdf/text.rs +158 -161
  158. data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
  159. data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
  160. data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
  161. data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
  162. data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
  163. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  164. data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
  165. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  166. data/vendor/kreuzberg/src/text/mod.rs +19 -19
  167. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  168. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  169. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  170. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  171. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  172. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  173. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  174. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  175. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  176. data/vendor/kreuzberg/src/types.rs +903 -873
  177. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  178. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  179. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  180. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  181. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  182. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  183. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  184. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  185. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  186. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  187. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  188. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  189. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  190. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  191. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  192. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  193. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  194. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  195. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  196. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  198. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  199. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  200. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  201. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  202. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  203. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  204. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  205. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  206. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  207. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  208. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  209. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  210. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  211. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  212. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  213. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  214. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  215. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  216. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  217. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  218. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  219. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  220. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  221. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  222. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  223. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  224. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  225. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  226. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  227. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  228. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  229. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  230. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  231. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  232. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  233. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  234. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  235. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  236. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  237. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  238. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  239. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  240. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  241. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  242. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  243. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  244. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
  245. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  246. data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
  247. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
  248. data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
  249. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
  250. data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
  251. data/vendor/kreuzberg/tests/config_features.rs +598 -580
  252. data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
  253. data/vendor/kreuzberg/tests/core_integration.rs +510 -493
  254. data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
  255. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
  256. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
  257. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
  258. data/vendor/kreuzberg/tests/email_integration.rs +325 -325
  259. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
  260. data/vendor/kreuzberg/tests/error_handling.rs +393 -393
  261. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
  262. data/vendor/kreuzberg/tests/format_integration.rs +159 -159
  263. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  264. data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
  265. data/vendor/kreuzberg/tests/image_integration.rs +253 -253
  266. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
  267. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
  268. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
  269. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  270. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  271. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
  272. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
  273. data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
  274. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
  275. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
  276. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
  277. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  278. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
  279. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
  280. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
  281. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
  282. data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
  283. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
  284. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
  285. data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
  286. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  287. data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
  288. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
  289. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
  290. data/vendor/kreuzberg/tests/security_validation.rs +415 -404
  291. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  292. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
  293. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
  294. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
  295. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  296. data/vendor/rb-sys/.cargo-ok +1 -0
  297. data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
  298. data/vendor/rb-sys/Cargo.lock +393 -0
  299. data/vendor/rb-sys/Cargo.toml +70 -0
  300. data/vendor/rb-sys/Cargo.toml.orig +57 -0
  301. data/vendor/rb-sys/LICENSE-APACHE +190 -0
  302. data/vendor/rb-sys/LICENSE-MIT +21 -0
  303. data/vendor/rb-sys/bin/release.sh +21 -0
  304. data/vendor/rb-sys/build/features.rs +108 -0
  305. data/vendor/rb-sys/build/main.rs +246 -0
  306. data/vendor/rb-sys/build/stable_api_config.rs +153 -0
  307. data/vendor/rb-sys/build/version.rs +48 -0
  308. data/vendor/rb-sys/readme.md +36 -0
  309. data/vendor/rb-sys/src/bindings.rs +21 -0
  310. data/vendor/rb-sys/src/hidden.rs +11 -0
  311. data/vendor/rb-sys/src/lib.rs +34 -0
  312. data/vendor/rb-sys/src/macros.rs +371 -0
  313. data/vendor/rb-sys/src/memory.rs +53 -0
  314. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
  315. data/vendor/rb-sys/src/special_consts.rs +31 -0
  316. data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
  317. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
  318. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
  319. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
  320. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
  321. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
  322. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
  323. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
  324. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
  325. data/vendor/rb-sys/src/stable_api.rs +261 -0
  326. data/vendor/rb-sys/src/symbol.rs +31 -0
  327. data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
  328. data/vendor/rb-sys/src/utils.rs +89 -0
  329. data/vendor/rb-sys/src/value_type.rs +7 -0
  330. metadata +90 -95
  331. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  332. data/spec/examples.txt +0 -104
  333. data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
  334. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
  335. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
  336. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
  337. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
  338. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
  339. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
  340. data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
  341. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
  342. data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
data/vendor/kreuzberg/src/cache/mod.rs
@@ -1,1143 +1,1167 @@
1
- //! Generic cache implementation with lock poisoning recovery.
2
- //!
3
- //! # Lock Poisoning Handling
4
- //!
5
- //! This module uses `Arc<Mutex<T>>` for thread-safe state management and implements
6
- //! explicit lock poisoning recovery throughout all public methods:
7
- //!
8
- //! **What is lock poisoning?**
9
- //! - When a thread panics while holding a Mutex, the lock becomes "poisoned"
10
- //! - Rust marks the Mutex to indicate data may be in an inconsistent state
11
- //! - Subsequent lock attempts return `Err(PoisonError)` instead of acquiring the lock
12
- //!
13
- //! **Recovery strategy:**
14
- //! - All `.lock()` calls use `.map_err()` to convert `PoisonError` into `KreuzbergError::LockPoisoned`
15
- //! - The error propagates to callers via `Result` returns (never `.unwrap()` on locks)
16
- //! - Provides clear error messages indicating which mutex is poisoned
17
- //! - Follows CLAUDE.md requirement: "Lock poisoning must be handled - never `.unwrap()` on Mutex/RwLock"
18
- //!
19
- //! **Affected state:**
20
- //! - `processing_locks`: Tracks cache keys currently being processed (6 lock sites)
21
- //! - `deleting_files`: Prevents read-during-delete race conditions (3 lock sites)
22
- //!
23
- //! This approach ensures that lock poisoning (rare in practice) is surfaced to users
24
- //! rather than causing panics, maintaining system stability during concurrent operations.
25
-
26
- use crate::error::{KreuzbergError, Result};
27
- use ahash::AHasher;
28
- use serde::{Deserialize, Serialize};
29
- use std::collections::HashSet;
30
- use std::fs;
31
-
32
- /// Cache key hash format width (32 hex digits for u64 hash)
33
- const CACHE_KEY_HASH_WIDTH: usize = 32;
34
- use std::hash::{Hash, Hasher};
35
- use std::path::{Path, PathBuf};
36
- use std::sync::atomic::{AtomicUsize, Ordering};
37
- use std::sync::{Arc, Mutex};
38
- use std::time::{SystemTime, UNIX_EPOCH};
39
-
40
- #[derive(Debug, Clone, Serialize, Deserialize)]
41
- pub struct CacheStats {
42
- pub total_files: usize,
43
- pub total_size_mb: f64,
44
- pub available_space_mb: f64,
45
- pub oldest_file_age_days: f64,
46
- pub newest_file_age_days: f64,
47
- }
48
-
49
- #[derive(Debug, Clone)]
50
- struct CacheEntry {
51
- path: PathBuf,
52
- size: u64,
53
- modified: SystemTime,
54
- }
55
-
56
- struct CacheScanResult {
57
- stats: CacheStats,
58
- entries: Vec<CacheEntry>,
59
- }
60
-
61
- pub struct GenericCache {
62
- cache_dir: PathBuf,
63
- cache_type: String,
64
- max_age_days: f64,
65
- max_cache_size_mb: f64,
66
- min_free_space_mb: f64,
67
- processing_locks: Arc<Mutex<HashSet<String>>>,
68
- /// Tracks cache keys being deleted to prevent read-during-delete race conditions
69
- deleting_files: Arc<Mutex<HashSet<PathBuf>>>,
70
- /// Counter for triggering periodic cleanup (every 100 writes)
71
- write_counter: Arc<AtomicUsize>,
72
- }
73
-
74
- impl GenericCache {
75
- pub fn new(
76
- cache_type: String,
77
- cache_dir: Option<String>,
78
- max_age_days: f64,
79
- max_cache_size_mb: f64,
80
- min_free_space_mb: f64,
81
- ) -> Result<Self> {
82
- let cache_dir_path = if let Some(dir) = cache_dir {
83
- PathBuf::from(dir).join(&cache_type)
84
- } else {
85
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
86
- std::env::current_dir()?.join(".kreuzberg").join(&cache_type)
87
- };
88
-
89
- fs::create_dir_all(&cache_dir_path)
90
- .map_err(|e| KreuzbergError::cache(format!("Failed to create cache directory: {}", e)))?;
91
-
92
- Ok(Self {
93
- cache_dir: cache_dir_path,
94
- cache_type,
95
- max_age_days,
96
- max_cache_size_mb,
97
- min_free_space_mb,
98
- processing_locks: Arc::new(Mutex::new(HashSet::new())),
99
- deleting_files: Arc::new(Mutex::new(HashSet::new())),
100
- write_counter: Arc::new(AtomicUsize::new(0)),
101
- })
102
- }
103
-
104
- fn get_cache_path(&self, cache_key: &str) -> PathBuf {
105
- self.cache_dir.join(format!("{}.msgpack", cache_key))
106
- }
107
-
108
- fn get_metadata_path(&self, cache_key: &str) -> PathBuf {
109
- self.cache_dir.join(format!("{}.meta", cache_key))
110
- }
111
-
112
- fn is_valid(&self, cache_path: &Path, source_file: Option<&str>) -> bool {
113
- if !cache_path.exists() {
114
- return false;
115
- }
116
-
117
- if let Ok(metadata) = fs::metadata(cache_path)
118
- && let Ok(modified) = metadata.modified()
119
- && let Ok(elapsed) = SystemTime::now().duration_since(modified)
120
- {
121
- let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
122
- if age_days > self.max_age_days {
123
- return false;
124
- }
125
- }
126
-
127
- if let Some(source_path) = source_file {
128
- let Some(file_stem) = cache_path.file_stem().and_then(|s| s.to_str()) else {
129
- return false;
130
- };
131
- let meta_path = self.get_metadata_path(file_stem);
132
-
133
- if meta_path.exists() {
134
- if let Ok(meta_metadata) = fs::metadata(&meta_path)
135
- && meta_metadata.len() == 16
136
- && let Ok(cached_meta_bytes) = fs::read(&meta_path)
137
- {
138
- let cached_size = u64::from_le_bytes([
139
- cached_meta_bytes[0],
140
- cached_meta_bytes[1],
141
- cached_meta_bytes[2],
142
- cached_meta_bytes[3],
143
- cached_meta_bytes[4],
144
- cached_meta_bytes[5],
145
- cached_meta_bytes[6],
146
- cached_meta_bytes[7],
147
- ]);
148
- let cached_mtime = u64::from_le_bytes([
149
- cached_meta_bytes[8],
150
- cached_meta_bytes[9],
151
- cached_meta_bytes[10],
152
- cached_meta_bytes[11],
153
- cached_meta_bytes[12],
154
- cached_meta_bytes[13],
155
- cached_meta_bytes[14],
156
- cached_meta_bytes[15],
157
- ]);
158
-
159
- if let Ok(source_metadata) = fs::metadata(source_path) {
160
- let current_size = source_metadata.len();
161
- let Some(current_mtime) = source_metadata
162
- .modified()
163
- .ok()
164
- .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
165
- .map(|d| d.as_secs())
166
- else {
167
- return false;
168
- };
169
-
170
- return cached_size == current_size && cached_mtime == current_mtime;
171
- }
172
- }
173
- return false;
174
- }
175
- }
176
-
177
- true
178
- }
179
-
180
- fn save_metadata(&self, cache_key: &str, source_file: Option<&str>) {
181
- if let Some(source_path) = source_file
182
- && let Ok(metadata) = fs::metadata(source_path)
183
- {
184
- let size = metadata.len();
185
- let Some(mtime) = metadata
186
- .modified()
187
- .ok()
188
- .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
189
- .map(|d| d.as_secs())
190
- else {
191
- return;
192
- };
193
-
194
- let mut bytes = Vec::with_capacity(16);
195
- bytes.extend_from_slice(&size.to_le_bytes());
196
- bytes.extend_from_slice(&mtime.to_le_bytes());
197
-
198
- let meta_path = self.get_metadata_path(cache_key);
199
- // Cache metadata write failure - safe to ignore, cache is optional fallback ~keep
200
- let _ = fs::write(meta_path, bytes);
201
- }
202
- }
203
-
204
- pub fn get(&self, cache_key: &str, source_file: Option<&str>) -> Result<Option<Vec<u8>>> {
205
- let cache_path = self.get_cache_path(cache_key);
206
-
207
- {
208
- let deleting = self
209
- .deleting_files
210
- .lock()
211
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
212
- if deleting.contains(&cache_path) {
213
- return Ok(None);
214
- }
215
- }
216
-
217
- if !self.is_valid(&cache_path, source_file) {
218
- return Ok(None);
219
- }
220
-
221
- match fs::read(&cache_path) {
222
- Ok(content) => Ok(Some(content)),
223
- Err(_) => {
224
- // Best-effort cleanup of corrupted cache files ~keep
225
- if let Err(e) = fs::remove_file(&cache_path) {
226
- tracing::debug!("Failed to remove corrupted cache file: {}", e);
227
- }
228
- if let Err(e) = fs::remove_file(self.get_metadata_path(cache_key)) {
229
- tracing::debug!("Failed to remove corrupted metadata file: {}", e);
230
- }
231
- Ok(None)
232
- }
233
- }
234
- }
235
-
236
- pub fn set(&self, cache_key: &str, data: Vec<u8>, source_file: Option<&str>) -> Result<()> {
237
- let cache_path = self.get_cache_path(cache_key);
238
-
239
- fs::write(&cache_path, data)
240
- .map_err(|e| KreuzbergError::cache(format!("Failed to write cache file: {}", e)))?;
241
-
242
- self.save_metadata(cache_key, source_file);
243
-
244
- let count = self.write_counter.fetch_add(1, Ordering::Relaxed);
245
- if count % 100 == 0
246
- && let Some(cache_path_str) = self.cache_dir.to_str()
247
- {
248
- // Cache cleanup failure - safe to ignore, cache is optional fallback ~keep
249
- let _ = smart_cleanup_cache(
250
- cache_path_str,
251
- self.max_age_days,
252
- self.max_cache_size_mb,
253
- self.min_free_space_mb,
254
- );
255
- }
256
-
257
- Ok(())
258
- }
259
-
260
- pub fn is_processing(&self, cache_key: &str) -> Result<bool> {
261
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
262
- let locks = self
263
- .processing_locks
264
- .lock()
265
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
266
- Ok(locks.contains(cache_key))
267
- }
268
-
269
- pub fn mark_processing(&self, cache_key: String) -> Result<()> {
270
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
271
- let mut locks = self
272
- .processing_locks
273
- .lock()
274
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
275
- locks.insert(cache_key);
276
- Ok(())
277
- }
278
-
279
- pub fn mark_complete(&self, cache_key: &str) -> Result<()> {
280
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
281
- let mut locks = self
282
- .processing_locks
283
- .lock()
284
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
285
- locks.remove(cache_key);
286
- Ok(())
287
- }
288
-
289
- /// Mark a file path as being deleted to prevent concurrent reads.
290
- ///
291
- /// # TOCTOU Race Condition
292
- ///
293
- /// There is a Time-Of-Check-To-Time-Of-Use (TOCTOU) race condition between:
294
- /// 1. Iterating directory entries in `clear()` (getting path/metadata)
295
- /// 2. Marking the file for deletion here
296
- /// 3. Actually deleting the file
297
- ///
298
- /// **Race scenario:**
299
- /// - Thread A: Begins iterating in `clear()`, gets path
300
- /// - Thread B: Calls `get()`, checks `deleting_files` (not marked yet), proceeds
301
- /// - Thread A: Calls `mark_for_deletion()` here
302
- /// - Thread A: Deletes file with `fs::remove_file()`
303
- /// - Thread B: Tries to read file, but it's already deleted
304
- ///
305
- /// **Why this is acceptable:**
306
- /// - Cache operations are best-effort optimizations, not critical
307
- /// - `get()` already handles file read failures gracefully (treats as cache miss)
308
- /// - The worst case is a failed read → cache miss → recomputation
309
- /// - No data corruption or invariant violations occur
310
- /// - Alternative (atomic operation) would require complex locking impacting performance
311
- fn mark_for_deletion(&self, path: &Path) -> Result<()> {
312
- let mut deleting = self
313
- .deleting_files
314
- .lock()
315
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
316
- deleting.insert(path.to_path_buf());
317
- Ok(())
318
- }
319
-
320
- /// Remove a file path from the deletion set
321
- fn unmark_deletion(&self, path: &Path) -> Result<()> {
322
- let mut deleting = self
323
- .deleting_files
324
- .lock()
325
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
326
- deleting.remove(&path.to_path_buf());
327
- Ok(())
328
- }
329
-
330
- pub fn clear(&self) -> Result<(usize, f64)> {
331
- let dir_path = &self.cache_dir;
332
-
333
- if !dir_path.exists() {
334
- return Ok((0, 0.0));
335
- }
336
-
337
- let mut removed_count = 0;
338
- let mut removed_size = 0.0;
339
-
340
- let read_dir = fs::read_dir(dir_path)
341
- .map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
342
-
343
- for entry in read_dir {
344
- let entry = match entry {
345
- Ok(e) => e,
346
- Err(e) => {
347
- tracing::debug!("Error reading entry: {}", e);
348
- continue;
349
- }
350
- };
351
-
352
- let metadata = match entry.metadata() {
353
- Ok(m) if m.is_file() => m,
354
- _ => continue,
355
- };
356
-
357
- let path = entry.path();
358
- if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
359
- continue;
360
- }
361
-
362
- let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
363
-
364
- // Mark file for deletion to prevent concurrent access ~keep
365
- if let Err(e) = self.mark_for_deletion(&path) {
366
- tracing::debug!("Failed to mark file for deletion: {} (continuing anyway)", e);
367
- }
368
-
369
- match fs::remove_file(&path) {
370
- Ok(_) => {
371
- removed_count += 1;
372
- removed_size += size_mb;
373
- // Unmark after successful deletion ~keep
374
- if let Err(e) = self.unmark_deletion(&path) {
375
- tracing::debug!("Failed to unmark deleted file: {} (non-critical)", e);
376
- }
377
- }
378
- Err(e) => {
379
- tracing::debug!("Failed to remove {:?}: {}", path, e);
380
- // Unmark after failed deletion to allow retries ~keep
381
- if let Err(e) = self.unmark_deletion(&path) {
382
- tracing::debug!("Failed to unmark file after deletion error: {} (non-critical)", e);
383
- }
384
- }
385
- }
386
- }
387
-
388
- Ok((removed_count, removed_size))
389
- }
390
-
391
- pub fn get_stats(&self) -> Result<CacheStats> {
392
- let cache_path_str = self
393
- .cache_dir
394
- .to_str()
395
- .ok_or_else(|| KreuzbergError::validation("Cache directory path contains invalid UTF-8".to_string()))?;
396
- get_cache_metadata(cache_path_str)
397
- }
398
-
399
- pub fn cache_dir(&self) -> &Path {
400
- &self.cache_dir
401
- }
402
-
403
- pub fn cache_type(&self) -> &str {
404
- &self.cache_type
405
- }
406
- }
407
-
408
- /// Generate a deterministic cache key from configuration parameters.
409
- ///
410
- /// # Algorithm
411
- ///
412
- /// Uses ahash (non-cryptographic 64-bit hash) for performance. Cache keys are
413
- /// generated by:
414
- /// 1. Sorting key-value pairs by key (for determinism)
415
- /// 2. Concatenating as "key1=val1&key2=val2&..."
416
- /// 3. Hashing with ahash and formatting as 32-character hex
417
- ///
418
- /// # Collision Probability
419
- ///
420
- /// AHash produces 64-bit hashes, leading to birthday paradox collisions:
421
- /// - **~0.01%** probability at 1 million cache entries
422
- /// - **~1%** probability at 100 million entries
423
- /// - **~50%** probability at 4.3 billion (2^32) entries
424
- ///
425
- /// For context: P(collision) ≈ n^2 / (2 * 2^64) where n = number of entries.
426
- ///
427
- /// # Performance vs Security Trade-off
428
- ///
429
- /// - **ahash**: ~10x faster than SHA256, sufficient for cache keys
430
- /// - **SHA256**: Collision-resistant but overkill for caching
431
- /// - **Practical risk**: Low for typical usage (< 1M entries)
432
- ///
433
- /// # Impact of Collisions
434
- ///
435
- /// If two different configurations hash to the same key:
436
- /// - One configuration reads the other's cached data
437
- /// - Results in incorrect data served from cache
438
- /// - Detected via metadata validation (size/mtime checks)
439
- ///
440
- /// # Recommendations
441
- ///
442
- /// - **< 1M entries**: ahash is safe and fast
443
- /// - **> 100M entries**: Monitor cache size, consider periodic clearing
444
- /// - **Critical data**: If collision risk is unacceptable, add SHA256 option
445
- ///
446
- /// # Example
447
- ///
448
- /// ```rust
449
- /// use kreuzberg::cache::generate_cache_key;
450
- ///
451
- /// let parts = [("format", "pdf"), ("ocr", "true"), ("lang", "en")];
452
- /// let key = generate_cache_key(&parts);
453
- /// assert_eq!(key.len(), 32); // 64-bit hash as hex
454
- /// ```
455
- pub fn generate_cache_key(parts: &[(&str, &str)]) -> String {
456
- if parts.is_empty() {
457
- return "empty".to_string();
458
- }
459
-
460
- let mut sorted_parts: Vec<_> = parts.to_vec();
461
- sorted_parts.sort_by_key(|(k, _)| *k);
462
-
463
- let estimated_size = sorted_parts.iter().map(|(k, v)| k.len() + v.len() + 2).sum::<usize>();
464
- let mut cache_str = String::with_capacity(estimated_size);
465
-
466
- for (i, (key, val)) in sorted_parts.iter().enumerate() {
467
- if i > 0 {
468
- cache_str.push('&');
469
- }
470
- cache_str.push_str(&format!("{}={}", key, val));
471
- }
472
-
473
- let mut hasher = AHasher::default();
474
- cache_str.hash(&mut hasher);
475
- let hash = hasher.finish();
476
-
477
- format!("{:0width$x}", hash, width = CACHE_KEY_HASH_WIDTH)
478
- }
479
-
480
- #[allow(unsafe_code)]
481
- pub fn get_available_disk_space(path: &str) -> Result<f64> {
482
- #[cfg(unix)]
483
- {
484
- let path = Path::new(path);
485
- let check_path = if path.exists() {
486
- path
487
- } else if let Some(parent) = path.parent() {
488
- parent
489
- } else {
490
- Path::new("/")
491
- };
492
-
493
- use libc::{statvfs, statvfs as statvfs_struct};
494
- use std::ffi::CString;
495
-
496
- let path_str = check_path
497
- .to_str()
498
- .ok_or_else(|| KreuzbergError::validation("Path contains invalid UTF-8".to_string()))?;
499
- let c_path = CString::new(path_str).map_err(|e| KreuzbergError::validation(format!("Invalid path: {}", e)))?;
500
-
501
- let mut stat: statvfs_struct = unsafe { std::mem::zeroed() };
502
-
503
- let result = unsafe { statvfs(c_path.as_ptr(), &mut stat) };
504
-
505
- if result == 0 {
506
- #[allow(clippy::unnecessary_cast)]
507
- let available_bytes = stat.f_bavail as u64 * stat.f_frsize as u64;
508
- Ok(available_bytes as f64 / (1024.0 * 1024.0))
509
- } else {
510
- tracing::debug!("Failed to get disk stats for {}: errno {}", path_str, result);
511
- Ok(10000.0)
512
- }
513
- }
514
-
515
- #[cfg(not(unix))]
516
- {
517
- let _ = path;
518
- Ok(10000.0)
519
- }
520
- }
521
-
522
- fn scan_cache_directory(cache_dir: &str) -> Result<CacheScanResult> {
523
- let dir_path = Path::new(cache_dir);
524
-
525
- if !dir_path.exists() {
526
- return Ok(CacheScanResult {
527
- stats: CacheStats {
528
- total_files: 0,
529
- total_size_mb: 0.0,
530
- available_space_mb: get_available_disk_space(cache_dir)?,
531
- oldest_file_age_days: 0.0,
532
- newest_file_age_days: 0.0,
533
- },
534
- entries: Vec::new(),
535
- });
536
- }
537
-
538
- let current_time = SystemTime::now()
539
- .duration_since(UNIX_EPOCH)
540
- .unwrap_or_default()
541
- .as_secs() as f64;
542
-
543
- let read_dir =
544
- fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
545
-
546
- let mut total_size = 0u64;
547
- let mut oldest_age = 0.0f64;
548
- let mut newest_age = f64::INFINITY;
549
- let mut entries = Vec::new();
550
-
551
- for entry in read_dir {
552
- let entry = match entry {
553
- Ok(e) => e,
554
- Err(e) => {
555
- tracing::debug!("Error reading cache entry: {}", e);
556
- continue;
557
- }
558
- };
559
-
560
- let metadata = match entry.metadata() {
561
- Ok(m) if m.is_file() => m,
562
- _ => continue,
563
- };
564
-
565
- let path = entry.path();
566
- if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
567
- continue;
568
- }
569
-
570
- let modified = match metadata.modified() {
571
- Ok(m) => m,
572
- Err(e) => {
573
- tracing::debug!("Error getting modification time for {:?}: {}", path, e);
574
- continue;
575
- }
576
- };
577
-
578
- let size = metadata.len();
579
- total_size += size;
580
-
581
- if let Ok(duration) = modified.duration_since(UNIX_EPOCH) {
582
- let age_days = (current_time - duration.as_secs() as f64) / (24.0 * 3600.0);
583
- oldest_age = oldest_age.max(age_days);
584
- newest_age = newest_age.min(age_days);
585
- }
586
-
587
- entries.push(CacheEntry { path, size, modified });
588
- }
589
-
590
- if entries.is_empty() {
591
- oldest_age = 0.0;
592
- newest_age = 0.0;
593
- }
594
-
595
- Ok(CacheScanResult {
596
- stats: CacheStats {
597
- total_files: entries.len(),
598
- total_size_mb: total_size as f64 / (1024.0 * 1024.0),
599
- available_space_mb: get_available_disk_space(cache_dir)?,
600
- oldest_file_age_days: oldest_age,
601
- newest_file_age_days: newest_age,
602
- },
603
- entries,
604
- })
605
- }
606
-
607
- pub fn get_cache_metadata(cache_dir: &str) -> Result<CacheStats> {
608
- let scan_result = scan_cache_directory(cache_dir)?;
609
- Ok(scan_result.stats)
610
- }
611
-
612
- pub fn cleanup_cache(
613
- cache_dir: &str,
614
- max_age_days: f64,
615
- max_size_mb: f64,
616
- target_size_ratio: f64,
617
- ) -> Result<(usize, f64)> {
618
- let scan_result = scan_cache_directory(cache_dir)?;
619
-
620
- if scan_result.entries.is_empty() {
621
- return Ok((0, 0.0));
622
- }
623
-
624
- let current_time = SystemTime::now()
625
- .duration_since(UNIX_EPOCH)
626
- .unwrap_or_default()
627
- .as_secs() as f64;
628
- let max_age_seconds = max_age_days * 24.0 * 3600.0;
629
-
630
- let mut removed_count = 0;
631
- let mut removed_size = 0.0;
632
- let mut remaining_entries = Vec::new();
633
- let mut total_remaining_size = 0u64;
634
-
635
- for entry in scan_result.entries {
636
- if let Ok(age) = entry.modified.duration_since(UNIX_EPOCH) {
637
- let age_seconds = current_time - age.as_secs() as f64;
638
- if age_seconds > max_age_seconds {
639
- match fs::remove_file(&entry.path) {
640
- Ok(_) => {
641
- removed_count += 1;
642
- removed_size += entry.size as f64 / (1024.0 * 1024.0);
643
- }
644
- Err(e) => {
645
- tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
646
- }
647
- }
648
- } else {
649
- total_remaining_size += entry.size;
650
- remaining_entries.push(entry);
651
- }
652
- }
653
- }
654
-
655
- let mut total_size_mb = total_remaining_size as f64 / (1024.0 * 1024.0);
656
-
657
- if total_size_mb > max_size_mb {
658
- remaining_entries.sort_by_key(|e| e.modified);
659
-
660
- let target_size = max_size_mb * target_size_ratio;
661
-
662
- for entry in remaining_entries {
663
- if total_size_mb <= target_size {
664
- break;
665
- }
666
-
667
- match fs::remove_file(&entry.path) {
668
- Ok(_) => {
669
- let size_mb = entry.size as f64 / (1024.0 * 1024.0);
670
- removed_count += 1;
671
- removed_size += size_mb;
672
- total_size_mb -= size_mb;
673
- }
674
- Err(e) => {
675
- tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
676
- }
677
- }
678
- }
679
- }
680
-
681
- Ok((removed_count, removed_size))
682
- }
683
-
684
- pub fn smart_cleanup_cache(
685
- cache_dir: &str,
686
- max_age_days: f64,
687
- max_size_mb: f64,
688
- min_free_space_mb: f64,
689
- ) -> Result<(usize, f64)> {
690
- let stats = get_cache_metadata(cache_dir)?;
691
-
692
- let needs_cleanup = stats.available_space_mb < min_free_space_mb
693
- || stats.total_size_mb > max_size_mb
694
- || stats.oldest_file_age_days > max_age_days;
695
-
696
- if !needs_cleanup {
697
- return Ok((0, 0.0));
698
- }
699
-
700
- let target_ratio = if stats.available_space_mb < min_free_space_mb {
701
- 0.5
702
- } else {
703
- 0.8
704
- };
705
-
706
- cleanup_cache(cache_dir, max_age_days, max_size_mb, target_ratio)
707
- }
708
-
709
- pub fn filter_old_cache_entries(cache_times: &[f64], current_time: f64, max_age_seconds: f64) -> Vec<usize> {
710
- cache_times
711
- .iter()
712
- .enumerate()
713
- .filter_map(|(idx, &time)| {
714
- if current_time - time > max_age_seconds {
715
- Some(idx)
716
- } else {
717
- None
718
- }
719
- })
720
- .collect()
721
- }
722
-
723
- pub fn sort_cache_by_access_time(mut entries: Vec<(String, f64)>) -> Vec<String> {
724
- entries.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));
725
- entries.into_iter().map(|(key, _)| key).collect()
726
- }
727
-
728
- pub fn fast_hash(data: &[u8]) -> u64 {
729
- let mut hasher = AHasher::default();
730
- data.hash(&mut hasher);
731
- hasher.finish()
732
- }
733
-
734
- pub fn validate_cache_key(key: &str) -> bool {
735
- key.len() == 32 && key.chars().all(|c| c.is_ascii_hexdigit())
736
- }
737
-
738
- pub fn is_cache_valid(cache_path: &str, max_age_days: f64) -> bool {
739
- let path = Path::new(cache_path);
740
-
741
- if !path.exists() {
742
- return false;
743
- }
744
-
745
- match fs::metadata(path) {
746
- Ok(metadata) => match metadata.modified() {
747
- Ok(modified) => match SystemTime::now().duration_since(modified) {
748
- Ok(elapsed) => {
749
- let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
750
- age_days <= max_age_days
751
- }
752
- Err(_) => false,
753
- },
754
- Err(_) => false,
755
- },
756
- Err(_) => false,
757
- }
758
- }
759
-
760
- pub fn clear_cache_directory(cache_dir: &str) -> Result<(usize, f64)> {
761
- let dir_path = Path::new(cache_dir);
762
-
763
- if !dir_path.exists() {
764
- return Ok((0, 0.0));
765
- }
766
-
767
- let mut removed_count = 0;
768
- let mut removed_size = 0.0;
769
-
770
- let read_dir =
771
- fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
772
-
773
- for entry in read_dir {
774
- let entry = match entry {
775
- Ok(e) => e,
776
- Err(e) => {
777
- tracing::debug!("Error reading entry: {}", e);
778
- continue;
779
- }
780
- };
781
-
782
- let metadata = match entry.metadata() {
783
- Ok(m) if m.is_file() => m,
784
- _ => continue,
785
- };
786
-
787
- let path = entry.path();
788
- if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
789
- continue;
790
- }
791
-
792
- let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
793
- match fs::remove_file(&path) {
794
- Ok(_) => {
795
- removed_count += 1;
796
- removed_size += size_mb;
797
- }
798
- Err(e) => {
799
- tracing::debug!("Failed to remove {:?}: {}", path, e);
800
- }
801
- }
802
- }
803
-
804
- Ok((removed_count, removed_size))
805
- }
806
-
807
- pub fn batch_cleanup_caches(
808
- cache_dirs: &[&str],
809
- max_age_days: f64,
810
- max_size_mb: f64,
811
- min_free_space_mb: f64,
812
- ) -> Result<Vec<(usize, f64)>> {
813
- cache_dirs
814
- .iter()
815
- .map(|dir| smart_cleanup_cache(dir, max_age_days, max_size_mb, min_free_space_mb))
816
- .collect()
817
- }
818
-
819
- #[cfg(test)]
820
- mod tests {
821
- use super::*;
822
- use std::fs::File;
823
- use tempfile::tempdir;
824
-
825
- #[test]
826
- fn test_generate_cache_key_empty() {
827
- let result = generate_cache_key(&[]);
828
- assert_eq!(result, "empty");
829
- }
830
-
831
- #[test]
832
- fn test_generate_cache_key_consistent() {
833
- let parts = [("key1", "value1"), ("key2", "value2")];
834
- let key1 = generate_cache_key(&parts);
835
- let key2 = generate_cache_key(&parts);
836
- assert_eq!(key1, key2);
837
- assert_eq!(key1.len(), 32);
838
- }
839
-
840
- #[test]
841
- fn test_validate_cache_key() {
842
- assert!(validate_cache_key("0123456789abcdef0123456789abcdef"));
843
- assert!(!validate_cache_key("invalid_key"));
844
- assert!(!validate_cache_key("0123456789abcdef"));
845
- assert!(!validate_cache_key("0123456789abcdef0123456789abcdef0"));
846
- }
847
-
848
- #[test]
849
- fn test_fast_hash() {
850
- let data1 = b"test data";
851
- let data2 = b"test data";
852
- let data3 = b"different data";
853
-
854
- assert_eq!(fast_hash(data1), fast_hash(data2));
855
- assert_ne!(fast_hash(data1), fast_hash(data3));
856
- }
857
-
858
- #[test]
859
- fn test_filter_old_cache_entries() {
860
- let cache_times = vec![100.0, 200.0, 300.0, 400.0];
861
- let current_time = 500.0;
862
- let max_age = 200.0;
863
-
864
- let old_indices = filter_old_cache_entries(&cache_times, current_time, max_age);
865
- assert_eq!(old_indices, vec![0, 1]);
866
- }
867
-
868
- #[test]
869
- fn test_sort_cache_by_access_time() {
870
- let entries = vec![
871
- ("key3".to_string(), 300.0),
872
- ("key1".to_string(), 100.0),
873
- ("key2".to_string(), 200.0),
874
- ];
875
-
876
- let sorted = sort_cache_by_access_time(entries);
877
- assert_eq!(sorted, vec!["key1", "key2", "key3"]);
878
- }
879
-
880
- #[test]
881
- fn test_sort_cache_with_nan() {
882
- let entries = vec![
883
- ("key1".to_string(), 100.0),
884
- ("key2".to_string(), f64::NAN),
885
- ("key3".to_string(), 200.0),
886
- ];
887
-
888
- let sorted = sort_cache_by_access_time(entries);
889
- assert_eq!(sorted.len(), 3);
890
- }
891
-
892
- #[test]
893
- fn test_cache_metadata() {
894
- let temp_dir = tempdir().unwrap();
895
- let cache_dir = temp_dir.path().to_str().unwrap();
896
-
897
- let file1 = temp_dir.path().join("test1.msgpack");
898
- let file2 = temp_dir.path().join("test2.msgpack");
899
- File::create(&file1).unwrap();
900
- File::create(&file2).unwrap();
901
-
902
- let stats = get_cache_metadata(cache_dir).unwrap();
903
- assert_eq!(stats.total_files, 2);
904
- assert!(stats.available_space_mb > 0.0);
905
- }
906
-
907
- #[test]
908
- fn test_cleanup_cache() {
909
- use std::io::Write;
910
-
911
- let temp_dir = tempdir().unwrap();
912
- let cache_dir = temp_dir.path().to_str().unwrap();
913
-
914
- let file1 = temp_dir.path().join("old.msgpack");
915
- let mut f = File::create(&file1).unwrap();
916
- f.write_all(b"test data for cleanup").unwrap();
917
- drop(f);
918
-
919
- let (removed_count, _) = cleanup_cache(cache_dir, 1000.0, 0.000001, 0.8).unwrap();
920
- assert_eq!(removed_count, 1);
921
- assert!(!file1.exists());
922
- }
923
-
924
- #[test]
925
- fn test_is_cache_valid() {
926
- let temp_dir = tempdir().unwrap();
927
- let file_path = temp_dir.path().join("test.msgpack");
928
- File::create(&file_path).unwrap();
929
-
930
- let path_str = file_path.to_str().unwrap();
931
-
932
- assert!(is_cache_valid(path_str, 1.0));
933
-
934
- assert!(!is_cache_valid("/nonexistent/path", 1.0));
935
- }
936
-
937
- #[test]
938
- fn test_generic_cache_new() {
939
- let temp_dir = tempdir().unwrap();
940
- let cache = GenericCache::new(
941
- "test".to_string(),
942
- Some(temp_dir.path().to_str().unwrap().to_string()),
943
- 30.0,
944
- 500.0,
945
- 1000.0,
946
- )
947
- .unwrap();
948
-
949
- assert_eq!(cache.cache_type, "test");
950
- assert!(cache.cache_dir.exists());
951
- }
952
-
953
- #[test]
954
- fn test_generic_cache_get_set() {
955
- let temp_dir = tempdir().unwrap();
956
- let cache = GenericCache::new(
957
- "test".to_string(),
958
- Some(temp_dir.path().to_str().unwrap().to_string()),
959
- 30.0,
960
- 500.0,
961
- 1000.0,
962
- )
963
- .unwrap();
964
-
965
- let cache_key = "test_key";
966
- let data = b"test data".to_vec();
967
-
968
- cache.set(cache_key, data.clone(), None).unwrap();
969
-
970
- let result = cache.get(cache_key, None).unwrap();
971
- assert_eq!(result, Some(data));
972
- }
973
-
974
- #[test]
975
- fn test_generic_cache_get_miss() {
976
- let temp_dir = tempdir().unwrap();
977
- let cache = GenericCache::new(
978
- "test".to_string(),
979
- Some(temp_dir.path().to_str().unwrap().to_string()),
980
- 30.0,
981
- 500.0,
982
- 1000.0,
983
- )
984
- .unwrap();
985
-
986
- let result = cache.get("nonexistent", None).unwrap();
987
- assert_eq!(result, None);
988
- }
989
-
990
- #[test]
991
- fn test_generic_cache_source_file_invalidation() {
992
- use std::io::Write;
993
- use std::thread::sleep;
994
- use std::time::Duration;
995
-
996
- let temp_dir = tempdir().unwrap();
997
- let cache = GenericCache::new(
998
- "test".to_string(),
999
- Some(temp_dir.path().to_str().unwrap().to_string()),
1000
- 30.0,
1001
- 500.0,
1002
- 1000.0,
1003
- )
1004
- .unwrap();
1005
-
1006
- let source_file = temp_dir.path().join("source.txt");
1007
- let mut f = File::create(&source_file).unwrap();
1008
- f.write_all(b"original content").unwrap();
1009
- drop(f);
1010
-
1011
- let cache_key = "test_key";
1012
- let data = b"cached data".to_vec();
1013
-
1014
- cache
1015
- .set(cache_key, data.clone(), Some(source_file.to_str().unwrap()))
1016
- .unwrap();
1017
-
1018
- let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1019
- assert_eq!(result, Some(data.clone()));
1020
-
1021
- sleep(Duration::from_millis(10));
1022
- let mut f = fs::OpenOptions::new()
1023
- .write(true)
1024
- .truncate(true)
1025
- .open(&source_file)
1026
- .unwrap();
1027
- f.write_all(b"modified content with different size").unwrap();
1028
- drop(f);
1029
-
1030
- let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1031
- assert_eq!(result, None);
1032
- }
1033
-
1034
- #[test]
1035
- fn test_generic_cache_processing_locks() {
1036
- let temp_dir = tempdir().unwrap();
1037
- let cache = GenericCache::new(
1038
- "test".to_string(),
1039
- Some(temp_dir.path().to_str().unwrap().to_string()),
1040
- 30.0,
1041
- 500.0,
1042
- 1000.0,
1043
- )
1044
- .unwrap();
1045
-
1046
- let cache_key = "test_key";
1047
-
1048
- assert!(!cache.is_processing(cache_key).unwrap());
1049
-
1050
- cache.mark_processing(cache_key.to_string()).unwrap();
1051
- assert!(cache.is_processing(cache_key).unwrap());
1052
-
1053
- cache.mark_complete(cache_key).unwrap();
1054
- assert!(!cache.is_processing(cache_key).unwrap());
1055
- }
1056
-
1057
- #[test]
1058
- fn test_generic_cache_clear() {
1059
- let temp_dir = tempdir().unwrap();
1060
- let cache = GenericCache::new(
1061
- "test".to_string(),
1062
- Some(temp_dir.path().to_str().unwrap().to_string()),
1063
- 30.0,
1064
- 500.0,
1065
- 1000.0,
1066
- )
1067
- .unwrap();
1068
-
1069
- cache.set("key1", b"data1".to_vec(), None).unwrap();
1070
- cache.set("key2", b"data2".to_vec(), None).unwrap();
1071
-
1072
- let (removed, _freed) = cache.clear().unwrap();
1073
- assert_eq!(removed, 2);
1074
-
1075
- assert_eq!(cache.get("key1", None).unwrap(), None);
1076
- assert_eq!(cache.get("key2", None).unwrap(), None);
1077
- }
1078
-
1079
- #[test]
1080
- fn test_generic_cache_stats() {
1081
- let temp_dir = tempdir().unwrap();
1082
- let cache = GenericCache::new(
1083
- "test".to_string(),
1084
- Some(temp_dir.path().to_str().unwrap().to_string()),
1085
- 30.0,
1086
- 500.0,
1087
- 1000.0,
1088
- )
1089
- .unwrap();
1090
-
1091
- cache.set("key1", b"test data 1".to_vec(), None).unwrap();
1092
- cache.set("key2", b"test data 2".to_vec(), None).unwrap();
1093
-
1094
- let stats = cache.get_stats().unwrap();
1095
- assert_eq!(stats.total_files, 2);
1096
- assert!(stats.total_size_mb > 0.0);
1097
- assert!(stats.available_space_mb > 0.0);
1098
- }
1099
-
1100
- #[test]
1101
- fn test_generic_cache_expired_entry() {
1102
- use std::io::Write;
1103
-
1104
- let temp_dir = tempdir().unwrap();
1105
- let cache = GenericCache::new(
1106
- "test".to_string(),
1107
- Some(temp_dir.path().to_str().unwrap().to_string()),
1108
- 0.000001,
1109
- 500.0,
1110
- 1000.0,
1111
- )
1112
- .unwrap();
1113
-
1114
- let cache_key = "test_key";
1115
-
1116
- let cache_path = cache.cache_dir.join(format!("{}.msgpack", cache_key));
1117
- let mut f = File::create(&cache_path).unwrap();
1118
- f.write_all(b"test data").unwrap();
1119
- drop(f);
1120
-
1121
- let old_time = SystemTime::now() - std::time::Duration::from_secs(60);
1122
- filetime::set_file_mtime(&cache_path, filetime::FileTime::from_system_time(old_time)).unwrap();
1123
-
1124
- let result = cache.get(cache_key, None).unwrap();
1125
- assert_eq!(result, None);
1126
- }
1127
-
1128
- #[test]
1129
- fn test_generic_cache_properties() {
1130
- let temp_dir = tempdir().unwrap();
1131
- let cache = GenericCache::new(
1132
- "test".to_string(),
1133
- Some(temp_dir.path().to_str().unwrap().to_string()),
1134
- 30.0,
1135
- 500.0,
1136
- 1000.0,
1137
- )
1138
- .unwrap();
1139
-
1140
- assert_eq!(cache.cache_type(), "test");
1141
- assert!(cache.cache_dir().to_string_lossy().contains("test"));
1142
- }
1143
- }
1
+ //! Generic cache implementation with lock poisoning recovery.
2
+ //!
3
+ //! # Lock Poisoning Handling
4
+ //!
5
+ //! This module uses `Arc<Mutex<T>>` for thread-safe state management and implements
6
+ //! explicit lock poisoning recovery throughout all public methods:
7
+ //!
8
+ //! **What is lock poisoning?**
9
+ //! - When a thread panics while holding a Mutex, the lock becomes "poisoned"
10
+ //! - Rust marks the Mutex to indicate data may be in an inconsistent state
11
+ //! - Subsequent lock attempts return `Err(PoisonError)` instead of acquiring the lock
12
+ //!
13
+ //! **Recovery strategy:**
14
+ //! - All `.lock()` calls use `.map_err()` to convert `PoisonError` into `KreuzbergError::LockPoisoned`
15
+ //! - The error propagates to callers via `Result` returns (never `.unwrap()` on locks)
16
+ //! - Provides clear error messages indicating which mutex is poisoned
17
+ //! - Follows CLAUDE.md requirement: "Lock poisoning must be handled - never `.unwrap()` on Mutex/RwLock"
18
+ //!
19
+ //! **Affected state:**
20
+ //! - `processing_locks`: Tracks cache keys currently being processed (6 lock sites)
21
+ //! - `deleting_files`: Prevents read-during-delete race conditions (3 lock sites)
22
+ //!
23
+ //! This approach ensures that lock poisoning (rare in practice) is surfaced to users
24
+ //! rather than causing panics, maintaining system stability during concurrent operations.
25
+
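The recovery strategy described above reduces to one pattern: convert `PoisonError` into a typed error at every lock site instead of unwrapping. Below is a minimal, self-contained sketch of that pattern; the `CacheError` type and `is_processing` helper are illustrative stand-ins, not the crate's `KreuzbergError` or its actual method.

```rust
use std::collections::HashSet;
use std::sync::{Arc, Mutex};

// Stand-in for KreuzbergError::LockPoisoned; illustrative only.
#[derive(Debug)]
enum CacheError {
    LockPoisoned(String),
}

// Mirrors the recovery strategy above: never unwrap the lock result,
// map a poisoned mutex into a typed error the caller can handle.
fn is_processing(locks: &Arc<Mutex<HashSet<String>>>, key: &str) -> Result<bool, CacheError> {
    let guard = locks
        .lock()
        .map_err(|e| CacheError::LockPoisoned(format!("processing locks mutex poisoned: {e}")))?;
    Ok(guard.contains(key))
}

fn main() -> Result<(), CacheError> {
    let locks = Arc::new(Mutex::new(HashSet::new()));
    assert!(!is_processing(&locks, "some_key")?);
    Ok(())
}
```

The same `.lock().map_err(...)` shape appears at each of the lock sites in the module body below.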
26
+ use crate::error::{KreuzbergError, Result};
27
+ use ahash::AHasher;
28
+ use serde::{Deserialize, Serialize};
29
+ use std::collections::HashSet;
30
+ use std::fs;
31
+
32
+ /// Width of formatted cache keys: the u64 hash is zero-padded to 32 hex digits
33
+ const CACHE_KEY_HASH_WIDTH: usize = 32;
34
+ use std::hash::{Hash, Hasher};
35
+ use std::path::{Path, PathBuf};
36
+ use std::sync::atomic::{AtomicUsize, Ordering};
37
+ use std::sync::{Arc, Mutex};
38
+ use std::time::{SystemTime, UNIX_EPOCH};
39
+
40
+ #[derive(Debug, Clone, Serialize, Deserialize)]
41
+ pub struct CacheStats {
42
+ pub total_files: usize,
43
+ pub total_size_mb: f64,
44
+ pub available_space_mb: f64,
45
+ pub oldest_file_age_days: f64,
46
+ pub newest_file_age_days: f64,
47
+ }
48
+
49
+ #[derive(Debug, Clone)]
50
+ struct CacheEntry {
51
+ path: PathBuf,
52
+ size: u64,
53
+ modified: SystemTime,
54
+ }
55
+
56
+ struct CacheScanResult {
57
+ stats: CacheStats,
58
+ entries: Vec<CacheEntry>,
59
+ }
60
+
61
+ pub struct GenericCache {
62
+ cache_dir: PathBuf,
63
+ cache_type: String,
64
+ max_age_days: f64,
65
+ max_cache_size_mb: f64,
66
+ min_free_space_mb: f64,
67
+ processing_locks: Arc<Mutex<HashSet<String>>>,
68
+ /// Tracks cache keys being deleted to prevent read-during-delete race conditions
69
+ deleting_files: Arc<Mutex<HashSet<PathBuf>>>,
70
+ /// Counter for triggering periodic cleanup (every 100 writes)
71
+ write_counter: Arc<AtomicUsize>,
72
+ }
73
+
74
+ impl GenericCache {
75
+ pub fn new(
76
+ cache_type: String,
77
+ cache_dir: Option<String>,
78
+ max_age_days: f64,
79
+ max_cache_size_mb: f64,
80
+ min_free_space_mb: f64,
81
+ ) -> Result<Self> {
82
+ let cache_dir_path = if let Some(dir) = cache_dir {
83
+ PathBuf::from(dir).join(&cache_type)
84
+ } else {
85
+ // OSError/RuntimeError must bubble up - system errors need user reports ~keep
86
+ std::env::current_dir()?.join(".kreuzberg").join(&cache_type)
87
+ };
88
+
89
+ fs::create_dir_all(&cache_dir_path)
90
+ .map_err(|e| KreuzbergError::cache(format!("Failed to create cache directory: {}", e)))?;
91
+
92
+ Ok(Self {
93
+ cache_dir: cache_dir_path,
94
+ cache_type,
95
+ max_age_days,
96
+ max_cache_size_mb,
97
+ min_free_space_mb,
98
+ processing_locks: Arc::new(Mutex::new(HashSet::new())),
99
+ deleting_files: Arc::new(Mutex::new(HashSet::new())),
100
+ write_counter: Arc::new(AtomicUsize::new(0)),
101
+ })
102
+ }
103
+
104
+ fn get_cache_path(&self, cache_key: &str) -> PathBuf {
105
+ self.cache_dir.join(format!("{}.msgpack", cache_key))
106
+ }
107
+
108
+ fn get_metadata_path(&self, cache_key: &str) -> PathBuf {
109
+ self.cache_dir.join(format!("{}.meta", cache_key))
110
+ }
111
+
112
+ fn is_valid(&self, cache_path: &Path, source_file: Option<&str>) -> bool {
113
+ if !cache_path.exists() {
114
+ return false;
115
+ }
116
+
117
+ if let Ok(metadata) = fs::metadata(cache_path)
118
+ && let Ok(modified) = metadata.modified()
119
+ && let Ok(elapsed) = SystemTime::now().duration_since(modified)
120
+ {
121
+ let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
122
+ if age_days > self.max_age_days {
123
+ return false;
124
+ }
125
+ }
126
+
127
+ if let Some(source_path) = source_file {
128
+ let Some(file_stem) = cache_path.file_stem().and_then(|s| s.to_str()) else {
129
+ return false;
130
+ };
131
+ let meta_path = self.get_metadata_path(file_stem);
132
+
133
+ if meta_path.exists() {
134
+ if let Ok(meta_metadata) = fs::metadata(&meta_path)
135
+ && meta_metadata.len() == 16
136
+ && let Ok(cached_meta_bytes) = fs::read(&meta_path)
137
+ {
138
+ let cached_size = u64::from_le_bytes([
139
+ cached_meta_bytes[0],
140
+ cached_meta_bytes[1],
141
+ cached_meta_bytes[2],
142
+ cached_meta_bytes[3],
143
+ cached_meta_bytes[4],
144
+ cached_meta_bytes[5],
145
+ cached_meta_bytes[6],
146
+ cached_meta_bytes[7],
147
+ ]);
148
+ let cached_mtime = u64::from_le_bytes([
149
+ cached_meta_bytes[8],
150
+ cached_meta_bytes[9],
151
+ cached_meta_bytes[10],
152
+ cached_meta_bytes[11],
153
+ cached_meta_bytes[12],
154
+ cached_meta_bytes[13],
155
+ cached_meta_bytes[14],
156
+ cached_meta_bytes[15],
157
+ ]);
158
+
159
+ if let Ok(source_metadata) = fs::metadata(source_path) {
160
+ let current_size = source_metadata.len();
161
+ let Some(current_mtime) = source_metadata
162
+ .modified()
163
+ .ok()
164
+ .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
165
+ .map(|d| d.as_secs())
166
+ else {
167
+ return false;
168
+ };
169
+
170
+ return cached_size == current_size && cached_mtime == current_mtime;
171
+ }
172
+ }
173
+ return false;
174
+ }
175
+ }
176
+
177
+ true
178
+ }
179
+
180
+ fn save_metadata(&self, cache_key: &str, source_file: Option<&str>) {
181
+ if let Some(source_path) = source_file
182
+ && let Ok(metadata) = fs::metadata(source_path)
183
+ {
184
+ let size = metadata.len();
185
+ let Some(mtime) = metadata
186
+ .modified()
187
+ .ok()
188
+ .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
189
+ .map(|d| d.as_secs())
190
+ else {
191
+ return;
192
+ };
193
+
194
+ let mut bytes = Vec::with_capacity(16);
195
+ bytes.extend_from_slice(&size.to_le_bytes());
196
+ bytes.extend_from_slice(&mtime.to_le_bytes());
197
+
198
+ let meta_path = self.get_metadata_path(cache_key);
199
+ // Cache metadata write failure - safe to ignore, cache is optional fallback ~keep
200
+ let _ = fs::write(meta_path, bytes);
201
+ }
202
+ }
203
+
204
+ #[cfg_attr(feature = "otel", tracing::instrument(
205
+ skip(self),
206
+ fields(
207
+ cache.hit = tracing::field::Empty,
208
+ cache.key = %cache_key,
209
+ )
210
+ ))]
211
+ pub fn get(&self, cache_key: &str, source_file: Option<&str>) -> Result<Option<Vec<u8>>> {
212
+ let cache_path = self.get_cache_path(cache_key);
213
+
214
+ {
215
+ let deleting = self
216
+ .deleting_files
217
+ .lock()
218
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
219
+ if deleting.contains(&cache_path) {
220
+ #[cfg(feature = "otel")]
221
+ tracing::Span::current().record("cache.hit", false);
222
+ return Ok(None);
223
+ }
224
+ }
225
+
226
+ if !self.is_valid(&cache_path, source_file) {
227
+ #[cfg(feature = "otel")]
228
+ tracing::Span::current().record("cache.hit", false);
229
+ return Ok(None);
230
+ }
231
+
232
+ match fs::read(&cache_path) {
233
+ Ok(content) => {
234
+ #[cfg(feature = "otel")]
235
+ tracing::Span::current().record("cache.hit", true);
236
+ Ok(Some(content))
237
+ }
238
+ Err(_) => {
239
+ // Best-effort cleanup of corrupted cache files ~keep
240
+ if let Err(e) = fs::remove_file(&cache_path) {
241
+ tracing::debug!("Failed to remove corrupted cache file: {}", e);
242
+ }
243
+ if let Err(e) = fs::remove_file(self.get_metadata_path(cache_key)) {
244
+ tracing::debug!("Failed to remove corrupted metadata file: {}", e);
245
+ }
246
+ #[cfg(feature = "otel")]
247
+ tracing::Span::current().record("cache.hit", false);
248
+ Ok(None)
249
+ }
250
+ }
251
+ }
252
+
253
+ #[cfg_attr(feature = "otel", tracing::instrument(
254
+ skip(self, data),
255
+ fields(
256
+ cache.key = %cache_key,
257
+ cache.size_bytes = data.len(),
258
+ )
259
+ ))]
260
+ pub fn set(&self, cache_key: &str, data: Vec<u8>, source_file: Option<&str>) -> Result<()> {
261
+ let cache_path = self.get_cache_path(cache_key);
262
+
263
+ fs::write(&cache_path, &data)
264
+ .map_err(|e| KreuzbergError::cache(format!("Failed to write cache file: {}", e)))?;
265
+
266
+ self.save_metadata(cache_key, source_file);
267
+
268
+ let count = self.write_counter.fetch_add(1, Ordering::Relaxed);
269
+ if count.is_multiple_of(100)
270
+ && let Some(cache_path_str) = self.cache_dir.to_str()
271
+ {
272
+ // Cache cleanup failure - safe to ignore, cache is optional fallback ~keep
273
+ let _ = smart_cleanup_cache(
274
+ cache_path_str,
275
+ self.max_age_days,
276
+ self.max_cache_size_mb,
277
+ self.min_free_space_mb,
278
+ );
279
+ }
280
+
281
+ Ok(())
282
+ }
283
+
284
+ pub fn is_processing(&self, cache_key: &str) -> Result<bool> {
285
+ // OSError/RuntimeError must bubble up - system errors need user reports ~keep
286
+ let locks = self
287
+ .processing_locks
288
+ .lock()
289
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
290
+ Ok(locks.contains(cache_key))
291
+ }
292
+
293
+ pub fn mark_processing(&self, cache_key: String) -> Result<()> {
294
+ // OSError/RuntimeError must bubble up - system errors need user reports ~keep
295
+ let mut locks = self
296
+ .processing_locks
297
+ .lock()
298
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
299
+ locks.insert(cache_key);
300
+ Ok(())
301
+ }
302
+
303
+ pub fn mark_complete(&self, cache_key: &str) -> Result<()> {
304
+ // OSError/RuntimeError must bubble up - system errors need user reports ~keep
305
+ let mut locks = self
306
+ .processing_locks
307
+ .lock()
308
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
309
+ locks.remove(cache_key);
310
+ Ok(())
311
+ }
312
+
313
+ /// Mark a file path as being deleted to prevent concurrent reads.
314
+ ///
315
+ /// # TOCTOU Race Condition
316
+ ///
317
+ /// There is a Time-Of-Check-To-Time-Of-Use (TOCTOU) race condition between:
318
+ /// 1. Iterating directory entries in `clear()` (getting path/metadata)
319
+ /// 2. Marking the file for deletion here
320
+ /// 3. Actually deleting the file
321
+ ///
322
+ /// **Race scenario:**
323
+ /// - Thread A: Begins iterating in `clear()`, gets path
324
+ /// - Thread B: Calls `get()`, checks `deleting_files` (not marked yet), proceeds
325
+ /// - Thread A: Calls `mark_for_deletion()` here
326
+ /// - Thread A: Deletes file with `fs::remove_file()`
327
+ /// - Thread B: Tries to read file, but it's already deleted
328
+ ///
329
+ /// **Why this is acceptable:**
330
+ /// - Cache operations are best-effort optimizations, not critical
331
+ /// - `get()` already handles file read failures gracefully (treats as cache miss)
332
+ /// - The worst case is a failed read → cache miss → recomputation
333
+ /// - No data corruption or invariant violations occur
334
+ /// - The alternative (making the scan-mark-delete sequence atomic) would require more complex locking that would hurt performance
335
+ fn mark_for_deletion(&self, path: &Path) -> Result<()> {
336
+ let mut deleting = self
337
+ .deleting_files
338
+ .lock()
339
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
340
+ deleting.insert(path.to_path_buf());
341
+ Ok(())
342
+ }
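The "failed read → cache miss" degradation described above is easy to see in isolation. A tiny illustrative sketch (not the crate's code; the `read_entry` helper and the path are hypothetical) of a reader that treats a file deleted mid-operation as a miss rather than an error:

```rust
use std::fs;
use std::path::Path;

// Illustrative reader: any I/O failure (including a file removed by a
// concurrent clear()) degrades to a cache miss instead of an error.
fn read_entry(path: &Path) -> Option<Vec<u8>> {
    fs::read(path).ok()
}

fn main() {
    // Stands in for an entry deleted between the deleting_files check and the read.
    let missing = Path::new("does-not-exist.msgpack");
    assert_eq!(read_entry(missing), None);
}
```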
343
+
344
+ /// Remove a file path from the deletion set
345
+ fn unmark_deletion(&self, path: &Path) -> Result<()> {
346
+ let mut deleting = self
347
+ .deleting_files
348
+ .lock()
349
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
350
+ deleting.remove(&path.to_path_buf());
351
+ Ok(())
352
+ }
353
+
354
+ pub fn clear(&self) -> Result<(usize, f64)> {
355
+ let dir_path = &self.cache_dir;
356
+
357
+ if !dir_path.exists() {
358
+ return Ok((0, 0.0));
359
+ }
360
+
361
+ let mut removed_count = 0;
362
+ let mut removed_size = 0.0;
363
+
364
+ let read_dir = fs::read_dir(dir_path)
365
+ .map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
366
+
367
+ for entry in read_dir {
368
+ let entry = match entry {
369
+ Ok(e) => e,
370
+ Err(e) => {
371
+ tracing::debug!("Error reading entry: {}", e);
372
+ continue;
373
+ }
374
+ };
375
+
376
+ let metadata = match entry.metadata() {
377
+ Ok(m) if m.is_file() => m,
378
+ _ => continue,
379
+ };
380
+
381
+ let path = entry.path();
382
+ if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
383
+ continue;
384
+ }
385
+
386
+ let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
387
+
388
+ // Mark file for deletion to prevent concurrent access ~keep
389
+ if let Err(e) = self.mark_for_deletion(&path) {
390
+ tracing::debug!("Failed to mark file for deletion: {} (continuing anyway)", e);
391
+ }
392
+
393
+ match fs::remove_file(&path) {
394
+ Ok(_) => {
395
+ removed_count += 1;
396
+ removed_size += size_mb;
397
+ // Unmark after successful deletion ~keep
398
+ if let Err(e) = self.unmark_deletion(&path) {
399
+ tracing::debug!("Failed to unmark deleted file: {} (non-critical)", e);
400
+ }
401
+ }
402
+ Err(e) => {
403
+ tracing::debug!("Failed to remove {:?}: {}", path, e);
404
+ // Unmark after failed deletion to allow retries ~keep
405
+ if let Err(e) = self.unmark_deletion(&path) {
406
+ tracing::debug!("Failed to unmark file after deletion error: {} (non-critical)", e);
407
+ }
408
+ }
409
+ }
410
+ }
411
+
412
+ Ok((removed_count, removed_size))
413
+ }
414
+
415
+ pub fn get_stats(&self) -> Result<CacheStats> {
416
+ let cache_path_str = self
417
+ .cache_dir
418
+ .to_str()
419
+ .ok_or_else(|| KreuzbergError::validation("Cache directory path contains invalid UTF-8".to_string()))?;
420
+ get_cache_metadata(cache_path_str)
421
+ }
422
+
423
+ pub fn cache_dir(&self) -> &Path {
424
+ &self.cache_dir
425
+ }
426
+
427
+ pub fn cache_type(&self) -> &str {
428
+ &self.cache_type
429
+ }
430
+ }
431
+
432
+ /// Generate a deterministic cache key from configuration parameters.
433
+ ///
434
+ /// # Algorithm
435
+ ///
436
+ /// Uses ahash (non-cryptographic 64-bit hash) for performance. Cache keys are
437
+ /// generated by:
438
+ /// 1. Sorting key-value pairs by key (for determinism)
439
+ /// 2. Concatenating as "key1=val1&key2=val2&..."
440
+ /// 3. Hashing with ahash and formatting as 32-character hex
441
+ ///
442
+ /// # Collision Probability
443
+ ///
444
+ /// AHash produces 64-bit hashes, leading to birthday paradox collisions:
445
+ /// - **~0.01%** probability at 1 million cache entries
446
+ /// - **~1%** probability at 100 million entries
447
+ /// - **~50%** probability at 4.3 billion (2^32) entries
448
+ ///
449
+ /// For context: P(collision) ≈ n^2 / (2 * 2^64) where n = number of entries.
450
+ ///
451
+ /// # Performance vs Security Trade-off
452
+ ///
453
+ /// - **ahash**: ~10x faster than SHA256, sufficient for cache keys
454
+ /// - **SHA256**: Collision-resistant but overkill for caching
455
+ /// - **Practical risk**: Low for typical usage (< 1M entries)
456
+ ///
457
+ /// # Impact of Collisions
458
+ ///
459
+ /// If two different configurations hash to the same key:
460
+ /// - One configuration reads the other's cached data
461
+ /// - Results in incorrect data served from cache
462
+ /// - Detected via metadata validation (size/mtime checks)
463
+ ///
464
+ /// # Recommendations
465
+ ///
466
+ /// - **< 1M entries**: ahash is safe and fast
467
+ /// - **> 100M entries**: Monitor cache size, consider periodic clearing
468
+ /// - **Critical data**: If collision risk is unacceptable, add SHA256 option
469
+ ///
470
+ /// # Example
471
+ ///
472
+ /// ```rust
473
+ /// use kreuzberg::cache::generate_cache_key;
474
+ ///
475
+ /// let parts = [("format", "pdf"), ("ocr", "true"), ("lang", "en")];
476
+ /// let key = generate_cache_key(&parts);
477
+ /// assert_eq!(key.len(), 32); // 64-bit hash, zero-padded to 32 hex digits
478
+ /// ```
479
+ pub fn generate_cache_key(parts: &[(&str, &str)]) -> String {
480
+ if parts.is_empty() {
481
+ return "empty".to_string();
482
+ }
483
+
484
+ let mut sorted_parts: Vec<_> = parts.to_vec();
485
+ sorted_parts.sort_by_key(|(k, _)| *k);
486
+
487
+ let estimated_size = sorted_parts.iter().map(|(k, v)| k.len() + v.len() + 2).sum::<usize>();
488
+ let mut cache_str = String::with_capacity(estimated_size);
489
+
490
+ for (i, (key, val)) in sorted_parts.iter().enumerate() {
491
+ if i > 0 {
492
+ cache_str.push('&');
493
+ }
494
+ cache_str.push_str(&format!("{}={}", key, val));
495
+ }
496
+
497
+ let mut hasher = AHasher::default();
498
+ cache_str.hash(&mut hasher);
499
+ let hash = hasher.finish();
500
+
501
+ format!("{:0width$x}", hash, width = CACHE_KEY_HASH_WIDTH)
502
+ }
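To sanity-check the collision figures quoted in the doc comment above, here is a small stand-alone calculation of the birthday bound P ≈ n² / (2 · 2⁶⁴); it is illustrative only and not part of the crate.

```rust
// Birthday-bound approximation for 64-bit cache keys: P ≈ n^2 / (2 * 2^64).
fn collision_probability(n: f64) -> f64 {
    n * n / (2.0 * 2f64.powi(64))
}

fn main() {
    for n in [1.0e6_f64, 1.0e8, 4.3e9] {
        println!("n = {:>12.0}: P ≈ {:.2e}", n, collision_probability(n));
    }
    // Prints roughly 2.7e-8, 2.7e-4, and 5.0e-1 respectively, matching the
    // ~0.000003%, ~0.03%, and ~50% figures above.
}
```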
503
+
504
+ #[allow(unsafe_code)]
505
+ pub fn get_available_disk_space(path: &str) -> Result<f64> {
506
+ #[cfg(unix)]
507
+ {
508
+ let path = Path::new(path);
509
+ let check_path = if path.exists() {
510
+ path
511
+ } else if let Some(parent) = path.parent() {
512
+ parent
513
+ } else {
514
+ Path::new("/")
515
+ };
516
+
517
+ use libc::{statvfs, statvfs as statvfs_struct};
518
+ use std::ffi::CString;
519
+
520
+ let path_str = check_path
521
+ .to_str()
522
+ .ok_or_else(|| KreuzbergError::validation("Path contains invalid UTF-8".to_string()))?;
523
+ let c_path = CString::new(path_str).map_err(|e| KreuzbergError::validation(format!("Invalid path: {}", e)))?;
524
+
525
+ let mut stat: statvfs_struct = unsafe { std::mem::zeroed() };
526
+
527
+ let result = unsafe { statvfs(c_path.as_ptr(), &mut stat) };
528
+
529
+ if result == 0 {
530
+ #[allow(clippy::unnecessary_cast)]
531
+ let available_bytes = stat.f_bavail as u64 * stat.f_frsize as u64;
532
+ Ok(available_bytes as f64 / (1024.0 * 1024.0))
533
+ } else {
534
+ tracing::debug!("Failed to get disk stats for {}: errno {}", path_str, result);
535
+ Ok(10000.0)
536
+ }
537
+ }
538
+
539
+ #[cfg(not(unix))]
540
+ {
541
+ let _ = path;
542
+ Ok(10000.0)
543
+ }
544
+ }
545
+
546
+ fn scan_cache_directory(cache_dir: &str) -> Result<CacheScanResult> {
547
+ let dir_path = Path::new(cache_dir);
548
+
549
+ if !dir_path.exists() {
550
+ return Ok(CacheScanResult {
551
+ stats: CacheStats {
552
+ total_files: 0,
553
+ total_size_mb: 0.0,
554
+ available_space_mb: get_available_disk_space(cache_dir)?,
555
+ oldest_file_age_days: 0.0,
556
+ newest_file_age_days: 0.0,
557
+ },
558
+ entries: Vec::new(),
559
+ });
560
+ }
561
+
562
+ let current_time = SystemTime::now()
563
+ .duration_since(UNIX_EPOCH)
564
+ .unwrap_or_default()
565
+ .as_secs() as f64;
566
+
567
+ let read_dir =
568
+ fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
569
+
570
+ let mut total_size = 0u64;
571
+ let mut oldest_age = 0.0f64;
572
+ let mut newest_age = f64::INFINITY;
573
+ let mut entries = Vec::new();
574
+
575
+ for entry in read_dir {
576
+ let entry = match entry {
577
+ Ok(e) => e,
578
+ Err(e) => {
579
+ tracing::debug!("Error reading cache entry: {}", e);
580
+ continue;
581
+ }
582
+ };
583
+
584
+ let metadata = match entry.metadata() {
585
+ Ok(m) if m.is_file() => m,
586
+ _ => continue,
587
+ };
588
+
589
+ let path = entry.path();
590
+ if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
591
+ continue;
592
+ }
593
+
594
+ let modified = match metadata.modified() {
595
+ Ok(m) => m,
596
+ Err(e) => {
597
+ tracing::debug!("Error getting modification time for {:?}: {}", path, e);
598
+ continue;
599
+ }
600
+ };
601
+
602
+ let size = metadata.len();
603
+ total_size += size;
604
+
605
+ if let Ok(duration) = modified.duration_since(UNIX_EPOCH) {
606
+ let age_days = (current_time - duration.as_secs() as f64) / (24.0 * 3600.0);
607
+ oldest_age = oldest_age.max(age_days);
608
+ newest_age = newest_age.min(age_days);
609
+ }
610
+
611
+ entries.push(CacheEntry { path, size, modified });
612
+ }
613
+
614
+ if entries.is_empty() {
615
+ oldest_age = 0.0;
616
+ newest_age = 0.0;
617
+ }
618
+
619
+ Ok(CacheScanResult {
620
+ stats: CacheStats {
621
+ total_files: entries.len(),
622
+ total_size_mb: total_size as f64 / (1024.0 * 1024.0),
623
+ available_space_mb: get_available_disk_space(cache_dir)?,
624
+ oldest_file_age_days: oldest_age,
625
+ newest_file_age_days: newest_age,
626
+ },
627
+ entries,
628
+ })
629
+ }
630
+
631
+ pub fn get_cache_metadata(cache_dir: &str) -> Result<CacheStats> {
632
+ let scan_result = scan_cache_directory(cache_dir)?;
633
+ Ok(scan_result.stats)
634
+ }
635
+
636
+ pub fn cleanup_cache(
637
+ cache_dir: &str,
638
+ max_age_days: f64,
639
+ max_size_mb: f64,
640
+ target_size_ratio: f64,
641
+ ) -> Result<(usize, f64)> {
642
+ let scan_result = scan_cache_directory(cache_dir)?;
643
+
644
+ if scan_result.entries.is_empty() {
645
+ return Ok((0, 0.0));
646
+ }
647
+
648
+ let current_time = SystemTime::now()
649
+ .duration_since(UNIX_EPOCH)
650
+ .unwrap_or_default()
651
+ .as_secs() as f64;
652
+ let max_age_seconds = max_age_days * 24.0 * 3600.0;
653
+
654
+ let mut removed_count = 0;
655
+ let mut removed_size = 0.0;
656
+ let mut remaining_entries = Vec::new();
657
+ let mut total_remaining_size = 0u64;
658
+
659
+ for entry in scan_result.entries {
660
+ if let Ok(age) = entry.modified.duration_since(UNIX_EPOCH) {
661
+ let age_seconds = current_time - age.as_secs() as f64;
662
+ if age_seconds > max_age_seconds {
663
+ match fs::remove_file(&entry.path) {
664
+ Ok(_) => {
665
+ removed_count += 1;
666
+ removed_size += entry.size as f64 / (1024.0 * 1024.0);
667
+ }
668
+ Err(e) => {
669
+ tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
670
+ }
671
+ }
672
+ } else {
673
+ total_remaining_size += entry.size;
674
+ remaining_entries.push(entry);
675
+ }
676
+ }
677
+ }
678
+
679
+ let mut total_size_mb = total_remaining_size as f64 / (1024.0 * 1024.0);
680
+
681
+ if total_size_mb > max_size_mb {
682
+ remaining_entries.sort_by_key(|e| e.modified);
683
+
684
+ let target_size = max_size_mb * target_size_ratio;
685
+
686
+ for entry in remaining_entries {
687
+ if total_size_mb <= target_size {
688
+ break;
689
+ }
690
+
691
+ match fs::remove_file(&entry.path) {
692
+ Ok(_) => {
693
+ let size_mb = entry.size as f64 / (1024.0 * 1024.0);
694
+ removed_count += 1;
695
+ removed_size += size_mb;
696
+ total_size_mb -= size_mb;
697
+ }
698
+ Err(e) => {
699
+ tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
700
+ }
701
+ }
702
+ }
703
+ }
704
+
705
+ Ok((removed_count, removed_size))
706
+ }
707
+
708
+ pub fn smart_cleanup_cache(
709
+ cache_dir: &str,
710
+ max_age_days: f64,
711
+ max_size_mb: f64,
712
+ min_free_space_mb: f64,
713
+ ) -> Result<(usize, f64)> {
714
+ let stats = get_cache_metadata(cache_dir)?;
715
+
716
+ let needs_cleanup = stats.available_space_mb < min_free_space_mb
717
+ || stats.total_size_mb > max_size_mb
718
+ || stats.oldest_file_age_days > max_age_days;
719
+
720
+ if !needs_cleanup {
721
+ return Ok((0, 0.0));
722
+ }
723
+
724
+ let target_ratio = if stats.available_space_mb < min_free_space_mb {
725
+ 0.5
726
+ } else {
727
+ 0.8
728
+ };
729
+
730
+ cleanup_cache(cache_dir, max_age_days, max_size_mb, target_ratio)
731
+ }
732
+
733
+ pub fn filter_old_cache_entries(cache_times: &[f64], current_time: f64, max_age_seconds: f64) -> Vec<usize> {
734
+ cache_times
735
+ .iter()
736
+ .enumerate()
737
+ .filter_map(|(idx, &time)| {
738
+ if current_time - time > max_age_seconds {
739
+ Some(idx)
740
+ } else {
741
+ None
742
+ }
743
+ })
744
+ .collect()
745
+ }
746
+
747
+ pub fn sort_cache_by_access_time(mut entries: Vec<(String, f64)>) -> Vec<String> {
748
+ entries.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));
749
+ entries.into_iter().map(|(key, _)| key).collect()
750
+ }
751
+
752
+ pub fn fast_hash(data: &[u8]) -> u64 {
753
+ let mut hasher = AHasher::default();
754
+ data.hash(&mut hasher);
755
+ hasher.finish()
756
+ }
757
+
758
+ pub fn validate_cache_key(key: &str) -> bool {
759
+ key.len() == 32 && key.chars().all(|c| c.is_ascii_hexdigit())
760
+ }
761
+
762
+ pub fn is_cache_valid(cache_path: &str, max_age_days: f64) -> bool {
763
+ let path = Path::new(cache_path);
764
+
765
+ if !path.exists() {
766
+ return false;
767
+ }
768
+
769
+ match fs::metadata(path) {
770
+ Ok(metadata) => match metadata.modified() {
771
+ Ok(modified) => match SystemTime::now().duration_since(modified) {
772
+ Ok(elapsed) => {
773
+ let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
774
+ age_days <= max_age_days
775
+ }
776
+ Err(_) => false,
777
+ },
778
+ Err(_) => false,
779
+ },
780
+ Err(_) => false,
781
+ }
782
+ }
783
+
784
+ pub fn clear_cache_directory(cache_dir: &str) -> Result<(usize, f64)> {
785
+ let dir_path = Path::new(cache_dir);
786
+
787
+ if !dir_path.exists() {
788
+ return Ok((0, 0.0));
789
+ }
790
+
791
+ let mut removed_count = 0;
792
+ let mut removed_size = 0.0;
793
+
794
+ let read_dir =
795
+ fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
796
+
797
+ for entry in read_dir {
798
+ let entry = match entry {
799
+ Ok(e) => e,
800
+ Err(e) => {
801
+ tracing::debug!("Error reading entry: {}", e);
802
+ continue;
803
+ }
804
+ };
805
+
806
+ let metadata = match entry.metadata() {
807
+ Ok(m) if m.is_file() => m,
808
+ _ => continue,
809
+ };
810
+
811
+ let path = entry.path();
812
+ if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
813
+ continue;
814
+ }
815
+
816
+ let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
817
+ match fs::remove_file(&path) {
818
+ Ok(_) => {
819
+ removed_count += 1;
820
+ removed_size += size_mb;
821
+ }
822
+ Err(e) => {
823
+ tracing::debug!("Failed to remove {:?}: {}", path, e);
824
+ }
825
+ }
826
+ }
827
+
828
+ Ok((removed_count, removed_size))
829
+ }
830
+
831
+ pub fn batch_cleanup_caches(
832
+ cache_dirs: &[&str],
833
+ max_age_days: f64,
834
+ max_size_mb: f64,
835
+ min_free_space_mb: f64,
836
+ ) -> Result<Vec<(usize, f64)>> {
837
+ cache_dirs
838
+ .iter()
839
+ .map(|dir| smart_cleanup_cache(dir, max_age_days, max_size_mb, min_free_space_mb))
840
+ .collect()
841
+ }
842
+
843
+ #[cfg(test)]
844
+ mod tests {
845
+ use super::*;
846
+ use std::fs::File;
847
+ use tempfile::tempdir;
848
+
849
+ #[test]
850
+ fn test_generate_cache_key_empty() {
851
+ let result = generate_cache_key(&[]);
852
+ assert_eq!(result, "empty");
853
+ }
854
+
855
+ #[test]
856
+ fn test_generate_cache_key_consistent() {
857
+ let parts = [("key1", "value1"), ("key2", "value2")];
858
+ let key1 = generate_cache_key(&parts);
859
+ let key2 = generate_cache_key(&parts);
860
+ assert_eq!(key1, key2);
861
+ assert_eq!(key1.len(), 32);
862
+ }
863
+
864
+ #[test]
865
+ fn test_validate_cache_key() {
866
+ assert!(validate_cache_key("0123456789abcdef0123456789abcdef"));
867
+ assert!(!validate_cache_key("invalid_key"));
868
+ assert!(!validate_cache_key("0123456789abcdef"));
869
+ assert!(!validate_cache_key("0123456789abcdef0123456789abcdef0"));
870
+ }
871
+
872
+ #[test]
873
+ fn test_fast_hash() {
874
+ let data1 = b"test data";
875
+ let data2 = b"test data";
876
+ let data3 = b"different data";
877
+
878
+ assert_eq!(fast_hash(data1), fast_hash(data2));
879
+ assert_ne!(fast_hash(data1), fast_hash(data3));
880
+ }
881
+
882
+ #[test]
883
+ fn test_filter_old_cache_entries() {
884
+ let cache_times = vec![100.0, 200.0, 300.0, 400.0];
885
+ let current_time = 500.0;
886
+ let max_age = 200.0;
887
+
888
+ let old_indices = filter_old_cache_entries(&cache_times, current_time, max_age);
889
+ assert_eq!(old_indices, vec![0, 1]);
890
+ }
891
+
892
+ #[test]
893
+ fn test_sort_cache_by_access_time() {
894
+ let entries = vec![
895
+ ("key3".to_string(), 300.0),
896
+ ("key1".to_string(), 100.0),
897
+ ("key2".to_string(), 200.0),
898
+ ];
899
+
900
+ let sorted = sort_cache_by_access_time(entries);
901
+ assert_eq!(sorted, vec!["key1", "key2", "key3"]);
902
+ }
903
+
904
+ #[test]
905
+ fn test_sort_cache_with_nan() {
906
+ let entries = vec![
907
+ ("key1".to_string(), 100.0),
908
+ ("key2".to_string(), f64::NAN),
909
+ ("key3".to_string(), 200.0),
910
+ ];
911
+
912
+ let sorted = sort_cache_by_access_time(entries);
913
+ assert_eq!(sorted.len(), 3);
914
+ }
915
+
916
+ #[test]
917
+ fn test_cache_metadata() {
918
+ let temp_dir = tempdir().unwrap();
919
+ let cache_dir = temp_dir.path().to_str().unwrap();
920
+
921
+ let file1 = temp_dir.path().join("test1.msgpack");
922
+ let file2 = temp_dir.path().join("test2.msgpack");
923
+ File::create(&file1).unwrap();
924
+ File::create(&file2).unwrap();
925
+
926
+ let stats = get_cache_metadata(cache_dir).unwrap();
927
+ assert_eq!(stats.total_files, 2);
928
+ assert!(stats.available_space_mb > 0.0);
929
+ }
930
+
931
+ #[test]
932
+ fn test_cleanup_cache() {
933
+ use std::io::Write;
934
+
935
+ let temp_dir = tempdir().unwrap();
936
+ let cache_dir = temp_dir.path().to_str().unwrap();
937
+
938
+ let file1 = temp_dir.path().join("old.msgpack");
939
+ let mut f = File::create(&file1).unwrap();
940
+ f.write_all(b"test data for cleanup").unwrap();
941
+ drop(f);
942
+
943
+ let (removed_count, _) = cleanup_cache(cache_dir, 1000.0, 0.000001, 0.8).unwrap();
944
+ assert_eq!(removed_count, 1);
945
+ assert!(!file1.exists());
946
+ }
947
+
948
+ #[test]
949
+ fn test_is_cache_valid() {
950
+ let temp_dir = tempdir().unwrap();
951
+ let file_path = temp_dir.path().join("test.msgpack");
952
+ File::create(&file_path).unwrap();
953
+
954
+ let path_str = file_path.to_str().unwrap();
955
+
956
+ assert!(is_cache_valid(path_str, 1.0));
957
+
958
+ assert!(!is_cache_valid("/nonexistent/path", 1.0));
959
+ }
960
+
961
+ #[test]
962
+ fn test_generic_cache_new() {
963
+ let temp_dir = tempdir().unwrap();
964
+ let cache = GenericCache::new(
965
+ "test".to_string(),
966
+ Some(temp_dir.path().to_str().unwrap().to_string()),
967
+ 30.0,
968
+ 500.0,
969
+ 1000.0,
970
+ )
971
+ .unwrap();
972
+
973
+ assert_eq!(cache.cache_type, "test");
974
+ assert!(cache.cache_dir.exists());
975
+ }
976
+
977
+ #[test]
978
+ fn test_generic_cache_get_set() {
979
+ let temp_dir = tempdir().unwrap();
980
+ let cache = GenericCache::new(
981
+ "test".to_string(),
982
+ Some(temp_dir.path().to_str().unwrap().to_string()),
983
+ 30.0,
984
+ 500.0,
985
+ 1000.0,
986
+ )
987
+ .unwrap();
988
+
989
+ let cache_key = "test_key";
990
+ let data = b"test data".to_vec();
991
+
992
+ cache.set(cache_key, data.clone(), None).unwrap();
993
+
994
+ let result = cache.get(cache_key, None).unwrap();
995
+ assert_eq!(result, Some(data));
996
+ }
997
+
998
+ #[test]
999
+ fn test_generic_cache_get_miss() {
1000
+ let temp_dir = tempdir().unwrap();
1001
+ let cache = GenericCache::new(
1002
+ "test".to_string(),
1003
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1004
+ 30.0,
1005
+ 500.0,
1006
+ 1000.0,
1007
+ )
1008
+ .unwrap();
1009
+
1010
+ let result = cache.get("nonexistent", None).unwrap();
1011
+ assert_eq!(result, None);
1012
+ }
1013
+
1014
+ #[test]
1015
+ fn test_generic_cache_source_file_invalidation() {
1016
+ use std::io::Write;
1017
+ use std::thread::sleep;
1018
+ use std::time::Duration;
1019
+
1020
+ let temp_dir = tempdir().unwrap();
1021
+ let cache = GenericCache::new(
1022
+ "test".to_string(),
1023
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1024
+ 30.0,
1025
+ 500.0,
1026
+ 1000.0,
1027
+ )
1028
+ .unwrap();
1029
+
1030
+ let source_file = temp_dir.path().join("source.txt");
1031
+ let mut f = File::create(&source_file).unwrap();
1032
+ f.write_all(b"original content").unwrap();
1033
+ drop(f);
1034
+
1035
+ let cache_key = "test_key";
1036
+ let data = b"cached data".to_vec();
1037
+
1038
+ cache
1039
+ .set(cache_key, data.clone(), Some(source_file.to_str().unwrap()))
1040
+ .unwrap();
1041
+
1042
+ let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1043
+ assert_eq!(result, Some(data.clone()));
1044
+
1045
+ sleep(Duration::from_millis(10));
1046
+ let mut f = fs::OpenOptions::new()
1047
+ .write(true)
1048
+ .truncate(true)
1049
+ .open(&source_file)
1050
+ .unwrap();
1051
+ f.write_all(b"modified content with different size").unwrap();
1052
+ drop(f);
1053
+
1054
+ let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1055
+ assert_eq!(result, None);
1056
+ }
1057
+
1058
+ #[test]
1059
+ fn test_generic_cache_processing_locks() {
1060
+ let temp_dir = tempdir().unwrap();
1061
+ let cache = GenericCache::new(
1062
+ "test".to_string(),
1063
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1064
+ 30.0,
1065
+ 500.0,
1066
+ 1000.0,
1067
+ )
1068
+ .unwrap();
1069
+
1070
+ let cache_key = "test_key";
1071
+
1072
+ assert!(!cache.is_processing(cache_key).unwrap());
1073
+
1074
+ cache.mark_processing(cache_key.to_string()).unwrap();
1075
+ assert!(cache.is_processing(cache_key).unwrap());
1076
+
1077
+ cache.mark_complete(cache_key).unwrap();
1078
+ assert!(!cache.is_processing(cache_key).unwrap());
1079
+ }
1080
+
1081
+ #[test]
1082
+ fn test_generic_cache_clear() {
1083
+ let temp_dir = tempdir().unwrap();
1084
+ let cache = GenericCache::new(
1085
+ "test".to_string(),
1086
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1087
+ 30.0,
1088
+ 500.0,
1089
+ 1000.0,
1090
+ )
1091
+ .unwrap();
1092
+
1093
+ cache.set("key1", b"data1".to_vec(), None).unwrap();
1094
+ cache.set("key2", b"data2".to_vec(), None).unwrap();
1095
+
1096
+ let (removed, _freed) = cache.clear().unwrap();
1097
+ assert_eq!(removed, 2);
1098
+
1099
+ assert_eq!(cache.get("key1", None).unwrap(), None);
1100
+ assert_eq!(cache.get("key2", None).unwrap(), None);
1101
+ }
1102
+
1103
+ #[test]
1104
+ fn test_generic_cache_stats() {
1105
+ let temp_dir = tempdir().unwrap();
1106
+ let cache = GenericCache::new(
1107
+ "test".to_string(),
1108
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1109
+ 30.0,
1110
+ 500.0,
1111
+ 1000.0,
1112
+ )
1113
+ .unwrap();
1114
+
1115
+ cache.set("key1", b"test data 1".to_vec(), None).unwrap();
1116
+ cache.set("key2", b"test data 2".to_vec(), None).unwrap();
1117
+
1118
+ let stats = cache.get_stats().unwrap();
1119
+ assert_eq!(stats.total_files, 2);
1120
+ assert!(stats.total_size_mb > 0.0);
1121
+ assert!(stats.available_space_mb > 0.0);
1122
+ }
1123
+
1124
+ #[test]
1125
+ fn test_generic_cache_expired_entry() {
1126
+ use std::io::Write;
1127
+
1128
+ let temp_dir = tempdir().unwrap();
1129
+ let cache = GenericCache::new(
1130
+ "test".to_string(),
1131
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1132
+ 0.000001,
1133
+ 500.0,
1134
+ 1000.0,
1135
+ )
1136
+ .unwrap();
1137
+
1138
+ let cache_key = "test_key";
1139
+
1140
+ let cache_path = cache.cache_dir.join(format!("{}.msgpack", cache_key));
1141
+ let mut f = File::create(&cache_path).unwrap();
1142
+ f.write_all(b"test data").unwrap();
1143
+ drop(f);
1144
+
1145
+ let old_time = SystemTime::now() - std::time::Duration::from_secs(60);
1146
+ filetime::set_file_mtime(&cache_path, filetime::FileTime::from_system_time(old_time)).unwrap();
1147
+
1148
+ let result = cache.get(cache_key, None).unwrap();
1149
+ assert_eq!(result, None);
1150
+ }
1151
+
1152
+ #[test]
1153
+ fn test_generic_cache_properties() {
1154
+ let temp_dir = tempdir().unwrap();
1155
+ let cache = GenericCache::new(
1156
+ "test".to_string(),
1157
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1158
+ 30.0,
1159
+ 500.0,
1160
+ 1000.0,
1161
+ )
1162
+ .unwrap();
1163
+
1164
+ assert_eq!(cache.cache_type(), "test");
1165
+ assert!(cache.cache_dir().to_string_lossy().contains("test"));
1166
+ }
1167
+ }