kreuzberg 4.0.0.pre.rc.13 → 4.0.0.pre.rc.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (369)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +538 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +105 -2
  8. data/README.md +454 -454
  9. data/Rakefile +33 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -341
  12. data/ext/kreuzberg_rb/extconf.rb +45 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
  14. data/ext/kreuzberg_rb/native/Cargo.lock +6940 -6941
  15. data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +15 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
  23. data/extconf.rb +28 -28
  24. data/kreuzberg.gemspec +214 -214
  25. data/lib/kreuzberg/api_proxy.rb +142 -142
  26. data/lib/kreuzberg/cache_api.rb +81 -81
  27. data/lib/kreuzberg/cli.rb +55 -55
  28. data/lib/kreuzberg/cli_proxy.rb +127 -127
  29. data/lib/kreuzberg/config.rb +724 -724
  30. data/lib/kreuzberg/error_context.rb +80 -80
  31. data/lib/kreuzberg/errors.rb +118 -118
  32. data/lib/kreuzberg/extraction_api.rb +340 -340
  33. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  36. data/lib/kreuzberg/result.rb +279 -279
  37. data/lib/kreuzberg/setup_lib_path.rb +80 -80
  38. data/lib/kreuzberg/validator_protocol.rb +89 -89
  39. data/lib/kreuzberg/version.rb +5 -5
  40. data/lib/kreuzberg.rb +109 -109
  41. data/lib/{pdfium.dll → libpdfium.dylib} +0 -0
  42. data/sig/kreuzberg/internal.rbs +184 -184
  43. data/sig/kreuzberg.rbs +546 -546
  44. data/spec/binding/cache_spec.rb +227 -227
  45. data/spec/binding/cli_proxy_spec.rb +85 -85
  46. data/spec/binding/cli_spec.rb +55 -55
  47. data/spec/binding/config_spec.rb +345 -345
  48. data/spec/binding/config_validation_spec.rb +283 -283
  49. data/spec/binding/error_handling_spec.rb +213 -213
  50. data/spec/binding/errors_spec.rb +66 -66
  51. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  52. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  53. data/spec/binding/plugins/validator_spec.rb +274 -274
  54. data/spec/fixtures/config.toml +39 -39
  55. data/spec/fixtures/config.yaml +41 -41
  56. data/spec/fixtures/invalid_config.toml +4 -4
  57. data/spec/smoke/package_spec.rb +178 -178
  58. data/spec/spec_helper.rb +42 -42
  59. data/vendor/Cargo.toml +1 -1
  60. data/vendor/kreuzberg/Cargo.toml +5 -5
  61. data/vendor/kreuzberg/README.md +230 -230
  62. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
  63. data/vendor/kreuzberg/build.rs +843 -843
  64. data/vendor/kreuzberg/src/api/error.rs +81 -81
  65. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  66. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  67. data/vendor/kreuzberg/src/api/server.rs +353 -353
  68. data/vendor/kreuzberg/src/api/types.rs +170 -170
  69. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  70. data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
  71. data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
  72. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  73. data/vendor/kreuzberg/src/core/config.rs +1080 -1080
  74. data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
  75. data/vendor/kreuzberg/src/core/io.rs +329 -329
  76. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  77. data/vendor/kreuzberg/src/core/mod.rs +47 -47
  78. data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
  79. data/vendor/kreuzberg/src/embeddings.rs +500 -500
  80. data/vendor/kreuzberg/src/error.rs +431 -431
  81. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  82. data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
  83. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  84. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  85. data/vendor/kreuzberg/src/extraction/html.rs +601 -601
  86. data/vendor/kreuzberg/src/extraction/image.rs +491 -491
  87. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -574
  88. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
  89. data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
  90. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  91. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  92. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  93. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  94. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
  95. data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
  96. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  97. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  98. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  99. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  100. data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
  101. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
  102. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
  103. data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
  104. data/vendor/kreuzberg/src/extractors/email.rs +157 -157
  105. data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
  106. data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
  107. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
  108. data/vendor/kreuzberg/src/extractors/html.rs +407 -407
  109. data/vendor/kreuzberg/src/extractors/image.rs +219 -219
  110. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
  111. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
  112. data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
  113. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
  114. data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
  115. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  116. data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
  117. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
  118. data/vendor/kreuzberg/src/extractors/pdf.rs +749 -749
  119. data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
  120. data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
  121. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
  122. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  123. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  124. data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
  125. data/vendor/kreuzberg/src/extractors/text.rs +265 -265
  126. data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
  127. data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
  128. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  129. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  130. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  131. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  132. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  133. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  134. data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
  135. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  136. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  137. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  138. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
  139. data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
  140. data/vendor/kreuzberg/src/lib.rs +113 -113
  141. data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
  142. data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
  143. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  144. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  145. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  146. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  147. data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
  148. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  149. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  150. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
  151. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  152. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  153. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  154. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  155. data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
  156. data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
  157. data/vendor/kreuzberg/src/pdf/error.rs +130 -130
  158. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  159. data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
  160. data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
  161. data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
  162. data/vendor/kreuzberg/src/pdf/table.rs +420 -420
  163. data/vendor/kreuzberg/src/pdf/text.rs +240 -240
  164. data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
  165. data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
  166. data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
  167. data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
  168. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
  169. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  170. data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
  171. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  172. data/vendor/kreuzberg/src/text/mod.rs +25 -25
  173. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  174. data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
  175. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  176. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  177. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  178. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  179. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  180. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  181. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  182. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  183. data/vendor/kreuzberg/src/types.rs +1055 -1055
  184. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  185. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  186. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  187. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  188. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  189. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  190. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  191. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  192. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  193. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  194. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  195. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  196. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  198. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  199. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  200. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  201. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  202. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  203. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  204. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  205. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  206. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  207. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  208. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  209. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  210. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  211. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  212. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  213. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  214. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  215. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  216. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  217. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  218. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  219. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  220. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  221. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  222. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  223. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  224. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  225. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  226. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  227. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  228. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  229. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  230. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  231. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  232. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  233. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  234. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  235. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  236. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  237. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  238. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  239. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  240. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  241. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  242. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  243. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  244. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  245. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  246. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  247. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  248. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  249. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  250. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  251. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  252. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  253. data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
  254. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
  255. data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
  256. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  257. data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
  258. data/vendor/kreuzberg/tests/config_features.rs +612 -612
  259. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
  260. data/vendor/kreuzberg/tests/core_integration.rs +510 -510
  261. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  262. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
  263. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  264. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  265. data/vendor/kreuzberg/tests/email_integration.rs +327 -327
  266. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  267. data/vendor/kreuzberg/tests/error_handling.rs +402 -402
  268. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  269. data/vendor/kreuzberg/tests/format_integration.rs +164 -164
  270. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  271. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  272. data/vendor/kreuzberg/tests/image_integration.rs +255 -255
  273. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  274. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  275. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  276. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  277. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  278. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  279. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  280. data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
  281. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
  282. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
  283. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
  284. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  285. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
  286. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  287. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  288. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
  289. data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
  290. data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
  291. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
  292. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
  293. data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
  294. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  295. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
  296. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
  297. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
  298. data/vendor/kreuzberg/tests/security_validation.rs +416 -416
  299. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  300. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
  301. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
  302. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
  303. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  304. data/vendor/kreuzberg-ffi/Cargo.toml +1 -1
  305. data/vendor/kreuzberg-ffi/README.md +851 -851
  306. data/vendor/kreuzberg-ffi/build.rs +176 -176
  307. data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
  308. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
  309. data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
  310. data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
  311. data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
  312. data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
  313. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
  314. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
  315. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
  316. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
  317. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
  318. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
  319. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
  320. data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
  321. data/vendor/kreuzberg-tesseract/LICENSE +22 -22
  322. data/vendor/kreuzberg-tesseract/README.md +399 -399
  323. data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
  324. data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
  325. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
  326. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
  327. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
  328. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
  329. data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
  330. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
  331. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
  332. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
  333. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
  334. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
  335. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
  336. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
  337. data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
  338. data/vendor/rb-sys/Cargo.lock +393 -393
  339. data/vendor/rb-sys/Cargo.toml +70 -70
  340. data/vendor/rb-sys/Cargo.toml.orig +57 -57
  341. data/vendor/rb-sys/LICENSE-APACHE +190 -190
  342. data/vendor/rb-sys/LICENSE-MIT +21 -21
  343. data/vendor/rb-sys/build/features.rs +111 -111
  344. data/vendor/rb-sys/build/main.rs +286 -286
  345. data/vendor/rb-sys/build/stable_api_config.rs +155 -155
  346. data/vendor/rb-sys/build/version.rs +50 -50
  347. data/vendor/rb-sys/readme.md +36 -36
  348. data/vendor/rb-sys/src/bindings.rs +21 -21
  349. data/vendor/rb-sys/src/hidden.rs +11 -11
  350. data/vendor/rb-sys/src/lib.rs +35 -35
  351. data/vendor/rb-sys/src/macros.rs +371 -371
  352. data/vendor/rb-sys/src/memory.rs +53 -53
  353. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
  354. data/vendor/rb-sys/src/special_consts.rs +31 -31
  355. data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
  356. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
  357. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
  358. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
  359. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
  360. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
  361. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
  362. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
  363. data/vendor/rb-sys/src/stable_api.rs +260 -260
  364. data/vendor/rb-sys/src/symbol.rs +31 -31
  365. data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
  366. data/vendor/rb-sys/src/utils.rs +89 -89
  367. data/vendor/rb-sys/src/value_type.rs +7 -7
  368. metadata +73 -4
  369. data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +0 -12
data/vendor/kreuzberg/src/cache/mod.rs
@@ -1,1167 +1,1167 @@
1
- //! Generic cache implementation with lock poisoning recovery.
2
- //!
3
- //! # Lock Poisoning Handling
4
- //!
5
- //! This module uses `Arc<Mutex<T>>` for thread-safe state management and implements
6
- //! explicit lock poisoning recovery throughout all public methods:
7
- //!
8
- //! **What is lock poisoning?**
9
- //! - When a thread panics while holding a Mutex, the lock becomes "poisoned"
10
- //! - Rust marks the Mutex to indicate data may be in an inconsistent state
11
- //! - Subsequent lock attempts return `Err(PoisonError)` instead of acquiring the lock
12
- //!
13
- //! **Recovery strategy:**
14
- //! - All `.lock()` calls use `.map_err()` to convert `PoisonError` into `KreuzbergError::LockPoisoned`
15
- //! - The error propagates to callers via `Result` returns (never `.unwrap()` on locks)
16
- //! - Provides clear error messages indicating which mutex is poisoned
17
- //! - Follows CLAUDE.md requirement: "Lock poisoning must be handled - never `.unwrap()` on Mutex/RwLock"
18
- //!
19
- //! **Affected state:**
20
- //! - `processing_locks`: Tracks cache keys currently being processed (6 lock sites)
21
- //! - `deleting_files`: Prevents read-during-delete race conditions (3 lock sites)
22
- //!
23
- //! This approach ensures that lock poisoning (rare in practice) is surfaced to users
24
- //! rather than causing panics, maintaining system stability during concurrent operations.
25
-
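The recovery strategy described in the doc comment above boils down to the following self-contained sketch (an illustration only, not code from the package; `CacheError` stands in for the crate's `KreuzbergError`):

```rust
use std::collections::HashSet;
use std::sync::{Arc, Mutex};

// Stand-in for KreuzbergError; only the LockPoisoned variant matters here.
#[derive(Debug)]
enum CacheError {
    LockPoisoned(String),
}

struct Cache {
    processing_locks: Arc<Mutex<HashSet<String>>>,
}

impl Cache {
    fn is_processing(&self, key: &str) -> Result<bool, CacheError> {
        // Never `.unwrap()` the lock: if another thread panicked while holding it,
        // the PoisonError is converted into a typed error and returned to the caller.
        let locks = self
            .processing_locks
            .lock()
            .map_err(|e| CacheError::LockPoisoned(format!("processing_locks mutex poisoned: {e}")))?;
        Ok(locks.contains(key))
    }
}

fn main() {
    let cache = Cache {
        processing_locks: Arc::new(Mutex::new(HashSet::new())),
    };
    assert!(!cache.is_processing("some-key").unwrap());
}
```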
26
- use crate::error::{KreuzbergError, Result};
27
- use ahash::AHasher;
28
- use serde::{Deserialize, Serialize};
29
- use std::collections::HashSet;
30
- use std::fs;
31
-
32
- /// Cache key hash format width (32 hex digits for u64 hash)
33
- const CACHE_KEY_HASH_WIDTH: usize = 32;
34
- use std::hash::{Hash, Hasher};
35
- use std::path::{Path, PathBuf};
36
- use std::sync::atomic::{AtomicUsize, Ordering};
37
- use std::sync::{Arc, Mutex};
38
- use std::time::{SystemTime, UNIX_EPOCH};
39
-
40
- #[derive(Debug, Clone, Serialize, Deserialize)]
41
- pub struct CacheStats {
42
- pub total_files: usize,
43
- pub total_size_mb: f64,
44
- pub available_space_mb: f64,
45
- pub oldest_file_age_days: f64,
46
- pub newest_file_age_days: f64,
47
- }
48
-
49
- #[derive(Debug, Clone)]
50
- struct CacheEntry {
51
- path: PathBuf,
52
- size: u64,
53
- modified: SystemTime,
54
- }
55
-
56
- struct CacheScanResult {
57
- stats: CacheStats,
58
- entries: Vec<CacheEntry>,
59
- }
60
-
61
- pub struct GenericCache {
62
- cache_dir: PathBuf,
63
- cache_type: String,
64
- max_age_days: f64,
65
- max_cache_size_mb: f64,
66
- min_free_space_mb: f64,
67
- processing_locks: Arc<Mutex<HashSet<String>>>,
68
- /// Tracks cache keys being deleted to prevent read-during-delete race conditions
69
- deleting_files: Arc<Mutex<HashSet<PathBuf>>>,
70
- /// Counter for triggering periodic cleanup (every 100 writes)
71
- write_counter: Arc<AtomicUsize>,
72
- }
73
-
74
- impl GenericCache {
75
- pub fn new(
76
- cache_type: String,
77
- cache_dir: Option<String>,
78
- max_age_days: f64,
79
- max_cache_size_mb: f64,
80
- min_free_space_mb: f64,
81
- ) -> Result<Self> {
82
- let cache_dir_path = if let Some(dir) = cache_dir {
83
- PathBuf::from(dir).join(&cache_type)
84
- } else {
85
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
86
- std::env::current_dir()?.join(".kreuzberg").join(&cache_type)
87
- };
88
-
89
- fs::create_dir_all(&cache_dir_path)
90
- .map_err(|e| KreuzbergError::cache(format!("Failed to create cache directory: {}", e)))?;
91
-
92
- Ok(Self {
93
- cache_dir: cache_dir_path,
94
- cache_type,
95
- max_age_days,
96
- max_cache_size_mb,
97
- min_free_space_mb,
98
- processing_locks: Arc::new(Mutex::new(HashSet::new())),
99
- deleting_files: Arc::new(Mutex::new(HashSet::new())),
100
- write_counter: Arc::new(AtomicUsize::new(0)),
101
- })
102
- }
103
-
104
- fn get_cache_path(&self, cache_key: &str) -> PathBuf {
105
- self.cache_dir.join(format!("{}.msgpack", cache_key))
106
- }
107
-
108
- fn get_metadata_path(&self, cache_key: &str) -> PathBuf {
109
- self.cache_dir.join(format!("{}.meta", cache_key))
110
- }
111
-
112
- fn is_valid(&self, cache_path: &Path, source_file: Option<&str>) -> bool {
113
- if !cache_path.exists() {
114
- return false;
115
- }
116
-
117
- if let Ok(metadata) = fs::metadata(cache_path)
118
- && let Ok(modified) = metadata.modified()
119
- && let Ok(elapsed) = SystemTime::now().duration_since(modified)
120
- {
121
- let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
122
- if age_days > self.max_age_days {
123
- return false;
124
- }
125
- }
126
-
127
- if let Some(source_path) = source_file {
128
- let Some(file_stem) = cache_path.file_stem().and_then(|s| s.to_str()) else {
129
- return false;
130
- };
131
- let meta_path = self.get_metadata_path(file_stem);
132
-
133
- if meta_path.exists() {
134
- if let Ok(meta_metadata) = fs::metadata(&meta_path)
135
- && meta_metadata.len() == 16
136
- && let Ok(cached_meta_bytes) = fs::read(&meta_path)
137
- {
138
- let cached_size = u64::from_le_bytes([
139
- cached_meta_bytes[0],
140
- cached_meta_bytes[1],
141
- cached_meta_bytes[2],
142
- cached_meta_bytes[3],
143
- cached_meta_bytes[4],
144
- cached_meta_bytes[5],
145
- cached_meta_bytes[6],
146
- cached_meta_bytes[7],
147
- ]);
148
- let cached_mtime = u64::from_le_bytes([
149
- cached_meta_bytes[8],
150
- cached_meta_bytes[9],
151
- cached_meta_bytes[10],
152
- cached_meta_bytes[11],
153
- cached_meta_bytes[12],
154
- cached_meta_bytes[13],
155
- cached_meta_bytes[14],
156
- cached_meta_bytes[15],
157
- ]);
158
-
159
- if let Ok(source_metadata) = fs::metadata(source_path) {
160
- let current_size = source_metadata.len();
161
- let Some(current_mtime) = source_metadata
162
- .modified()
163
- .ok()
164
- .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
165
- .map(|d| d.as_secs())
166
- else {
167
- return false;
168
- };
169
-
170
- return cached_size == current_size && cached_mtime == current_mtime;
171
- }
172
- }
173
- return false;
174
- }
175
- }
176
-
177
- true
178
- }
179
-
180
- fn save_metadata(&self, cache_key: &str, source_file: Option<&str>) {
181
- if let Some(source_path) = source_file
182
- && let Ok(metadata) = fs::metadata(source_path)
183
- {
184
- let size = metadata.len();
185
- let Some(mtime) = metadata
186
- .modified()
187
- .ok()
188
- .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
189
- .map(|d| d.as_secs())
190
- else {
191
- return;
192
- };
193
-
194
- let mut bytes = Vec::with_capacity(16);
195
- bytes.extend_from_slice(&size.to_le_bytes());
196
- bytes.extend_from_slice(&mtime.to_le_bytes());
197
-
198
- let meta_path = self.get_metadata_path(cache_key);
199
- // Cache metadata write failure - safe to ignore, cache is optional fallback ~keep
200
- let _ = fs::write(meta_path, bytes);
201
- }
202
- }
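For reference, the 16-byte sidecar format that `save_metadata` writes and `is_valid` reads back — the source file's size followed by its mtime, each as a little-endian `u64` — can be sketched as a standalone encode/decode pair (illustration only, not code from the package):

```rust
// Layout: [size: u64 LE][mtime_secs: u64 LE] = 16 bytes.
fn encode_meta(size: u64, mtime_secs: u64) -> [u8; 16] {
    let mut bytes = [0u8; 16];
    bytes[..8].copy_from_slice(&size.to_le_bytes());
    bytes[8..].copy_from_slice(&mtime_secs.to_le_bytes());
    bytes
}

fn decode_meta(bytes: &[u8; 16]) -> (u64, u64) {
    let size = u64::from_le_bytes(bytes[..8].try_into().unwrap());
    let mtime = u64::from_le_bytes(bytes[8..].try_into().unwrap());
    (size, mtime)
}

fn main() {
    let m = encode_meta(1234, 1_700_000_000);
    assert_eq!(decode_meta(&m), (1234, 1_700_000_000));
}
```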
203
-
204
- #[cfg_attr(feature = "otel", tracing::instrument(
205
- skip(self),
206
- fields(
207
- cache.hit = tracing::field::Empty,
208
- cache.key = %cache_key,
209
- )
210
- ))]
211
- pub fn get(&self, cache_key: &str, source_file: Option<&str>) -> Result<Option<Vec<u8>>> {
212
- let cache_path = self.get_cache_path(cache_key);
213
-
214
- {
215
- let deleting = self
216
- .deleting_files
217
- .lock()
218
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
219
- if deleting.contains(&cache_path) {
220
- #[cfg(feature = "otel")]
221
- tracing::Span::current().record("cache.hit", false);
222
- return Ok(None);
223
- }
224
- }
225
-
226
- if !self.is_valid(&cache_path, source_file) {
227
- #[cfg(feature = "otel")]
228
- tracing::Span::current().record("cache.hit", false);
229
- return Ok(None);
230
- }
231
-
232
- match fs::read(&cache_path) {
233
- Ok(content) => {
234
- #[cfg(feature = "otel")]
235
- tracing::Span::current().record("cache.hit", true);
236
- Ok(Some(content))
237
- }
238
- Err(_) => {
239
- // Best-effort cleanup of corrupted cache files ~keep
240
- if let Err(e) = fs::remove_file(&cache_path) {
241
- tracing::debug!("Failed to remove corrupted cache file: {}", e);
242
- }
243
- if let Err(e) = fs::remove_file(self.get_metadata_path(cache_key)) {
244
- tracing::debug!("Failed to remove corrupted metadata file: {}", e);
245
- }
246
- #[cfg(feature = "otel")]
247
- tracing::Span::current().record("cache.hit", false);
248
- Ok(None)
249
- }
250
- }
251
- }
252
-
253
- #[cfg_attr(feature = "otel", tracing::instrument(
254
- skip(self, data),
255
- fields(
256
- cache.key = %cache_key,
257
- cache.size_bytes = data.len(),
258
- )
259
- ))]
260
- pub fn set(&self, cache_key: &str, data: Vec<u8>, source_file: Option<&str>) -> Result<()> {
261
- let cache_path = self.get_cache_path(cache_key);
262
-
263
- fs::write(&cache_path, &data)
264
- .map_err(|e| KreuzbergError::cache(format!("Failed to write cache file: {}", e)))?;
265
-
266
- self.save_metadata(cache_key, source_file);
267
-
268
- let count = self.write_counter.fetch_add(1, Ordering::Relaxed);
269
- if count.is_multiple_of(100)
270
- && let Some(cache_path_str) = self.cache_dir.to_str()
271
- {
272
- // Cache cleanup failure - safe to ignore, cache is optional fallback ~keep
273
- let _ = smart_cleanup_cache(
274
- cache_path_str,
275
- self.max_age_days,
276
- self.max_cache_size_mb,
277
- self.min_free_space_mb,
278
- );
279
- }
280
-
281
- Ok(())
282
- }
283
-
284
- pub fn is_processing(&self, cache_key: &str) -> Result<bool> {
285
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
286
- let locks = self
287
- .processing_locks
288
- .lock()
289
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
290
- Ok(locks.contains(cache_key))
291
- }
292
-
293
- pub fn mark_processing(&self, cache_key: String) -> Result<()> {
294
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
295
- let mut locks = self
296
- .processing_locks
297
- .lock()
298
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
299
- locks.insert(cache_key);
300
- Ok(())
301
- }
302
-
303
- pub fn mark_complete(&self, cache_key: &str) -> Result<()> {
304
- // OSError/RuntimeError must bubble up - system errors need user reports ~keep
305
- let mut locks = self
306
- .processing_locks
307
- .lock()
308
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
309
- locks.remove(cache_key);
310
- Ok(())
311
- }
312
-
313
- /// Mark a file path as being deleted to prevent concurrent reads.
314
- ///
315
- /// # TOCTOU Race Condition
316
- ///
317
- /// There is a Time-Of-Check-To-Time-Of-Use (TOCTOU) race condition between:
318
- /// 1. Iterating directory entries in `clear()` (getting path/metadata)
319
- /// 2. Marking the file for deletion here
320
- /// 3. Actually deleting the file
321
- ///
322
- /// **Race scenario:**
323
- /// - Thread A: Begins iterating in `clear()`, gets path
324
- /// - Thread B: Calls `get()`, checks `deleting_files` (not marked yet), proceeds
325
- /// - Thread A: Calls `mark_for_deletion()` here
326
- /// - Thread A: Deletes file with `fs::remove_file()`
327
- /// - Thread B: Tries to read file, but it's already deleted
328
- ///
329
- /// **Why this is acceptable:**
330
- /// - Cache operations are best-effort optimizations, not critical
331
- /// - `get()` already handles file read failures gracefully (treats as cache miss)
332
- /// - The worst case is a failed read → cache miss → recomputation
333
- /// - No data corruption or invariant violations occur
334
- /// - Alternative (atomic operation) would require complex locking impacting performance
335
- fn mark_for_deletion(&self, path: &Path) -> Result<()> {
336
- let mut deleting = self
337
- .deleting_files
338
- .lock()
339
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
340
- deleting.insert(path.to_path_buf());
341
- Ok(())
342
- }
343
-
344
- /// Remove a file path from the deletion set
345
- fn unmark_deletion(&self, path: &Path) -> Result<()> {
346
- let mut deleting = self
347
- .deleting_files
348
- .lock()
349
- .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
350
- deleting.remove(&path.to_path_buf());
351
- Ok(())
352
- }
353
-
354
- pub fn clear(&self) -> Result<(usize, f64)> {
355
- let dir_path = &self.cache_dir;
356
-
357
- if !dir_path.exists() {
358
- return Ok((0, 0.0));
359
- }
360
-
361
- let mut removed_count = 0;
362
- let mut removed_size = 0.0;
363
-
364
- let read_dir = fs::read_dir(dir_path)
365
- .map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
366
-
367
- for entry in read_dir {
368
- let entry = match entry {
369
- Ok(e) => e,
370
- Err(e) => {
371
- tracing::debug!("Error reading entry: {}", e);
372
- continue;
373
- }
374
- };
375
-
376
- let metadata = match entry.metadata() {
377
- Ok(m) if m.is_file() => m,
378
- _ => continue,
379
- };
380
-
381
- let path = entry.path();
382
- if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
383
- continue;
384
- }
385
-
386
- let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
387
-
388
- // Mark file for deletion to prevent concurrent access ~keep
389
- if let Err(e) = self.mark_for_deletion(&path) {
390
- tracing::debug!("Failed to mark file for deletion: {} (continuing anyway)", e);
391
- }
392
-
393
- match fs::remove_file(&path) {
394
- Ok(_) => {
395
- removed_count += 1;
396
- removed_size += size_mb;
397
- // Unmark after successful deletion ~keep
398
- if let Err(e) = self.unmark_deletion(&path) {
399
- tracing::debug!("Failed to unmark deleted file: {} (non-critical)", e);
400
- }
401
- }
402
- Err(e) => {
403
- tracing::debug!("Failed to remove {:?}: {}", path, e);
404
- // Unmark after failed deletion to allow retries ~keep
405
- if let Err(e) = self.unmark_deletion(&path) {
406
- tracing::debug!("Failed to unmark file after deletion error: {} (non-critical)", e);
407
- }
408
- }
409
- }
410
- }
411
-
412
- Ok((removed_count, removed_size))
413
- }
414
-
415
- pub fn get_stats(&self) -> Result<CacheStats> {
416
- let cache_path_str = self
417
- .cache_dir
418
- .to_str()
419
- .ok_or_else(|| KreuzbergError::validation("Cache directory path contains invalid UTF-8".to_string()))?;
420
- get_cache_metadata(cache_path_str)
421
- }
422
-
423
- pub fn cache_dir(&self) -> &Path {
424
- &self.cache_dir
425
- }
426
-
427
- pub fn cache_type(&self) -> &str {
428
- &self.cache_type
429
- }
430
- }
431
-
432
- /// Generate a deterministic cache key from configuration parameters.
433
- ///
434
- /// # Algorithm
435
- ///
436
- /// Uses ahash (non-cryptographic 64-bit hash) for performance. Cache keys are
437
- /// generated by:
438
- /// 1. Sorting key-value pairs by key (for determinism)
439
- /// 2. Concatenating as "key1=val1&key2=val2&..."
440
- /// 3. Hashing with ahash and formatting as 32-character hex
441
- ///
442
- /// # Collision Probability
443
- ///
444
- /// AHash produces 64-bit hashes, leading to birthday paradox collisions:
445
- /// - **~0.01%** probability at 1 million cache entries
446
- /// - **~1%** probability at 100 million entries
447
- /// - **~50%** probability at 4.3 billion (2^32) entries
448
- ///
449
- /// For context: P(collision) ≈ n^2 / (2 * 2^64) where n = number of entries.
450
- ///
451
- /// # Performance vs Security Trade-off
452
- ///
453
- /// - **ahash**: ~10x faster than SHA256, sufficient for cache keys
454
- /// - **SHA256**: Collision-resistant but overkill for caching
455
- /// - **Practical risk**: Low for typical usage (< 1M entries)
456
- ///
457
- /// # Impact of Collisions
458
- ///
459
- /// If two different configurations hash to the same key:
460
- /// - One configuration reads the other's cached data
461
- /// - Results in incorrect data served from cache
462
- /// - Detected via metadata validation (size/mtime checks)
463
- ///
464
- /// # Recommendations
465
- ///
466
- /// - **< 1M entries**: ahash is safe and fast
467
- /// - **> 100M entries**: Monitor cache size, consider periodic clearing
468
- /// - **Critical data**: If collision risk is unacceptable, add SHA256 option
469
- ///
470
- /// # Example
471
- ///
472
- /// ```rust
473
- /// use kreuzberg::cache::generate_cache_key;
474
- ///
475
- /// let parts = [("format", "pdf"), ("ocr", "true"), ("lang", "en")];
476
- /// let key = generate_cache_key(&parts);
477
- /// assert_eq!(key.len(), 32); // 64-bit hash as hex
478
- /// ```
479
- pub fn generate_cache_key(parts: &[(&str, &str)]) -> String {
480
- if parts.is_empty() {
481
- return "empty".to_string();
482
- }
483
-
484
- let mut sorted_parts: Vec<_> = parts.to_vec();
485
- sorted_parts.sort_by_key(|(k, _)| *k);
486
-
487
- let estimated_size = sorted_parts.iter().map(|(k, v)| k.len() + v.len() + 2).sum::<usize>();
488
- let mut cache_str = String::with_capacity(estimated_size);
489
-
490
- for (i, (key, val)) in sorted_parts.iter().enumerate() {
491
- if i > 0 {
492
- cache_str.push('&');
493
- }
494
- cache_str.push_str(&format!("{}={}", key, val));
495
- }
496
-
497
- let mut hasher = AHasher::default();
498
- cache_str.hash(&mut hasher);
499
- let hash = hasher.finish();
500
-
501
- format!("{:0width$x}", hash, width = CACHE_KEY_HASH_WIDTH)
502
- }
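Two notes on the doc comment above, for orientation. First, plugging n = 2^32 into the quoted formula P(collision) ≈ n² / (2 · 2^64) gives 2^64 / 2^65 = 1/2, which is where the ~50% figure comes from. Second, because the key/value pairs are sorted before hashing, the resulting key is independent of argument order; a minimal usage sketch (assuming the kreuzberg crate is available as a dependency, with the import path taken from the doc example above):

```rust
use kreuzberg::cache::generate_cache_key;

fn main() {
    // Pairs are sorted by key before hashing, so argument order does not matter.
    let a = generate_cache_key(&[("format", "pdf"), ("ocr", "true"), ("lang", "en")]);
    let b = generate_cache_key(&[("lang", "en"), ("ocr", "true"), ("format", "pdf")]);
    assert_eq!(a, b);
    assert_eq!(a.len(), 32); // 64-bit ahash, zero-padded to 32 hex digits
}
```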
503
-
504
- #[allow(unsafe_code)]
505
- pub fn get_available_disk_space(path: &str) -> Result<f64> {
506
- #[cfg(unix)]
507
- {
508
- let path = Path::new(path);
509
- let check_path = if path.exists() {
510
- path
511
- } else if let Some(parent) = path.parent() {
512
- parent
513
- } else {
514
- Path::new("/")
515
- };
516
-
517
- use libc::{statvfs, statvfs as statvfs_struct};
518
- use std::ffi::CString;
519
-
520
- let path_str = check_path
521
- .to_str()
522
- .ok_or_else(|| KreuzbergError::validation("Path contains invalid UTF-8".to_string()))?;
523
- let c_path = CString::new(path_str).map_err(|e| KreuzbergError::validation(format!("Invalid path: {}", e)))?;
524
-
525
- let mut stat: statvfs_struct = unsafe { std::mem::zeroed() };
526
-
527
- let result = unsafe { statvfs(c_path.as_ptr(), &mut stat) };
528
-
529
- if result == 0 {
530
- #[allow(clippy::unnecessary_cast)]
531
- let available_bytes = stat.f_bavail as u64 * stat.f_frsize as u64;
532
- Ok(available_bytes as f64 / (1024.0 * 1024.0))
533
- } else {
534
- tracing::debug!("Failed to get disk stats for {}: errno {}", path_str, result);
535
- Ok(10000.0)
536
- }
537
- }
538
-
539
- #[cfg(not(unix))]
540
- {
541
- let _ = path;
542
- Ok(10000.0)
543
- }
544
- }
545
-
546
- fn scan_cache_directory(cache_dir: &str) -> Result<CacheScanResult> {
547
- let dir_path = Path::new(cache_dir);
548
-
549
- if !dir_path.exists() {
550
- return Ok(CacheScanResult {
551
- stats: CacheStats {
552
- total_files: 0,
553
- total_size_mb: 0.0,
554
- available_space_mb: get_available_disk_space(cache_dir)?,
555
- oldest_file_age_days: 0.0,
556
- newest_file_age_days: 0.0,
557
- },
558
- entries: Vec::new(),
559
- });
560
- }
561
-
562
- let current_time = SystemTime::now()
563
- .duration_since(UNIX_EPOCH)
564
- .unwrap_or_default()
565
- .as_secs() as f64;
566
-
567
- let read_dir =
568
- fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
569
-
570
- let mut total_size = 0u64;
571
- let mut oldest_age = 0.0f64;
572
- let mut newest_age = f64::INFINITY;
573
- let mut entries = Vec::new();
574
-
575
- for entry in read_dir {
576
- let entry = match entry {
577
- Ok(e) => e,
578
- Err(e) => {
579
- tracing::debug!("Error reading cache entry: {}", e);
580
- continue;
581
- }
582
- };
583
-
584
- let metadata = match entry.metadata() {
585
- Ok(m) if m.is_file() => m,
586
- _ => continue,
587
- };
588
-
589
- let path = entry.path();
590
- if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
591
- continue;
592
- }
593
-
594
- let modified = match metadata.modified() {
595
- Ok(m) => m,
596
- Err(e) => {
597
- tracing::debug!("Error getting modification time for {:?}: {}", path, e);
598
- continue;
599
- }
600
- };
601
-
602
- let size = metadata.len();
603
- total_size += size;
604
-
605
- if let Ok(duration) = modified.duration_since(UNIX_EPOCH) {
606
- let age_days = (current_time - duration.as_secs() as f64) / (24.0 * 3600.0);
607
- oldest_age = oldest_age.max(age_days);
608
- newest_age = newest_age.min(age_days);
609
- }
610
-
611
- entries.push(CacheEntry { path, size, modified });
612
- }
613
-
614
- if entries.is_empty() {
615
- oldest_age = 0.0;
616
- newest_age = 0.0;
617
- }
618
-
619
- Ok(CacheScanResult {
620
- stats: CacheStats {
621
- total_files: entries.len(),
622
- total_size_mb: total_size as f64 / (1024.0 * 1024.0),
623
- available_space_mb: get_available_disk_space(cache_dir)?,
624
- oldest_file_age_days: oldest_age,
625
- newest_file_age_days: newest_age,
626
- },
627
- entries,
628
- })
629
- }
630
-
631
- pub fn get_cache_metadata(cache_dir: &str) -> Result<CacheStats> {
632
- let scan_result = scan_cache_directory(cache_dir)?;
633
- Ok(scan_result.stats)
634
- }
635
-
636
- pub fn cleanup_cache(
637
- cache_dir: &str,
638
- max_age_days: f64,
639
- max_size_mb: f64,
640
- target_size_ratio: f64,
641
- ) -> Result<(usize, f64)> {
642
- let scan_result = scan_cache_directory(cache_dir)?;
643
-
644
- if scan_result.entries.is_empty() {
645
- return Ok((0, 0.0));
646
- }
647
-
648
- let current_time = SystemTime::now()
649
- .duration_since(UNIX_EPOCH)
650
- .unwrap_or_default()
651
- .as_secs() as f64;
652
- let max_age_seconds = max_age_days * 24.0 * 3600.0;
653
-
654
- let mut removed_count = 0;
655
- let mut removed_size = 0.0;
656
- let mut remaining_entries = Vec::new();
657
- let mut total_remaining_size = 0u64;
658
-
659
- for entry in scan_result.entries {
660
- if let Ok(age) = entry.modified.duration_since(UNIX_EPOCH) {
661
- let age_seconds = current_time - age.as_secs() as f64;
662
- if age_seconds > max_age_seconds {
663
- match fs::remove_file(&entry.path) {
664
- Ok(_) => {
665
- removed_count += 1;
666
- removed_size += entry.size as f64 / (1024.0 * 1024.0);
667
- }
668
- Err(e) => {
669
- tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
670
- }
671
- }
672
- } else {
673
- total_remaining_size += entry.size;
674
- remaining_entries.push(entry);
675
- }
676
- }
677
- }
678
-
679
- let mut total_size_mb = total_remaining_size as f64 / (1024.0 * 1024.0);
680
-
681
- if total_size_mb > max_size_mb {
682
- remaining_entries.sort_by_key(|e| e.modified);
683
-
684
- let target_size = max_size_mb * target_size_ratio;
685
-
686
- for entry in remaining_entries {
687
- if total_size_mb <= target_size {
688
- break;
689
- }
690
-
691
- match fs::remove_file(&entry.path) {
692
- Ok(_) => {
693
- let size_mb = entry.size as f64 / (1024.0 * 1024.0);
694
- removed_count += 1;
695
- removed_size += size_mb;
696
- total_size_mb -= size_mb;
697
- }
698
- Err(e) => {
699
- tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
700
- }
701
- }
702
- }
703
- }
704
-
705
- Ok((removed_count, removed_size))
706
- }
707
-
708
- pub fn smart_cleanup_cache(
709
- cache_dir: &str,
710
- max_age_days: f64,
711
- max_size_mb: f64,
712
- min_free_space_mb: f64,
713
- ) -> Result<(usize, f64)> {
714
- let stats = get_cache_metadata(cache_dir)?;
715
-
716
- let needs_cleanup = stats.available_space_mb < min_free_space_mb
717
- || stats.total_size_mb > max_size_mb
718
- || stats.oldest_file_age_days > max_age_days;
719
-
720
- if !needs_cleanup {
721
- return Ok((0, 0.0));
722
- }
723
-
724
- let target_ratio = if stats.available_space_mb < min_free_space_mb {
725
- 0.5
726
- } else {
727
- 0.8
728
- };
729
-
730
- cleanup_cache(cache_dir, max_age_days, max_size_mb, target_ratio)
731
- }
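The decision logic in `smart_cleanup_cache` above reduces to a small pure function: clean up when free space is below the floor, total size is over the cap, or the oldest file exceeds the age limit, and shrink more aggressively (to 50% of the cap rather than 80%) when disk space is the trigger. A sketch, with illustrative numbers in `main` (not code from the package):

```rust
struct CacheStatsLite {
    total_size_mb: f64,
    available_space_mb: f64,
    oldest_file_age_days: f64,
}

fn cleanup_target_ratio(
    stats: &CacheStatsLite,
    max_age_days: f64,
    max_size_mb: f64,
    min_free_space_mb: f64,
) -> Option<f64> {
    let needs_cleanup = stats.available_space_mb < min_free_space_mb
        || stats.total_size_mb > max_size_mb
        || stats.oldest_file_age_days > max_age_days;
    if !needs_cleanup {
        return None; // nothing to do
    }
    // Low disk space is treated as more urgent: target 50% of the cap instead of 80%.
    Some(if stats.available_space_mb < min_free_space_mb { 0.5 } else { 0.8 })
}

fn main() {
    let stats = CacheStatsLite {
        total_size_mb: 600.0,
        available_space_mb: 5_000.0,
        oldest_file_age_days: 3.0,
    };
    // Over a 500 MB cap but with ample free space -> clean down toward 80% of the cap.
    assert_eq!(cleanup_target_ratio(&stats, 30.0, 500.0, 1_000.0), Some(0.8));
}
```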
732
-
733
- pub fn filter_old_cache_entries(cache_times: &[f64], current_time: f64, max_age_seconds: f64) -> Vec<usize> {
734
- cache_times
735
- .iter()
736
- .enumerate()
737
- .filter_map(|(idx, &time)| {
738
- if current_time - time > max_age_seconds {
739
- Some(idx)
740
- } else {
741
- None
742
- }
743
- })
744
- .collect()
745
- }
746
-
747
- pub fn sort_cache_by_access_time(mut entries: Vec<(String, f64)>) -> Vec<String> {
748
- entries.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));
749
- entries.into_iter().map(|(key, _)| key).collect()
750
- }
751
-
752
- pub fn fast_hash(data: &[u8]) -> u64 {
753
- let mut hasher = AHasher::default();
754
- data.hash(&mut hasher);
755
- hasher.finish()
756
- }
757
-
758
- pub fn validate_cache_key(key: &str) -> bool {
759
- key.len() == 32 && key.chars().all(|c| c.is_ascii_hexdigit())
760
- }
761
-
762
- pub fn is_cache_valid(cache_path: &str, max_age_days: f64) -> bool {
763
- let path = Path::new(cache_path);
764
-
765
- if !path.exists() {
766
- return false;
767
- }
768
-
769
- match fs::metadata(path) {
770
- Ok(metadata) => match metadata.modified() {
771
- Ok(modified) => match SystemTime::now().duration_since(modified) {
772
- Ok(elapsed) => {
773
- let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
774
- age_days <= max_age_days
775
- }
776
- Err(_) => false,
777
- },
778
- Err(_) => false,
779
- },
780
- Err(_) => false,
781
- }
782
- }
783
-
784
- pub fn clear_cache_directory(cache_dir: &str) -> Result<(usize, f64)> {
785
- let dir_path = Path::new(cache_dir);
786
-
787
- if !dir_path.exists() {
788
- return Ok((0, 0.0));
789
- }
790
-
791
- let mut removed_count = 0;
792
- let mut removed_size = 0.0;
793
-
794
- let read_dir =
795
- fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
796
-
797
- for entry in read_dir {
798
- let entry = match entry {
799
- Ok(e) => e,
800
- Err(e) => {
801
- tracing::debug!("Error reading entry: {}", e);
802
- continue;
803
- }
804
- };
805
-
806
- let metadata = match entry.metadata() {
807
- Ok(m) if m.is_file() => m,
808
- _ => continue,
809
- };
810
-
811
- let path = entry.path();
812
- if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
813
- continue;
814
- }
815
-
816
- let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
817
- match fs::remove_file(&path) {
818
- Ok(_) => {
819
- removed_count += 1;
820
- removed_size += size_mb;
821
- }
822
- Err(e) => {
823
- tracing::debug!("Failed to remove {:?}: {}", path, e);
824
- }
825
- }
826
- }
827
-
828
- Ok((removed_count, removed_size))
829
- }
830
-
831
- pub fn batch_cleanup_caches(
832
- cache_dirs: &[&str],
833
- max_age_days: f64,
834
- max_size_mb: f64,
835
- min_free_space_mb: f64,
836
- ) -> Result<Vec<(usize, f64)>> {
837
- cache_dirs
838
- .iter()
839
- .map(|dir| smart_cleanup_cache(dir, max_age_days, max_size_mb, min_free_space_mb))
840
- .collect()
841
- }
842
-
843
- #[cfg(test)]
844
- mod tests {
845
- use super::*;
846
- use std::fs::File;
847
- use tempfile::tempdir;
848
-
849
- #[test]
850
- fn test_generate_cache_key_empty() {
851
- let result = generate_cache_key(&[]);
852
- assert_eq!(result, "empty");
853
- }
854
-
855
- #[test]
856
- fn test_generate_cache_key_consistent() {
857
- let parts = [("key1", "value1"), ("key2", "value2")];
858
- let key1 = generate_cache_key(&parts);
859
- let key2 = generate_cache_key(&parts);
860
- assert_eq!(key1, key2);
861
- assert_eq!(key1.len(), 32);
862
- }
863
-
864
- #[test]
865
- fn test_validate_cache_key() {
866
- assert!(validate_cache_key("0123456789abcdef0123456789abcdef"));
867
- assert!(!validate_cache_key("invalid_key"));
868
- assert!(!validate_cache_key("0123456789abcdef"));
869
- assert!(!validate_cache_key("0123456789abcdef0123456789abcdef0"));
870
- }
871
-
872
- #[test]
873
- fn test_fast_hash() {
874
- let data1 = b"test data";
875
- let data2 = b"test data";
876
- let data3 = b"different data";
877
-
878
- assert_eq!(fast_hash(data1), fast_hash(data2));
879
- assert_ne!(fast_hash(data1), fast_hash(data3));
880
- }
881
-
882
- #[test]
883
- fn test_filter_old_cache_entries() {
884
- let cache_times = vec![100.0, 200.0, 300.0, 400.0];
885
- let current_time = 500.0;
886
- let max_age = 200.0;
887
-
888
- let old_indices = filter_old_cache_entries(&cache_times, current_time, max_age);
889
- assert_eq!(old_indices, vec![0, 1]);
890
- }
891
-
892
- #[test]
893
- fn test_sort_cache_by_access_time() {
894
- let entries = vec![
895
- ("key3".to_string(), 300.0),
896
- ("key1".to_string(), 100.0),
897
- ("key2".to_string(), 200.0),
898
- ];
899
-
900
- let sorted = sort_cache_by_access_time(entries);
901
- assert_eq!(sorted, vec!["key1", "key2", "key3"]);
902
- }
903
-
904
- #[test]
905
- fn test_sort_cache_with_nan() {
906
- let entries = vec![
907
- ("key1".to_string(), 100.0),
908
- ("key2".to_string(), f64::NAN),
909
- ("key3".to_string(), 200.0),
910
- ];
911
-
912
- let sorted = sort_cache_by_access_time(entries);
913
- assert_eq!(sorted.len(), 3);
914
- }
915
-
916
- #[test]
917
- fn test_cache_metadata() {
918
- let temp_dir = tempdir().unwrap();
919
- let cache_dir = temp_dir.path().to_str().unwrap();
920
-
921
- let file1 = temp_dir.path().join("test1.msgpack");
922
- let file2 = temp_dir.path().join("test2.msgpack");
923
- File::create(&file1).unwrap();
924
- File::create(&file2).unwrap();
925
-
926
- let stats = get_cache_metadata(cache_dir).unwrap();
927
- assert_eq!(stats.total_files, 2);
928
- assert!(stats.available_space_mb > 0.0);
929
- }
930
-
931
- #[test]
932
- fn test_cleanup_cache() {
933
- use std::io::Write;
934
-
935
- let temp_dir = tempdir().unwrap();
936
- let cache_dir = temp_dir.path().to_str().unwrap();
937
-
938
- let file1 = temp_dir.path().join("old.msgpack");
939
- let mut f = File::create(&file1).unwrap();
940
- f.write_all(b"test data for cleanup").unwrap();
941
- drop(f);
942
-
943
- let (removed_count, _) = cleanup_cache(cache_dir, 1000.0, 0.000001, 0.8).unwrap();
944
- assert_eq!(removed_count, 1);
945
- assert!(!file1.exists());
946
- }
947
-
948
- #[test]
949
- fn test_is_cache_valid() {
950
- let temp_dir = tempdir().unwrap();
951
- let file_path = temp_dir.path().join("test.msgpack");
952
- File::create(&file_path).unwrap();
953
-
954
- let path_str = file_path.to_str().unwrap();
955
-
956
- assert!(is_cache_valid(path_str, 1.0));
957
-
958
- assert!(!is_cache_valid("/nonexistent/path", 1.0));
959
- }
960
-
961
- #[test]
962
- fn test_generic_cache_new() {
963
- let temp_dir = tempdir().unwrap();
964
- let cache = GenericCache::new(
965
- "test".to_string(),
966
- Some(temp_dir.path().to_str().unwrap().to_string()),
967
- 30.0,
968
- 500.0,
969
- 1000.0,
970
- )
971
- .unwrap();
972
-
973
- assert_eq!(cache.cache_type, "test");
974
- assert!(cache.cache_dir.exists());
975
- }
976
-
977
- #[test]
978
- fn test_generic_cache_get_set() {
979
- let temp_dir = tempdir().unwrap();
980
- let cache = GenericCache::new(
981
- "test".to_string(),
982
- Some(temp_dir.path().to_str().unwrap().to_string()),
983
- 30.0,
984
- 500.0,
985
- 1000.0,
986
- )
987
- .unwrap();
988
-
989
- let cache_key = "test_key";
990
- let data = b"test data".to_vec();
991
-
992
- cache.set(cache_key, data.clone(), None).unwrap();
993
-
994
- let result = cache.get(cache_key, None).unwrap();
995
- assert_eq!(result, Some(data));
996
- }
997
-
998
- #[test]
999
- fn test_generic_cache_get_miss() {
1000
- let temp_dir = tempdir().unwrap();
1001
- let cache = GenericCache::new(
1002
- "test".to_string(),
1003
- Some(temp_dir.path().to_str().unwrap().to_string()),
1004
- 30.0,
1005
- 500.0,
1006
- 1000.0,
1007
- )
1008
- .unwrap();
1009
-
1010
- let result = cache.get("nonexistent", None).unwrap();
1011
- assert_eq!(result, None);
1012
- }
1013
-
1014
- #[test]
1015
- fn test_generic_cache_source_file_invalidation() {
1016
- use std::io::Write;
1017
- use std::thread::sleep;
1018
- use std::time::Duration;
1019
-
1020
- let temp_dir = tempdir().unwrap();
1021
- let cache = GenericCache::new(
1022
- "test".to_string(),
1023
- Some(temp_dir.path().to_str().unwrap().to_string()),
1024
- 30.0,
1025
- 500.0,
1026
- 1000.0,
1027
- )
1028
- .unwrap();
1029
-
1030
- let source_file = temp_dir.path().join("source.txt");
1031
- let mut f = File::create(&source_file).unwrap();
1032
- f.write_all(b"original content").unwrap();
1033
- drop(f);
1034
-
1035
- let cache_key = "test_key";
1036
- let data = b"cached data".to_vec();
1037
-
1038
- cache
1039
- .set(cache_key, data.clone(), Some(source_file.to_str().unwrap()))
1040
- .unwrap();
1041
-
1042
- let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1043
- assert_eq!(result, Some(data.clone()));
1044
-
1045
- sleep(Duration::from_millis(10));
1046
- let mut f = fs::OpenOptions::new()
1047
- .write(true)
1048
- .truncate(true)
1049
- .open(&source_file)
1050
- .unwrap();
1051
- f.write_all(b"modified content with different size").unwrap();
1052
- drop(f);
1053
-
1054
- let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1055
- assert_eq!(result, None);
1056
- }
1057
-
1058
- #[test]
1059
- fn test_generic_cache_processing_locks() {
1060
- let temp_dir = tempdir().unwrap();
1061
- let cache = GenericCache::new(
1062
- "test".to_string(),
1063
- Some(temp_dir.path().to_str().unwrap().to_string()),
1064
- 30.0,
1065
- 500.0,
1066
- 1000.0,
1067
- )
1068
- .unwrap();
1069
-
1070
- let cache_key = "test_key";
1071
-
1072
- assert!(!cache.is_processing(cache_key).unwrap());
1073
-
1074
- cache.mark_processing(cache_key.to_string()).unwrap();
1075
- assert!(cache.is_processing(cache_key).unwrap());
1076
-
1077
- cache.mark_complete(cache_key).unwrap();
1078
- assert!(!cache.is_processing(cache_key).unwrap());
1079
- }
1080
-
1081
- #[test]
1082
- fn test_generic_cache_clear() {
1083
- let temp_dir = tempdir().unwrap();
1084
- let cache = GenericCache::new(
1085
- "test".to_string(),
1086
- Some(temp_dir.path().to_str().unwrap().to_string()),
1087
- 30.0,
1088
- 500.0,
1089
- 1000.0,
1090
- )
1091
- .unwrap();
1092
-
1093
- cache.set("key1", b"data1".to_vec(), None).unwrap();
1094
- cache.set("key2", b"data2".to_vec(), None).unwrap();
1095
-
1096
- let (removed, _freed) = cache.clear().unwrap();
1097
- assert_eq!(removed, 2);
1098
-
1099
- assert_eq!(cache.get("key1", None).unwrap(), None);
1100
- assert_eq!(cache.get("key2", None).unwrap(), None);
1101
- }
1102
-
1103
- #[test]
1104
- fn test_generic_cache_stats() {
1105
- let temp_dir = tempdir().unwrap();
1106
- let cache = GenericCache::new(
1107
- "test".to_string(),
1108
- Some(temp_dir.path().to_str().unwrap().to_string()),
1109
- 30.0,
1110
- 500.0,
1111
- 1000.0,
1112
- )
1113
- .unwrap();
1114
-
1115
- cache.set("key1", b"test data 1".to_vec(), None).unwrap();
1116
- cache.set("key2", b"test data 2".to_vec(), None).unwrap();
1117
-
1118
- let stats = cache.get_stats().unwrap();
1119
- assert_eq!(stats.total_files, 2);
1120
- assert!(stats.total_size_mb > 0.0);
1121
- assert!(stats.available_space_mb > 0.0);
1122
- }
1123
-
1124
- #[test]
1125
- fn test_generic_cache_expired_entry() {
1126
- use std::io::Write;
1127
-
1128
- let temp_dir = tempdir().unwrap();
1129
- let cache = GenericCache::new(
1130
- "test".to_string(),
1131
- Some(temp_dir.path().to_str().unwrap().to_string()),
1132
- 0.000001,
1133
- 500.0,
1134
- 1000.0,
1135
- )
1136
- .unwrap();
1137
-
1138
- let cache_key = "test_key";
1139
-
1140
- let cache_path = cache.cache_dir.join(format!("{}.msgpack", cache_key));
1141
- let mut f = File::create(&cache_path).unwrap();
1142
- f.write_all(b"test data").unwrap();
1143
- drop(f);
1144
-
1145
- let old_time = SystemTime::now() - std::time::Duration::from_secs(60);
1146
- filetime::set_file_mtime(&cache_path, filetime::FileTime::from_system_time(old_time)).unwrap();
1147
-
1148
- let result = cache.get(cache_key, None).unwrap();
1149
- assert_eq!(result, None);
1150
- }
1151
-
1152
- #[test]
1153
- fn test_generic_cache_properties() {
1154
- let temp_dir = tempdir().unwrap();
1155
- let cache = GenericCache::new(
1156
- "test".to_string(),
1157
- Some(temp_dir.path().to_str().unwrap().to_string()),
1158
- 30.0,
1159
- 500.0,
1160
- 1000.0,
1161
- )
1162
- .unwrap();
1163
-
1164
- assert_eq!(cache.cache_type(), "test");
1165
- assert!(cache.cache_dir().to_string_lossy().contains("test"));
1166
- }
1167
- }
1
+ //! Generic cache implementation with lock poisoning recovery.
2
+ //!
3
+ //! # Lock Poisoning Handling
4
+ //!
5
+ //! This module uses `Arc<Mutex<T>>` for thread-safe state management and implements
6
+ //! explicit lock poisoning recovery throughout all public methods:
7
+ //!
8
+ //! **What is lock poisoning?**
9
+ //! - When a thread panics while holding a Mutex, the lock becomes "poisoned"
10
+ //! - Rust marks the Mutex to indicate data may be in an inconsistent state
11
+ //! - Subsequent lock attempts return `Err(PoisonError)` instead of acquiring the lock
12
+ //!
13
+ //! **Recovery strategy:**
14
+ //! - All `.lock()` calls use `.map_err()` to convert `PoisonError` into `KreuzbergError::LockPoisoned`
15
+ //! - The error propagates to callers via `Result` returns (never `.unwrap()` on locks)
16
+ //! - Provides clear error messages indicating which mutex is poisoned
17
+ //! - Follows CLAUDE.md requirement: "Lock poisoning must be handled - never `.unwrap()` on Mutex/RwLock"
18
+ //!
19
+ //! **Affected state:**
20
+ //! - `processing_locks`: Tracks cache keys currently being processed (3 lock sites)
21
+ //! - `deleting_files`: Prevents read-during-delete race conditions (3 lock sites)
22
+ //!
23
+ //! This approach ensures that lock poisoning (rare in practice) is surfaced to users
24
+ //! rather than causing panics, maintaining system stability during concurrent operations.
25
+
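The recovery pattern described above is easiest to see in isolation. The sketch below is illustrative only: `DemoError` stands in for `KreuzbergError::LockPoisoned`, and the function mirrors the shape of `is_processing` further down in this file.

```rust
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use std::thread;

#[derive(Debug)]
struct DemoError(String);

// Mirrors the pattern used in this module: never unwrap the guard, map the
// PoisonError into a typed error and let the caller decide what to do.
fn is_processing(locks: &Arc<Mutex<HashSet<String>>>, key: &str) -> Result<bool, DemoError> {
    let guard = locks
        .lock()
        .map_err(|e| DemoError(format!("processing locks mutex poisoned: {}", e)))?;
    Ok(guard.contains(key))
}

fn main() {
    let locks = Arc::new(Mutex::new(HashSet::new()));
    assert_eq!(is_processing(&locks, "k").ok(), Some(false));

    // Poison the mutex by panicking while the lock is held
    // (the panic message printed to stderr is expected).
    let poisoner = Arc::clone(&locks);
    let _ = thread::spawn(move || {
        let _guard = poisoner.lock().unwrap();
        panic!("simulated worker crash");
    })
    .join();

    // Subsequent calls now surface an error instead of panicking.
    match is_processing(&locks, "k") {
        Err(DemoError(msg)) => eprintln!("recovered from poisoning: {}", msg),
        Ok(_) => unreachable!("lock should be poisoned"),
    }
}
```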
26
+ use crate::error::{KreuzbergError, Result};
27
+ use ahash::AHasher;
28
+ use serde::{Deserialize, Serialize};
29
+ use std::collections::HashSet;
30
+ use std::fs;
31
+
32
+ /// Cache key hash format width (u64 hash zero-padded to 32 hex digits)
33
+ const CACHE_KEY_HASH_WIDTH: usize = 32;
34
+ use std::hash::{Hash, Hasher};
35
+ use std::path::{Path, PathBuf};
36
+ use std::sync::atomic::{AtomicUsize, Ordering};
37
+ use std::sync::{Arc, Mutex};
38
+ use std::time::{SystemTime, UNIX_EPOCH};
39
+
40
+ #[derive(Debug, Clone, Serialize, Deserialize)]
41
+ pub struct CacheStats {
42
+ pub total_files: usize,
43
+ pub total_size_mb: f64,
44
+ pub available_space_mb: f64,
45
+ pub oldest_file_age_days: f64,
46
+ pub newest_file_age_days: f64,
47
+ }
48
+
49
+ #[derive(Debug, Clone)]
50
+ struct CacheEntry {
51
+ path: PathBuf,
52
+ size: u64,
53
+ modified: SystemTime,
54
+ }
55
+
56
+ struct CacheScanResult {
57
+ stats: CacheStats,
58
+ entries: Vec<CacheEntry>,
59
+ }
60
+
61
+ pub struct GenericCache {
62
+ cache_dir: PathBuf,
63
+ cache_type: String,
64
+ max_age_days: f64,
65
+ max_cache_size_mb: f64,
66
+ min_free_space_mb: f64,
67
+ processing_locks: Arc<Mutex<HashSet<String>>>,
68
+ /// Tracks cache keys being deleted to prevent read-during-delete race conditions
69
+ deleting_files: Arc<Mutex<HashSet<PathBuf>>>,
70
+ /// Counter for triggering periodic cleanup (every 100 writes)
71
+ write_counter: Arc<AtomicUsize>,
72
+ }
73
+
74
+ impl GenericCache {
75
+ pub fn new(
76
+ cache_type: String,
77
+ cache_dir: Option<String>,
78
+ max_age_days: f64,
79
+ max_cache_size_mb: f64,
80
+ min_free_space_mb: f64,
81
+ ) -> Result<Self> {
82
+ let cache_dir_path = if let Some(dir) = cache_dir {
83
+ PathBuf::from(dir).join(&cache_type)
84
+ } else {
85
+ // OSError/RuntimeError must bubble up - system errors need user reports ~keep
86
+ std::env::current_dir()?.join(".kreuzberg").join(&cache_type)
87
+ };
88
+
89
+ fs::create_dir_all(&cache_dir_path)
90
+ .map_err(|e| KreuzbergError::cache(format!("Failed to create cache directory: {}", e)))?;
91
+
92
+ Ok(Self {
93
+ cache_dir: cache_dir_path,
94
+ cache_type,
95
+ max_age_days,
96
+ max_cache_size_mb,
97
+ min_free_space_mb,
98
+ processing_locks: Arc::new(Mutex::new(HashSet::new())),
99
+ deleting_files: Arc::new(Mutex::new(HashSet::new())),
100
+ write_counter: Arc::new(AtomicUsize::new(0)),
101
+ })
102
+ }
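For orientation, the cache root resolves to `<cache_dir>/<cache_type>`, falling back to `./.kreuzberg/<cache_type>` when no directory is given. A minimal usage sketch, assuming the `kreuzberg::cache` module path used in this file's own doc example; the `/tmp/kb-cache` directory is illustrative.

```rust
use kreuzberg::cache::GenericCache; // module path assumed from the crate's doc example

fn main() {
    let cache = GenericCache::new(
        "ocr".to_string(),                 // cache_type → subdirectory name
        Some("/tmp/kb-cache".to_string()), // None falls back to ./.kreuzberg/ocr
        30.0,                              // max_age_days
        500.0,                             // max_cache_size_mb
        1000.0,                            // min_free_space_mb
    )
    .expect("cache directory should be creatable");

    assert!(cache.cache_dir().ends_with("ocr"));
    assert_eq!(cache.cache_type(), "ocr");
}
```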
103
+
104
+ fn get_cache_path(&self, cache_key: &str) -> PathBuf {
105
+ self.cache_dir.join(format!("{}.msgpack", cache_key))
106
+ }
107
+
108
+ fn get_metadata_path(&self, cache_key: &str) -> PathBuf {
109
+ self.cache_dir.join(format!("{}.meta", cache_key))
110
+ }
111
+
112
+ fn is_valid(&self, cache_path: &Path, source_file: Option<&str>) -> bool {
113
+ if !cache_path.exists() {
114
+ return false;
115
+ }
116
+
117
+ if let Ok(metadata) = fs::metadata(cache_path)
118
+ && let Ok(modified) = metadata.modified()
119
+ && let Ok(elapsed) = SystemTime::now().duration_since(modified)
120
+ {
121
+ let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
122
+ if age_days > self.max_age_days {
123
+ return false;
124
+ }
125
+ }
126
+
127
+ if let Some(source_path) = source_file {
128
+ let Some(file_stem) = cache_path.file_stem().and_then(|s| s.to_str()) else {
129
+ return false;
130
+ };
131
+ let meta_path = self.get_metadata_path(file_stem);
132
+
133
+ if meta_path.exists() {
134
+ if let Ok(meta_metadata) = fs::metadata(&meta_path)
135
+ && meta_metadata.len() == 16
136
+ && let Ok(cached_meta_bytes) = fs::read(&meta_path)
137
+ {
138
+ let cached_size = u64::from_le_bytes([
139
+ cached_meta_bytes[0],
140
+ cached_meta_bytes[1],
141
+ cached_meta_bytes[2],
142
+ cached_meta_bytes[3],
143
+ cached_meta_bytes[4],
144
+ cached_meta_bytes[5],
145
+ cached_meta_bytes[6],
146
+ cached_meta_bytes[7],
147
+ ]);
148
+ let cached_mtime = u64::from_le_bytes([
149
+ cached_meta_bytes[8],
150
+ cached_meta_bytes[9],
151
+ cached_meta_bytes[10],
152
+ cached_meta_bytes[11],
153
+ cached_meta_bytes[12],
154
+ cached_meta_bytes[13],
155
+ cached_meta_bytes[14],
156
+ cached_meta_bytes[15],
157
+ ]);
158
+
159
+ if let Ok(source_metadata) = fs::metadata(source_path) {
160
+ let current_size = source_metadata.len();
161
+ let Some(current_mtime) = source_metadata
162
+ .modified()
163
+ .ok()
164
+ .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
165
+ .map(|d| d.as_secs())
166
+ else {
167
+ return false;
168
+ };
169
+
170
+ return cached_size == current_size && cached_mtime == current_mtime;
171
+ }
172
+ }
173
+ return false;
174
+ }
175
+ }
176
+
177
+ true
178
+ }
179
+
180
+ fn save_metadata(&self, cache_key: &str, source_file: Option<&str>) {
181
+ if let Some(source_path) = source_file
182
+ && let Ok(metadata) = fs::metadata(source_path)
183
+ {
184
+ let size = metadata.len();
185
+ let Some(mtime) = metadata
186
+ .modified()
187
+ .ok()
188
+ .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
189
+ .map(|d| d.as_secs())
190
+ else {
191
+ return;
192
+ };
193
+
194
+ let mut bytes = Vec::with_capacity(16);
195
+ bytes.extend_from_slice(&size.to_le_bytes());
196
+ bytes.extend_from_slice(&mtime.to_le_bytes());
197
+
198
+ let meta_path = self.get_metadata_path(cache_key);
199
+ // Cache metadata write failure - safe to ignore, cache is optional fallback ~keep
200
+ let _ = fs::write(meta_path, bytes);
201
+ }
202
+ }
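The `.meta` sidecar written here is a fixed 16-byte record: the source file's size as a little-endian `u64`, followed by its mtime in whole seconds since `UNIX_EPOCH`, also little-endian; this is the layout `is_valid` decodes above. A standalone sketch of that format, with hypothetical helper names:

```rust
// bytes 0..8  = source file size  (u64, little-endian)
// bytes 8..16 = source file mtime (u64 seconds since UNIX_EPOCH, little-endian)
fn encode_meta(size: u64, mtime_secs: u64) -> [u8; 16] {
    let mut buf = [0u8; 16];
    buf[..8].copy_from_slice(&size.to_le_bytes());
    buf[8..].copy_from_slice(&mtime_secs.to_le_bytes());
    buf
}

fn decode_meta(buf: &[u8; 16]) -> (u64, u64) {
    let size = u64::from_le_bytes(buf[..8].try_into().unwrap());
    let mtime = u64::from_le_bytes(buf[8..].try_into().unwrap());
    (size, mtime)
}

fn main() {
    let encoded = encode_meta(1_234, 1_700_000_000);
    assert_eq!(decode_meta(&encoded), (1_234, 1_700_000_000));
}
```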
203
+
204
+ #[cfg_attr(feature = "otel", tracing::instrument(
205
+ skip(self),
206
+ fields(
207
+ cache.hit = tracing::field::Empty,
208
+ cache.key = %cache_key,
209
+ )
210
+ ))]
211
+ pub fn get(&self, cache_key: &str, source_file: Option<&str>) -> Result<Option<Vec<u8>>> {
212
+ let cache_path = self.get_cache_path(cache_key);
213
+
214
+ {
215
+ let deleting = self
216
+ .deleting_files
217
+ .lock()
218
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
219
+ if deleting.contains(&cache_path) {
220
+ #[cfg(feature = "otel")]
221
+ tracing::Span::current().record("cache.hit", false);
222
+ return Ok(None);
223
+ }
224
+ }
225
+
226
+ if !self.is_valid(&cache_path, source_file) {
227
+ #[cfg(feature = "otel")]
228
+ tracing::Span::current().record("cache.hit", false);
229
+ return Ok(None);
230
+ }
231
+
232
+ match fs::read(&cache_path) {
233
+ Ok(content) => {
234
+ #[cfg(feature = "otel")]
235
+ tracing::Span::current().record("cache.hit", true);
236
+ Ok(Some(content))
237
+ }
238
+ Err(_) => {
239
+ // Best-effort cleanup of corrupted cache files ~keep
240
+ if let Err(e) = fs::remove_file(&cache_path) {
241
+ tracing::debug!("Failed to remove corrupted cache file: {}", e);
242
+ }
243
+ if let Err(e) = fs::remove_file(self.get_metadata_path(cache_key)) {
244
+ tracing::debug!("Failed to remove corrupted metadata file: {}", e);
245
+ }
246
+ #[cfg(feature = "otel")]
247
+ tracing::Span::current().record("cache.hit", false);
248
+ Ok(None)
249
+ }
250
+ }
251
+ }
252
+
253
+ #[cfg_attr(feature = "otel", tracing::instrument(
254
+ skip(self, data),
255
+ fields(
256
+ cache.key = %cache_key,
257
+ cache.size_bytes = data.len(),
258
+ )
259
+ ))]
260
+ pub fn set(&self, cache_key: &str, data: Vec<u8>, source_file: Option<&str>) -> Result<()> {
261
+ let cache_path = self.get_cache_path(cache_key);
262
+
263
+ fs::write(&cache_path, &data)
264
+ .map_err(|e| KreuzbergError::cache(format!("Failed to write cache file: {}", e)))?;
265
+
266
+ self.save_metadata(cache_key, source_file);
267
+
268
+ let count = self.write_counter.fetch_add(1, Ordering::Relaxed);
269
+ if count.is_multiple_of(100)
270
+ && let Some(cache_path_str) = self.cache_dir.to_str()
271
+ {
272
+ // Cache cleanup failure - safe to ignore, cache is optional fallback ~keep
273
+ let _ = smart_cleanup_cache(
274
+ cache_path_str,
275
+ self.max_age_days,
276
+ self.max_cache_size_mb,
277
+ self.min_free_space_mb,
278
+ );
279
+ }
280
+
281
+ Ok(())
282
+ }
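Note the cleanup trigger above: `fetch_add` returns the previous counter value, so `smart_cleanup_cache` runs on the very first write and then every 100th write thereafter. A behavioral sketch of just that trigger, using only the standard library:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let write_counter = AtomicUsize::new(0);
    let mut cleanups = 0;

    for _ in 0..250 {
        // fetch_add returns the *previous* value, so the check fires on
        // writes 1, 101, 201, ...
        let count = write_counter.fetch_add(1, Ordering::Relaxed);
        if count % 100 == 0 {
            cleanups += 1; // in `set` this is where smart_cleanup_cache runs
        }
    }
    assert_eq!(cleanups, 3);
}
```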
283
+
284
+ pub fn is_processing(&self, cache_key: &str) -> Result<bool> {
285
+ // Lock poisoning must be handled - never `.unwrap()` on the mutex ~keep
286
+ let locks = self
287
+ .processing_locks
288
+ .lock()
289
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
290
+ Ok(locks.contains(cache_key))
291
+ }
292
+
293
+ pub fn mark_processing(&self, cache_key: String) -> Result<()> {
294
+ // Lock poisoning must be handled - never `.unwrap()` on the mutex ~keep
295
+ let mut locks = self
296
+ .processing_locks
297
+ .lock()
298
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
299
+ locks.insert(cache_key);
300
+ Ok(())
301
+ }
302
+
303
+ pub fn mark_complete(&self, cache_key: &str) -> Result<()> {
304
+ // Lock poisoning must be handled - never `.unwrap()` on the mutex ~keep
305
+ let mut locks = self
306
+ .processing_locks
307
+ .lock()
308
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Processing locks mutex poisoned: {}", e)))?;
309
+ locks.remove(cache_key);
310
+ Ok(())
311
+ }
312
+
313
+ /// Mark a file path as being deleted to prevent concurrent reads.
314
+ ///
315
+ /// # TOCTOU Race Condition
316
+ ///
317
+ /// There is a Time-Of-Check-To-Time-Of-Use (TOCTOU) race condition between:
318
+ /// 1. Iterating directory entries in `clear()` (getting path/metadata)
319
+ /// 2. Marking the file for deletion here
320
+ /// 3. Actually deleting the file
321
+ ///
322
+ /// **Race scenario:**
323
+ /// - Thread A: Begins iterating in `clear()`, gets path
324
+ /// - Thread B: Calls `get()`, checks `deleting_files` (not marked yet), proceeds
325
+ /// - Thread A: Calls `mark_for_deletion()` here
326
+ /// - Thread A: Deletes file with `fs::remove_file()`
327
+ /// - Thread B: Tries to read file, but it's already deleted
328
+ ///
329
+ /// **Why this is acceptable:**
330
+ /// - Cache operations are best-effort optimizations, not critical
331
+ /// - `get()` already handles file read failures gracefully (treats as cache miss)
332
+ /// - The worst case is a failed read → cache miss → recomputation
333
+ /// - No data corruption or invariant violations occur
334
+ /// - Alternative (atomic operation) would require complex locking impacting performance
335
+ fn mark_for_deletion(&self, path: &Path) -> Result<()> {
336
+ let mut deleting = self
337
+ .deleting_files
338
+ .lock()
339
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
340
+ deleting.insert(path.to_path_buf());
341
+ Ok(())
342
+ }
343
+
344
+ /// Remove a file path from the deletion set
345
+ fn unmark_deletion(&self, path: &Path) -> Result<()> {
346
+ let mut deleting = self
347
+ .deleting_files
348
+ .lock()
349
+ .map_err(|e| KreuzbergError::LockPoisoned(format!("Deleting files mutex poisoned: {}", e)))?;
350
+ deleting.remove(path);
351
+ Ok(())
352
+ }
353
+
354
+ pub fn clear(&self) -> Result<(usize, f64)> {
355
+ let dir_path = &self.cache_dir;
356
+
357
+ if !dir_path.exists() {
358
+ return Ok((0, 0.0));
359
+ }
360
+
361
+ let mut removed_count = 0;
362
+ let mut removed_size = 0.0;
363
+
364
+ let read_dir = fs::read_dir(dir_path)
365
+ .map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
366
+
367
+ for entry in read_dir {
368
+ let entry = match entry {
369
+ Ok(e) => e,
370
+ Err(e) => {
371
+ tracing::debug!("Error reading entry: {}", e);
372
+ continue;
373
+ }
374
+ };
375
+
376
+ let metadata = match entry.metadata() {
377
+ Ok(m) if m.is_file() => m,
378
+ _ => continue,
379
+ };
380
+
381
+ let path = entry.path();
382
+ if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
383
+ continue;
384
+ }
385
+
386
+ let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
387
+
388
+ // Mark file for deletion to prevent concurrent access ~keep
389
+ if let Err(e) = self.mark_for_deletion(&path) {
390
+ tracing::debug!("Failed to mark file for deletion: {} (continuing anyway)", e);
391
+ }
392
+
393
+ match fs::remove_file(&path) {
394
+ Ok(_) => {
395
+ removed_count += 1;
396
+ removed_size += size_mb;
397
+ // Unmark after successful deletion ~keep
398
+ if let Err(e) = self.unmark_deletion(&path) {
399
+ tracing::debug!("Failed to unmark deleted file: {} (non-critical)", e);
400
+ }
401
+ }
402
+ Err(e) => {
403
+ tracing::debug!("Failed to remove {:?}: {}", path, e);
404
+ // Unmark after failed deletion to allow retries ~keep
405
+ if let Err(e) = self.unmark_deletion(&path) {
406
+ tracing::debug!("Failed to unmark file after deletion error: {} (non-critical)", e);
407
+ }
408
+ }
409
+ }
410
+ }
411
+
412
+ Ok((removed_count, removed_size))
413
+ }
414
+
415
+ pub fn get_stats(&self) -> Result<CacheStats> {
416
+ let cache_path_str = self
417
+ .cache_dir
418
+ .to_str()
419
+ .ok_or_else(|| KreuzbergError::validation("Cache directory path contains invalid UTF-8".to_string()))?;
420
+ get_cache_metadata(cache_path_str)
421
+ }
422
+
423
+ pub fn cache_dir(&self) -> &Path {
424
+ &self.cache_dir
425
+ }
426
+
427
+ pub fn cache_type(&self) -> &str {
428
+ &self.cache_type
429
+ }
430
+ }
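Putting the pieces together, a hedged end-to-end sketch of the public API: it assumes the `kreuzberg::cache` module path from the doc example below and pulls in the `tempfile` crate (a dev-dependency of this crate) purely to stay self-contained.

```rust
use kreuzberg::cache::GenericCache;
use std::fs;

fn main() {
    let dir = tempfile::tempdir().expect("temp dir");
    let cache = GenericCache::new(
        "demo".to_string(),
        Some(dir.path().to_str().unwrap().to_string()),
        30.0,   // max_age_days
        500.0,  // max_cache_size_mb
        1000.0, // min_free_space_mb
    )
    .expect("cache init");

    // Tie a cached value to a source file; the entry is invalidated when the
    // source's size or mtime changes (see `is_valid` / `save_metadata`).
    let source = dir.path().join("input.txt");
    fs::write(&source, b"original").unwrap();
    let source_str = source.to_str().unwrap();

    cache.set("doc-key", b"extracted text".to_vec(), Some(source_str)).unwrap();
    assert!(cache.get("doc-key", Some(source_str)).unwrap().is_some());

    // Rewriting the source with a different size invalidates the entry.
    fs::write(&source, b"changed contents, different size").unwrap();
    assert!(cache.get("doc-key", Some(source_str)).unwrap().is_none());

    let stats = cache.get_stats().unwrap();
    assert!(stats.total_files >= 1);
}
```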
431
+
432
+ /// Generate a deterministic cache key from configuration parameters.
433
+ ///
434
+ /// # Algorithm
435
+ ///
436
+ /// Uses ahash (non-cryptographic 64-bit hash) for performance. Cache keys are
437
+ /// generated by:
438
+ /// 1. Sorting key-value pairs by key (for determinism)
439
+ /// 2. Concatenating as "key1=val1&key2=val2&..."
440
+ /// 3. Hashing with ahash and formatting as 32-character hex
441
+ ///
442
+ /// # Collision Probability
443
+ ///
444
+ /// AHash produces 64-bit hashes, leading to birthday paradox collisions:
445
+ /// - **~0.000003%** probability at 1 million cache entries
446
+ /// - **~0.03%** probability at 100 million entries
447
+ /// - **~50%** probability at 4.3 billion (2^32) entries
448
+ ///
449
+ /// For context: P(collision) ≈ n^2 / (2 * 2^64) where n = number of entries.
450
+ ///
451
+ /// # Performance vs Security Trade-off
452
+ ///
453
+ /// - **ahash**: ~10x faster than SHA256, sufficient for cache keys
454
+ /// - **SHA256**: Collision-resistant but overkill for caching
455
+ /// - **Practical risk**: Low for typical usage (< 1M entries)
456
+ ///
457
+ /// # Impact of Collisions
458
+ ///
459
+ /// If two different configurations hash to the same key:
460
+ /// - One configuration reads the other's cached data
461
+ /// - Results in incorrect data served from cache
462
+ /// - Partially mitigated by metadata validation (size/mtime checks) when a source file is attached
463
+ ///
464
+ /// # Recommendations
465
+ ///
466
+ /// - **< 1M entries**: ahash is safe and fast
467
+ /// - **> 100M entries**: Monitor cache size, consider periodic clearing
468
+ /// - **Critical data**: If collision risk is unacceptable, add SHA256 option
469
+ ///
470
+ /// # Example
471
+ ///
472
+ /// ```rust
473
+ /// use kreuzberg::cache::generate_cache_key;
474
+ ///
475
+ /// let parts = [("format", "pdf"), ("ocr", "true"), ("lang", "en")];
476
+ /// let key = generate_cache_key(&parts);
477
+ /// assert_eq!(key.len(), 32); // u64 hash zero-padded to 32 hex chars
478
+ /// ```
479
+ pub fn generate_cache_key(parts: &[(&str, &str)]) -> String {
480
+ if parts.is_empty() {
481
+ return "empty".to_string();
482
+ }
483
+
484
+ let mut sorted_parts: Vec<_> = parts.to_vec();
485
+ sorted_parts.sort_by_key(|(k, _)| *k);
486
+
487
+ let estimated_size = sorted_parts.iter().map(|(k, v)| k.len() + v.len() + 2).sum::<usize>();
488
+ let mut cache_str = String::with_capacity(estimated_size);
489
+
490
+ for (i, (key, val)) in sorted_parts.iter().enumerate() {
491
+ if i > 0 {
492
+ cache_str.push('&');
493
+ }
494
+ cache_str.push_str(&format!("{}={}", key, val));
495
+ }
496
+
497
+ let mut hasher = AHasher::default();
498
+ cache_str.hash(&mut hasher);
499
+ let hash = hasher.finish();
500
+
501
+ format!("{:0width$x}", hash, width = CACHE_KEY_HASH_WIDTH)
502
+ }
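As a quick sanity check on the figures quoted in the doc comment, the birthday bound P ≈ n² / (2 · 2⁶⁴) can be evaluated directly. The helper below is hypothetical, not part of kreuzberg:

```rust
// Birthday-bound collision estimate for 64-bit keys.
fn collision_probability(entries: f64) -> f64 {
    let key_space = 2f64.powi(64);
    (entries * entries) / (2.0 * key_space)
}

fn main() {
    println!("{:.2e}", collision_probability(1e6));          // ≈ 2.7e-8 (~0.000003%)
    println!("{:.2e}", collision_probability(1e8));          // ≈ 2.7e-4 (~0.03%)
    println!("{:.2}", collision_probability(2f64.powi(32))); // ≈ 0.50   (~50%)
}
```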
503
+
504
+ #[allow(unsafe_code)]
505
+ pub fn get_available_disk_space(path: &str) -> Result<f64> {
506
+ #[cfg(unix)]
507
+ {
508
+ let path = Path::new(path);
509
+ let check_path = if path.exists() {
510
+ path
511
+ } else if let Some(parent) = path.parent() {
512
+ parent
513
+ } else {
514
+ Path::new("/")
515
+ };
516
+
517
+ use libc::{statvfs, statvfs as statvfs_struct};
518
+ use std::ffi::CString;
519
+
520
+ let path_str = check_path
521
+ .to_str()
522
+ .ok_or_else(|| KreuzbergError::validation("Path contains invalid UTF-8".to_string()))?;
523
+ let c_path = CString::new(path_str).map_err(|e| KreuzbergError::validation(format!("Invalid path: {}", e)))?;
524
+
525
+ let mut stat: statvfs_struct = unsafe { std::mem::zeroed() };
526
+
527
+ let result = unsafe { statvfs(c_path.as_ptr(), &mut stat) };
528
+
529
+ if result == 0 {
530
+ #[allow(clippy::unnecessary_cast)]
531
+ let available_bytes = stat.f_bavail as u64 * stat.f_frsize as u64;
532
+ Ok(available_bytes as f64 / (1024.0 * 1024.0))
533
+ } else {
534
+ tracing::debug!("Failed to get disk stats for {}: errno {}", path_str, result);
535
+ Ok(10000.0)
536
+ }
537
+ }
538
+
539
+ #[cfg(not(unix))]
540
+ {
541
+ let _ = path;
542
+ Ok(10000.0)
543
+ }
544
+ }
545
+
546
+ fn scan_cache_directory(cache_dir: &str) -> Result<CacheScanResult> {
547
+ let dir_path = Path::new(cache_dir);
548
+
549
+ if !dir_path.exists() {
550
+ return Ok(CacheScanResult {
551
+ stats: CacheStats {
552
+ total_files: 0,
553
+ total_size_mb: 0.0,
554
+ available_space_mb: get_available_disk_space(cache_dir)?,
555
+ oldest_file_age_days: 0.0,
556
+ newest_file_age_days: 0.0,
557
+ },
558
+ entries: Vec::new(),
559
+ });
560
+ }
561
+
562
+ let current_time = SystemTime::now()
563
+ .duration_since(UNIX_EPOCH)
564
+ .unwrap_or_default()
565
+ .as_secs() as f64;
566
+
567
+ let read_dir =
568
+ fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
569
+
570
+ let mut total_size = 0u64;
571
+ let mut oldest_age = 0.0f64;
572
+ let mut newest_age = f64::INFINITY;
573
+ let mut entries = Vec::new();
574
+
575
+ for entry in read_dir {
576
+ let entry = match entry {
577
+ Ok(e) => e,
578
+ Err(e) => {
579
+ tracing::debug!("Error reading cache entry: {}", e);
580
+ continue;
581
+ }
582
+ };
583
+
584
+ let metadata = match entry.metadata() {
585
+ Ok(m) if m.is_file() => m,
586
+ _ => continue,
587
+ };
588
+
589
+ let path = entry.path();
590
+ if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
591
+ continue;
592
+ }
593
+
594
+ let modified = match metadata.modified() {
595
+ Ok(m) => m,
596
+ Err(e) => {
597
+ tracing::debug!("Error getting modification time for {:?}: {}", path, e);
598
+ continue;
599
+ }
600
+ };
601
+
602
+ let size = metadata.len();
603
+ total_size += size;
604
+
605
+ if let Ok(duration) = modified.duration_since(UNIX_EPOCH) {
606
+ let age_days = (current_time - duration.as_secs() as f64) / (24.0 * 3600.0);
607
+ oldest_age = oldest_age.max(age_days);
608
+ newest_age = newest_age.min(age_days);
609
+ }
610
+
611
+ entries.push(CacheEntry { path, size, modified });
612
+ }
613
+
614
+ if entries.is_empty() {
615
+ oldest_age = 0.0;
616
+ newest_age = 0.0;
617
+ }
618
+
619
+ Ok(CacheScanResult {
620
+ stats: CacheStats {
621
+ total_files: entries.len(),
622
+ total_size_mb: total_size as f64 / (1024.0 * 1024.0),
623
+ available_space_mb: get_available_disk_space(cache_dir)?,
624
+ oldest_file_age_days: oldest_age,
625
+ newest_file_age_days: newest_age,
626
+ },
627
+ entries,
628
+ })
629
+ }
630
+
631
+ pub fn get_cache_metadata(cache_dir: &str) -> Result<CacheStats> {
632
+ let scan_result = scan_cache_directory(cache_dir)?;
633
+ Ok(scan_result.stats)
634
+ }
635
+
636
+ pub fn cleanup_cache(
637
+ cache_dir: &str,
638
+ max_age_days: f64,
639
+ max_size_mb: f64,
640
+ target_size_ratio: f64,
641
+ ) -> Result<(usize, f64)> {
642
+ let scan_result = scan_cache_directory(cache_dir)?;
643
+
644
+ if scan_result.entries.is_empty() {
645
+ return Ok((0, 0.0));
646
+ }
647
+
648
+ let current_time = SystemTime::now()
649
+ .duration_since(UNIX_EPOCH)
650
+ .unwrap_or_default()
651
+ .as_secs() as f64;
652
+ let max_age_seconds = max_age_days * 24.0 * 3600.0;
653
+
654
+ let mut removed_count = 0;
655
+ let mut removed_size = 0.0;
656
+ let mut remaining_entries = Vec::new();
657
+ let mut total_remaining_size = 0u64;
658
+
659
+ for entry in scan_result.entries {
660
+ if let Ok(age) = entry.modified.duration_since(UNIX_EPOCH) {
661
+ let age_seconds = current_time - age.as_secs() as f64;
662
+ if age_seconds > max_age_seconds {
663
+ match fs::remove_file(&entry.path) {
664
+ Ok(_) => {
665
+ removed_count += 1;
666
+ removed_size += entry.size as f64 / (1024.0 * 1024.0);
667
+ }
668
+ Err(e) => {
669
+ tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
670
+ }
671
+ }
672
+ } else {
673
+ total_remaining_size += entry.size;
674
+ remaining_entries.push(entry);
675
+ }
676
+ }
677
+ }
678
+
679
+ let mut total_size_mb = total_remaining_size as f64 / (1024.0 * 1024.0);
680
+
681
+ if total_size_mb > max_size_mb {
682
+ remaining_entries.sort_by_key(|e| e.modified);
683
+
684
+ let target_size = max_size_mb * target_size_ratio;
685
+
686
+ for entry in remaining_entries {
687
+ if total_size_mb <= target_size {
688
+ break;
689
+ }
690
+
691
+ match fs::remove_file(&entry.path) {
692
+ Ok(_) => {
693
+ let size_mb = entry.size as f64 / (1024.0 * 1024.0);
694
+ removed_count += 1;
695
+ removed_size += size_mb;
696
+ total_size_mb -= size_mb;
697
+ }
698
+ Err(e) => {
699
+ tracing::debug!("Failed to remove {:?}: {}", entry.path, e);
700
+ }
701
+ }
702
+ }
703
+ }
704
+
705
+ Ok((removed_count, removed_size))
706
+ }
707
+
708
+ pub fn smart_cleanup_cache(
709
+ cache_dir: &str,
710
+ max_age_days: f64,
711
+ max_size_mb: f64,
712
+ min_free_space_mb: f64,
713
+ ) -> Result<(usize, f64)> {
714
+ let stats = get_cache_metadata(cache_dir)?;
715
+
716
+ let needs_cleanup = stats.available_space_mb < min_free_space_mb
717
+ || stats.total_size_mb > max_size_mb
718
+ || stats.oldest_file_age_days > max_age_days;
719
+
720
+ if !needs_cleanup {
721
+ return Ok((0, 0.0));
722
+ }
723
+
724
+ let target_ratio = if stats.available_space_mb < min_free_space_mb {
725
+ 0.5
726
+ } else {
727
+ 0.8
728
+ };
729
+
730
+ cleanup_cache(cache_dir, max_age_days, max_size_mb, target_ratio)
731
+ }
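A small usage sketch of the cleanup entry point (the module path and directory are illustrative): age-expired entries are removed first, and only if the cache is still over `max_size_mb` are the oldest entries evicted, down to 80% of the cap, or 50% when free space is below `min_free_space_mb`.

```rust
use kreuzberg::cache::smart_cleanup_cache; // module path assumed from the doc example above

fn main() {
    // 30-day max age, 500 MB size cap, require at least 1 GB of free disk space.
    if let Ok((removed, freed_mb)) = smart_cleanup_cache("/tmp/kb-cache/ocr", 30.0, 500.0, 1000.0) {
        println!("removed {} entries ({:.1} MB)", removed, freed_mb);
    }
}
```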
732
+
733
+ pub fn filter_old_cache_entries(cache_times: &[f64], current_time: f64, max_age_seconds: f64) -> Vec<usize> {
734
+ cache_times
735
+ .iter()
736
+ .enumerate()
737
+ .filter_map(|(idx, &time)| {
738
+ if current_time - time > max_age_seconds {
739
+ Some(idx)
740
+ } else {
741
+ None
742
+ }
743
+ })
744
+ .collect()
745
+ }
746
+
747
+ pub fn sort_cache_by_access_time(mut entries: Vec<(String, f64)>) -> Vec<String> {
748
+ entries.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));
749
+ entries.into_iter().map(|(key, _)| key).collect()
750
+ }
751
+
752
+ pub fn fast_hash(data: &[u8]) -> u64 {
753
+ let mut hasher = AHasher::default();
754
+ data.hash(&mut hasher);
755
+ hasher.finish()
756
+ }
757
+
758
+ pub fn validate_cache_key(key: &str) -> bool {
759
+ key.len() == 32 && key.chars().all(|c| c.is_ascii_hexdigit())
760
+ }
761
+
762
+ pub fn is_cache_valid(cache_path: &str, max_age_days: f64) -> bool {
763
+ let path = Path::new(cache_path);
764
+
765
+ if !path.exists() {
766
+ return false;
767
+ }
768
+
769
+ match fs::metadata(path) {
770
+ Ok(metadata) => match metadata.modified() {
771
+ Ok(modified) => match SystemTime::now().duration_since(modified) {
772
+ Ok(elapsed) => {
773
+ let age_days = elapsed.as_secs() as f64 / (24.0 * 3600.0);
774
+ age_days <= max_age_days
775
+ }
776
+ Err(_) => false,
777
+ },
778
+ Err(_) => false,
779
+ },
780
+ Err(_) => false,
781
+ }
782
+ }
783
+
784
+ pub fn clear_cache_directory(cache_dir: &str) -> Result<(usize, f64)> {
785
+ let dir_path = Path::new(cache_dir);
786
+
787
+ if !dir_path.exists() {
788
+ return Ok((0, 0.0));
789
+ }
790
+
791
+ let mut removed_count = 0;
792
+ let mut removed_size = 0.0;
793
+
794
+ let read_dir =
795
+ fs::read_dir(dir_path).map_err(|e| KreuzbergError::cache(format!("Failed to read cache directory: {}", e)))?;
796
+
797
+ for entry in read_dir {
798
+ let entry = match entry {
799
+ Ok(e) => e,
800
+ Err(e) => {
801
+ tracing::debug!("Error reading entry: {}", e);
802
+ continue;
803
+ }
804
+ };
805
+
806
+ let metadata = match entry.metadata() {
807
+ Ok(m) if m.is_file() => m,
808
+ _ => continue,
809
+ };
810
+
811
+ let path = entry.path();
812
+ if path.extension().and_then(|s| s.to_str()) != Some("msgpack") {
813
+ continue;
814
+ }
815
+
816
+ let size_mb = metadata.len() as f64 / (1024.0 * 1024.0);
817
+ match fs::remove_file(&path) {
818
+ Ok(_) => {
819
+ removed_count += 1;
820
+ removed_size += size_mb;
821
+ }
822
+ Err(e) => {
823
+ tracing::debug!("Failed to remove {:?}: {}", path, e);
824
+ }
825
+ }
826
+ }
827
+
828
+ Ok((removed_count, removed_size))
829
+ }
830
+
831
+ pub fn batch_cleanup_caches(
832
+ cache_dirs: &[&str],
833
+ max_age_days: f64,
834
+ max_size_mb: f64,
835
+ min_free_space_mb: f64,
836
+ ) -> Result<Vec<(usize, f64)>> {
837
+ cache_dirs
838
+ .iter()
839
+ .map(|dir| smart_cleanup_cache(dir, max_age_days, max_size_mb, min_free_space_mb))
840
+ .collect()
841
+ }
842
+
843
+ #[cfg(test)]
844
+ mod tests {
845
+ use super::*;
846
+ use std::fs::File;
847
+ use tempfile::tempdir;
848
+
849
+ #[test]
850
+ fn test_generate_cache_key_empty() {
851
+ let result = generate_cache_key(&[]);
852
+ assert_eq!(result, "empty");
853
+ }
854
+
855
+ #[test]
856
+ fn test_generate_cache_key_consistent() {
857
+ let parts = [("key1", "value1"), ("key2", "value2")];
858
+ let key1 = generate_cache_key(&parts);
859
+ let key2 = generate_cache_key(&parts);
860
+ assert_eq!(key1, key2);
861
+ assert_eq!(key1.len(), 32);
862
+ }
863
+
864
+ #[test]
865
+ fn test_validate_cache_key() {
866
+ assert!(validate_cache_key("0123456789abcdef0123456789abcdef"));
867
+ assert!(!validate_cache_key("invalid_key"));
868
+ assert!(!validate_cache_key("0123456789abcdef"));
869
+ assert!(!validate_cache_key("0123456789abcdef0123456789abcdef0"));
870
+ }
871
+
872
+ #[test]
873
+ fn test_fast_hash() {
874
+ let data1 = b"test data";
875
+ let data2 = b"test data";
876
+ let data3 = b"different data";
877
+
878
+ assert_eq!(fast_hash(data1), fast_hash(data2));
879
+ assert_ne!(fast_hash(data1), fast_hash(data3));
880
+ }
881
+
882
+ #[test]
883
+ fn test_filter_old_cache_entries() {
884
+ let cache_times = vec![100.0, 200.0, 300.0, 400.0];
885
+ let current_time = 500.0;
886
+ let max_age = 200.0;
887
+
888
+ let old_indices = filter_old_cache_entries(&cache_times, current_time, max_age);
889
+ assert_eq!(old_indices, vec![0, 1]);
890
+ }
891
+
892
+ #[test]
893
+ fn test_sort_cache_by_access_time() {
894
+ let entries = vec![
895
+ ("key3".to_string(), 300.0),
896
+ ("key1".to_string(), 100.0),
897
+ ("key2".to_string(), 200.0),
898
+ ];
899
+
900
+ let sorted = sort_cache_by_access_time(entries);
901
+ assert_eq!(sorted, vec!["key1", "key2", "key3"]);
902
+ }
903
+
904
+ #[test]
905
+ fn test_sort_cache_with_nan() {
906
+ let entries = vec![
907
+ ("key1".to_string(), 100.0),
908
+ ("key2".to_string(), f64::NAN),
909
+ ("key3".to_string(), 200.0),
910
+ ];
911
+
912
+ let sorted = sort_cache_by_access_time(entries);
913
+ assert_eq!(sorted.len(), 3);
914
+ }
915
+
916
+ #[test]
917
+ fn test_cache_metadata() {
918
+ let temp_dir = tempdir().unwrap();
919
+ let cache_dir = temp_dir.path().to_str().unwrap();
920
+
921
+ let file1 = temp_dir.path().join("test1.msgpack");
922
+ let file2 = temp_dir.path().join("test2.msgpack");
923
+ File::create(&file1).unwrap();
924
+ File::create(&file2).unwrap();
925
+
926
+ let stats = get_cache_metadata(cache_dir).unwrap();
927
+ assert_eq!(stats.total_files, 2);
928
+ assert!(stats.available_space_mb > 0.0);
929
+ }
930
+
931
+ #[test]
932
+ fn test_cleanup_cache() {
933
+ use std::io::Write;
934
+
935
+ let temp_dir = tempdir().unwrap();
936
+ let cache_dir = temp_dir.path().to_str().unwrap();
937
+
938
+ let file1 = temp_dir.path().join("old.msgpack");
939
+ let mut f = File::create(&file1).unwrap();
940
+ f.write_all(b"test data for cleanup").unwrap();
941
+ drop(f);
942
+
943
+ let (removed_count, _) = cleanup_cache(cache_dir, 1000.0, 0.000001, 0.8).unwrap();
944
+ assert_eq!(removed_count, 1);
945
+ assert!(!file1.exists());
946
+ }
947
+
948
+ #[test]
949
+ fn test_is_cache_valid() {
950
+ let temp_dir = tempdir().unwrap();
951
+ let file_path = temp_dir.path().join("test.msgpack");
952
+ File::create(&file_path).unwrap();
953
+
954
+ let path_str = file_path.to_str().unwrap();
955
+
956
+ assert!(is_cache_valid(path_str, 1.0));
957
+
958
+ assert!(!is_cache_valid("/nonexistent/path", 1.0));
959
+ }
960
+
961
+ #[test]
962
+ fn test_generic_cache_new() {
963
+ let temp_dir = tempdir().unwrap();
964
+ let cache = GenericCache::new(
965
+ "test".to_string(),
966
+ Some(temp_dir.path().to_str().unwrap().to_string()),
967
+ 30.0,
968
+ 500.0,
969
+ 1000.0,
970
+ )
971
+ .unwrap();
972
+
973
+ assert_eq!(cache.cache_type, "test");
974
+ assert!(cache.cache_dir.exists());
975
+ }
976
+
977
+ #[test]
978
+ fn test_generic_cache_get_set() {
979
+ let temp_dir = tempdir().unwrap();
980
+ let cache = GenericCache::new(
981
+ "test".to_string(),
982
+ Some(temp_dir.path().to_str().unwrap().to_string()),
983
+ 30.0,
984
+ 500.0,
985
+ 1000.0,
986
+ )
987
+ .unwrap();
988
+
989
+ let cache_key = "test_key";
990
+ let data = b"test data".to_vec();
991
+
992
+ cache.set(cache_key, data.clone(), None).unwrap();
993
+
994
+ let result = cache.get(cache_key, None).unwrap();
995
+ assert_eq!(result, Some(data));
996
+ }
997
+
998
+ #[test]
999
+ fn test_generic_cache_get_miss() {
1000
+ let temp_dir = tempdir().unwrap();
1001
+ let cache = GenericCache::new(
1002
+ "test".to_string(),
1003
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1004
+ 30.0,
1005
+ 500.0,
1006
+ 1000.0,
1007
+ )
1008
+ .unwrap();
1009
+
1010
+ let result = cache.get("nonexistent", None).unwrap();
1011
+ assert_eq!(result, None);
1012
+ }
1013
+
1014
+ #[test]
1015
+ fn test_generic_cache_source_file_invalidation() {
1016
+ use std::io::Write;
1017
+ use std::thread::sleep;
1018
+ use std::time::Duration;
1019
+
1020
+ let temp_dir = tempdir().unwrap();
1021
+ let cache = GenericCache::new(
1022
+ "test".to_string(),
1023
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1024
+ 30.0,
1025
+ 500.0,
1026
+ 1000.0,
1027
+ )
1028
+ .unwrap();
1029
+
1030
+ let source_file = temp_dir.path().join("source.txt");
1031
+ let mut f = File::create(&source_file).unwrap();
1032
+ f.write_all(b"original content").unwrap();
1033
+ drop(f);
1034
+
1035
+ let cache_key = "test_key";
1036
+ let data = b"cached data".to_vec();
1037
+
1038
+ cache
1039
+ .set(cache_key, data.clone(), Some(source_file.to_str().unwrap()))
1040
+ .unwrap();
1041
+
1042
+ let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1043
+ assert_eq!(result, Some(data.clone()));
1044
+
1045
+ sleep(Duration::from_millis(10));
1046
+ let mut f = fs::OpenOptions::new()
1047
+ .write(true)
1048
+ .truncate(true)
1049
+ .open(&source_file)
1050
+ .unwrap();
1051
+ f.write_all(b"modified content with different size").unwrap();
1052
+ drop(f);
1053
+
1054
+ let result = cache.get(cache_key, Some(source_file.to_str().unwrap())).unwrap();
1055
+ assert_eq!(result, None);
1056
+ }
1057
+
1058
+ #[test]
1059
+ fn test_generic_cache_processing_locks() {
1060
+ let temp_dir = tempdir().unwrap();
1061
+ let cache = GenericCache::new(
1062
+ "test".to_string(),
1063
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1064
+ 30.0,
1065
+ 500.0,
1066
+ 1000.0,
1067
+ )
1068
+ .unwrap();
1069
+
1070
+ let cache_key = "test_key";
1071
+
1072
+ assert!(!cache.is_processing(cache_key).unwrap());
1073
+
1074
+ cache.mark_processing(cache_key.to_string()).unwrap();
1075
+ assert!(cache.is_processing(cache_key).unwrap());
1076
+
1077
+ cache.mark_complete(cache_key).unwrap();
1078
+ assert!(!cache.is_processing(cache_key).unwrap());
1079
+ }
1080
+
1081
+ #[test]
1082
+ fn test_generic_cache_clear() {
1083
+ let temp_dir = tempdir().unwrap();
1084
+ let cache = GenericCache::new(
1085
+ "test".to_string(),
1086
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1087
+ 30.0,
1088
+ 500.0,
1089
+ 1000.0,
1090
+ )
1091
+ .unwrap();
1092
+
1093
+ cache.set("key1", b"data1".to_vec(), None).unwrap();
1094
+ cache.set("key2", b"data2".to_vec(), None).unwrap();
1095
+
1096
+ let (removed, _freed) = cache.clear().unwrap();
1097
+ assert_eq!(removed, 2);
1098
+
1099
+ assert_eq!(cache.get("key1", None).unwrap(), None);
1100
+ assert_eq!(cache.get("key2", None).unwrap(), None);
1101
+ }
1102
+
1103
+ #[test]
1104
+ fn test_generic_cache_stats() {
1105
+ let temp_dir = tempdir().unwrap();
1106
+ let cache = GenericCache::new(
1107
+ "test".to_string(),
1108
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1109
+ 30.0,
1110
+ 500.0,
1111
+ 1000.0,
1112
+ )
1113
+ .unwrap();
1114
+
1115
+ cache.set("key1", b"test data 1".to_vec(), None).unwrap();
1116
+ cache.set("key2", b"test data 2".to_vec(), None).unwrap();
1117
+
1118
+ let stats = cache.get_stats().unwrap();
1119
+ assert_eq!(stats.total_files, 2);
1120
+ assert!(stats.total_size_mb > 0.0);
1121
+ assert!(stats.available_space_mb > 0.0);
1122
+ }
1123
+
1124
+ #[test]
1125
+ fn test_generic_cache_expired_entry() {
1126
+ use std::io::Write;
1127
+
1128
+ let temp_dir = tempdir().unwrap();
1129
+ let cache = GenericCache::new(
1130
+ "test".to_string(),
1131
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1132
+ 0.000001,
1133
+ 500.0,
1134
+ 1000.0,
1135
+ )
1136
+ .unwrap();
1137
+
1138
+ let cache_key = "test_key";
1139
+
1140
+ let cache_path = cache.cache_dir.join(format!("{}.msgpack", cache_key));
1141
+ let mut f = File::create(&cache_path).unwrap();
1142
+ f.write_all(b"test data").unwrap();
1143
+ drop(f);
1144
+
1145
+ let old_time = SystemTime::now() - std::time::Duration::from_secs(60);
1146
+ filetime::set_file_mtime(&cache_path, filetime::FileTime::from_system_time(old_time)).unwrap();
1147
+
1148
+ let result = cache.get(cache_key, None).unwrap();
1149
+ assert_eq!(result, None);
1150
+ }
1151
+
1152
+ #[test]
1153
+ fn test_generic_cache_properties() {
1154
+ let temp_dir = tempdir().unwrap();
1155
+ let cache = GenericCache::new(
1156
+ "test".to_string(),
1157
+ Some(temp_dir.path().to_str().unwrap().to_string()),
1158
+ 30.0,
1159
+ 500.0,
1160
+ 1000.0,
1161
+ )
1162
+ .unwrap();
1163
+
1164
+ assert_eq!(cache.cache_type(), "test");
1165
+ assert!(cache.cache_dir().to_string_lossy().contains("test"));
1166
+ }
1167
+ }