kreuzberg 4.0.0.pre.rc.13 → 4.0.0.pre.rc.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (369)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +538 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +105 -2
  8. data/README.md +454 -454
  9. data/Rakefile +33 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -341
  12. data/ext/kreuzberg_rb/extconf.rb +45 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
  14. data/ext/kreuzberg_rb/native/Cargo.lock +6940 -6941
  15. data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +15 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
  23. data/extconf.rb +28 -28
  24. data/kreuzberg.gemspec +214 -214
  25. data/lib/kreuzberg/api_proxy.rb +142 -142
  26. data/lib/kreuzberg/cache_api.rb +81 -81
  27. data/lib/kreuzberg/cli.rb +55 -55
  28. data/lib/kreuzberg/cli_proxy.rb +127 -127
  29. data/lib/kreuzberg/config.rb +724 -724
  30. data/lib/kreuzberg/error_context.rb +80 -80
  31. data/lib/kreuzberg/errors.rb +118 -118
  32. data/lib/kreuzberg/extraction_api.rb +340 -340
  33. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  36. data/lib/kreuzberg/result.rb +279 -279
  37. data/lib/kreuzberg/setup_lib_path.rb +80 -80
  38. data/lib/kreuzberg/validator_protocol.rb +89 -89
  39. data/lib/kreuzberg/version.rb +5 -5
  40. data/lib/kreuzberg.rb +109 -109
  41. data/lib/{pdfium.dll → libpdfium.dylib} +0 -0
  42. data/sig/kreuzberg/internal.rbs +184 -184
  43. data/sig/kreuzberg.rbs +546 -546
  44. data/spec/binding/cache_spec.rb +227 -227
  45. data/spec/binding/cli_proxy_spec.rb +85 -85
  46. data/spec/binding/cli_spec.rb +55 -55
  47. data/spec/binding/config_spec.rb +345 -345
  48. data/spec/binding/config_validation_spec.rb +283 -283
  49. data/spec/binding/error_handling_spec.rb +213 -213
  50. data/spec/binding/errors_spec.rb +66 -66
  51. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  52. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  53. data/spec/binding/plugins/validator_spec.rb +274 -274
  54. data/spec/fixtures/config.toml +39 -39
  55. data/spec/fixtures/config.yaml +41 -41
  56. data/spec/fixtures/invalid_config.toml +4 -4
  57. data/spec/smoke/package_spec.rb +178 -178
  58. data/spec/spec_helper.rb +42 -42
  59. data/vendor/Cargo.toml +1 -1
  60. data/vendor/kreuzberg/Cargo.toml +5 -5
  61. data/vendor/kreuzberg/README.md +230 -230
  62. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
  63. data/vendor/kreuzberg/build.rs +843 -843
  64. data/vendor/kreuzberg/src/api/error.rs +81 -81
  65. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  66. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  67. data/vendor/kreuzberg/src/api/server.rs +353 -353
  68. data/vendor/kreuzberg/src/api/types.rs +170 -170
  69. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  70. data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
  71. data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
  72. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  73. data/vendor/kreuzberg/src/core/config.rs +1080 -1080
  74. data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
  75. data/vendor/kreuzberg/src/core/io.rs +329 -329
  76. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  77. data/vendor/kreuzberg/src/core/mod.rs +47 -47
  78. data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
  79. data/vendor/kreuzberg/src/embeddings.rs +500 -500
  80. data/vendor/kreuzberg/src/error.rs +431 -431
  81. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  82. data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
  83. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  84. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  85. data/vendor/kreuzberg/src/extraction/html.rs +601 -601
  86. data/vendor/kreuzberg/src/extraction/image.rs +491 -491
  87. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -574
  88. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
  89. data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
  90. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  91. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  92. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  93. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  94. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
  95. data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
  96. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  97. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  98. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  99. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  100. data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
  101. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
  102. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
  103. data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
  104. data/vendor/kreuzberg/src/extractors/email.rs +157 -157
  105. data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
  106. data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
  107. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
  108. data/vendor/kreuzberg/src/extractors/html.rs +407 -407
  109. data/vendor/kreuzberg/src/extractors/image.rs +219 -219
  110. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
  111. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
  112. data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
  113. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
  114. data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
  115. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  116. data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
  117. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
  118. data/vendor/kreuzberg/src/extractors/pdf.rs +749 -749
  119. data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
  120. data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
  121. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
  122. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  123. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  124. data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
  125. data/vendor/kreuzberg/src/extractors/text.rs +265 -265
  126. data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
  127. data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
  128. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  129. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  130. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  131. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  132. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  133. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  134. data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
  135. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  136. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  137. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  138. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
  139. data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
  140. data/vendor/kreuzberg/src/lib.rs +113 -113
  141. data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
  142. data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
  143. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  144. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  145. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  146. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  147. data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
  148. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  149. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  150. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
  151. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  152. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  153. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  154. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  155. data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
  156. data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
  157. data/vendor/kreuzberg/src/pdf/error.rs +130 -130
  158. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  159. data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
  160. data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
  161. data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
  162. data/vendor/kreuzberg/src/pdf/table.rs +420 -420
  163. data/vendor/kreuzberg/src/pdf/text.rs +240 -240
  164. data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
  165. data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
  166. data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
  167. data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
  168. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
  169. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  170. data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
  171. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  172. data/vendor/kreuzberg/src/text/mod.rs +25 -25
  173. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  174. data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
  175. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  176. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  177. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  178. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  179. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  180. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  181. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  182. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  183. data/vendor/kreuzberg/src/types.rs +1055 -1055
  184. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  185. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  186. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  187. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  188. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  189. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  190. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  191. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  192. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  193. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  194. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  195. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  196. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  198. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  199. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  200. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  201. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  202. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  203. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  204. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  205. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  206. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  207. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  208. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  209. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  210. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  211. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  212. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  213. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  214. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  215. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  216. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  217. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  218. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  219. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  220. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  221. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  222. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  223. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  224. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  225. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  226. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  227. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  228. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  229. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  230. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  231. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  232. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  233. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  234. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  235. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  236. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  237. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  238. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  239. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  240. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  241. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  242. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  243. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  244. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  245. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  246. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  247. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  248. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  249. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  250. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  251. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  252. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  253. data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
  254. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
  255. data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
  256. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  257. data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
  258. data/vendor/kreuzberg/tests/config_features.rs +612 -612
  259. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
  260. data/vendor/kreuzberg/tests/core_integration.rs +510 -510
  261. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  262. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
  263. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  264. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  265. data/vendor/kreuzberg/tests/email_integration.rs +327 -327
  266. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  267. data/vendor/kreuzberg/tests/error_handling.rs +402 -402
  268. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  269. data/vendor/kreuzberg/tests/format_integration.rs +164 -164
  270. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  271. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  272. data/vendor/kreuzberg/tests/image_integration.rs +255 -255
  273. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  274. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  275. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  276. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  277. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  278. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  279. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  280. data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
  281. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
  282. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
  283. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
  284. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  285. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
  286. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  287. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  288. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
  289. data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
  290. data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
  291. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
  292. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
  293. data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
  294. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  295. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
  296. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
  297. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
  298. data/vendor/kreuzberg/tests/security_validation.rs +416 -416
  299. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  300. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
  301. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
  302. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
  303. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  304. data/vendor/kreuzberg-ffi/Cargo.toml +1 -1
  305. data/vendor/kreuzberg-ffi/README.md +851 -851
  306. data/vendor/kreuzberg-ffi/build.rs +176 -176
  307. data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
  308. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
  309. data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
  310. data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
  311. data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
  312. data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
  313. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
  314. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
  315. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
  316. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
  317. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
  318. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
  319. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
  320. data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
  321. data/vendor/kreuzberg-tesseract/LICENSE +22 -22
  322. data/vendor/kreuzberg-tesseract/README.md +399 -399
  323. data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
  324. data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
  325. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
  326. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
  327. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
  328. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
  329. data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
  330. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
  331. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
  332. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
  333. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
  334. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
  335. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
  336. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
  337. data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
  338. data/vendor/rb-sys/Cargo.lock +393 -393
  339. data/vendor/rb-sys/Cargo.toml +70 -70
  340. data/vendor/rb-sys/Cargo.toml.orig +57 -57
  341. data/vendor/rb-sys/LICENSE-APACHE +190 -190
  342. data/vendor/rb-sys/LICENSE-MIT +21 -21
  343. data/vendor/rb-sys/build/features.rs +111 -111
  344. data/vendor/rb-sys/build/main.rs +286 -286
  345. data/vendor/rb-sys/build/stable_api_config.rs +155 -155
  346. data/vendor/rb-sys/build/version.rs +50 -50
  347. data/vendor/rb-sys/readme.md +36 -36
  348. data/vendor/rb-sys/src/bindings.rs +21 -21
  349. data/vendor/rb-sys/src/hidden.rs +11 -11
  350. data/vendor/rb-sys/src/lib.rs +35 -35
  351. data/vendor/rb-sys/src/macros.rs +371 -371
  352. data/vendor/rb-sys/src/memory.rs +53 -53
  353. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
  354. data/vendor/rb-sys/src/special_consts.rs +31 -31
  355. data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
  356. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
  357. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
  358. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
  359. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
  360. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
  361. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
  362. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
  363. data/vendor/rb-sys/src/stable_api.rs +260 -260
  364. data/vendor/rb-sys/src/symbol.rs +31 -31
  365. data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
  366. data/vendor/rb-sys/src/utils.rs +89 -89
  367. data/vendor/rb-sys/src/value_type.rs +7 -7
  368. metadata +73 -4
  369. data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +0 -12
@@ -1,959 +1,959 @@
- use ahash::AHashMap;
- use memchr::{memchr, memchr3};
- use once_cell::sync::Lazy;
- use regex::Regex;
- use std::borrow::Cow;
-
- // ============================================================================
- // ============================================================================
-
- const OCR_PENALTY_WEIGHT: f64 = 0.3;
- const SCRIPT_PENALTY_WEIGHT: f64 = 0.2;
- const NAV_PENALTY_WEIGHT: f64 = 0.1;
- const STRUCTURE_BONUS_WEIGHT: f64 = 0.2;
- const METADATA_BONUS_WEIGHT: f64 = 0.1;
-
- const MIN_TEXT_LENGTH: usize = 10;
- const LARGE_TEXT_LENGTH: usize = 1000;
- const MIN_SENTENCE_WORDS: f64 = 10.0;
- const MAX_SENTENCE_WORDS: f64 = 30.0;
- const MIN_PARAGRAPH_WORDS: f64 = 50.0;
- const MAX_PARAGRAPH_WORDS: f64 = 300.0;
-
- static SCATTERED_CHARS_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"\b[a-zA-Z]\s{2,}[a-zA-Z]\s{2,}[a-zA-Z]\b")
- .expect("Scattered chars regex pattern is valid and should compile")
- });
- static REPEATED_PUNCT_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"[.]{3,}|[_]{3,}").expect("Repeated punctuation regex pattern is valid and should compile")
- });
- static DASH_PATTERN: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"[-]{3,}").expect("Dash pattern regex is valid and should compile"));
- static ISOLATED_PUNCT_PATTERN: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"\s[.,;:!?]\s").expect("Isolated punctuation regex pattern is valid and should compile"));
- static MALFORMED_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"\b[a-zA-Z]+[0-9]+[a-zA-Z]+[a-zA-Z0-9]*\b")
- .expect("Malformed words regex pattern is valid and should compile")
- });
- static EXCESSIVE_WHITESPACE_PATTERN: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"\s{3,}").expect("Excessive whitespace regex pattern is valid and should compile"));
-
- static JS_FUNCTION_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"(?i)function\s+\w+\s*\([^)]*\)\s*\{[^}]*\}")
- .expect("JavaScript function regex pattern is valid and should compile")
- });
- static CSS_RULES_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"(?i)\.[a-zA-Z][\w-]*\s*\{[^}]*\}").expect("CSS rules regex pattern is valid and should compile")
- });
- static SCRIPT_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"(?is)<script[^>]*>.*?</script>").expect("Script tag regex pattern is valid and should compile")
- });
- static STYLE_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"(?is)<style[^>]*>.*?</style>").expect("Style tag regex pattern is valid and should compile")
- });
-
- static NAV_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"(?i)\b(?:Skip to main content|Back to top|Main navigation|Site navigation)\b")
- .expect("Navigation words regex pattern is valid and should compile")
- });
- static BREADCRUMB_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"(?:Home\s*[>»]\s*|[>»]\s*){2,}").expect("Breadcrumb regex pattern is valid and should compile")
- });
- static PAGINATION_PATTERN: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"(?i)\b(?:Page \d+ of \d+|First page|Last page|Previous page|Next page|^\d+ of \d+$)\b")
- .expect("Pagination regex pattern is valid and should compile")
- });
-
- static SENTENCE_DETECT: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"[.!?]\s+[A-Z]").expect("Sentence detection regex pattern is valid and should compile"));
- static PUNCTUATION_DETECT: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"[.!?]").expect("Punctuation detection regex pattern is valid and should compile"));
-
- static WHITESPACE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"[ \t\f\v\r\xa0\u{2000}-\u{200b}\u{2028}\u{2029}\u{3000}]+")
- .expect("Whitespace normalization regex pattern is valid and should compile")
- });
- static NEWLINE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
- Regex::new(r"\n\s*\n\s*\n+").expect("Newline normalization regex pattern is valid and should compile")
- });
- static NEWLINE_CLEANUP: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"\n+").expect("Newline cleanup regex pattern is valid and should compile"));
-
- #[inline]
- fn sum_match_lengths(text: &str, pattern: &Regex) -> usize {
- pattern.find_iter(text).map(|m| m.len()).sum()
- }
-
- fn chain_replacements<'a>(mut text: Cow<'a, str>, replacements: &[(&Regex, &str)]) -> Cow<'a, str> {
- for (pattern, replacement) in replacements {
- if pattern.is_match(&text) {
- text = Cow::Owned(pattern.replace_all(&text, *replacement).into_owned());
- }
- }
- text
- }
-
- #[inline]
- fn replace_with_if_matches<'a, F>(text: &'a str, pattern: &Regex, replacer: F) -> Cow<'a, str>
- where
- F: FnMut(&regex::Captures) -> String,
- {
- if pattern.is_match(text) {
- Cow::Owned(pattern.replace_all(text, replacer).into_owned())
- } else {
- Cow::Borrowed(text)
- }
- }
-
- /// Compute a heuristic score (0.0–1.0) describing how clean the extracted text is.
- ///
- /// The scoring pipeline rewards well-structured prose while penalising OCR artefacts,
- /// embedded scripts, and navigation chrome. Supplying document metadata allows the
- /// function to include contextual bonuses.
- ///
- /// ```rust
- /// use ahash::AHashMap;
- /// use kreuzberg::utils::quality::calculate_quality_score;
- ///
- /// let text = "Executive Summary\n===================\nKreuzberg extracts documents quickly.";
- /// let score = calculate_quality_score(text, None);
- /// assert!(score > 0.7);
- /// ```
- pub fn calculate_quality_score(text: &str, metadata: Option<&AHashMap<String, String>>) -> f64 {
- if text.is_empty() || text.trim().is_empty() {
- return 0.0;
- }
-
- let total_chars = text.len() as f64;
-
- if text.len() < MIN_TEXT_LENGTH {
- return 0.1;
- }
-
- let mut score = 1.0;
-
- if text.len() > LARGE_TEXT_LENGTH {
- let ocr_penalty = calculate_ocr_penalty(text, total_chars);
- let script_penalty = calculate_script_penalty(text, total_chars);
- let nav_penalty = calculate_navigation_penalty(text, total_chars);
- let structure_bonus = calculate_structure_bonus(text);
-
- score -= ocr_penalty * OCR_PENALTY_WEIGHT;
- score -= script_penalty * SCRIPT_PENALTY_WEIGHT;
- score -= nav_penalty * NAV_PENALTY_WEIGHT;
- score += structure_bonus * STRUCTURE_BONUS_WEIGHT;
- } else {
- score -= calculate_ocr_penalty(text, total_chars) * OCR_PENALTY_WEIGHT;
- score += calculate_structure_bonus(text) * STRUCTURE_BONUS_WEIGHT;
- }
-
- if let Some(metadata) = metadata {
- score += calculate_metadata_bonus(metadata) * METADATA_BONUS_WEIGHT;
- }
-
- score.clamp(0.0, 1.0)
- }
-
- #[inline]
- fn calculate_ocr_penalty(text: &str, total_chars: f64) -> f64 {
- if total_chars == 0.0 {
- return 0.0;
- }
-
- if !text.contains(" ") && !text.contains("...") {
- return 0.0;
- }
-
- let artifact_chars = sum_match_lengths(text, &SCATTERED_CHARS_PATTERN)
- + sum_match_lengths(text, &REPEATED_PUNCT_PATTERN)
- + count_non_table_dash_artifacts(text)
- + sum_match_lengths(text, &ISOLATED_PUNCT_PATTERN)
- + sum_match_lengths(text, &MALFORMED_WORDS_PATTERN)
- + sum_match_lengths(text, &EXCESSIVE_WHITESPACE_PATTERN);
-
- (artifact_chars as f64 / total_chars).min(1.0)
- }
-
- #[inline]
- fn count_non_table_dash_artifacts(text: &str) -> usize {
- let mut artifact_count = 0;
-
- for line in text.lines() {
- let trimmed = line.trim();
- let is_table_separator = trimmed.starts_with('|')
- && trimmed.ends_with('|')
- && trimmed
- .chars()
- .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
-
- if !is_table_separator {
- for m in DASH_PATTERN.find_iter(line) {
- artifact_count += m.len();
- }
- }
- }
-
- artifact_count
- }
-
- #[inline]
- fn calculate_script_penalty(text: &str, total_chars: f64) -> f64 {
- if total_chars == 0.0 {
- return 0.0;
- }
-
- if !text.contains("function") && !text.contains("<script") && !text.contains("<style") {
- return 0.0;
- }
-
- let script_chars = sum_match_lengths(text, &JS_FUNCTION_PATTERN)
- + sum_match_lengths(text, &CSS_RULES_PATTERN)
- + sum_match_lengths(text, &SCRIPT_TAG_PATTERN)
- + sum_match_lengths(text, &STYLE_TAG_PATTERN);
-
- (script_chars as f64 / total_chars).min(1.0)
- }
-
- #[inline]
- fn calculate_navigation_penalty(text: &str, total_chars: f64) -> f64 {
- if total_chars == 0.0 {
- return 0.0;
- }
-
- let nav_chars = sum_match_lengths(text, &NAV_WORDS_PATTERN)
- + sum_match_lengths(text, &BREADCRUMB_PATTERN)
- + sum_match_lengths(text, &PAGINATION_PATTERN);
-
- (nav_chars as f64 / total_chars).min(1.0)
- }
-
- #[inline]
- fn calculate_structure_bonus(text: &str) -> f64 {
- if text.is_empty() {
- return 0.0;
- }
-
- let sentence_count = SENTENCE_DETECT.find_iter(text).count() as f64;
- let paragraph_count = text.matches("\n\n").count() as f64 + 1.0;
- let words = text.split_whitespace().count() as f64;
-
- if words == 0.0 {
- return 0.0;
- }
-
- let avg_words_per_sentence = words / sentence_count.max(1.0);
- let avg_words_per_paragraph = words / paragraph_count.max(1.0);
-
- let mut structure_score: f64 = 0.0;
-
- if (MIN_SENTENCE_WORDS..=MAX_SENTENCE_WORDS).contains(&avg_words_per_sentence) {
- structure_score += 0.3;
- }
-
- if (MIN_PARAGRAPH_WORDS..=MAX_PARAGRAPH_WORDS).contains(&avg_words_per_paragraph) {
- structure_score += 0.3;
- }
-
- if paragraph_count > 1.0 {
- structure_score += 0.2;
- }
-
- if PUNCTUATION_DETECT.is_match(text) {
- structure_score += 0.2;
- }
-
- structure_score.min(1.0)
- }
-
- #[inline]
- fn calculate_metadata_bonus(metadata: &AHashMap<String, String>) -> f64 {
- const IMPORTANT_FIELDS: &[&str] = &["title", "author", "subject", "description", "keywords"];
-
- let present_fields = IMPORTANT_FIELDS
- .iter()
- .filter(|&&field| metadata.contains_key(field))
- .count();
-
- present_fields as f64 / IMPORTANT_FIELDS.len() as f64
- }
-
- /// Apply the quality heuristics and return a cleaned representation of the text.
- ///
- /// This function normalises whitespace, removes navigation boilerplate, and strips
- /// repeated punctuation that commonly appears in OCR output.
- pub fn clean_extracted_text(text: &str) -> String {
- if text.is_empty() {
- return String::new();
- }
-
- let mut working_text = Cow::Borrowed(text);
-
- working_text = clean_scripts(working_text);
-
- working_text = clean_ocr_artifacts_cow(working_text);
-
- working_text = clean_navigation_elements_cow(working_text);
-
- working_text = clean_repeated_punctuation_cow(working_text);
- working_text = clean_repeated_punctuation_cow(working_text);
298
-
299
- working_text = normalize_whitespace_cow(working_text);
300
-
301
- working_text.trim().to_string()
302
- }
303
-
304
- #[inline]
305
- fn clean_scripts<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
306
- let script_replacements = [
307
- (&*SCRIPT_TAG_PATTERN, " "),
308
- (&*STYLE_TAG_PATTERN, " "),
309
- (&*JS_FUNCTION_PATTERN, " "),
310
- (&*CSS_RULES_PATTERN, " "),
311
- ];
312
- chain_replacements(text, &script_replacements)
313
- }
314
-
315
- #[inline]
316
- fn normalize_whitespace_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
317
- if let Some(fast) = normalize_whitespace_ascii(text.as_ref()) {
318
- return Cow::Owned(fast);
319
- }
320
-
321
- let mut result = text;
322
-
323
- if WHITESPACE_NORMALIZE.is_match(&result) {
324
- result = Cow::Owned(WHITESPACE_NORMALIZE.replace_all(&result, " ").into_owned());
325
- }
326
-
327
- if NEWLINE_NORMALIZE.is_match(&result) {
328
- result = Cow::Owned(NEWLINE_NORMALIZE.replace_all(&result, "\n\n").into_owned());
329
- }
330
-
331
- result
332
- }
333
-
334
- #[inline]
335
- fn clean_repeated_punctuation_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
336
- if let Some(cleaned) = clean_repeated_punctuation_ascii(text.as_ref()) {
337
- return Cow::Owned(cleaned);
338
- }
339
-
340
- if REPEATED_PUNCT_PATTERN.is_match(&text) {
341
- Cow::Owned(
342
- REPEATED_PUNCT_PATTERN
343
- .replace_all(&text, |caps: &regex::Captures<'_>| {
344
- let ch = caps.get(0).and_then(|m| m.as_str().chars().next()).unwrap_or('.');
345
- ch.to_string()
346
- })
347
- .into_owned(),
348
- )
349
- } else {
350
- text
351
- }
352
- }
353
-
354
- fn clean_repeated_punctuation_ascii(text: &str) -> Option<String> {
355
- if !text.is_ascii() {
356
- return None;
357
- }
358
-
359
- let bytes = text.as_bytes();
360
- let mut result = Vec::with_capacity(bytes.len());
361
- let mut changed = false;
362
- let mut offset = 0;
363
-
364
- while offset < bytes.len() {
365
- let remaining = &bytes[offset..];
366
- if let Some(next) = find_next_ascii_punctuation(remaining) {
367
- if next > 0 {
368
- result.extend_from_slice(&remaining[..next]);
369
- offset += next;
370
- }
371
-
372
- if offset >= bytes.len() {
373
- break;
374
- }
375
-
376
- let current = bytes[offset];
377
- result.push(current);
378
- let mut end = offset + 1;
379
- while end < bytes.len() && matches!(bytes[end], b'!' | b'?' | b'.' | b',') {
380
- changed = true;
381
- end += 1;
382
- }
383
- offset = end;
384
- } else {
385
- result.extend_from_slice(remaining);
386
- break;
387
- }
388
- }
389
-
390
- if changed { String::from_utf8(result).ok() } else { None }
391
- }
392
-
393
- #[inline]
394
- fn find_next_ascii_punctuation(bytes: &[u8]) -> Option<usize> {
395
- let primary = memchr3(b'!', b'?', b'.', bytes);
396
- let comma = memchr(b',', bytes);
397
- match (primary, comma) {
398
- (Some(a), Some(b)) => Some(a.min(b)),
399
- (Some(a), None) => Some(a),
400
- (None, Some(b)) => Some(b),
401
- (None, None) => None,
402
- }
403
- }
404
-
405
- #[inline]
406
- pub(crate) fn normalize_whitespace_ascii(text: &str) -> Option<String> {
407
- if !text.is_ascii() {
408
- return None;
409
- }
410
-
411
- let bytes = text.as_bytes();
412
- let mut result = Vec::with_capacity(bytes.len());
413
- let mut changed = false;
414
- let mut i = 0;
415
- let len = bytes.len();
416
-
417
- while i < len {
418
- match bytes[i] {
419
- b' ' | b'\t' | b'\r' | 0x0B | 0x0C => {
420
- let mut j = i + 1;
421
- while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
422
- j += 1;
423
- }
424
- if j - i > 1 || bytes[i] != b' ' {
425
- changed = true;
426
- }
427
- result.push(b' ');
428
- i = j;
429
- }
430
- b'\n' => {
431
- let mut j = i + 1;
432
- while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
433
- j += 1;
434
- changed = true;
435
- }
436
-
437
- let mut newline_count = 1;
438
- while j < len && bytes[j] == b'\n' {
439
- newline_count += 1;
440
- j += 1;
441
-
442
- while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
443
- j += 1;
444
- changed = true;
445
- }
446
- }
447
-
448
- if newline_count >= 3 {
449
- result.extend_from_slice(b"\n\n");
450
- changed = true;
451
- } else {
452
- result.extend(std::iter::repeat_n(b'\n', newline_count));
453
- }
454
-
455
- i = j;
456
- }
457
- _ => {
458
- result.push(bytes[i]);
459
- i += 1;
460
- }
461
- }
462
- }
463
-
464
- let normalized = String::from_utf8(result).unwrap_or_else(|_| text.to_string());
465
-
466
- if changed { Some(normalized) } else { None }
467
- }
468
-
469
- #[inline]
470
- fn clean_ocr_artifacts_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
471
- let result = if let Some(fixed) = collapse_scattered_ascii(&text) {
472
- Cow::Owned(fixed)
473
- } else if SCATTERED_CHARS_PATTERN.is_match(&text) {
474
- Cow::Owned(
475
- replace_with_if_matches(&text, &SCATTERED_CHARS_PATTERN, |caps: &regex::Captures| {
476
- caps[0].chars().filter(|c| !c.is_whitespace()).collect::<String>()
477
- })
478
- .into_owned(),
479
- )
480
- } else {
481
- text
482
- };
483
-
484
- let result = clean_dashes_preserve_tables(result);
485
-
486
- let ocr_replacements = [
487
- (&*REPEATED_PUNCT_PATTERN, "..."),
488
- (&*ISOLATED_PUNCT_PATTERN, " "),
489
- (&*MALFORMED_WORDS_PATTERN, " "),
490
- (&*EXCESSIVE_WHITESPACE_PATTERN, " "),
491
- ];
492
-
493
- chain_replacements(result, &ocr_replacements)
494
- }
495
-
496
- #[inline]
497
- fn clean_dashes_preserve_tables<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
498
- if !DASH_PATTERN.is_match(&text) {
499
- return text;
500
- }
501
-
502
- let mut result = String::with_capacity(text.len());
503
- let lines: Vec<&str> = text.lines().collect();
504
-
505
- for (i, line) in lines.iter().enumerate() {
506
- if i > 0 {
507
- result.push('\n');
508
- }
509
-
510
- let trimmed = line.trim();
511
- let is_table_separator = trimmed.starts_with('|')
512
- && trimmed.ends_with('|')
513
- && trimmed
514
- .chars()
515
- .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
516
-
517
- if is_table_separator {
518
- result.push_str(line);
519
- } else {
520
- let cleaned_line = DASH_PATTERN.replace_all(line, "...");
521
- result.push_str(&cleaned_line);
522
- }
523
- }
524
-
525
- Cow::Owned(result)
526
- }
527
-
528
- #[inline]
529
- fn clean_navigation_elements_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
530
- let nav_replacements = [
531
- (&*NAV_WORDS_PATTERN, " "),
532
- (&*BREADCRUMB_PATTERN, " "),
533
- (&*PAGINATION_PATTERN, " "),
534
- ];
535
-
536
- chain_replacements(text, &nav_replacements)
537
- }
538
-
539
- #[inline]
540
- pub(crate) fn collapse_scattered_ascii(text: &str) -> Option<String> {
541
- if !text.is_ascii() {
542
- return None;
543
- }
544
-
545
- let bytes = text.as_bytes();
546
- let mut result = Vec::with_capacity(bytes.len());
547
- let mut changed = false;
548
- let mut i = 0;
549
-
550
- while i < bytes.len() {
551
- if bytes[i].is_ascii_alphabetic() {
552
- let mut j = i + 1;
553
- let mut count = 1;
554
- while j < bytes.len() {
555
- if bytes[j].is_ascii_alphabetic() {
556
- count += 1;
557
- j += 1;
558
- } else if bytes[j].is_ascii_whitespace() {
559
- j += 1;
560
- } else {
561
- break;
562
- }
563
- }
564
-
565
- if count >= 3 && j - i >= (count * 2 - 1) {
566
- changed = true;
567
- for &byte in &bytes[i..j] {
568
- if byte.is_ascii_alphabetic() {
569
- result.push(byte.to_ascii_lowercase());
570
- }
571
- }
572
- result.push(b' ');
573
- i = j;
574
- continue;
575
- }
576
- }
577
-
578
- result.push(bytes[i]);
579
- i += 1;
580
- }
581
-
582
- if changed { String::from_utf8(result).ok() } else { None }
583
- }
584
-
585
- /// Collapse redundant whitespace while preserving paragraph boundaries.
586
- pub fn normalize_spaces(text: &str) -> String {
587
- if text.is_empty() || text.trim().is_empty() {
588
- return String::new();
589
- }
590
-
591
- let mut result = String::with_capacity(text.len());
592
-
593
- let mut first = true;
594
- for paragraph in text.split("\n\n") {
595
- let trimmed = paragraph.trim();
596
- if trimmed.is_empty() {
597
- continue;
598
- }
599
-
600
- if !first {
601
- result.push_str("\n\n");
602
- }
603
- first = false;
604
-
605
- let collapsed = if let Some(fast) = normalize_whitespace_ascii(paragraph) {
606
- Cow::Owned(fast)
607
- } else {
608
- Cow::Owned(WHITESPACE_NORMALIZE.replace_all(paragraph, " ").into_owned())
609
- };
610
-
611
- let cleaned = NEWLINE_CLEANUP.replace_all(&collapsed, "\n");
612
-
613
- let mut first_line = true;
614
- for line in cleaned.split('\n') {
615
- let line = line.trim();
616
- if !line.is_empty() {
617
- if !first_line {
618
- result.push('\n');
619
- }
620
- result.push_str(line);
621
- first_line = false;
622
- }
623
- }
624
- }
625
-
626
- result
627
- }
628
-
629
- #[cfg(all(test, feature = "quality"))]
630
- mod tests {
631
- use super::*;
632
-
633
- #[test]
634
- fn test_calculate_quality_score_empty_text() {
635
- assert_eq!(calculate_quality_score("", None), 0.0);
636
- assert_eq!(calculate_quality_score(" ", None), 0.0);
637
- assert_eq!(calculate_quality_score("\n\n\n", None), 0.0);
638
- }
639
-
640
- #[test]
641
- fn test_calculate_quality_score_short_text() {
642
- let text = "Hello";
643
- let score = calculate_quality_score(text, None);
644
- assert_eq!(score, 0.1);
645
- }
646
-
647
- #[test]
648
- fn test_calculate_quality_score_normal_text() {
649
- let text =
650
- "This is a normal sentence with proper punctuation. It has multiple sentences. And proper structure.";
651
- let score = calculate_quality_score(text, None);
652
- assert!(score > 0.5);
653
- assert!(score <= 1.0);
654
- }
655
-
656
- #[test]
657
- fn test_clean_extracted_text_empty() {
658
- assert_eq!(clean_extracted_text(""), "");
659
- assert_eq!(clean_extracted_text(" "), "");
660
- }
661
-
662
- #[test]
663
- fn test_clean_extracted_text_removes_scripts() {
664
- let text = "Before <script>alert('test');</script> After";
665
- let cleaned = clean_extracted_text(text);
666
- assert!(!cleaned.contains("<script"));
667
- assert!(cleaned.contains("Before"));
668
- assert!(cleaned.contains("After"));
669
- }
670
-
671
- #[test]
672
- fn test_normalize_spaces_empty() {
673
- assert_eq!(normalize_spaces(""), "");
674
- assert_eq!(normalize_spaces(" "), "");
675
- }
676
-
677
- #[test]
678
- fn test_normalize_spaces_single_paragraph() {
679
- let text = "This is a test";
680
- let normalized = normalize_spaces(text);
681
- assert_eq!(normalized, "This is a test");
682
- }
683
-
684
- #[test]
685
- fn test_calculate_quality_score_with_metadata() {
686
- let text = "This is a normal text with proper structure.";
687
- let mut metadata = AHashMap::new();
688
- metadata.insert("title".to_string(), "Test Title".to_string());
689
- metadata.insert("author".to_string(), "Test Author".to_string());
690
-
691
- let score = calculate_quality_score(text, Some(&metadata));
692
- assert!(score > 0.0);
693
- assert!(score <= 1.0);
694
- }
695
-
696
- #[test]
697
- fn test_calculate_ocr_penalty_clean_text() {
698
- let text = "This is clean text without artifacts";
699
- let penalty = calculate_ocr_penalty(text, text.len() as f64);
700
- assert_eq!(penalty, 0.0);
701
- }
702
-
703
- #[test]
704
- fn test_calculate_ocr_penalty_with_artifacts() {
705
- let text = "Text with excessive spaces and ....... dots";
706
- let penalty = calculate_ocr_penalty(text, text.len() as f64);
707
- assert!(penalty > 0.0);
708
- assert!(penalty <= 1.0);
709
- }
710
-
711
- #[test]
712
- fn test_calculate_script_penalty_clean_text() {
713
- let text = "This is clean text without scripts";
714
- let penalty = calculate_script_penalty(text, text.len() as f64);
715
- assert_eq!(penalty, 0.0);
716
- }
717
-
718
- #[test]
719
- fn test_calculate_script_penalty_with_js() {
720
- let text = "function test() { return 42; }";
721
- let penalty = calculate_script_penalty(text, text.len() as f64);
722
- assert!(penalty > 0.0);
723
- }
724
-
725
- #[test]
726
- fn test_calculate_navigation_penalty_clean_text() {
727
- let text = "This is clean text without navigation";
728
- let penalty = calculate_navigation_penalty(text, text.len() as f64);
729
- assert_eq!(penalty, 0.0);
730
- }
731
-
732
- #[test]
733
- fn test_calculate_navigation_penalty_with_nav() {
734
- let text = "Skip to main content and Back to top links everywhere";
735
- let penalty = calculate_navigation_penalty(text, text.len() as f64);
736
- assert!(penalty > 0.0);
737
- }
738
-
739
- #[test]
740
- fn test_calculate_structure_bonus_empty() {
741
- assert_eq!(calculate_structure_bonus(""), 0.0);
742
- }
743
-
744
- #[test]
745
- fn test_calculate_structure_bonus_well_structured() {
746
- let text = "This is a sentence. This is another sentence.\n\nNew paragraph here. More content.";
747
- let bonus = calculate_structure_bonus(text);
748
- assert!(bonus > 0.0);
749
- assert!(bonus <= 1.0);
750
- }
751
-
752
- #[test]
753
- fn test_calculate_metadata_bonus_empty() {
754
- let metadata = AHashMap::new();
755
- let bonus = calculate_metadata_bonus(&metadata);
756
- assert_eq!(bonus, 0.0);
757
- }
758
-
759
- #[test]
760
- fn test_calculate_metadata_bonus_full() {
761
- let mut metadata = AHashMap::new();
762
- metadata.insert("title".to_string(), "Title".to_string());
763
- metadata.insert("author".to_string(), "Author".to_string());
764
- metadata.insert("subject".to_string(), "Subject".to_string());
765
- metadata.insert("description".to_string(), "Description".to_string());
766
- metadata.insert("keywords".to_string(), "Keywords".to_string());
767
-
768
- let bonus = calculate_metadata_bonus(&metadata);
769
- assert_eq!(bonus, 1.0);
770
- }
771
-
772
- #[test]
773
- fn test_clean_extracted_text_removes_styles() {
774
- let text = "Before <style>.class { color: red; }</style> After";
775
- let cleaned = clean_extracted_text(text);
776
- assert!(!cleaned.contains("<style"));
777
- assert!(cleaned.contains("Before"));
778
- assert!(cleaned.contains("After"));
779
- }
780
-
781
- #[test]
782
- fn test_clean_extracted_text_ocr_artifacts() {
783
- let text = "Text with excessive spaces";
784
- let cleaned = clean_extracted_text(text);
785
- assert!(!cleaned.contains(" "));
786
- }
787
-
788
- #[test]
789
- fn test_clean_extracted_text_navigation() {
790
- let text = "Content Skip to main content more content";
791
- let cleaned = clean_extracted_text(text);
792
- assert!(cleaned.contains("Content"));
793
- assert!(cleaned.contains("more content"));
794
- }
795
-
796
- #[test]
797
- fn test_clean_repeated_punctuation_ascii_helper() {
798
- let input = "Wow!!! Really??? Sure...";
799
- let cleaned = clean_repeated_punctuation_ascii(input).expect("Should collapse punctuation");
800
- assert_eq!(cleaned, "Wow! Really? Sure.");
801
- }
802
-
803
- #[test]
804
- fn test_clean_repeated_punctuation_non_ascii_passthrough() {
805
- assert!(clean_repeated_punctuation_ascii("¿Qué tal?").is_none());
806
- }
807
-
808
- #[test]
809
- fn test_normalize_spaces_multiple_paragraphs() {
810
- let text = "First paragraph.\n\nSecond paragraph.";
811
- let normalized = normalize_spaces(text);
812
- assert!(normalized.contains("\n\n"));
813
- }
814
-
815
- #[test]
816
- fn test_normalize_spaces_preserves_paragraphs() {
817
- let text = "Para 1\n\n\n\nPara 2";
818
- let normalized = normalize_spaces(text);
819
- assert_eq!(normalized, "Para 1\n\nPara 2");
820
- }
821
-
822
- #[test]
823
- fn test_count_non_table_dash_artifacts() {
824
- let text = "Some text --- with dashes";
825
- let count = count_non_table_dash_artifacts(text);
826
- assert!(count > 0);
827
- }
828
-
829
- #[test]
830
- fn test_count_non_table_dash_artifacts_preserves_tables() {
831
- let text = "| Header |\n|--------|\n| Data |";
832
- let count = count_non_table_dash_artifacts(text);
833
- assert_eq!(count, 0);
834
- }
835
-
836
- #[test]
837
- fn test_clean_dashes_preserve_tables_simple() {
838
- let text = Cow::Borrowed("| Col1 |\n|------|\n| Data |");
839
- let result = clean_dashes_preserve_tables(text);
840
- assert!(result.contains("|------"));
841
- }
842
-
843
- #[test]
844
- fn test_clean_dashes_preserve_tables_replaces_non_table() {
845
- let text = Cow::Borrowed("Text with --- dashes");
846
- let result = clean_dashes_preserve_tables(text);
847
- assert!(result.contains("..."));
848
- assert!(!result.contains("---"));
849
- }
850
-
851
- #[test]
852
- fn test_sum_match_lengths() {
853
- let text = "test ... test ... test";
854
- let count = sum_match_lengths(text, &REPEATED_PUNCT_PATTERN);
855
- assert!(count > 0);
856
- }
857
-
858
- #[test]
859
- fn test_quality_score_large_text_with_ocr_issues() {
860
- let text = "a".repeat(2000) + " " + &"b".repeat(2000);
861
- let score = calculate_quality_score(&text, None);
862
- assert!(score >= 0.0);
863
- assert!(score <= 1.0);
864
- }
865
-
866
- #[test]
867
- fn test_quality_score_clamped_to_range() {
868
- let perfect_text = "This is perfect text. ".repeat(100);
869
- let score = calculate_quality_score(&perfect_text, None);
870
- assert!(score >= 0.0);
871
- assert!(score <= 1.0);
872
- }
873
-
874
- #[test]
875
- fn test_clean_extracted_text_scattered_chars() {
876
- let text = "a b c scattered";
877
- let cleaned = clean_extracted_text(text);
878
- assert!(!cleaned.is_empty());
879
- }
880
-
881
- #[cfg_attr(coverage, ignore = "coverage instrumentation perturbs ASCII fast path heuristics")]
882
- #[test]
883
- fn test_collapse_scattered_ascii_trigger() {
884
- let original = "S p a c e d";
885
- let collapsed = collapse_scattered_ascii(original).expect("fast path should trigger");
886
- assert_eq!(collapsed.trim(), "spaced");
887
- }
888
-
889
- #[test]
890
- fn test_collapse_scattered_ascii_non_ascii() {
891
- assert!(collapse_scattered_ascii("מ ש ה ו").is_none());
892
- }
893
-
894
- #[test]
895
- fn test_normalize_whitespace_ascii_spaces() {
896
- let input = "Hello \tWorld\rWelcome";
897
- let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
898
- assert_eq!(normalized, "Hello World Welcome");
899
- }
900
-
901
- #[test]
902
- fn test_normalize_whitespace_ascii_newlines() {
903
- let input = "Line1\n \n\n \nLine2";
904
- let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
905
- assert_eq!(normalized, "Line1\n\nLine2");
906
- }
907
-
908
- #[test]
909
- fn test_normalize_whitespace_ascii_no_change() {
910
- assert!(normalize_whitespace_ascii("Clean text").is_none());
911
- }
912
-
913
- #[test]
914
- fn test_normalize_whitespace_ascii_non_ascii() {
915
- assert!(normalize_whitespace_ascii("שלום שלום").is_none());
916
- }
917
-
918
- #[test]
919
- fn test_normalize_spaces_ascii_fast_path() {
920
- let input = "Hello world\n\nSecond line";
921
- let normalized = normalize_spaces(input);
922
- assert_eq!(normalized, "Hello world\n\nSecond line");
923
- }
924
-
925
- #[test]
926
- fn test_normalize_whitespace_cow_no_changes() {
927
- let text = Cow::Borrowed("normaltext");
928
- let result = normalize_whitespace_cow(text);
929
- assert_eq!(result.as_ref(), "normaltext");
930
- }
931
-
932
- #[test]
933
- fn test_normalize_whitespace_cow_with_changes() {
934
- let text = Cow::Borrowed("text with spaces");
935
- let result = normalize_whitespace_cow(text);
936
- assert!(matches!(result, Cow::Owned(_)));
937
- }
938
-
939
- #[test]
940
- fn test_clean_scripts_no_scripts() {
941
- let text = Cow::Borrowed("clean text");
942
- let result = clean_scripts(text);
943
- assert!(matches!(result, Cow::Borrowed(_)));
944
- }
945
-
946
- #[test]
947
- fn test_clean_scripts_with_script_tag() {
948
- let text = Cow::Borrowed("<script>code</script>");
949
- let result = clean_scripts(text);
950
- assert!(!result.contains("<script"));
951
- }
952
-
953
- #[test]
954
- fn test_quality_constants() {
955
- assert_eq!(MIN_TEXT_LENGTH, 10);
956
- assert_eq!(LARGE_TEXT_LENGTH, 1000);
957
- assert_eq!(OCR_PENALTY_WEIGHT, 0.3);
958
- }
959
- }
1
+ use ahash::AHashMap;
2
+ use memchr::{memchr, memchr3};
3
+ use once_cell::sync::Lazy;
4
+ use regex::Regex;
5
+ use std::borrow::Cow;
6
+
7
+ // ============================================================================
8
+ // ============================================================================
9
+
10
+ const OCR_PENALTY_WEIGHT: f64 = 0.3;
11
+ const SCRIPT_PENALTY_WEIGHT: f64 = 0.2;
12
+ const NAV_PENALTY_WEIGHT: f64 = 0.1;
13
+ const STRUCTURE_BONUS_WEIGHT: f64 = 0.2;
14
+ const METADATA_BONUS_WEIGHT: f64 = 0.1;
15
+
16
+ const MIN_TEXT_LENGTH: usize = 10;
17
+ const LARGE_TEXT_LENGTH: usize = 1000;
18
+ const MIN_SENTENCE_WORDS: f64 = 10.0;
19
+ const MAX_SENTENCE_WORDS: f64 = 30.0;
20
+ const MIN_PARAGRAPH_WORDS: f64 = 50.0;
21
+ const MAX_PARAGRAPH_WORDS: f64 = 300.0;
22
+
23
+ static SCATTERED_CHARS_PATTERN: Lazy<Regex> = Lazy::new(|| {
24
+ Regex::new(r"\b[a-zA-Z]\s{2,}[a-zA-Z]\s{2,}[a-zA-Z]\b")
25
+ .expect("Scattered chars regex pattern is valid and should compile")
26
+ });
27
+ static REPEATED_PUNCT_PATTERN: Lazy<Regex> = Lazy::new(|| {
28
+ Regex::new(r"[.]{3,}|[_]{3,}").expect("Repeated punctuation regex pattern is valid and should compile")
29
+ });
30
+ static DASH_PATTERN: Lazy<Regex> =
31
+ Lazy::new(|| Regex::new(r"[-]{3,}").expect("Dash pattern regex is valid and should compile"));
32
+ static ISOLATED_PUNCT_PATTERN: Lazy<Regex> =
33
+ Lazy::new(|| Regex::new(r"\s[.,;:!?]\s").expect("Isolated punctuation regex pattern is valid and should compile"));
34
+ static MALFORMED_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
35
+ Regex::new(r"\b[a-zA-Z]+[0-9]+[a-zA-Z]+[a-zA-Z0-9]*\b")
36
+ .expect("Malformed words regex pattern is valid and should compile")
37
+ });
38
+ static EXCESSIVE_WHITESPACE_PATTERN: Lazy<Regex> =
39
+ Lazy::new(|| Regex::new(r"\s{3,}").expect("Excessive whitespace regex pattern is valid and should compile"));
40
+
41
+ static JS_FUNCTION_PATTERN: Lazy<Regex> = Lazy::new(|| {
42
+ Regex::new(r"(?i)function\s+\w+\s*\([^)]*\)\s*\{[^}]*\}")
43
+ .expect("JavaScript function regex pattern is valid and should compile")
44
+ });
45
+ static CSS_RULES_PATTERN: Lazy<Regex> = Lazy::new(|| {
46
+ Regex::new(r"(?i)\.[a-zA-Z][\w-]*\s*\{[^}]*\}").expect("CSS rules regex pattern is valid and should compile")
47
+ });
48
+ static SCRIPT_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
49
+ Regex::new(r"(?is)<script[^>]*>.*?</script>").expect("Script tag regex pattern is valid and should compile")
50
+ });
51
+ static STYLE_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
52
+ Regex::new(r"(?is)<style[^>]*>.*?</style>").expect("Style tag regex pattern is valid and should compile")
53
+ });
54
+
55
+ static NAV_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
56
+ Regex::new(r"(?i)\b(?:Skip to main content|Back to top|Main navigation|Site navigation)\b")
57
+ .expect("Navigation words regex pattern is valid and should compile")
58
+ });
59
+ static BREADCRUMB_PATTERN: Lazy<Regex> = Lazy::new(|| {
60
+ Regex::new(r"(?:Home\s*[>»]\s*|[>»]\s*){2,}").expect("Breadcrumb regex pattern is valid and should compile")
61
+ });
62
+ static PAGINATION_PATTERN: Lazy<Regex> = Lazy::new(|| {
63
+ Regex::new(r"(?i)\b(?:Page \d+ of \d+|First page|Last page|Previous page|Next page|^\d+ of \d+$)\b")
64
+ .expect("Pagination regex pattern is valid and should compile")
65
+ });
66
+
67
+ static SENTENCE_DETECT: Lazy<Regex> =
68
+ Lazy::new(|| Regex::new(r"[.!?]\s+[A-Z]").expect("Sentence detection regex pattern is valid and should compile"));
69
+ static PUNCTUATION_DETECT: Lazy<Regex> =
70
+ Lazy::new(|| Regex::new(r"[.!?]").expect("Punctuation detection regex pattern is valid and should compile"));
71
+
72
+ static WHITESPACE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
73
+ Regex::new(r"[ \t\f\v\r\xa0\u{2000}-\u{200b}\u{2028}\u{2029}\u{3000}]+")
74
+ .expect("Whitespace normalization regex pattern is valid and should compile")
75
+ });
76
+ static NEWLINE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
77
+ Regex::new(r"\n\s*\n\s*\n+").expect("Newline normalization regex pattern is valid and should compile")
78
+ });
79
+ static NEWLINE_CLEANUP: Lazy<Regex> =
80
+ Lazy::new(|| Regex::new(r"\n+").expect("Newline cleanup regex pattern is valid and should compile"));
81
+
82
+ #[inline]
83
+ fn sum_match_lengths(text: &str, pattern: &Regex) -> usize {
84
+ pattern.find_iter(text).map(|m| m.len()).sum()
85
+ }
86
+
87
+ fn chain_replacements<'a>(mut text: Cow<'a, str>, replacements: &[(&Regex, &str)]) -> Cow<'a, str> {
88
+ for (pattern, replacement) in replacements {
89
+ if pattern.is_match(&text) {
90
+ text = Cow::Owned(pattern.replace_all(&text, *replacement).into_owned());
91
+ }
92
+ }
93
+ text
94
+ }
95
+
96
+ #[inline]
97
+ fn replace_with_if_matches<'a, F>(text: &'a str, pattern: &Regex, replacer: F) -> Cow<'a, str>
98
+ where
99
+ F: FnMut(&regex::Captures) -> String,
100
+ {
101
+ if pattern.is_match(text) {
102
+ Cow::Owned(pattern.replace_all(text, replacer).into_owned())
103
+ } else {
104
+ Cow::Borrowed(text)
105
+ }
106
+ }
107
+
108
+ /// Compute a heuristic score (0.0–1.0) describing how clean the extracted text is.
109
+ ///
110
+ /// The scoring pipeline rewards well-structured prose while penalising OCR artefacts,
111
+ /// embedded scripts, and navigation chrome. Supplying document metadata allows the
112
+ /// function to include contextual bonuses.
113
+ ///
114
+ /// ```rust
115
+ /// use ahash::AHashMap;
116
+ /// use kreuzberg::utils::quality::calculate_quality_score;
117
+ ///
118
+ /// let text = "Executive Summary\n===================\nKreuzberg extracts documents quickly.";
119
+ /// let score = calculate_quality_score(text, None);
120
+ /// assert!(score > 0.7);
121
+ /// ```
122
+ pub fn calculate_quality_score(text: &str, metadata: Option<&AHashMap<String, String>>) -> f64 {
123
+ if text.is_empty() || text.trim().is_empty() {
124
+ return 0.0;
125
+ }
126
+
127
+ let total_chars = text.len() as f64;
128
+
129
+ if text.len() < MIN_TEXT_LENGTH {
130
+ return 0.1;
131
+ }
132
+
133
+ let mut score = 1.0;
134
+
135
+ if text.len() > LARGE_TEXT_LENGTH {
136
+ let ocr_penalty = calculate_ocr_penalty(text, total_chars);
137
+ let script_penalty = calculate_script_penalty(text, total_chars);
138
+ let nav_penalty = calculate_navigation_penalty(text, total_chars);
139
+ let structure_bonus = calculate_structure_bonus(text);
140
+
141
+ score -= ocr_penalty * OCR_PENALTY_WEIGHT;
142
+ score -= script_penalty * SCRIPT_PENALTY_WEIGHT;
143
+ score -= nav_penalty * NAV_PENALTY_WEIGHT;
144
+ score += structure_bonus * STRUCTURE_BONUS_WEIGHT;
145
+ } else {
146
+ score -= calculate_ocr_penalty(text, total_chars) * OCR_PENALTY_WEIGHT;
147
+ score += calculate_structure_bonus(text) * STRUCTURE_BONUS_WEIGHT;
148
+ }
149
+
150
+ if let Some(metadata) = metadata {
151
+ score += calculate_metadata_bonus(metadata) * METADATA_BONUS_WEIGHT;
152
+ }
153
+
154
+ score.clamp(0.0, 1.0)
155
+ }
156
+
157
+ #[inline]
158
+ fn calculate_ocr_penalty(text: &str, total_chars: f64) -> f64 {
159
+ if total_chars == 0.0 {
160
+ return 0.0;
161
+ }
162
+
163
+ if !text.contains(" ") && !text.contains("...") {
164
+ return 0.0;
165
+ }
166
+
167
+ let artifact_chars = sum_match_lengths(text, &SCATTERED_CHARS_PATTERN)
168
+ + sum_match_lengths(text, &REPEATED_PUNCT_PATTERN)
169
+ + count_non_table_dash_artifacts(text)
170
+ + sum_match_lengths(text, &ISOLATED_PUNCT_PATTERN)
171
+ + sum_match_lengths(text, &MALFORMED_WORDS_PATTERN)
172
+ + sum_match_lengths(text, &EXCESSIVE_WHITESPACE_PATTERN);
173
+
174
+ (artifact_chars as f64 / total_chars).min(1.0)
175
+ }
176
+
177
+ #[inline]
178
+ fn count_non_table_dash_artifacts(text: &str) -> usize {
179
+ let mut artifact_count = 0;
180
+
181
+ for line in text.lines() {
182
+ let trimmed = line.trim();
183
+ let is_table_separator = trimmed.starts_with('|')
184
+ && trimmed.ends_with('|')
185
+ && trimmed
186
+ .chars()
187
+ .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
188
+
189
+ if !is_table_separator {
190
+ for m in DASH_PATTERN.find_iter(line) {
191
+ artifact_count += m.len();
192
+ }
193
+ }
194
+ }
195
+
196
+ artifact_count
197
+ }
198
+
199
+ #[inline]
200
+ fn calculate_script_penalty(text: &str, total_chars: f64) -> f64 {
201
+ if total_chars == 0.0 {
202
+ return 0.0;
203
+ }
204
+
205
+ if !text.contains("function") && !text.contains("<script") && !text.contains("<style") {
206
+ return 0.0;
207
+ }
208
+
209
+ let script_chars = sum_match_lengths(text, &JS_FUNCTION_PATTERN)
210
+ + sum_match_lengths(text, &CSS_RULES_PATTERN)
211
+ + sum_match_lengths(text, &SCRIPT_TAG_PATTERN)
212
+ + sum_match_lengths(text, &STYLE_TAG_PATTERN);
213
+
214
+ (script_chars as f64 / total_chars).min(1.0)
215
+ }
216
+
217
+ #[inline]
218
+ fn calculate_navigation_penalty(text: &str, total_chars: f64) -> f64 {
219
+ if total_chars == 0.0 {
220
+ return 0.0;
221
+ }
222
+
223
+ let nav_chars = sum_match_lengths(text, &NAV_WORDS_PATTERN)
224
+ + sum_match_lengths(text, &BREADCRUMB_PATTERN)
225
+ + sum_match_lengths(text, &PAGINATION_PATTERN);
226
+
227
+ (nav_chars as f64 / total_chars).min(1.0)
228
+ }
229
+
230
+ #[inline]
231
+ fn calculate_structure_bonus(text: &str) -> f64 {
232
+ if text.is_empty() {
233
+ return 0.0;
234
+ }
235
+
236
+ let sentence_count = SENTENCE_DETECT.find_iter(text).count() as f64;
237
+ let paragraph_count = text.matches("\n\n").count() as f64 + 1.0;
238
+ let words = text.split_whitespace().count() as f64;
239
+
240
+ if words == 0.0 {
241
+ return 0.0;
242
+ }
243
+
244
+ let avg_words_per_sentence = words / sentence_count.max(1.0);
245
+ let avg_words_per_paragraph = words / paragraph_count.max(1.0);
246
+
247
+ let mut structure_score: f64 = 0.0;
248
+
249
+ if (MIN_SENTENCE_WORDS..=MAX_SENTENCE_WORDS).contains(&avg_words_per_sentence) {
250
+ structure_score += 0.3;
251
+ }
252
+
253
+ if (MIN_PARAGRAPH_WORDS..=MAX_PARAGRAPH_WORDS).contains(&avg_words_per_paragraph) {
254
+ structure_score += 0.3;
255
+ }
256
+
257
+ if paragraph_count > 1.0 {
258
+ structure_score += 0.2;
259
+ }
260
+
261
+ if PUNCTUATION_DETECT.is_match(text) {
262
+ structure_score += 0.2;
263
+ }
264
+
265
+ structure_score.min(1.0)
266
+ }
267
+
268
+ #[inline]
269
+ fn calculate_metadata_bonus(metadata: &AHashMap<String, String>) -> f64 {
270
+ const IMPORTANT_FIELDS: &[&str] = &["title", "author", "subject", "description", "keywords"];
271
+
272
+ let present_fields = IMPORTANT_FIELDS
273
+ .iter()
274
+ .filter(|&&field| metadata.contains_key(field))
275
+ .count();
276
+
277
+ present_fields as f64 / IMPORTANT_FIELDS.len() as f64
278
+ }
279
+
280
+ /// Apply the quality heuristics and return a cleaned representation of the text.
281
+ ///
282
+ /// This function normalises whitespace, removes navigation boilerplate, and strips
283
+ /// repeated punctuation that commonly appears in OCR output.
284
+ pub fn clean_extracted_text(text: &str) -> String {
285
+ if text.is_empty() {
286
+ return String::new();
287
+ }
288
+
289
+ let mut working_text = Cow::Borrowed(text);
290
+
291
+ working_text = clean_scripts(working_text);
292
+
293
+ working_text = clean_ocr_artifacts_cow(working_text);
294
+
295
+ working_text = clean_navigation_elements_cow(working_text);
296
+
297
+ working_text = clean_repeated_punctuation_cow(working_text);
298
+
299
+ working_text = normalize_whitespace_cow(working_text);
300
+
301
+ working_text.trim().to_string()
302
+ }
303
+
304
+ #[inline]
305
+ fn clean_scripts<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
306
+ let script_replacements = [
307
+ (&*SCRIPT_TAG_PATTERN, " "),
308
+ (&*STYLE_TAG_PATTERN, " "),
309
+ (&*JS_FUNCTION_PATTERN, " "),
310
+ (&*CSS_RULES_PATTERN, " "),
311
+ ];
312
+ chain_replacements(text, &script_replacements)
313
+ }
314
+
315
+ #[inline]
316
+ fn normalize_whitespace_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
317
+ if let Some(fast) = normalize_whitespace_ascii(text.as_ref()) {
318
+ return Cow::Owned(fast);
319
+ }
320
+
321
+ let mut result = text;
322
+
323
+ if WHITESPACE_NORMALIZE.is_match(&result) {
324
+ result = Cow::Owned(WHITESPACE_NORMALIZE.replace_all(&result, " ").into_owned());
325
+ }
326
+
327
+ if NEWLINE_NORMALIZE.is_match(&result) {
328
+ result = Cow::Owned(NEWLINE_NORMALIZE.replace_all(&result, "\n\n").into_owned());
329
+ }
330
+
331
+ result
332
+ }
333
+
334
+ #[inline]
335
+ fn clean_repeated_punctuation_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
336
+ if let Some(cleaned) = clean_repeated_punctuation_ascii(text.as_ref()) {
337
+ return Cow::Owned(cleaned);
338
+ }
339
+
340
+ if REPEATED_PUNCT_PATTERN.is_match(&text) {
341
+ Cow::Owned(
342
+ REPEATED_PUNCT_PATTERN
343
+ .replace_all(&text, |caps: &regex::Captures<'_>| {
344
+ let ch = caps.get(0).and_then(|m| m.as_str().chars().next()).unwrap_or('.');
345
+ ch.to_string()
346
+ })
347
+ .into_owned(),
348
+ )
349
+ } else {
350
+ text
351
+ }
352
+ }
353
+
354
/// ASCII fast path for collapsing repeated punctuation (`!`, `?`, `.`, `,`).
///
/// Returns `Some(cleaned)` when at least one run was collapsed, and `None`
/// when the input is non-ASCII or nothing changed, so the caller can fall
/// back to the regex-based path (or keep the original borrow).
fn clean_repeated_punctuation_ascii(text: &str) -> Option<String> {
    if !text.is_ascii() {
        return None;
    }

    let bytes = text.as_bytes();
    let mut result = Vec::with_capacity(bytes.len());
    let mut changed = false;
    let mut offset = 0;

    while offset < bytes.len() {
        let remaining = &bytes[offset..];
        if let Some(next) = find_next_ascii_punctuation(remaining) {
            // Copy the non-punctuation prefix verbatim.
            if next > 0 {
                result.extend_from_slice(&remaining[..next]);
                offset += next;
            }

            if offset >= bytes.len() {
                break;
            }

            // Keep the first punctuation byte, then swallow any punctuation
            // immediately following it. NOTE(review): a mixed run such as
            // "!?" also collapses to its first character here — confirm this
            // matches the regex path's behaviour.
            let current = bytes[offset];
            result.push(current);
            let mut end = offset + 1;
            while end < bytes.len() && matches!(bytes[end], b'!' | b'?' | b'.' | b',') {
                changed = true;
                end += 1;
            }
            offset = end;
        } else {
            // No punctuation left anywhere: copy the tail and stop.
            result.extend_from_slice(remaining);
            break;
        }
    }

    // Input was ASCII, so the rebuilt bytes are always valid UTF-8.
    if changed { String::from_utf8(result).ok() } else { None }
}
392
+
393
+ #[inline]
394
+ fn find_next_ascii_punctuation(bytes: &[u8]) -> Option<usize> {
395
+ let primary = memchr3(b'!', b'?', b'.', bytes);
396
+ let comma = memchr(b',', bytes);
397
+ match (primary, comma) {
398
+ (Some(a), Some(b)) => Some(a.min(b)),
399
+ (Some(a), None) => Some(a),
400
+ (None, Some(b)) => Some(b),
401
+ (None, None) => None,
402
+ }
403
+ }
404
+
405
/// ASCII fast path for whitespace normalisation.
///
/// Collapses runs of horizontal whitespace (space, tab, CR, VT, FF) to a
/// single space, drops horizontal whitespace adjacent to newlines, and caps
/// blank-line runs: three or more consecutive newlines become exactly two.
///
/// Returns `Some(normalized)` only when something actually changed, and
/// `None` for non-ASCII input or already-normalised text, so the caller can
/// keep its original borrow.
///
/// NOTE(review): `\r` is treated as horizontal whitespace, so a CRLF pair
/// becomes a space followed by `\n` — confirm this matches the regex path.
#[inline]
pub(crate) fn normalize_whitespace_ascii(text: &str) -> Option<String> {
    if !text.is_ascii() {
        return None;
    }

    let bytes = text.as_bytes();
    let mut result = Vec::with_capacity(bytes.len());
    let mut changed = false;
    let mut i = 0;
    let len = bytes.len();

    while i < len {
        match bytes[i] {
            // Horizontal whitespace run (space, tab, CR, vertical tab, form
            // feed): emit a single space for the entire run.
            b' ' | b'\t' | b'\r' | 0x0B | 0x0C => {
                let mut j = i + 1;
                while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
                    j += 1;
                }
                // Changed if the run was longer than one byte, or the single
                // byte was not already a plain space.
                if j - i > 1 || bytes[i] != b' ' {
                    changed = true;
                }
                result.push(b' ');
                i = j;
            }
            b'\n' => {
                // Swallow horizontal whitespace directly after the newline.
                let mut j = i + 1;
                while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
                    j += 1;
                    changed = true;
                }

                // Count consecutive newlines, skipping horizontal whitespace
                // between them (a line containing only spaces still counts
                // as blank).
                let mut newline_count = 1;
                while j < len && bytes[j] == b'\n' {
                    newline_count += 1;
                    j += 1;

                    while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
                        j += 1;
                        changed = true;
                    }
                }

                // Cap blank runs: 3+ newlines collapse to a single paragraph
                // break.
                if newline_count >= 3 {
                    result.extend_from_slice(b"\n\n");
                    changed = true;
                } else {
                    result.extend(std::iter::repeat_n(b'\n', newline_count));
                }

                i = j;
            }
            _ => {
                result.push(bytes[i]);
                i += 1;
            }
        }
    }

    // The input was ASCII, so `result` is valid UTF-8; the fallback branch
    // is effectively unreachable but keeps the function panic-free.
    let normalized = String::from_utf8(result).unwrap_or_else(|_| text.to_string());

    if changed { Some(normalized) } else { None }
}
468
+
469
+ #[inline]
470
+ fn clean_ocr_artifacts_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
471
+ let result = if let Some(fixed) = collapse_scattered_ascii(&text) {
472
+ Cow::Owned(fixed)
473
+ } else if SCATTERED_CHARS_PATTERN.is_match(&text) {
474
+ Cow::Owned(
475
+ replace_with_if_matches(&text, &SCATTERED_CHARS_PATTERN, |caps: &regex::Captures| {
476
+ caps[0].chars().filter(|c| !c.is_whitespace()).collect::<String>()
477
+ })
478
+ .into_owned(),
479
+ )
480
+ } else {
481
+ text
482
+ };
483
+
484
+ let result = clean_dashes_preserve_tables(result);
485
+
486
+ let ocr_replacements = [
487
+ (&*REPEATED_PUNCT_PATTERN, "..."),
488
+ (&*ISOLATED_PUNCT_PATTERN, " "),
489
+ (&*MALFORMED_WORDS_PATTERN, " "),
490
+ (&*EXCESSIVE_WHITESPACE_PATTERN, " "),
491
+ ];
492
+
493
+ chain_replacements(result, &ocr_replacements)
494
+ }
495
+
496
+ #[inline]
497
+ fn clean_dashes_preserve_tables<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
498
+ if !DASH_PATTERN.is_match(&text) {
499
+ return text;
500
+ }
501
+
502
+ let mut result = String::with_capacity(text.len());
503
+ let lines: Vec<&str> = text.lines().collect();
504
+
505
+ for (i, line) in lines.iter().enumerate() {
506
+ if i > 0 {
507
+ result.push('\n');
508
+ }
509
+
510
+ let trimmed = line.trim();
511
+ let is_table_separator = trimmed.starts_with('|')
512
+ && trimmed.ends_with('|')
513
+ && trimmed
514
+ .chars()
515
+ .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
516
+
517
+ if is_table_separator {
518
+ result.push_str(line);
519
+ } else {
520
+ let cleaned_line = DASH_PATTERN.replace_all(line, "...");
521
+ result.push_str(&cleaned_line);
522
+ }
523
+ }
524
+
525
+ Cow::Owned(result)
526
+ }
527
+
528
+ #[inline]
529
+ fn clean_navigation_elements_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
530
+ let nav_replacements = [
531
+ (&*NAV_WORDS_PATTERN, " "),
532
+ (&*BREADCRUMB_PATTERN, " "),
533
+ (&*PAGINATION_PATTERN, " "),
534
+ ];
535
+
536
+ chain_replacements(text, &nav_replacements)
537
+ }
538
+
539
/// ASCII fast path that rejoins "s c a t t e r e d" letter sequences.
///
/// Returns `Some(fixed)` (letters lowercased, joined, followed by a space)
/// when at least one scattered run was collapsed; `None` for non-ASCII input
/// or when nothing qualified, so the caller can fall back to the regex pass.
#[inline]
pub(crate) fn collapse_scattered_ascii(text: &str) -> Option<String> {
    if !text.is_ascii() {
        return None;
    }

    let bytes = text.as_bytes();
    let mut out = Vec::with_capacity(bytes.len());
    let mut rewrote = false;
    let mut pos = 0;

    while pos < bytes.len() {
        if bytes[pos].is_ascii_alphabetic() {
            // Measure a run of letters optionally separated by whitespace.
            let mut cursor = pos + 1;
            let mut letters = 1usize;
            while cursor < bytes.len() {
                let b = bytes[cursor];
                if b.is_ascii_alphabetic() {
                    letters += 1;
                } else if !b.is_ascii_whitespace() {
                    break;
                }
                cursor += 1;
            }

            // Collapse only when there are at least three letters and the
            // span is wide enough that, on average, every letter carries a
            // separator — i.e. the run is genuinely scattered.
            if letters >= 3 && cursor - pos >= letters * 2 - 1 {
                rewrote = true;
                for &b in &bytes[pos..cursor] {
                    if b.is_ascii_alphabetic() {
                        out.push(b.to_ascii_lowercase());
                    }
                }
                out.push(b' ');
                pos = cursor;
                continue;
            }
        }

        out.push(bytes[pos]);
        pos += 1;
    }

    // Input was ASCII, so the rebuilt bytes are valid UTF-8.
    if rewrote { String::from_utf8(out).ok() } else { None }
}
584
+
585
+ /// Collapse redundant whitespace while preserving paragraph boundaries.
586
+ pub fn normalize_spaces(text: &str) -> String {
587
+ if text.is_empty() || text.trim().is_empty() {
588
+ return String::new();
589
+ }
590
+
591
+ let mut result = String::with_capacity(text.len());
592
+
593
+ let mut first = true;
594
+ for paragraph in text.split("\n\n") {
595
+ let trimmed = paragraph.trim();
596
+ if trimmed.is_empty() {
597
+ continue;
598
+ }
599
+
600
+ if !first {
601
+ result.push_str("\n\n");
602
+ }
603
+ first = false;
604
+
605
+ let collapsed = if let Some(fast) = normalize_whitespace_ascii(paragraph) {
606
+ Cow::Owned(fast)
607
+ } else {
608
+ Cow::Owned(WHITESPACE_NORMALIZE.replace_all(paragraph, " ").into_owned())
609
+ };
610
+
611
+ let cleaned = NEWLINE_CLEANUP.replace_all(&collapsed, "\n");
612
+
613
+ let mut first_line = true;
614
+ for line in cleaned.split('\n') {
615
+ let line = line.trim();
616
+ if !line.is_empty() {
617
+ if !first_line {
618
+ result.push('\n');
619
+ }
620
+ result.push_str(line);
621
+ first_line = false;
622
+ }
623
+ }
624
+ }
625
+
626
+ result
627
+ }
628
+
629
#[cfg(all(test, feature = "quality"))]
mod tests {
    //! Unit tests for the quality scoring and text-cleaning helpers.
    //!
    //! NOTE(review): several string fixtures below (e.g. "excessive spaces")
    //! appear whitespace-collapsed in this rendering — confirm the multi-space
    //! runs survived against the original file.
    use super::*;

    // --- calculate_quality_score ---

    #[test]
    fn test_calculate_quality_score_empty_text() {
        assert_eq!(calculate_quality_score("", None), 0.0);
        assert_eq!(calculate_quality_score(" ", None), 0.0);
        assert_eq!(calculate_quality_score("\n\n\n", None), 0.0);
    }

    #[test]
    fn test_calculate_quality_score_short_text() {
        // Below-minimum-length text gets the fixed floor score.
        let text = "Hello";
        let score = calculate_quality_score(text, None);
        assert_eq!(score, 0.1);
    }

    #[test]
    fn test_calculate_quality_score_normal_text() {
        let text =
            "This is a normal sentence with proper punctuation. It has multiple sentences. And proper structure.";
        let score = calculate_quality_score(text, None);
        assert!(score > 0.5);
        assert!(score <= 1.0);
    }

    #[test]
    fn test_clean_extracted_text_empty() {
        assert_eq!(clean_extracted_text(""), "");
        assert_eq!(clean_extracted_text(" "), "");
    }

    #[test]
    fn test_clean_extracted_text_removes_scripts() {
        let text = "Before <script>alert('test');</script> After";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.contains("<script"));
        assert!(cleaned.contains("Before"));
        assert!(cleaned.contains("After"));
    }

    #[test]
    fn test_normalize_spaces_empty() {
        assert_eq!(normalize_spaces(""), "");
        assert_eq!(normalize_spaces(" "), "");
    }

    #[test]
    fn test_normalize_spaces_single_paragraph() {
        let text = "This is a test";
        let normalized = normalize_spaces(text);
        assert_eq!(normalized, "This is a test");
    }

    #[test]
    fn test_calculate_quality_score_with_metadata() {
        // Metadata presence should contribute a bonus without breaking the
        // 0.0..=1.0 clamp.
        let text = "This is a normal text with proper structure.";
        let mut metadata = AHashMap::new();
        metadata.insert("title".to_string(), "Test Title".to_string());
        metadata.insert("author".to_string(), "Test Author".to_string());

        let score = calculate_quality_score(text, Some(&metadata));
        assert!(score > 0.0);
        assert!(score <= 1.0);
    }

    // --- penalty helpers ---

    #[test]
    fn test_calculate_ocr_penalty_clean_text() {
        let text = "This is clean text without artifacts";
        let penalty = calculate_ocr_penalty(text, text.len() as f64);
        assert_eq!(penalty, 0.0);
    }

    #[test]
    fn test_calculate_ocr_penalty_with_artifacts() {
        let text = "Text with excessive spaces and ....... dots";
        let penalty = calculate_ocr_penalty(text, text.len() as f64);
        assert!(penalty > 0.0);
        assert!(penalty <= 1.0);
    }

    #[test]
    fn test_calculate_script_penalty_clean_text() {
        let text = "This is clean text without scripts";
        let penalty = calculate_script_penalty(text, text.len() as f64);
        assert_eq!(penalty, 0.0);
    }

    #[test]
    fn test_calculate_script_penalty_with_js() {
        let text = "function test() { return 42; }";
        let penalty = calculate_script_penalty(text, text.len() as f64);
        assert!(penalty > 0.0);
    }

    #[test]
    fn test_calculate_navigation_penalty_clean_text() {
        let text = "This is clean text without navigation";
        let penalty = calculate_navigation_penalty(text, text.len() as f64);
        assert_eq!(penalty, 0.0);
    }

    #[test]
    fn test_calculate_navigation_penalty_with_nav() {
        let text = "Skip to main content and Back to top links everywhere";
        let penalty = calculate_navigation_penalty(text, text.len() as f64);
        assert!(penalty > 0.0);
    }

    // --- bonus helpers ---

    #[test]
    fn test_calculate_structure_bonus_empty() {
        assert_eq!(calculate_structure_bonus(""), 0.0);
    }

    #[test]
    fn test_calculate_structure_bonus_well_structured() {
        let text = "This is a sentence. This is another sentence.\n\nNew paragraph here. More content.";
        let bonus = calculate_structure_bonus(text);
        assert!(bonus > 0.0);
        assert!(bonus <= 1.0);
    }

    #[test]
    fn test_calculate_metadata_bonus_empty() {
        let metadata = AHashMap::new();
        let bonus = calculate_metadata_bonus(&metadata);
        assert_eq!(bonus, 0.0);
    }

    #[test]
    fn test_calculate_metadata_bonus_full() {
        // All five important fields present -> maximum bonus.
        let mut metadata = AHashMap::new();
        metadata.insert("title".to_string(), "Title".to_string());
        metadata.insert("author".to_string(), "Author".to_string());
        metadata.insert("subject".to_string(), "Subject".to_string());
        metadata.insert("description".to_string(), "Description".to_string());
        metadata.insert("keywords".to_string(), "Keywords".to_string());

        let bonus = calculate_metadata_bonus(&metadata);
        assert_eq!(bonus, 1.0);
    }

    // --- cleaning passes ---

    #[test]
    fn test_clean_extracted_text_removes_styles() {
        let text = "Before <style>.class { color: red; }</style> After";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.contains("<style"));
        assert!(cleaned.contains("Before"));
        assert!(cleaned.contains("After"));
    }

    #[test]
    fn test_clean_extracted_text_ocr_artifacts() {
        let text = "Text with excessive spaces";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.contains(" "));
    }

    #[test]
    fn test_clean_extracted_text_navigation() {
        let text = "Content Skip to main content more content";
        let cleaned = clean_extracted_text(text);
        assert!(cleaned.contains("Content"));
        assert!(cleaned.contains("more content"));
    }

    #[test]
    fn test_clean_repeated_punctuation_ascii_helper() {
        let input = "Wow!!! Really??? Sure...";
        let cleaned = clean_repeated_punctuation_ascii(input).expect("Should collapse punctuation");
        assert_eq!(cleaned, "Wow! Really? Sure.");
    }

    #[test]
    fn test_clean_repeated_punctuation_non_ascii_passthrough() {
        // Non-ASCII input must skip the fast path entirely.
        assert!(clean_repeated_punctuation_ascii("¿Qué tal?").is_none());
    }

    #[test]
    fn test_normalize_spaces_multiple_paragraphs() {
        let text = "First paragraph.\n\nSecond paragraph.";
        let normalized = normalize_spaces(text);
        assert!(normalized.contains("\n\n"));
    }

    #[test]
    fn test_normalize_spaces_preserves_paragraphs() {
        let text = "Para 1\n\n\n\nPara 2";
        let normalized = normalize_spaces(text);
        assert_eq!(normalized, "Para 1\n\nPara 2");
    }

    // --- dash/table handling ---

    #[test]
    fn test_count_non_table_dash_artifacts() {
        let text = "Some text --- with dashes";
        let count = count_non_table_dash_artifacts(text);
        assert!(count > 0);
    }

    #[test]
    fn test_count_non_table_dash_artifacts_preserves_tables() {
        let text = "| Header |\n|--------|\n| Data |";
        let count = count_non_table_dash_artifacts(text);
        assert_eq!(count, 0);
    }

    #[test]
    fn test_clean_dashes_preserve_tables_simple() {
        let text = Cow::Borrowed("| Col1 |\n|------|\n| Data |");
        let result = clean_dashes_preserve_tables(text);
        assert!(result.contains("|------"));
    }

    #[test]
    fn test_clean_dashes_preserve_tables_replaces_non_table() {
        let text = Cow::Borrowed("Text with --- dashes");
        let result = clean_dashes_preserve_tables(text);
        assert!(result.contains("..."));
        assert!(!result.contains("---"));
    }

    #[test]
    fn test_sum_match_lengths() {
        let text = "test ... test ... test";
        let count = sum_match_lengths(text, &REPEATED_PUNCT_PATTERN);
        assert!(count > 0);
    }

    // --- score clamping ---

    #[test]
    fn test_quality_score_large_text_with_ocr_issues() {
        let text = "a".repeat(2000) + " " + &"b".repeat(2000);
        let score = calculate_quality_score(&text, None);
        assert!(score >= 0.0);
        assert!(score <= 1.0);
    }

    #[test]
    fn test_quality_score_clamped_to_range() {
        let perfect_text = "This is perfect text. ".repeat(100);
        let score = calculate_quality_score(&perfect_text, None);
        assert!(score >= 0.0);
        assert!(score <= 1.0);
    }

    // --- scattered-character collapsing ---

    #[test]
    fn test_clean_extracted_text_scattered_chars() {
        let text = "a b c scattered";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.is_empty());
    }

    #[cfg_attr(coverage, ignore = "coverage instrumentation perturbs ASCII fast path heuristics")]
    #[test]
    fn test_collapse_scattered_ascii_trigger() {
        let original = "S p a c e d";
        let collapsed = collapse_scattered_ascii(original).expect("fast path should trigger");
        assert_eq!(collapsed.trim(), "spaced");
    }

    #[test]
    fn test_collapse_scattered_ascii_non_ascii() {
        assert!(collapse_scattered_ascii("מ ש ה ו").is_none());
    }

    // --- ASCII whitespace fast path ---

    #[test]
    fn test_normalize_whitespace_ascii_spaces() {
        let input = "Hello \tWorld\rWelcome";
        let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
        assert_eq!(normalized, "Hello World Welcome");
    }

    #[test]
    fn test_normalize_whitespace_ascii_newlines() {
        let input = "Line1\n \n\n \nLine2";
        let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
        assert_eq!(normalized, "Line1\n\nLine2");
    }

    #[test]
    fn test_normalize_whitespace_ascii_no_change() {
        // Already-normalised text returns None so callers keep the borrow.
        assert!(normalize_whitespace_ascii("Clean text").is_none());
    }

    #[test]
    fn test_normalize_whitespace_ascii_non_ascii() {
        assert!(normalize_whitespace_ascii("שלום שלום").is_none());
    }

    #[test]
    fn test_normalize_spaces_ascii_fast_path() {
        let input = "Hello world\n\nSecond line";
        let normalized = normalize_spaces(input);
        assert_eq!(normalized, "Hello world\n\nSecond line");
    }

    // --- Cow-returning wrappers ---

    #[test]
    fn test_normalize_whitespace_cow_no_changes() {
        let text = Cow::Borrowed("normaltext");
        let result = normalize_whitespace_cow(text);
        assert_eq!(result.as_ref(), "normaltext");
    }

    #[test]
    fn test_normalize_whitespace_cow_with_changes() {
        let text = Cow::Borrowed("text with spaces");
        let result = normalize_whitespace_cow(text);
        assert!(matches!(result, Cow::Owned(_)));
    }

    #[test]
    fn test_clean_scripts_no_scripts() {
        // Untouched input should keep its borrow (no allocation).
        let text = Cow::Borrowed("clean text");
        let result = clean_scripts(text);
        assert!(matches!(result, Cow::Borrowed(_)));
    }

    #[test]
    fn test_clean_scripts_with_script_tag() {
        let text = Cow::Borrowed("<script>code</script>");
        let result = clean_scripts(text);
        assert!(!result.contains("<script"));
    }

    #[test]
    fn test_quality_constants() {
        assert_eq!(MIN_TEXT_LENGTH, 10);
        assert_eq!(LARGE_TEXT_LENGTH, 1000);
        assert_eq!(OCR_PENALTY_WEIGHT, 0.3);
    }
}