kreuzberg 4.0.0.rc1 → 4.0.0.rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (342) hide show
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -8
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -534
  5. data/.rubocop.yml +538 -0
  6. data/Gemfile +8 -9
  7. data/Gemfile.lock +9 -109
  8. data/README.md +426 -421
  9. data/Rakefile +25 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -340
  12. data/ext/kreuzberg_rb/extconf.rb +45 -35
  13. data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
  14. data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
  15. data/ext/kreuzberg_rb/native/README.md +425 -425
  16. data/ext/kreuzberg_rb/native/build.rs +15 -17
  17. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  18. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  19. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  20. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  21. data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
  22. data/extconf.rb +28 -28
  23. data/kreuzberg.gemspec +148 -105
  24. data/lib/kreuzberg/api_proxy.rb +142 -142
  25. data/lib/kreuzberg/cache_api.rb +46 -45
  26. data/lib/kreuzberg/cli.rb +55 -55
  27. data/lib/kreuzberg/cli_proxy.rb +127 -127
  28. data/lib/kreuzberg/config.rb +691 -684
  29. data/lib/kreuzberg/error_context.rb +32 -0
  30. data/lib/kreuzberg/errors.rb +118 -50
  31. data/lib/kreuzberg/extraction_api.rb +85 -84
  32. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  33. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  34. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  35. data/lib/kreuzberg/result.rb +216 -216
  36. data/lib/kreuzberg/setup_lib_path.rb +80 -79
  37. data/lib/kreuzberg/validator_protocol.rb +89 -89
  38. data/lib/kreuzberg/version.rb +5 -5
  39. data/lib/kreuzberg.rb +103 -82
  40. data/sig/kreuzberg/internal.rbs +184 -184
  41. data/sig/kreuzberg.rbs +520 -468
  42. data/spec/binding/cache_spec.rb +227 -227
  43. data/spec/binding/cli_proxy_spec.rb +85 -87
  44. data/spec/binding/cli_spec.rb +55 -54
  45. data/spec/binding/config_spec.rb +345 -345
  46. data/spec/binding/config_validation_spec.rb +283 -283
  47. data/spec/binding/error_handling_spec.rb +213 -213
  48. data/spec/binding/errors_spec.rb +66 -66
  49. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  50. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  51. data/spec/binding/plugins/validator_spec.rb +274 -274
  52. data/spec/fixtures/config.toml +39 -39
  53. data/spec/fixtures/config.yaml +41 -42
  54. data/spec/fixtures/invalid_config.toml +4 -4
  55. data/spec/smoke/package_spec.rb +178 -178
  56. data/spec/spec_helper.rb +42 -42
  57. data/vendor/kreuzberg/Cargo.toml +204 -134
  58. data/vendor/kreuzberg/README.md +175 -175
  59. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
  60. data/vendor/kreuzberg/build.rs +474 -460
  61. data/vendor/kreuzberg/src/api/error.rs +81 -81
  62. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  63. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  64. data/vendor/kreuzberg/src/api/server.rs +353 -353
  65. data/vendor/kreuzberg/src/api/types.rs +170 -170
  66. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
  67. data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
  68. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
  69. data/vendor/kreuzberg/src/core/config.rs +1032 -1032
  70. data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
  71. data/vendor/kreuzberg/src/core/io.rs +329 -327
  72. data/vendor/kreuzberg/src/core/mime.rs +605 -615
  73. data/vendor/kreuzberg/src/core/mod.rs +45 -42
  74. data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
  75. data/vendor/kreuzberg/src/embeddings.rs +432 -323
  76. data/vendor/kreuzberg/src/error.rs +431 -431
  77. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  78. data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
  79. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  80. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  81. data/vendor/kreuzberg/src/extraction/html.rs +553 -553
  82. data/vendor/kreuzberg/src/extraction/image.rs +368 -368
  83. data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
  84. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
  85. data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
  86. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  87. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  88. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  89. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
  90. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
  91. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
  92. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  93. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  94. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  95. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  96. data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
  97. data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
  98. data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
  99. data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
  100. data/vendor/kreuzberg/src/extractors/email.rs +143 -129
  101. data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
  103. data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
  104. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
  105. data/vendor/kreuzberg/src/extractors/html.rs +393 -410
  106. data/vendor/kreuzberg/src/extractors/image.rs +198 -195
  107. data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
  108. data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
  109. data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
  110. data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
  111. data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
  112. data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
  113. data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
  114. data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
  115. data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
  116. data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
  117. data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
  118. data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
  119. data/vendor/kreuzberg/src/extractors/security.rs +484 -0
  120. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
  121. data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
  122. data/vendor/kreuzberg/src/extractors/text.rs +260 -242
  123. data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
  124. data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
  125. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  126. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  127. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  128. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  129. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  130. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  131. data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
  132. data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
  133. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  134. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  135. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
  136. data/vendor/kreuzberg/src/lib.rs +105 -102
  137. data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
  138. data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
  139. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  140. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  141. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  142. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  143. data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
  144. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  145. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  146. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
  147. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  148. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  149. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  150. data/vendor/kreuzberg/src/panic_context.rs +154 -0
  151. data/vendor/kreuzberg/src/pdf/error.rs +122 -122
  152. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  153. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
  154. data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
  155. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  156. data/vendor/kreuzberg/src/pdf/table.rs +393 -420
  157. data/vendor/kreuzberg/src/pdf/text.rs +158 -161
  158. data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
  159. data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
  160. data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
  161. data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
  162. data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
  163. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  164. data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
  165. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  166. data/vendor/kreuzberg/src/text/mod.rs +19 -19
  167. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  168. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  169. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  170. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  171. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  172. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  173. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  174. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  175. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  176. data/vendor/kreuzberg/src/types.rs +903 -873
  177. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  178. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  179. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  180. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  181. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  182. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  183. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  184. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  185. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  186. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  187. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  188. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  189. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  190. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  191. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  192. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  193. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  194. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  195. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  196. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  198. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  199. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  200. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  201. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  202. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  203. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  204. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  205. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  206. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  207. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  208. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  209. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  210. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  211. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  212. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  213. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  214. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  215. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  216. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  217. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  218. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  219. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  220. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  221. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  222. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  223. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  224. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  225. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  226. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  227. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  228. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  229. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  230. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  231. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  232. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  233. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  234. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  235. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  236. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  237. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  238. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  239. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  240. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  241. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  242. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  243. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  244. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
  245. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  246. data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
  247. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
  248. data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
  249. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
  250. data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
  251. data/vendor/kreuzberg/tests/config_features.rs +598 -580
  252. data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
  253. data/vendor/kreuzberg/tests/core_integration.rs +510 -493
  254. data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
  255. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
  256. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
  257. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
  258. data/vendor/kreuzberg/tests/email_integration.rs +325 -325
  259. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
  260. data/vendor/kreuzberg/tests/error_handling.rs +393 -393
  261. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
  262. data/vendor/kreuzberg/tests/format_integration.rs +159 -159
  263. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  264. data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
  265. data/vendor/kreuzberg/tests/image_integration.rs +253 -253
  266. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
  267. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
  268. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
  269. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  270. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  271. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
  272. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
  273. data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
  274. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
  275. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
  276. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
  277. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  278. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
  279. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
  280. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
  281. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
  282. data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
  283. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
  284. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
  285. data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
  286. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  287. data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
  288. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
  289. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
  290. data/vendor/kreuzberg/tests/security_validation.rs +415 -404
  291. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  292. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
  293. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
  294. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
  295. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  296. data/vendor/rb-sys/.cargo-ok +1 -0
  297. data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
  298. data/vendor/rb-sys/Cargo.lock +393 -0
  299. data/vendor/rb-sys/Cargo.toml +70 -0
  300. data/vendor/rb-sys/Cargo.toml.orig +57 -0
  301. data/vendor/rb-sys/LICENSE-APACHE +190 -0
  302. data/vendor/rb-sys/LICENSE-MIT +21 -0
  303. data/vendor/rb-sys/bin/release.sh +21 -0
  304. data/vendor/rb-sys/build/features.rs +108 -0
  305. data/vendor/rb-sys/build/main.rs +246 -0
  306. data/vendor/rb-sys/build/stable_api_config.rs +153 -0
  307. data/vendor/rb-sys/build/version.rs +48 -0
  308. data/vendor/rb-sys/readme.md +36 -0
  309. data/vendor/rb-sys/src/bindings.rs +21 -0
  310. data/vendor/rb-sys/src/hidden.rs +11 -0
  311. data/vendor/rb-sys/src/lib.rs +34 -0
  312. data/vendor/rb-sys/src/macros.rs +371 -0
  313. data/vendor/rb-sys/src/memory.rs +53 -0
  314. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
  315. data/vendor/rb-sys/src/special_consts.rs +31 -0
  316. data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
  317. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
  318. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
  319. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
  320. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
  321. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
  322. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
  323. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
  324. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
  325. data/vendor/rb-sys/src/stable_api.rs +261 -0
  326. data/vendor/rb-sys/src/symbol.rs +31 -0
  327. data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
  328. data/vendor/rb-sys/src/utils.rs +89 -0
  329. data/vendor/rb-sys/src/value_type.rs +7 -0
  330. metadata +90 -95
  331. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  332. data/spec/examples.txt +0 -104
  333. data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
  334. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
  335. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
  336. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
  337. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
  338. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
  339. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
  340. data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
  341. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
  342. data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
@@ -1,959 +1,959 @@
1
- use ahash::AHashMap;
2
- use memchr::{memchr, memchr3};
3
- use once_cell::sync::Lazy;
4
- use regex::Regex;
5
- use std::borrow::Cow;
6
-
7
- // ============================================================================
8
- // ============================================================================
9
-
10
- const OCR_PENALTY_WEIGHT: f64 = 0.3;
11
- const SCRIPT_PENALTY_WEIGHT: f64 = 0.2;
12
- const NAV_PENALTY_WEIGHT: f64 = 0.1;
13
- const STRUCTURE_BONUS_WEIGHT: f64 = 0.2;
14
- const METADATA_BONUS_WEIGHT: f64 = 0.1;
15
-
16
- const MIN_TEXT_LENGTH: usize = 10;
17
- const LARGE_TEXT_LENGTH: usize = 1000;
18
- const MIN_SENTENCE_WORDS: f64 = 10.0;
19
- const MAX_SENTENCE_WORDS: f64 = 30.0;
20
- const MIN_PARAGRAPH_WORDS: f64 = 50.0;
21
- const MAX_PARAGRAPH_WORDS: f64 = 300.0;
22
-
23
- static SCATTERED_CHARS_PATTERN: Lazy<Regex> = Lazy::new(|| {
24
- Regex::new(r"\b[a-zA-Z]\s{2,}[a-zA-Z]\s{2,}[a-zA-Z]\b")
25
- .expect("Scattered chars regex pattern is valid and should compile")
26
- });
27
- static REPEATED_PUNCT_PATTERN: Lazy<Regex> = Lazy::new(|| {
28
- Regex::new(r"[.]{3,}|[_]{3,}").expect("Repeated punctuation regex pattern is valid and should compile")
29
- });
30
- static DASH_PATTERN: Lazy<Regex> =
31
- Lazy::new(|| Regex::new(r"[-]{3,}").expect("Dash pattern regex is valid and should compile"));
32
- static ISOLATED_PUNCT_PATTERN: Lazy<Regex> =
33
- Lazy::new(|| Regex::new(r"\s[.,;:!?]\s").expect("Isolated punctuation regex pattern is valid and should compile"));
34
- static MALFORMED_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
35
- Regex::new(r"\b[a-zA-Z]+[0-9]+[a-zA-Z]+[a-zA-Z0-9]*\b")
36
- .expect("Malformed words regex pattern is valid and should compile")
37
- });
38
- static EXCESSIVE_WHITESPACE_PATTERN: Lazy<Regex> =
39
- Lazy::new(|| Regex::new(r"\s{3,}").expect("Excessive whitespace regex pattern is valid and should compile"));
40
-
41
- static JS_FUNCTION_PATTERN: Lazy<Regex> = Lazy::new(|| {
42
- Regex::new(r"(?i)function\s+\w+\s*\([^)]*\)\s*\{[^}]*\}")
43
- .expect("JavaScript function regex pattern is valid and should compile")
44
- });
45
- static CSS_RULES_PATTERN: Lazy<Regex> = Lazy::new(|| {
46
- Regex::new(r"(?i)\.[a-zA-Z][\w-]*\s*\{[^}]*\}").expect("CSS rules regex pattern is valid and should compile")
47
- });
48
- static SCRIPT_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
49
- Regex::new(r"(?is)<script[^>]*>.*?</script>").expect("Script tag regex pattern is valid and should compile")
50
- });
51
- static STYLE_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
52
- Regex::new(r"(?is)<style[^>]*>.*?</style>").expect("Style tag regex pattern is valid and should compile")
53
- });
54
-
55
- static NAV_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
56
- Regex::new(r"(?i)\b(?:Skip to main content|Back to top|Main navigation|Site navigation)\b")
57
- .expect("Navigation words regex pattern is valid and should compile")
58
- });
59
- static BREADCRUMB_PATTERN: Lazy<Regex> = Lazy::new(|| {
60
- Regex::new(r"(?:Home\s*[>»]\s*|[>»]\s*){2,}").expect("Breadcrumb regex pattern is valid and should compile")
61
- });
62
- static PAGINATION_PATTERN: Lazy<Regex> = Lazy::new(|| {
63
- Regex::new(r"(?i)\b(?:Page \d+ of \d+|First page|Last page|Previous page|Next page|^\d+ of \d+$)\b")
64
- .expect("Pagination regex pattern is valid and should compile")
65
- });
66
-
67
- static SENTENCE_DETECT: Lazy<Regex> =
68
- Lazy::new(|| Regex::new(r"[.!?]\s+[A-Z]").expect("Sentence detection regex pattern is valid and should compile"));
69
- static PUNCTUATION_DETECT: Lazy<Regex> =
70
- Lazy::new(|| Regex::new(r"[.!?]").expect("Punctuation detection regex pattern is valid and should compile"));
71
-
72
- static WHITESPACE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
73
- Regex::new(r"[ \t\f\v\r\xa0\u{2000}-\u{200b}\u{2028}\u{2029}\u{3000}]+")
74
- .expect("Whitespace normalization regex pattern is valid and should compile")
75
- });
76
- static NEWLINE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
77
- Regex::new(r"\n\s*\n\s*\n+").expect("Newline normalization regex pattern is valid and should compile")
78
- });
79
- static NEWLINE_CLEANUP: Lazy<Regex> =
80
- Lazy::new(|| Regex::new(r"\n+").expect("Newline cleanup regex pattern is valid and should compile"));
81
-
82
- #[inline]
83
- fn sum_match_lengths(text: &str, pattern: &Regex) -> usize {
84
- pattern.find_iter(text).map(|m| m.len()).sum()
85
- }
86
-
87
- fn chain_replacements<'a>(mut text: Cow<'a, str>, replacements: &[(&Regex, &str)]) -> Cow<'a, str> {
88
- for (pattern, replacement) in replacements {
89
- if pattern.is_match(&text) {
90
- text = Cow::Owned(pattern.replace_all(&text, *replacement).into_owned());
91
- }
92
- }
93
- text
94
- }
95
-
96
- #[inline]
97
- fn replace_with_if_matches<'a, F>(text: &'a str, pattern: &Regex, replacer: F) -> Cow<'a, str>
98
- where
99
- F: FnMut(&regex::Captures) -> String,
100
- {
101
- if pattern.is_match(text) {
102
- Cow::Owned(pattern.replace_all(text, replacer).into_owned())
103
- } else {
104
- Cow::Borrowed(text)
105
- }
106
- }
107
-
108
- /// Compute a heuristic score (0.0–1.0) describing how clean the extracted text is.
109
- ///
110
- /// The scoring pipeline rewards well-structured prose while penalising OCR artefacts,
111
- /// embedded scripts, and navigation chrome. Supplying document metadata allows the
112
- /// function to include contextual bonuses.
113
- ///
114
- /// ```rust
115
- /// use ahash::AHashMap;
116
- /// use kreuzberg::utils::quality::calculate_quality_score;
117
- ///
118
- /// let text = "Executive Summary\n===================\nKreuzberg extracts documents quickly.";
119
- /// let score = calculate_quality_score(text, None);
120
- /// assert!(score > 0.7);
121
- /// ```
122
- pub fn calculate_quality_score(text: &str, metadata: Option<&AHashMap<String, String>>) -> f64 {
123
- if text.is_empty() || text.trim().is_empty() {
124
- return 0.0;
125
- }
126
-
127
- let total_chars = text.len() as f64;
128
-
129
- if text.len() < MIN_TEXT_LENGTH {
130
- return 0.1;
131
- }
132
-
133
- let mut score = 1.0;
134
-
135
- if text.len() > LARGE_TEXT_LENGTH {
136
- let ocr_penalty = calculate_ocr_penalty(text, total_chars);
137
- let script_penalty = calculate_script_penalty(text, total_chars);
138
- let nav_penalty = calculate_navigation_penalty(text, total_chars);
139
- let structure_bonus = calculate_structure_bonus(text);
140
-
141
- score -= ocr_penalty * OCR_PENALTY_WEIGHT;
142
- score -= script_penalty * SCRIPT_PENALTY_WEIGHT;
143
- score -= nav_penalty * NAV_PENALTY_WEIGHT;
144
- score += structure_bonus * STRUCTURE_BONUS_WEIGHT;
145
- } else {
146
- score -= calculate_ocr_penalty(text, total_chars) * OCR_PENALTY_WEIGHT;
147
- score += calculate_structure_bonus(text) * STRUCTURE_BONUS_WEIGHT;
148
- }
149
-
150
- if let Some(metadata) = metadata {
151
- score += calculate_metadata_bonus(metadata) * METADATA_BONUS_WEIGHT;
152
- }
153
-
154
- score.clamp(0.0, 1.0)
155
- }
156
-
157
- #[inline]
158
- fn calculate_ocr_penalty(text: &str, total_chars: f64) -> f64 {
159
- if total_chars == 0.0 {
160
- return 0.0;
161
- }
162
-
163
- if !text.contains(" ") && !text.contains("...") {
164
- return 0.0;
165
- }
166
-
167
- let artifact_chars = sum_match_lengths(text, &SCATTERED_CHARS_PATTERN)
168
- + sum_match_lengths(text, &REPEATED_PUNCT_PATTERN)
169
- + count_non_table_dash_artifacts(text)
170
- + sum_match_lengths(text, &ISOLATED_PUNCT_PATTERN)
171
- + sum_match_lengths(text, &MALFORMED_WORDS_PATTERN)
172
- + sum_match_lengths(text, &EXCESSIVE_WHITESPACE_PATTERN);
173
-
174
- (artifact_chars as f64 / total_chars).min(1.0)
175
- }
176
-
177
- #[inline]
178
- fn count_non_table_dash_artifacts(text: &str) -> usize {
179
- let mut artifact_count = 0;
180
-
181
- for line in text.lines() {
182
- let trimmed = line.trim();
183
- let is_table_separator = trimmed.starts_with('|')
184
- && trimmed.ends_with('|')
185
- && trimmed
186
- .chars()
187
- .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
188
-
189
- if !is_table_separator {
190
- for m in DASH_PATTERN.find_iter(line) {
191
- artifact_count += m.len();
192
- }
193
- }
194
- }
195
-
196
- artifact_count
197
- }
198
-
199
- #[inline]
200
- fn calculate_script_penalty(text: &str, total_chars: f64) -> f64 {
201
- if total_chars == 0.0 {
202
- return 0.0;
203
- }
204
-
205
- if !text.contains("function") && !text.contains("<script") && !text.contains("<style") {
206
- return 0.0;
207
- }
208
-
209
- let script_chars = sum_match_lengths(text, &JS_FUNCTION_PATTERN)
210
- + sum_match_lengths(text, &CSS_RULES_PATTERN)
211
- + sum_match_lengths(text, &SCRIPT_TAG_PATTERN)
212
- + sum_match_lengths(text, &STYLE_TAG_PATTERN);
213
-
214
- (script_chars as f64 / total_chars).min(1.0)
215
- }
216
-
217
- #[inline]
218
- fn calculate_navigation_penalty(text: &str, total_chars: f64) -> f64 {
219
- if total_chars == 0.0 {
220
- return 0.0;
221
- }
222
-
223
- let nav_chars = sum_match_lengths(text, &NAV_WORDS_PATTERN)
224
- + sum_match_lengths(text, &BREADCRUMB_PATTERN)
225
- + sum_match_lengths(text, &PAGINATION_PATTERN);
226
-
227
- (nav_chars as f64 / total_chars).min(1.0)
228
- }
229
-
230
- #[inline]
231
- fn calculate_structure_bonus(text: &str) -> f64 {
232
- if text.is_empty() {
233
- return 0.0;
234
- }
235
-
236
- let sentence_count = SENTENCE_DETECT.find_iter(text).count() as f64;
237
- let paragraph_count = text.matches("\n\n").count() as f64 + 1.0;
238
- let words = text.split_whitespace().count() as f64;
239
-
240
- if words == 0.0 {
241
- return 0.0;
242
- }
243
-
244
- let avg_words_per_sentence = words / sentence_count.max(1.0);
245
- let avg_words_per_paragraph = words / paragraph_count.max(1.0);
246
-
247
- let mut structure_score: f64 = 0.0;
248
-
249
- if (MIN_SENTENCE_WORDS..=MAX_SENTENCE_WORDS).contains(&avg_words_per_sentence) {
250
- structure_score += 0.3;
251
- }
252
-
253
- if (MIN_PARAGRAPH_WORDS..=MAX_PARAGRAPH_WORDS).contains(&avg_words_per_paragraph) {
254
- structure_score += 0.3;
255
- }
256
-
257
- if paragraph_count > 1.0 {
258
- structure_score += 0.2;
259
- }
260
-
261
- if PUNCTUATION_DETECT.is_match(text) {
262
- structure_score += 0.2;
263
- }
264
-
265
- structure_score.min(1.0)
266
- }
267
-
268
- #[inline]
269
- fn calculate_metadata_bonus(metadata: &AHashMap<String, String>) -> f64 {
270
- const IMPORTANT_FIELDS: &[&str] = &["title", "author", "subject", "description", "keywords"];
271
-
272
- let present_fields = IMPORTANT_FIELDS
273
- .iter()
274
- .filter(|&&field| metadata.contains_key(field))
275
- .count();
276
-
277
- present_fields as f64 / IMPORTANT_FIELDS.len() as f64
278
- }
279
-
280
- /// Apply the quality heuristics and return a cleaned representation of the text.
281
- ///
282
- /// This function normalises whitespace, removes navigation boilerplate, and strips
283
- /// repeated punctuation that commonly appears in OCR output.
284
- pub fn clean_extracted_text(text: &str) -> String {
285
- if text.is_empty() {
286
- return String::new();
287
- }
288
-
289
- let mut working_text = Cow::Borrowed(text);
290
-
291
- working_text = clean_scripts(working_text);
292
-
293
- working_text = clean_ocr_artifacts_cow(working_text);
294
-
295
- working_text = clean_navigation_elements_cow(working_text);
296
-
297
- working_text = clean_repeated_punctuation_cow(working_text);
298
-
299
- working_text = normalize_whitespace_cow(working_text);
300
-
301
- working_text.trim().to_string()
302
- }
303
-
304
- #[inline]
305
- fn clean_scripts<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
306
- let script_replacements = [
307
- (&*SCRIPT_TAG_PATTERN, " "),
308
- (&*STYLE_TAG_PATTERN, " "),
309
- (&*JS_FUNCTION_PATTERN, " "),
310
- (&*CSS_RULES_PATTERN, " "),
311
- ];
312
- chain_replacements(text, &script_replacements)
313
- }
314
-
315
- #[inline]
316
- fn normalize_whitespace_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
317
- if let Some(fast) = normalize_whitespace_ascii(text.as_ref()) {
318
- return Cow::Owned(fast);
319
- }
320
-
321
- let mut result = text;
322
-
323
- if WHITESPACE_NORMALIZE.is_match(&result) {
324
- result = Cow::Owned(WHITESPACE_NORMALIZE.replace_all(&result, " ").into_owned());
325
- }
326
-
327
- if NEWLINE_NORMALIZE.is_match(&result) {
328
- result = Cow::Owned(NEWLINE_NORMALIZE.replace_all(&result, "\n\n").into_owned());
329
- }
330
-
331
- result
332
- }
333
-
334
- #[inline]
335
- fn clean_repeated_punctuation_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
336
- if let Some(cleaned) = clean_repeated_punctuation_ascii(text.as_ref()) {
337
- return Cow::Owned(cleaned);
338
- }
339
-
340
- if REPEATED_PUNCT_PATTERN.is_match(&text) {
341
- Cow::Owned(
342
- REPEATED_PUNCT_PATTERN
343
- .replace_all(&text, |caps: &regex::Captures<'_>| {
344
- let ch = caps.get(0).and_then(|m| m.as_str().chars().next()).unwrap_or('.');
345
- ch.to_string()
346
- })
347
- .into_owned(),
348
- )
349
- } else {
350
- text
351
- }
352
- }
353
-
354
- fn clean_repeated_punctuation_ascii(text: &str) -> Option<String> {
355
- if !text.is_ascii() {
356
- return None;
357
- }
358
-
359
- let bytes = text.as_bytes();
360
- let mut result = Vec::with_capacity(bytes.len());
361
- let mut changed = false;
362
- let mut offset = 0;
363
-
364
- while offset < bytes.len() {
365
- let remaining = &bytes[offset..];
366
- if let Some(next) = find_next_ascii_punctuation(remaining) {
367
- if next > 0 {
368
- result.extend_from_slice(&remaining[..next]);
369
- offset += next;
370
- }
371
-
372
- if offset >= bytes.len() {
373
- break;
374
- }
375
-
376
- let current = bytes[offset];
377
- result.push(current);
378
- let mut end = offset + 1;
379
- while end < bytes.len() && matches!(bytes[end], b'!' | b'?' | b'.' | b',') {
380
- changed = true;
381
- end += 1;
382
- }
383
- offset = end;
384
- } else {
385
- result.extend_from_slice(remaining);
386
- break;
387
- }
388
- }
389
-
390
- if changed { String::from_utf8(result).ok() } else { None }
391
- }
392
-
393
- #[inline]
394
- fn find_next_ascii_punctuation(bytes: &[u8]) -> Option<usize> {
395
- let primary = memchr3(b'!', b'?', b'.', bytes);
396
- let comma = memchr(b',', bytes);
397
- match (primary, comma) {
398
- (Some(a), Some(b)) => Some(a.min(b)),
399
- (Some(a), None) => Some(a),
400
- (None, Some(b)) => Some(b),
401
- (None, None) => None,
402
- }
403
- }
404
-
405
- #[inline]
406
- pub(crate) fn normalize_whitespace_ascii(text: &str) -> Option<String> {
407
- if !text.is_ascii() {
408
- return None;
409
- }
410
-
411
- let bytes = text.as_bytes();
412
- let mut result = Vec::with_capacity(bytes.len());
413
- let mut changed = false;
414
- let mut i = 0;
415
- let len = bytes.len();
416
-
417
- while i < len {
418
- match bytes[i] {
419
- b' ' | b'\t' | b'\r' | 0x0B | 0x0C => {
420
- let mut j = i + 1;
421
- while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
422
- j += 1;
423
- }
424
- if j - i > 1 || bytes[i] != b' ' {
425
- changed = true;
426
- }
427
- result.push(b' ');
428
- i = j;
429
- }
430
- b'\n' => {
431
- let mut j = i + 1;
432
- while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
433
- j += 1;
434
- changed = true;
435
- }
436
-
437
- let mut newline_count = 1;
438
- while j < len && bytes[j] == b'\n' {
439
- newline_count += 1;
440
- j += 1;
441
-
442
- while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
443
- j += 1;
444
- changed = true;
445
- }
446
- }
447
-
448
- if newline_count >= 3 {
449
- result.extend_from_slice(b"\n\n");
450
- changed = true;
451
- } else {
452
- result.extend(std::iter::repeat_n(b'\n', newline_count));
453
- }
454
-
455
- i = j;
456
- }
457
- _ => {
458
- result.push(bytes[i]);
459
- i += 1;
460
- }
461
- }
462
- }
463
-
464
- let normalized = String::from_utf8(result).unwrap_or_else(|_| text.to_string());
465
-
466
- if changed { Some(normalized) } else { None }
467
- }
468
-
469
- #[inline]
470
- fn clean_ocr_artifacts_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
471
- let result = if let Some(fixed) = collapse_scattered_ascii(&text) {
472
- Cow::Owned(fixed)
473
- } else if SCATTERED_CHARS_PATTERN.is_match(&text) {
474
- Cow::Owned(
475
- replace_with_if_matches(&text, &SCATTERED_CHARS_PATTERN, |caps: &regex::Captures| {
476
- caps[0].chars().filter(|c| !c.is_whitespace()).collect::<String>()
477
- })
478
- .into_owned(),
479
- )
480
- } else {
481
- text
482
- };
483
-
484
- let result = clean_dashes_preserve_tables(result);
485
-
486
- let ocr_replacements = [
487
- (&*REPEATED_PUNCT_PATTERN, "..."),
488
- (&*ISOLATED_PUNCT_PATTERN, " "),
489
- (&*MALFORMED_WORDS_PATTERN, " "),
490
- (&*EXCESSIVE_WHITESPACE_PATTERN, " "),
491
- ];
492
-
493
- chain_replacements(result, &ocr_replacements)
494
- }
495
-
496
- #[inline]
497
- fn clean_dashes_preserve_tables<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
498
- if !DASH_PATTERN.is_match(&text) {
499
- return text;
500
- }
501
-
502
- let mut result = String::with_capacity(text.len());
503
- let lines: Vec<&str> = text.lines().collect();
504
-
505
- for (i, line) in lines.iter().enumerate() {
506
- if i > 0 {
507
- result.push('\n');
508
- }
509
-
510
- let trimmed = line.trim();
511
- let is_table_separator = trimmed.starts_with('|')
512
- && trimmed.ends_with('|')
513
- && trimmed
514
- .chars()
515
- .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
516
-
517
- if is_table_separator {
518
- result.push_str(line);
519
- } else {
520
- let cleaned_line = DASH_PATTERN.replace_all(line, "...");
521
- result.push_str(&cleaned_line);
522
- }
523
- }
524
-
525
- Cow::Owned(result)
526
- }
527
-
528
- #[inline]
529
- fn clean_navigation_elements_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
530
- let nav_replacements = [
531
- (&*NAV_WORDS_PATTERN, " "),
532
- (&*BREADCRUMB_PATTERN, " "),
533
- (&*PAGINATION_PATTERN, " "),
534
- ];
535
-
536
- chain_replacements(text, &nav_replacements)
537
- }
538
-
539
- #[inline]
540
- pub(crate) fn collapse_scattered_ascii(text: &str) -> Option<String> {
541
- if !text.is_ascii() {
542
- return None;
543
- }
544
-
545
- let bytes = text.as_bytes();
546
- let mut result = Vec::with_capacity(bytes.len());
547
- let mut changed = false;
548
- let mut i = 0;
549
-
550
- while i < bytes.len() {
551
- if bytes[i].is_ascii_alphabetic() {
552
- let mut j = i + 1;
553
- let mut count = 1;
554
- while j < bytes.len() {
555
- if bytes[j].is_ascii_alphabetic() {
556
- count += 1;
557
- j += 1;
558
- } else if bytes[j].is_ascii_whitespace() {
559
- j += 1;
560
- } else {
561
- break;
562
- }
563
- }
564
-
565
- if count >= 3 && j - i >= (count * 2 - 1) {
566
- changed = true;
567
- for &byte in &bytes[i..j] {
568
- if byte.is_ascii_alphabetic() {
569
- result.push(byte.to_ascii_lowercase());
570
- }
571
- }
572
- result.push(b' ');
573
- i = j;
574
- continue;
575
- }
576
- }
577
-
578
- result.push(bytes[i]);
579
- i += 1;
580
- }
581
-
582
- if changed { String::from_utf8(result).ok() } else { None }
583
- }
584
-
585
- /// Collapse redundant whitespace while preserving paragraph boundaries.
586
- pub fn normalize_spaces(text: &str) -> String {
587
- if text.is_empty() || text.trim().is_empty() {
588
- return String::new();
589
- }
590
-
591
- let mut result = String::with_capacity(text.len());
592
-
593
- let mut first = true;
594
- for paragraph in text.split("\n\n") {
595
- let trimmed = paragraph.trim();
596
- if trimmed.is_empty() {
597
- continue;
598
- }
599
-
600
- if !first {
601
- result.push_str("\n\n");
602
- }
603
- first = false;
604
-
605
- let collapsed = if let Some(fast) = normalize_whitespace_ascii(paragraph) {
606
- Cow::Owned(fast)
607
- } else {
608
- Cow::Owned(WHITESPACE_NORMALIZE.replace_all(paragraph, " ").into_owned())
609
- };
610
-
611
- let cleaned = NEWLINE_CLEANUP.replace_all(&collapsed, "\n");
612
-
613
- let mut first_line = true;
614
- for line in cleaned.split('\n') {
615
- let line = line.trim();
616
- if !line.is_empty() {
617
- if !first_line {
618
- result.push('\n');
619
- }
620
- result.push_str(line);
621
- first_line = false;
622
- }
623
- }
624
- }
625
-
626
- result
627
- }
628
-
629
- #[cfg(all(test, feature = "quality"))]
630
- mod tests {
631
- use super::*;
632
-
633
- #[test]
634
- fn test_calculate_quality_score_empty_text() {
635
- assert_eq!(calculate_quality_score("", None), 0.0);
636
- assert_eq!(calculate_quality_score(" ", None), 0.0);
637
- assert_eq!(calculate_quality_score("\n\n\n", None), 0.0);
638
- }
639
-
640
- #[test]
641
- fn test_calculate_quality_score_short_text() {
642
- let text = "Hello";
643
- let score = calculate_quality_score(text, None);
644
- assert_eq!(score, 0.1);
645
- }
646
-
647
- #[test]
648
- fn test_calculate_quality_score_normal_text() {
649
- let text =
650
- "This is a normal sentence with proper punctuation. It has multiple sentences. And proper structure.";
651
- let score = calculate_quality_score(text, None);
652
- assert!(score > 0.5);
653
- assert!(score <= 1.0);
654
- }
655
-
656
- #[test]
657
- fn test_clean_extracted_text_empty() {
658
- assert_eq!(clean_extracted_text(""), "");
659
- assert_eq!(clean_extracted_text(" "), "");
660
- }
661
-
662
- #[test]
663
- fn test_clean_extracted_text_removes_scripts() {
664
- let text = "Before <script>alert('test');</script> After";
665
- let cleaned = clean_extracted_text(text);
666
- assert!(!cleaned.contains("<script"));
667
- assert!(cleaned.contains("Before"));
668
- assert!(cleaned.contains("After"));
669
- }
670
-
671
- #[test]
672
- fn test_normalize_spaces_empty() {
673
- assert_eq!(normalize_spaces(""), "");
674
- assert_eq!(normalize_spaces(" "), "");
675
- }
676
-
677
- #[test]
678
- fn test_normalize_spaces_single_paragraph() {
679
- let text = "This is a test";
680
- let normalized = normalize_spaces(text);
681
- assert_eq!(normalized, "This is a test");
682
- }
683
-
684
- #[test]
685
- fn test_calculate_quality_score_with_metadata() {
686
- let text = "This is a normal text with proper structure.";
687
- let mut metadata = AHashMap::new();
688
- metadata.insert("title".to_string(), "Test Title".to_string());
689
- metadata.insert("author".to_string(), "Test Author".to_string());
690
-
691
- let score = calculate_quality_score(text, Some(&metadata));
692
- assert!(score > 0.0);
693
- assert!(score <= 1.0);
694
- }
695
-
696
- #[test]
697
- fn test_calculate_ocr_penalty_clean_text() {
698
- let text = "This is clean text without artifacts";
699
- let penalty = calculate_ocr_penalty(text, text.len() as f64);
700
- assert_eq!(penalty, 0.0);
701
- }
702
-
703
- #[test]
704
- fn test_calculate_ocr_penalty_with_artifacts() {
705
- let text = "Text with excessive spaces and ....... dots";
706
- let penalty = calculate_ocr_penalty(text, text.len() as f64);
707
- assert!(penalty > 0.0);
708
- assert!(penalty <= 1.0);
709
- }
710
-
711
- #[test]
712
- fn test_calculate_script_penalty_clean_text() {
713
- let text = "This is clean text without scripts";
714
- let penalty = calculate_script_penalty(text, text.len() as f64);
715
- assert_eq!(penalty, 0.0);
716
- }
717
-
718
- #[test]
719
- fn test_calculate_script_penalty_with_js() {
720
- let text = "function test() { return 42; }";
721
- let penalty = calculate_script_penalty(text, text.len() as f64);
722
- assert!(penalty > 0.0);
723
- }
724
-
725
- #[test]
726
- fn test_calculate_navigation_penalty_clean_text() {
727
- let text = "This is clean text without navigation";
728
- let penalty = calculate_navigation_penalty(text, text.len() as f64);
729
- assert_eq!(penalty, 0.0);
730
- }
731
-
732
- #[test]
733
- fn test_calculate_navigation_penalty_with_nav() {
734
- let text = "Skip to main content and Back to top links everywhere";
735
- let penalty = calculate_navigation_penalty(text, text.len() as f64);
736
- assert!(penalty > 0.0);
737
- }
738
-
739
- #[test]
740
- fn test_calculate_structure_bonus_empty() {
741
- assert_eq!(calculate_structure_bonus(""), 0.0);
742
- }
743
-
744
- #[test]
745
- fn test_calculate_structure_bonus_well_structured() {
746
- let text = "This is a sentence. This is another sentence.\n\nNew paragraph here. More content.";
747
- let bonus = calculate_structure_bonus(text);
748
- assert!(bonus > 0.0);
749
- assert!(bonus <= 1.0);
750
- }
751
-
752
- #[test]
753
- fn test_calculate_metadata_bonus_empty() {
754
- let metadata = AHashMap::new();
755
- let bonus = calculate_metadata_bonus(&metadata);
756
- assert_eq!(bonus, 0.0);
757
- }
758
-
759
- #[test]
760
- fn test_calculate_metadata_bonus_full() {
761
- let mut metadata = AHashMap::new();
762
- metadata.insert("title".to_string(), "Title".to_string());
763
- metadata.insert("author".to_string(), "Author".to_string());
764
- metadata.insert("subject".to_string(), "Subject".to_string());
765
- metadata.insert("description".to_string(), "Description".to_string());
766
- metadata.insert("keywords".to_string(), "Keywords".to_string());
767
-
768
- let bonus = calculate_metadata_bonus(&metadata);
769
- assert_eq!(bonus, 1.0);
770
- }
771
-
772
- #[test]
773
- fn test_clean_extracted_text_removes_styles() {
774
- let text = "Before <style>.class { color: red; }</style> After";
775
- let cleaned = clean_extracted_text(text);
776
- assert!(!cleaned.contains("<style"));
777
- assert!(cleaned.contains("Before"));
778
- assert!(cleaned.contains("After"));
779
- }
780
-
781
- #[test]
782
- fn test_clean_extracted_text_ocr_artifacts() {
783
- let text = "Text with excessive spaces";
784
- let cleaned = clean_extracted_text(text);
785
- assert!(!cleaned.contains(" "));
786
- }
787
-
788
- #[test]
789
- fn test_clean_extracted_text_navigation() {
790
- let text = "Content Skip to main content more content";
791
- let cleaned = clean_extracted_text(text);
792
- assert!(cleaned.contains("Content"));
793
- assert!(cleaned.contains("more content"));
794
- }
795
-
796
- #[test]
797
- fn test_clean_repeated_punctuation_ascii_helper() {
798
- let input = "Wow!!! Really??? Sure...";
799
- let cleaned = clean_repeated_punctuation_ascii(input).expect("Should collapse punctuation");
800
- assert_eq!(cleaned, "Wow! Really? Sure.");
801
- }
802
-
803
- #[test]
804
- fn test_clean_repeated_punctuation_non_ascii_passthrough() {
805
- assert!(clean_repeated_punctuation_ascii("¿Qué tal?").is_none());
806
- }
807
-
808
- #[test]
809
- fn test_normalize_spaces_multiple_paragraphs() {
810
- let text = "First paragraph.\n\nSecond paragraph.";
811
- let normalized = normalize_spaces(text);
812
- assert!(normalized.contains("\n\n"));
813
- }
814
-
815
- #[test]
816
- fn test_normalize_spaces_preserves_paragraphs() {
817
- let text = "Para 1\n\n\n\nPara 2";
818
- let normalized = normalize_spaces(text);
819
- assert_eq!(normalized, "Para 1\n\nPara 2");
820
- }
821
-
822
- #[test]
823
- fn test_count_non_table_dash_artifacts() {
824
- let text = "Some text --- with dashes";
825
- let count = count_non_table_dash_artifacts(text);
826
- assert!(count > 0);
827
- }
828
-
829
- #[test]
830
- fn test_count_non_table_dash_artifacts_preserves_tables() {
831
- let text = "| Header |\n|--------|\n| Data |";
832
- let count = count_non_table_dash_artifacts(text);
833
- assert_eq!(count, 0);
834
- }
835
-
836
- #[test]
837
- fn test_clean_dashes_preserve_tables_simple() {
838
- let text = Cow::Borrowed("| Col1 |\n|------|\n| Data |");
839
- let result = clean_dashes_preserve_tables(text);
840
- assert!(result.contains("|------"));
841
- }
842
-
843
- #[test]
844
- fn test_clean_dashes_preserve_tables_replaces_non_table() {
845
- let text = Cow::Borrowed("Text with --- dashes");
846
- let result = clean_dashes_preserve_tables(text);
847
- assert!(result.contains("..."));
848
- assert!(!result.contains("---"));
849
- }
850
-
851
- #[test]
852
- fn test_sum_match_lengths() {
853
- let text = "test ... test ... test";
854
- let count = sum_match_lengths(text, &REPEATED_PUNCT_PATTERN);
855
- assert!(count > 0);
856
- }
857
-
858
- #[test]
859
- fn test_quality_score_large_text_with_ocr_issues() {
860
- let text = "a".repeat(2000) + " " + &"b".repeat(2000);
861
- let score = calculate_quality_score(&text, None);
862
- assert!(score >= 0.0);
863
- assert!(score <= 1.0);
864
- }
865
-
866
- #[test]
867
- fn test_quality_score_clamped_to_range() {
868
- let perfect_text = "This is perfect text. ".repeat(100);
869
- let score = calculate_quality_score(&perfect_text, None);
870
- assert!(score >= 0.0);
871
- assert!(score <= 1.0);
872
- }
873
-
874
- #[test]
875
- fn test_clean_extracted_text_scattered_chars() {
876
- let text = "a b c scattered";
877
- let cleaned = clean_extracted_text(text);
878
- assert!(!cleaned.is_empty());
879
- }
880
-
881
- #[cfg_attr(coverage, ignore = "coverage instrumentation perturbs ASCII fast path heuristics")]
882
- #[test]
883
- fn test_collapse_scattered_ascii_trigger() {
884
- let original = "S p a c e d";
885
- let collapsed = collapse_scattered_ascii(original).expect("fast path should trigger");
886
- assert_eq!(collapsed.trim(), "spaced");
887
- }
888
-
889
- #[test]
890
- fn test_collapse_scattered_ascii_non_ascii() {
891
- assert!(collapse_scattered_ascii("מ ש ה ו").is_none());
892
- }
893
-
894
- #[test]
895
- fn test_normalize_whitespace_ascii_spaces() {
896
- let input = "Hello \tWorld\rWelcome";
897
- let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
898
- assert_eq!(normalized, "Hello World Welcome");
899
- }
900
-
901
- #[test]
902
- fn test_normalize_whitespace_ascii_newlines() {
903
- let input = "Line1\n \n\n \nLine2";
904
- let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
905
- assert_eq!(normalized, "Line1\n\nLine2");
906
- }
907
-
908
- #[test]
909
- fn test_normalize_whitespace_ascii_no_change() {
910
- assert!(normalize_whitespace_ascii("Clean text").is_none());
911
- }
912
-
913
- #[test]
914
- fn test_normalize_whitespace_ascii_non_ascii() {
915
- assert!(normalize_whitespace_ascii("שלום שלום").is_none());
916
- }
917
-
918
- #[test]
919
- fn test_normalize_spaces_ascii_fast_path() {
920
- let input = "Hello world\n\nSecond line";
921
- let normalized = normalize_spaces(input);
922
- assert_eq!(normalized, "Hello world\n\nSecond line");
923
- }
924
-
925
- #[test]
926
- fn test_normalize_whitespace_cow_no_changes() {
927
- let text = Cow::Borrowed("normaltext");
928
- let result = normalize_whitespace_cow(text);
929
- assert_eq!(result.as_ref(), "normaltext");
930
- }
931
-
932
- #[test]
933
- fn test_normalize_whitespace_cow_with_changes() {
934
- let text = Cow::Borrowed("text with spaces");
935
- let result = normalize_whitespace_cow(text);
936
- assert!(matches!(result, Cow::Owned(_)));
937
- }
938
-
939
- #[test]
940
- fn test_clean_scripts_no_scripts() {
941
- let text = Cow::Borrowed("clean text");
942
- let result = clean_scripts(text);
943
- assert!(matches!(result, Cow::Borrowed(_)));
944
- }
945
-
946
- #[test]
947
- fn test_clean_scripts_with_script_tag() {
948
- let text = Cow::Borrowed("<script>code</script>");
949
- let result = clean_scripts(text);
950
- assert!(!result.contains("<script"));
951
- }
952
-
953
- #[test]
954
- fn test_quality_constants() {
955
- assert_eq!(MIN_TEXT_LENGTH, 10);
956
- assert_eq!(LARGE_TEXT_LENGTH, 1000);
957
- assert_eq!(OCR_PENALTY_WEIGHT, 0.3);
958
- }
959
- }
1
+ use ahash::AHashMap;
2
+ use memchr::{memchr, memchr3};
3
+ use once_cell::sync::Lazy;
4
+ use regex::Regex;
5
+ use std::borrow::Cow;
6
+
7
+ // ============================================================================
8
+ // ============================================================================
9
+
10
+ const OCR_PENALTY_WEIGHT: f64 = 0.3;
11
+ const SCRIPT_PENALTY_WEIGHT: f64 = 0.2;
12
+ const NAV_PENALTY_WEIGHT: f64 = 0.1;
13
+ const STRUCTURE_BONUS_WEIGHT: f64 = 0.2;
14
+ const METADATA_BONUS_WEIGHT: f64 = 0.1;
15
+
16
+ const MIN_TEXT_LENGTH: usize = 10;
17
+ const LARGE_TEXT_LENGTH: usize = 1000;
18
+ const MIN_SENTENCE_WORDS: f64 = 10.0;
19
+ const MAX_SENTENCE_WORDS: f64 = 30.0;
20
+ const MIN_PARAGRAPH_WORDS: f64 = 50.0;
21
+ const MAX_PARAGRAPH_WORDS: f64 = 300.0;
22
+
23
+ static SCATTERED_CHARS_PATTERN: Lazy<Regex> = Lazy::new(|| {
24
+ Regex::new(r"\b[a-zA-Z]\s{2,}[a-zA-Z]\s{2,}[a-zA-Z]\b")
25
+ .expect("Scattered chars regex pattern is valid and should compile")
26
+ });
27
+ static REPEATED_PUNCT_PATTERN: Lazy<Regex> = Lazy::new(|| {
28
+ Regex::new(r"[.]{3,}|[_]{3,}").expect("Repeated punctuation regex pattern is valid and should compile")
29
+ });
30
+ static DASH_PATTERN: Lazy<Regex> =
31
+ Lazy::new(|| Regex::new(r"[-]{3,}").expect("Dash pattern regex is valid and should compile"));
32
+ static ISOLATED_PUNCT_PATTERN: Lazy<Regex> =
33
+ Lazy::new(|| Regex::new(r"\s[.,;:!?]\s").expect("Isolated punctuation regex pattern is valid and should compile"));
34
+ static MALFORMED_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
35
+ Regex::new(r"\b[a-zA-Z]+[0-9]+[a-zA-Z]+[a-zA-Z0-9]*\b")
36
+ .expect("Malformed words regex pattern is valid and should compile")
37
+ });
38
+ static EXCESSIVE_WHITESPACE_PATTERN: Lazy<Regex> =
39
+ Lazy::new(|| Regex::new(r"\s{3,}").expect("Excessive whitespace regex pattern is valid and should compile"));
40
+
41
+ static JS_FUNCTION_PATTERN: Lazy<Regex> = Lazy::new(|| {
42
+ Regex::new(r"(?i)function\s+\w+\s*\([^)]*\)\s*\{[^}]*\}")
43
+ .expect("JavaScript function regex pattern is valid and should compile")
44
+ });
45
+ static CSS_RULES_PATTERN: Lazy<Regex> = Lazy::new(|| {
46
+ Regex::new(r"(?i)\.[a-zA-Z][\w-]*\s*\{[^}]*\}").expect("CSS rules regex pattern is valid and should compile")
47
+ });
48
+ static SCRIPT_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
49
+ Regex::new(r"(?is)<script[^>]*>.*?</script>").expect("Script tag regex pattern is valid and should compile")
50
+ });
51
+ static STYLE_TAG_PATTERN: Lazy<Regex> = Lazy::new(|| {
52
+ Regex::new(r"(?is)<style[^>]*>.*?</style>").expect("Style tag regex pattern is valid and should compile")
53
+ });
54
+
55
+ static NAV_WORDS_PATTERN: Lazy<Regex> = Lazy::new(|| {
56
+ Regex::new(r"(?i)\b(?:Skip to main content|Back to top|Main navigation|Site navigation)\b")
57
+ .expect("Navigation words regex pattern is valid and should compile")
58
+ });
59
+ static BREADCRUMB_PATTERN: Lazy<Regex> = Lazy::new(|| {
60
+ Regex::new(r"(?:Home\s*[>»]\s*|[>»]\s*){2,}").expect("Breadcrumb regex pattern is valid and should compile")
61
+ });
62
+ static PAGINATION_PATTERN: Lazy<Regex> = Lazy::new(|| {
63
+ Regex::new(r"(?i)\b(?:Page \d+ of \d+|First page|Last page|Previous page|Next page|^\d+ of \d+$)\b")
64
+ .expect("Pagination regex pattern is valid and should compile")
65
+ });
66
+
67
+ static SENTENCE_DETECT: Lazy<Regex> =
68
+ Lazy::new(|| Regex::new(r"[.!?]\s+[A-Z]").expect("Sentence detection regex pattern is valid and should compile"));
69
+ static PUNCTUATION_DETECT: Lazy<Regex> =
70
+ Lazy::new(|| Regex::new(r"[.!?]").expect("Punctuation detection regex pattern is valid and should compile"));
71
+
72
+ static WHITESPACE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
73
+ Regex::new(r"[ \t\f\v\r\xa0\u{2000}-\u{200b}\u{2028}\u{2029}\u{3000}]+")
74
+ .expect("Whitespace normalization regex pattern is valid and should compile")
75
+ });
76
+ static NEWLINE_NORMALIZE: Lazy<Regex> = Lazy::new(|| {
77
+ Regex::new(r"\n\s*\n\s*\n+").expect("Newline normalization regex pattern is valid and should compile")
78
+ });
79
+ static NEWLINE_CLEANUP: Lazy<Regex> =
80
+ Lazy::new(|| Regex::new(r"\n+").expect("Newline cleanup regex pattern is valid and should compile"));
81
+
82
+ #[inline]
83
+ fn sum_match_lengths(text: &str, pattern: &Regex) -> usize {
84
+ pattern.find_iter(text).map(|m| m.len()).sum()
85
+ }
86
+
87
+ fn chain_replacements<'a>(mut text: Cow<'a, str>, replacements: &[(&Regex, &str)]) -> Cow<'a, str> {
88
+ for (pattern, replacement) in replacements {
89
+ if pattern.is_match(&text) {
90
+ text = Cow::Owned(pattern.replace_all(&text, *replacement).into_owned());
91
+ }
92
+ }
93
+ text
94
+ }
95
+
96
+ #[inline]
97
+ fn replace_with_if_matches<'a, F>(text: &'a str, pattern: &Regex, replacer: F) -> Cow<'a, str>
98
+ where
99
+ F: FnMut(&regex::Captures) -> String,
100
+ {
101
+ if pattern.is_match(text) {
102
+ Cow::Owned(pattern.replace_all(text, replacer).into_owned())
103
+ } else {
104
+ Cow::Borrowed(text)
105
+ }
106
+ }
107
+
108
+ /// Compute a heuristic score (0.0–1.0) describing how clean the extracted text is.
109
+ ///
110
+ /// The scoring pipeline rewards well-structured prose while penalising OCR artefacts,
111
+ /// embedded scripts, and navigation chrome. Supplying document metadata allows the
112
+ /// function to include contextual bonuses.
113
+ ///
114
+ /// ```rust
115
+ /// use ahash::AHashMap;
116
+ /// use kreuzberg::utils::quality::calculate_quality_score;
117
+ ///
118
+ /// let text = "Executive Summary\n===================\nKreuzberg extracts documents quickly.";
119
+ /// let score = calculate_quality_score(text, None);
120
+ /// assert!(score > 0.7);
121
+ /// ```
122
+ pub fn calculate_quality_score(text: &str, metadata: Option<&AHashMap<String, String>>) -> f64 {
123
+ if text.is_empty() || text.trim().is_empty() {
124
+ return 0.0;
125
+ }
126
+
127
+ let total_chars = text.len() as f64;
128
+
129
+ if text.len() < MIN_TEXT_LENGTH {
130
+ return 0.1;
131
+ }
132
+
133
+ let mut score = 1.0;
134
+
135
+ if text.len() > LARGE_TEXT_LENGTH {
136
+ let ocr_penalty = calculate_ocr_penalty(text, total_chars);
137
+ let script_penalty = calculate_script_penalty(text, total_chars);
138
+ let nav_penalty = calculate_navigation_penalty(text, total_chars);
139
+ let structure_bonus = calculate_structure_bonus(text);
140
+
141
+ score -= ocr_penalty * OCR_PENALTY_WEIGHT;
142
+ score -= script_penalty * SCRIPT_PENALTY_WEIGHT;
143
+ score -= nav_penalty * NAV_PENALTY_WEIGHT;
144
+ score += structure_bonus * STRUCTURE_BONUS_WEIGHT;
145
+ } else {
146
+ score -= calculate_ocr_penalty(text, total_chars) * OCR_PENALTY_WEIGHT;
147
+ score += calculate_structure_bonus(text) * STRUCTURE_BONUS_WEIGHT;
148
+ }
149
+
150
+ if let Some(metadata) = metadata {
151
+ score += calculate_metadata_bonus(metadata) * METADATA_BONUS_WEIGHT;
152
+ }
153
+
154
+ score.clamp(0.0, 1.0)
155
+ }
156
+
157
+ #[inline]
158
+ fn calculate_ocr_penalty(text: &str, total_chars: f64) -> f64 {
159
+ if total_chars == 0.0 {
160
+ return 0.0;
161
+ }
162
+
163
+ if !text.contains(" ") && !text.contains("...") {
164
+ return 0.0;
165
+ }
166
+
167
+ let artifact_chars = sum_match_lengths(text, &SCATTERED_CHARS_PATTERN)
168
+ + sum_match_lengths(text, &REPEATED_PUNCT_PATTERN)
169
+ + count_non_table_dash_artifacts(text)
170
+ + sum_match_lengths(text, &ISOLATED_PUNCT_PATTERN)
171
+ + sum_match_lengths(text, &MALFORMED_WORDS_PATTERN)
172
+ + sum_match_lengths(text, &EXCESSIVE_WHITESPACE_PATTERN);
173
+
174
+ (artifact_chars as f64 / total_chars).min(1.0)
175
+ }
176
+
177
+ #[inline]
178
+ fn count_non_table_dash_artifacts(text: &str) -> usize {
179
+ let mut artifact_count = 0;
180
+
181
+ for line in text.lines() {
182
+ let trimmed = line.trim();
183
+ let is_table_separator = trimmed.starts_with('|')
184
+ && trimmed.ends_with('|')
185
+ && trimmed
186
+ .chars()
187
+ .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
188
+
189
+ if !is_table_separator {
190
+ for m in DASH_PATTERN.find_iter(line) {
191
+ artifact_count += m.len();
192
+ }
193
+ }
194
+ }
195
+
196
+ artifact_count
197
+ }
198
+
199
+ #[inline]
200
+ fn calculate_script_penalty(text: &str, total_chars: f64) -> f64 {
201
+ if total_chars == 0.0 {
202
+ return 0.0;
203
+ }
204
+
205
+ if !text.contains("function") && !text.contains("<script") && !text.contains("<style") {
206
+ return 0.0;
207
+ }
208
+
209
+ let script_chars = sum_match_lengths(text, &JS_FUNCTION_PATTERN)
210
+ + sum_match_lengths(text, &CSS_RULES_PATTERN)
211
+ + sum_match_lengths(text, &SCRIPT_TAG_PATTERN)
212
+ + sum_match_lengths(text, &STYLE_TAG_PATTERN);
213
+
214
+ (script_chars as f64 / total_chars).min(1.0)
215
+ }
216
+
217
+ #[inline]
218
+ fn calculate_navigation_penalty(text: &str, total_chars: f64) -> f64 {
219
+ if total_chars == 0.0 {
220
+ return 0.0;
221
+ }
222
+
223
+ let nav_chars = sum_match_lengths(text, &NAV_WORDS_PATTERN)
224
+ + sum_match_lengths(text, &BREADCRUMB_PATTERN)
225
+ + sum_match_lengths(text, &PAGINATION_PATTERN);
226
+
227
+ (nav_chars as f64 / total_chars).min(1.0)
228
+ }
229
+
230
+ #[inline]
231
+ fn calculate_structure_bonus(text: &str) -> f64 {
232
+ if text.is_empty() {
233
+ return 0.0;
234
+ }
235
+
236
+ let sentence_count = SENTENCE_DETECT.find_iter(text).count() as f64;
237
+ let paragraph_count = text.matches("\n\n").count() as f64 + 1.0;
238
+ let words = text.split_whitespace().count() as f64;
239
+
240
+ if words == 0.0 {
241
+ return 0.0;
242
+ }
243
+
244
+ let avg_words_per_sentence = words / sentence_count.max(1.0);
245
+ let avg_words_per_paragraph = words / paragraph_count.max(1.0);
246
+
247
+ let mut structure_score: f64 = 0.0;
248
+
249
+ if (MIN_SENTENCE_WORDS..=MAX_SENTENCE_WORDS).contains(&avg_words_per_sentence) {
250
+ structure_score += 0.3;
251
+ }
252
+
253
+ if (MIN_PARAGRAPH_WORDS..=MAX_PARAGRAPH_WORDS).contains(&avg_words_per_paragraph) {
254
+ structure_score += 0.3;
255
+ }
256
+
257
+ if paragraph_count > 1.0 {
258
+ structure_score += 0.2;
259
+ }
260
+
261
+ if PUNCTUATION_DETECT.is_match(text) {
262
+ structure_score += 0.2;
263
+ }
264
+
265
+ structure_score.min(1.0)
266
+ }
267
+
268
+ #[inline]
269
+ fn calculate_metadata_bonus(metadata: &AHashMap<String, String>) -> f64 {
270
+ const IMPORTANT_FIELDS: &[&str] = &["title", "author", "subject", "description", "keywords"];
271
+
272
+ let present_fields = IMPORTANT_FIELDS
273
+ .iter()
274
+ .filter(|&&field| metadata.contains_key(field))
275
+ .count();
276
+
277
+ present_fields as f64 / IMPORTANT_FIELDS.len() as f64
278
+ }
279
+
280
+ /// Apply the quality heuristics and return a cleaned representation of the text.
281
+ ///
282
+ /// This function normalises whitespace, removes navigation boilerplate, and strips
283
+ /// repeated punctuation that commonly appears in OCR output.
284
+ pub fn clean_extracted_text(text: &str) -> String {
285
+ if text.is_empty() {
286
+ return String::new();
287
+ }
288
+
289
+ let mut working_text = Cow::Borrowed(text);
290
+
291
+ working_text = clean_scripts(working_text);
292
+
293
+ working_text = clean_ocr_artifacts_cow(working_text);
294
+
295
+ working_text = clean_navigation_elements_cow(working_text);
296
+
297
+ working_text = clean_repeated_punctuation_cow(working_text);
298
+
299
+ working_text = normalize_whitespace_cow(working_text);
300
+
301
+ working_text.trim().to_string()
302
+ }
303
+
304
+ #[inline]
305
+ fn clean_scripts<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
306
+ let script_replacements = [
307
+ (&*SCRIPT_TAG_PATTERN, " "),
308
+ (&*STYLE_TAG_PATTERN, " "),
309
+ (&*JS_FUNCTION_PATTERN, " "),
310
+ (&*CSS_RULES_PATTERN, " "),
311
+ ];
312
+ chain_replacements(text, &script_replacements)
313
+ }
314
+
315
+ #[inline]
316
+ fn normalize_whitespace_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
317
+ if let Some(fast) = normalize_whitespace_ascii(text.as_ref()) {
318
+ return Cow::Owned(fast);
319
+ }
320
+
321
+ let mut result = text;
322
+
323
+ if WHITESPACE_NORMALIZE.is_match(&result) {
324
+ result = Cow::Owned(WHITESPACE_NORMALIZE.replace_all(&result, " ").into_owned());
325
+ }
326
+
327
+ if NEWLINE_NORMALIZE.is_match(&result) {
328
+ result = Cow::Owned(NEWLINE_NORMALIZE.replace_all(&result, "\n\n").into_owned());
329
+ }
330
+
331
+ result
332
+ }
333
+
334
+ #[inline]
335
+ fn clean_repeated_punctuation_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
336
+ if let Some(cleaned) = clean_repeated_punctuation_ascii(text.as_ref()) {
337
+ return Cow::Owned(cleaned);
338
+ }
339
+
340
+ if REPEATED_PUNCT_PATTERN.is_match(&text) {
341
+ Cow::Owned(
342
+ REPEATED_PUNCT_PATTERN
343
+ .replace_all(&text, |caps: &regex::Captures<'_>| {
344
+ let ch = caps.get(0).and_then(|m| m.as_str().chars().next()).unwrap_or('.');
345
+ ch.to_string()
346
+ })
347
+ .into_owned(),
348
+ )
349
+ } else {
350
+ text
351
+ }
352
+ }
353
+
354
/// ASCII fast path that collapses runs of `!`, `?`, `.`, `,` to a single byte.
///
/// Returns `Some(cleaned)` only when the input is pure ASCII *and* at least one
/// run was collapsed; returns `None` otherwise so the caller can fall back to
/// the regex-based slow path.
///
/// NOTE: a run of *mixed* punctuation (e.g. `!?`) also collapses to its first
/// byte, since the inner loop skips any of the four bytes regardless of which
/// one started the run.
fn clean_repeated_punctuation_ascii(text: &str) -> Option<String> {
    if !text.is_ascii() {
        return None;
    }

    let bytes = text.as_bytes();
    let mut result = Vec::with_capacity(bytes.len());
    let mut changed = false;
    let mut offset = 0;

    while offset < bytes.len() {
        let remaining = &bytes[offset..];
        if let Some(next) = find_next_ascii_punctuation(remaining) {
            if next > 0 {
                // Copy the non-punctuation prefix verbatim.
                result.extend_from_slice(&remaining[..next]);
                offset += next;
            }

            if offset >= bytes.len() {
                break;
            }

            // Keep the first punctuation byte, then swallow the rest of the run.
            let current = bytes[offset];
            result.push(current);
            let mut end = offset + 1;
            while end < bytes.len() && matches!(bytes[end], b'!' | b'?' | b'.' | b',') {
                changed = true;
                end += 1;
            }
            offset = end;
        } else {
            // No punctuation remains: copy the tail and stop scanning.
            result.extend_from_slice(remaining);
            break;
        }
    }

    // `result` only ever receives bytes copied from ASCII input, so the UTF-8
    // conversion cannot fail in practice.
    if changed { String::from_utf8(result).ok() } else { None }
}
392
+
393
+ #[inline]
394
+ fn find_next_ascii_punctuation(bytes: &[u8]) -> Option<usize> {
395
+ let primary = memchr3(b'!', b'?', b'.', bytes);
396
+ let comma = memchr(b',', bytes);
397
+ match (primary, comma) {
398
+ (Some(a), Some(b)) => Some(a.min(b)),
399
+ (Some(a), None) => Some(a),
400
+ (None, Some(b)) => Some(b),
401
+ (None, None) => None,
402
+ }
403
+ }
404
+
405
/// ASCII fast path for whitespace normalisation.
///
/// Collapses runs of horizontal whitespace (space, tab, CR, VT, FF) to a single
/// space, and caps runs of newlines (with interleaved horizontal whitespace) at
/// two. Returns `Some(normalized)` only when the input is pure ASCII *and*
/// something actually changed; `None` tells the caller to use the regex path
/// (or that the text was already clean).
#[inline]
pub(crate) fn normalize_whitespace_ascii(text: &str) -> Option<String> {
    if !text.is_ascii() {
        return None;
    }

    let bytes = text.as_bytes();
    let mut result = Vec::with_capacity(bytes.len());
    let mut changed = false;
    let mut i = 0;
    let len = bytes.len();

    while i < len {
        match bytes[i] {
            // Horizontal whitespace: emit exactly one space for the whole run.
            b' ' | b'\t' | b'\r' | 0x0B | 0x0C => {
                let mut j = i + 1;
                while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
                    j += 1;
                }
                // A lone plain space is already normalised; anything else
                // (longer run, or a non-space whitespace byte) is a change.
                if j - i > 1 || bytes[i] != b' ' {
                    changed = true;
                }
                result.push(b' ');
                i = j;
            }
            // Newlines: count consecutive newlines, skipping horizontal
            // whitespace between them, then emit at most two.
            b'\n' => {
                let mut j = i + 1;
                while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
                    j += 1;
                    changed = true;
                }

                let mut newline_count = 1;
                while j < len && bytes[j] == b'\n' {
                    newline_count += 1;
                    j += 1;

                    while j < len && matches!(bytes[j], b' ' | b'\t' | b'\r' | 0x0B | 0x0C) {
                        j += 1;
                        changed = true;
                    }
                }

                if newline_count >= 3 {
                    // Cap paragraph breaks at a single blank line.
                    result.extend_from_slice(b"\n\n");
                    changed = true;
                } else {
                    result.extend(std::iter::repeat_n(b'\n', newline_count));
                }

                i = j;
            }
            // Everything else is copied through unchanged.
            _ => {
                result.push(bytes[i]);
                i += 1;
            }
        }
    }

    // Input was ASCII, so this conversion should never hit the fallback.
    let normalized = String::from_utf8(result).unwrap_or_else(|_| text.to_string());

    if changed { Some(normalized) } else { None }
}
468
+
469
+ #[inline]
470
+ fn clean_ocr_artifacts_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
471
+ let result = if let Some(fixed) = collapse_scattered_ascii(&text) {
472
+ Cow::Owned(fixed)
473
+ } else if SCATTERED_CHARS_PATTERN.is_match(&text) {
474
+ Cow::Owned(
475
+ replace_with_if_matches(&text, &SCATTERED_CHARS_PATTERN, |caps: &regex::Captures| {
476
+ caps[0].chars().filter(|c| !c.is_whitespace()).collect::<String>()
477
+ })
478
+ .into_owned(),
479
+ )
480
+ } else {
481
+ text
482
+ };
483
+
484
+ let result = clean_dashes_preserve_tables(result);
485
+
486
+ let ocr_replacements = [
487
+ (&*REPEATED_PUNCT_PATTERN, "..."),
488
+ (&*ISOLATED_PUNCT_PATTERN, " "),
489
+ (&*MALFORMED_WORDS_PATTERN, " "),
490
+ (&*EXCESSIVE_WHITESPACE_PATTERN, " "),
491
+ ];
492
+
493
+ chain_replacements(result, &ocr_replacements)
494
+ }
495
+
496
+ #[inline]
497
+ fn clean_dashes_preserve_tables<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
498
+ if !DASH_PATTERN.is_match(&text) {
499
+ return text;
500
+ }
501
+
502
+ let mut result = String::with_capacity(text.len());
503
+ let lines: Vec<&str> = text.lines().collect();
504
+
505
+ for (i, line) in lines.iter().enumerate() {
506
+ if i > 0 {
507
+ result.push('\n');
508
+ }
509
+
510
+ let trimmed = line.trim();
511
+ let is_table_separator = trimmed.starts_with('|')
512
+ && trimmed.ends_with('|')
513
+ && trimmed
514
+ .chars()
515
+ .all(|c| c == '|' || c == '-' || c.is_whitespace() || c == ':');
516
+
517
+ if is_table_separator {
518
+ result.push_str(line);
519
+ } else {
520
+ let cleaned_line = DASH_PATTERN.replace_all(line, "...");
521
+ result.push_str(&cleaned_line);
522
+ }
523
+ }
524
+
525
+ Cow::Owned(result)
526
+ }
527
+
528
+ #[inline]
529
+ fn clean_navigation_elements_cow<'a>(text: Cow<'a, str>) -> Cow<'a, str> {
530
+ let nav_replacements = [
531
+ (&*NAV_WORDS_PATTERN, " "),
532
+ (&*BREADCRUMB_PATTERN, " "),
533
+ (&*PAGINATION_PATTERN, " "),
534
+ ];
535
+
536
+ chain_replacements(text, &nav_replacements)
537
+ }
538
+
539
/// ASCII fast path that rejoins letter-by-letter OCR output ("s p a c e d").
///
/// Returns `Some(fixed)` only for pure-ASCII input in which at least one
/// scattered run was collapsed; otherwise `None` so the caller can fall back
/// to the regex-based detection. Collapsed runs are lowercased and followed by
/// a single trailing space.
#[inline]
pub(crate) fn collapse_scattered_ascii(text: &str) -> Option<String> {
    if !text.is_ascii() {
        return None;
    }

    let bytes = text.as_bytes();
    let mut out = Vec::with_capacity(bytes.len());
    let mut collapsed_any = false;
    let mut pos = 0;

    while pos < bytes.len() {
        if bytes[pos].is_ascii_alphabetic() {
            // Scan forward over letters optionally separated by whitespace.
            let mut scan = pos + 1;
            let mut letters = 1;
            while scan < bytes.len() {
                if bytes[scan].is_ascii_alphabetic() {
                    letters += 1;
                    scan += 1;
                } else if bytes[scan].is_ascii_whitespace() {
                    scan += 1;
                } else {
                    break;
                }
            }

            // Heuristic: at least three letters and enough interleaved
            // whitespace (span >= 2*letters - 1) for the run to look scattered.
            if letters >= 3 && scan - pos >= letters * 2 - 1 {
                collapsed_any = true;
                for &b in &bytes[pos..scan] {
                    if b.is_ascii_alphabetic() {
                        out.push(b.to_ascii_lowercase());
                    }
                }
                out.push(b' ');
                pos = scan;
                continue;
            }
        }

        out.push(bytes[pos]);
        pos += 1;
    }

    if collapsed_any { String::from_utf8(out).ok() } else { None }
}
584
+
585
+ /// Collapse redundant whitespace while preserving paragraph boundaries.
586
+ pub fn normalize_spaces(text: &str) -> String {
587
+ if text.is_empty() || text.trim().is_empty() {
588
+ return String::new();
589
+ }
590
+
591
+ let mut result = String::with_capacity(text.len());
592
+
593
+ let mut first = true;
594
+ for paragraph in text.split("\n\n") {
595
+ let trimmed = paragraph.trim();
596
+ if trimmed.is_empty() {
597
+ continue;
598
+ }
599
+
600
+ if !first {
601
+ result.push_str("\n\n");
602
+ }
603
+ first = false;
604
+
605
+ let collapsed = if let Some(fast) = normalize_whitespace_ascii(paragraph) {
606
+ Cow::Owned(fast)
607
+ } else {
608
+ Cow::Owned(WHITESPACE_NORMALIZE.replace_all(paragraph, " ").into_owned())
609
+ };
610
+
611
+ let cleaned = NEWLINE_CLEANUP.replace_all(&collapsed, "\n");
612
+
613
+ let mut first_line = true;
614
+ for line in cleaned.split('\n') {
615
+ let line = line.trim();
616
+ if !line.is_empty() {
617
+ if !first_line {
618
+ result.push('\n');
619
+ }
620
+ result.push_str(line);
621
+ first_line = false;
622
+ }
623
+ }
624
+ }
625
+
626
+ result
627
+ }
628
+
629
/// Unit tests for the quality-scoring and text-cleaning helpers.
/// Only compiled when both `cfg(test)` and the `quality` feature are active.
#[cfg(all(test, feature = "quality"))]
mod tests {
    use super::*;

    // --- calculate_quality_score ---

    #[test]
    fn test_calculate_quality_score_empty_text() {
        assert_eq!(calculate_quality_score("", None), 0.0);
        assert_eq!(calculate_quality_score(" ", None), 0.0);
        assert_eq!(calculate_quality_score("\n\n\n", None), 0.0);
    }

    #[test]
    fn test_calculate_quality_score_short_text() {
        let text = "Hello";
        let score = calculate_quality_score(text, None);
        assert_eq!(score, 0.1);
    }

    #[test]
    fn test_calculate_quality_score_normal_text() {
        let text =
            "This is a normal sentence with proper punctuation. It has multiple sentences. And proper structure.";
        let score = calculate_quality_score(text, None);
        assert!(score > 0.5);
        assert!(score <= 1.0);
    }

    // --- clean_extracted_text ---

    #[test]
    fn test_clean_extracted_text_empty() {
        assert_eq!(clean_extracted_text(""), "");
        assert_eq!(clean_extracted_text(" "), "");
    }

    #[test]
    fn test_clean_extracted_text_removes_scripts() {
        let text = "Before <script>alert('test');</script> After";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.contains("<script"));
        assert!(cleaned.contains("Before"));
        assert!(cleaned.contains("After"));
    }

    // --- normalize_spaces ---

    #[test]
    fn test_normalize_spaces_empty() {
        assert_eq!(normalize_spaces(""), "");
        assert_eq!(normalize_spaces(" "), "");
    }

    #[test]
    fn test_normalize_spaces_single_paragraph() {
        let text = "This is a test";
        let normalized = normalize_spaces(text);
        assert_eq!(normalized, "This is a test");
    }

    #[test]
    fn test_calculate_quality_score_with_metadata() {
        let text = "This is a normal text with proper structure.";
        let mut metadata = AHashMap::new();
        metadata.insert("title".to_string(), "Test Title".to_string());
        metadata.insert("author".to_string(), "Test Author".to_string());

        let score = calculate_quality_score(text, Some(&metadata));
        assert!(score > 0.0);
        assert!(score <= 1.0);
    }

    // --- penalty helpers ---

    #[test]
    fn test_calculate_ocr_penalty_clean_text() {
        let text = "This is clean text without artifacts";
        let penalty = calculate_ocr_penalty(text, text.len() as f64);
        assert_eq!(penalty, 0.0);
    }

    #[test]
    fn test_calculate_ocr_penalty_with_artifacts() {
        let text = "Text with excessive spaces and ....... dots";
        let penalty = calculate_ocr_penalty(text, text.len() as f64);
        assert!(penalty > 0.0);
        assert!(penalty <= 1.0);
    }

    #[test]
    fn test_calculate_script_penalty_clean_text() {
        let text = "This is clean text without scripts";
        let penalty = calculate_script_penalty(text, text.len() as f64);
        assert_eq!(penalty, 0.0);
    }

    #[test]
    fn test_calculate_script_penalty_with_js() {
        let text = "function test() { return 42; }";
        let penalty = calculate_script_penalty(text, text.len() as f64);
        assert!(penalty > 0.0);
    }

    #[test]
    fn test_calculate_navigation_penalty_clean_text() {
        let text = "This is clean text without navigation";
        let penalty = calculate_navigation_penalty(text, text.len() as f64);
        assert_eq!(penalty, 0.0);
    }

    #[test]
    fn test_calculate_navigation_penalty_with_nav() {
        let text = "Skip to main content and Back to top links everywhere";
        let penalty = calculate_navigation_penalty(text, text.len() as f64);
        assert!(penalty > 0.0);
    }

    // --- bonus helpers ---

    #[test]
    fn test_calculate_structure_bonus_empty() {
        assert_eq!(calculate_structure_bonus(""), 0.0);
    }

    #[test]
    fn test_calculate_structure_bonus_well_structured() {
        let text = "This is a sentence. This is another sentence.\n\nNew paragraph here. More content.";
        let bonus = calculate_structure_bonus(text);
        assert!(bonus > 0.0);
        assert!(bonus <= 1.0);
    }

    #[test]
    fn test_calculate_metadata_bonus_empty() {
        let metadata = AHashMap::new();
        let bonus = calculate_metadata_bonus(&metadata);
        assert_eq!(bonus, 0.0);
    }

    #[test]
    fn test_calculate_metadata_bonus_full() {
        let mut metadata = AHashMap::new();
        metadata.insert("title".to_string(), "Title".to_string());
        metadata.insert("author".to_string(), "Author".to_string());
        metadata.insert("subject".to_string(), "Subject".to_string());
        metadata.insert("description".to_string(), "Description".to_string());
        metadata.insert("keywords".to_string(), "Keywords".to_string());

        let bonus = calculate_metadata_bonus(&metadata);
        assert_eq!(bonus, 1.0);
    }

    // --- cleaning passes ---

    #[test]
    fn test_clean_extracted_text_removes_styles() {
        let text = "Before <style>.class { color: red; }</style> After";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.contains("<style"));
        assert!(cleaned.contains("Before"));
        assert!(cleaned.contains("After"));
    }

    #[test]
    fn test_clean_extracted_text_ocr_artifacts() {
        let text = "Text with excessive spaces";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.contains(" "));
    }

    #[test]
    fn test_clean_extracted_text_navigation() {
        let text = "Content Skip to main content more content";
        let cleaned = clean_extracted_text(text);
        assert!(cleaned.contains("Content"));
        assert!(cleaned.contains("more content"));
    }

    #[test]
    fn test_clean_repeated_punctuation_ascii_helper() {
        let input = "Wow!!! Really??? Sure...";
        let cleaned = clean_repeated_punctuation_ascii(input).expect("Should collapse punctuation");
        assert_eq!(cleaned, "Wow! Really? Sure.");
    }

    #[test]
    fn test_clean_repeated_punctuation_non_ascii_passthrough() {
        assert!(clean_repeated_punctuation_ascii("¿Qué tal?").is_none());
    }

    #[test]
    fn test_normalize_spaces_multiple_paragraphs() {
        let text = "First paragraph.\n\nSecond paragraph.";
        let normalized = normalize_spaces(text);
        assert!(normalized.contains("\n\n"));
    }

    #[test]
    fn test_normalize_spaces_preserves_paragraphs() {
        let text = "Para 1\n\n\n\nPara 2";
        let normalized = normalize_spaces(text);
        assert_eq!(normalized, "Para 1\n\nPara 2");
    }

    // --- dash handling ---

    #[test]
    fn test_count_non_table_dash_artifacts() {
        let text = "Some text --- with dashes";
        let count = count_non_table_dash_artifacts(text);
        assert!(count > 0);
    }

    #[test]
    fn test_count_non_table_dash_artifacts_preserves_tables() {
        let text = "| Header |\n|--------|\n| Data |";
        let count = count_non_table_dash_artifacts(text);
        assert_eq!(count, 0);
    }

    #[test]
    fn test_clean_dashes_preserve_tables_simple() {
        let text = Cow::Borrowed("| Col1 |\n|------|\n| Data |");
        let result = clean_dashes_preserve_tables(text);
        assert!(result.contains("|------"));
    }

    #[test]
    fn test_clean_dashes_preserve_tables_replaces_non_table() {
        let text = Cow::Borrowed("Text with --- dashes");
        let result = clean_dashes_preserve_tables(text);
        assert!(result.contains("..."));
        assert!(!result.contains("---"));
    }

    #[test]
    fn test_sum_match_lengths() {
        let text = "test ... test ... test";
        let count = sum_match_lengths(text, &REPEATED_PUNCT_PATTERN);
        assert!(count > 0);
    }

    // --- score clamping ---

    #[test]
    fn test_quality_score_large_text_with_ocr_issues() {
        let text = "a".repeat(2000) + " " + &"b".repeat(2000);
        let score = calculate_quality_score(&text, None);
        assert!(score >= 0.0);
        assert!(score <= 1.0);
    }

    #[test]
    fn test_quality_score_clamped_to_range() {
        let perfect_text = "This is perfect text. ".repeat(100);
        let score = calculate_quality_score(&perfect_text, None);
        assert!(score >= 0.0);
        assert!(score <= 1.0);
    }

    // --- ASCII fast paths ---

    #[test]
    fn test_clean_extracted_text_scattered_chars() {
        let text = "a b c scattered";
        let cleaned = clean_extracted_text(text);
        assert!(!cleaned.is_empty());
    }

    #[cfg_attr(coverage, ignore = "coverage instrumentation perturbs ASCII fast path heuristics")]
    #[test]
    fn test_collapse_scattered_ascii_trigger() {
        let original = "S p a c e d";
        let collapsed = collapse_scattered_ascii(original).expect("fast path should trigger");
        assert_eq!(collapsed.trim(), "spaced");
    }

    #[test]
    fn test_collapse_scattered_ascii_non_ascii() {
        assert!(collapse_scattered_ascii("מ ש ה ו").is_none());
    }

    #[test]
    fn test_normalize_whitespace_ascii_spaces() {
        let input = "Hello \tWorld\rWelcome";
        let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
        assert_eq!(normalized, "Hello World Welcome");
    }

    #[test]
    fn test_normalize_whitespace_ascii_newlines() {
        let input = "Line1\n \n\n \nLine2";
        let normalized = normalize_whitespace_ascii(input).expect("ascii fast path should trigger");
        assert_eq!(normalized, "Line1\n\nLine2");
    }

    #[test]
    fn test_normalize_whitespace_ascii_no_change() {
        assert!(normalize_whitespace_ascii("Clean text").is_none());
    }

    #[test]
    fn test_normalize_whitespace_ascii_non_ascii() {
        assert!(normalize_whitespace_ascii("שלום שלום").is_none());
    }

    #[test]
    fn test_normalize_spaces_ascii_fast_path() {
        let input = "Hello world\n\nSecond line";
        let normalized = normalize_spaces(input);
        assert_eq!(normalized, "Hello world\n\nSecond line");
    }

    #[test]
    fn test_normalize_whitespace_cow_no_changes() {
        let text = Cow::Borrowed("normaltext");
        let result = normalize_whitespace_cow(text);
        assert_eq!(result.as_ref(), "normaltext");
    }

    #[test]
    fn test_normalize_whitespace_cow_with_changes() {
        let text = Cow::Borrowed("text with spaces");
        let result = normalize_whitespace_cow(text);
        assert!(matches!(result, Cow::Owned(_)));
    }

    #[test]
    fn test_clean_scripts_no_scripts() {
        let text = Cow::Borrowed("clean text");
        let result = clean_scripts(text);
        assert!(matches!(result, Cow::Borrowed(_)));
    }

    #[test]
    fn test_clean_scripts_with_script_tag() {
        let text = Cow::Borrowed("<script>code</script>");
        let result = clean_scripts(text);
        assert!(!result.contains("<script"));
    }

    #[test]
    fn test_quality_constants() {
        assert_eq!(MIN_TEXT_LENGTH, 10);
        assert_eq!(LARGE_TEXT_LENGTH, 1000);
        assert_eq!(OCR_PENALTY_WEIGHT, 0.3);
    }
}