kreuzberg 4.0.0.pre.rc.13 → 4.0.0.pre.rc.15

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (369)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +538 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +104 -2
  8. data/README.md +454 -454
  9. data/Rakefile +33 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -341
  12. data/ext/kreuzberg_rb/extconf.rb +45 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
  14. data/ext/kreuzberg_rb/native/Cargo.lock +6750 -6941
  15. data/ext/kreuzberg_rb/native/Cargo.toml +53 -54
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +52 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
  23. data/extconf.rb +28 -28
  24. data/kreuzberg.gemspec +214 -214
  25. data/lib/kreuzberg/api_proxy.rb +142 -142
  26. data/lib/kreuzberg/cache_api.rb +81 -81
  27. data/lib/kreuzberg/cli.rb +55 -55
  28. data/lib/kreuzberg/cli_proxy.rb +127 -127
  29. data/lib/kreuzberg/config.rb +724 -724
  30. data/lib/kreuzberg/error_context.rb +80 -80
  31. data/lib/kreuzberg/errors.rb +118 -118
  32. data/lib/kreuzberg/extraction_api.rb +340 -340
  33. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  36. data/lib/kreuzberg/result.rb +279 -279
  37. data/lib/kreuzberg/setup_lib_path.rb +80 -80
  38. data/lib/kreuzberg/validator_protocol.rb +89 -89
  39. data/lib/kreuzberg/version.rb +5 -5
  40. data/lib/kreuzberg.rb +109 -109
  41. data/lib/{pdfium.dll → libpdfium.so} +0 -0
  42. data/sig/kreuzberg/internal.rbs +184 -184
  43. data/sig/kreuzberg.rbs +546 -546
  44. data/spec/binding/cache_spec.rb +227 -227
  45. data/spec/binding/cli_proxy_spec.rb +85 -85
  46. data/spec/binding/cli_spec.rb +55 -55
  47. data/spec/binding/config_spec.rb +345 -345
  48. data/spec/binding/config_validation_spec.rb +283 -283
  49. data/spec/binding/error_handling_spec.rb +213 -213
  50. data/spec/binding/errors_spec.rb +66 -66
  51. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  52. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  53. data/spec/binding/plugins/validator_spec.rb +274 -274
  54. data/spec/fixtures/config.toml +39 -39
  55. data/spec/fixtures/config.yaml +41 -41
  56. data/spec/fixtures/invalid_config.toml +4 -4
  57. data/spec/smoke/package_spec.rb +178 -178
  58. data/spec/spec_helper.rb +42 -42
  59. data/vendor/Cargo.toml +2 -2
  60. data/vendor/kreuzberg/Cargo.toml +5 -5
  61. data/vendor/kreuzberg/README.md +230 -230
  62. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
  63. data/vendor/kreuzberg/build.rs +887 -843
  64. data/vendor/kreuzberg/src/api/error.rs +81 -81
  65. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  66. data/vendor/kreuzberg/src/api/mod.rs +87 -79
  67. data/vendor/kreuzberg/src/api/server.rs +353 -353
  68. data/vendor/kreuzberg/src/api/types.rs +170 -170
  69. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  70. data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
  71. data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
  72. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  73. data/vendor/kreuzberg/src/core/config.rs +1080 -1080
  74. data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
  75. data/vendor/kreuzberg/src/core/io.rs +329 -329
  76. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  77. data/vendor/kreuzberg/src/core/mod.rs +47 -47
  78. data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
  79. data/vendor/kreuzberg/src/embeddings.rs +500 -500
  80. data/vendor/kreuzberg/src/error.rs +431 -431
  81. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  82. data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
  83. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  84. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  85. data/vendor/kreuzberg/src/extraction/html.rs +634 -601
  86. data/vendor/kreuzberg/src/extraction/image.rs +491 -491
  87. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -574
  88. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
  89. data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
  90. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  91. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  92. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  93. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  94. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
  95. data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
  96. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  97. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  98. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  99. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  100. data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
  101. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
  102. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
  103. data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
  104. data/vendor/kreuzberg/src/extractors/email.rs +157 -157
  105. data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
  106. data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
  107. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
  108. data/vendor/kreuzberg/src/extractors/html.rs +407 -407
  109. data/vendor/kreuzberg/src/extractors/image.rs +219 -219
  110. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
  111. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
  112. data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
  113. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
  114. data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
  115. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  116. data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
  117. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
  118. data/vendor/kreuzberg/src/extractors/pdf.rs +749 -749
  119. data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
  120. data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
  121. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
  122. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  123. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  124. data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
  125. data/vendor/kreuzberg/src/extractors/text.rs +265 -265
  126. data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
  127. data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
  128. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  129. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  130. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  131. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  132. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  133. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  134. data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
  135. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  136. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  137. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  138. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
  139. data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
  140. data/vendor/kreuzberg/src/lib.rs +113 -113
  141. data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
  142. data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
  143. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  144. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  145. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  146. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  147. data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
  148. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  149. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  150. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
  151. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  152. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  153. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  154. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  155. data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
  156. data/vendor/kreuzberg/src/pdf/bundled.rs +452 -346
  157. data/vendor/kreuzberg/src/pdf/error.rs +130 -130
  158. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  159. data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
  160. data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
  161. data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
  162. data/vendor/kreuzberg/src/pdf/table.rs +420 -420
  163. data/vendor/kreuzberg/src/pdf/text.rs +240 -240
  164. data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
  165. data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
  166. data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
  167. data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
  168. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
  169. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  170. data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
  171. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  172. data/vendor/kreuzberg/src/text/mod.rs +25 -25
  173. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  174. data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
  175. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  176. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  177. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  178. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  179. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  180. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  181. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  182. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  183. data/vendor/kreuzberg/src/types.rs +1055 -1055
  184. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  185. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  186. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  187. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  188. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  189. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  190. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  191. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  192. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  193. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  194. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  195. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  196. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  198. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  199. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  200. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  201. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  202. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  203. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  204. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  205. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  206. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  207. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  208. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  209. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  210. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  211. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  212. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  213. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  214. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  215. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  216. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  217. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  218. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  219. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  220. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  221. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  222. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  223. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  224. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  225. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  226. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  227. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  228. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  229. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  230. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  231. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  232. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  233. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  234. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  235. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  236. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  237. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  238. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  239. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  240. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  241. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  242. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  243. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  244. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  245. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  246. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  247. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  248. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  249. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  250. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  251. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  252. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  253. data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
  254. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
  255. data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
  256. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  257. data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
  258. data/vendor/kreuzberg/tests/config_features.rs +612 -612
  259. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
  260. data/vendor/kreuzberg/tests/core_integration.rs +510 -510
  261. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  262. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
  263. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  264. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  265. data/vendor/kreuzberg/tests/email_integration.rs +327 -327
  266. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  267. data/vendor/kreuzberg/tests/error_handling.rs +402 -402
  268. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  269. data/vendor/kreuzberg/tests/format_integration.rs +165 -164
  270. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  271. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  272. data/vendor/kreuzberg/tests/image_integration.rs +255 -255
  273. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  274. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  275. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  276. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  277. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  278. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  279. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  280. data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
  281. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
  282. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
  283. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
  284. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  285. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
  286. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  287. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  288. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
  289. data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
  290. data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
  291. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
  292. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
  293. data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
  294. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  295. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
  296. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
  297. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
  298. data/vendor/kreuzberg/tests/security_validation.rs +416 -416
  299. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  300. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
  301. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
  302. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
  303. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  304. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
  305. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
  306. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
  307. data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
  308. data/vendor/kreuzberg-tesseract/LICENSE +22 -22
  309. data/vendor/kreuzberg-tesseract/README.md +399 -399
  310. data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
  311. data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
  312. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
  313. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
  314. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
  315. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
  316. data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
  317. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
  318. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
  319. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
  320. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
  321. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
  322. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
  323. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
  324. data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
  325. data/vendor/rb-sys/Cargo.lock +393 -393
  326. data/vendor/rb-sys/Cargo.toml +70 -70
  327. data/vendor/rb-sys/Cargo.toml.orig +57 -57
  328. data/vendor/rb-sys/LICENSE-APACHE +190 -190
  329. data/vendor/rb-sys/LICENSE-MIT +21 -21
  330. data/vendor/rb-sys/build/features.rs +111 -111
  331. data/vendor/rb-sys/build/main.rs +286 -286
  332. data/vendor/rb-sys/build/stable_api_config.rs +155 -155
  333. data/vendor/rb-sys/build/version.rs +50 -50
  334. data/vendor/rb-sys/readme.md +36 -36
  335. data/vendor/rb-sys/src/bindings.rs +21 -21
  336. data/vendor/rb-sys/src/hidden.rs +11 -11
  337. data/vendor/rb-sys/src/lib.rs +35 -35
  338. data/vendor/rb-sys/src/macros.rs +371 -371
  339. data/vendor/rb-sys/src/memory.rs +53 -53
  340. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
  341. data/vendor/rb-sys/src/special_consts.rs +31 -31
  342. data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
  343. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
  344. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
  345. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
  346. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
  347. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
  348. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
  349. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
  350. data/vendor/rb-sys/src/stable_api.rs +260 -260
  351. data/vendor/rb-sys/src/symbol.rs +31 -31
  352. data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
  353. data/vendor/rb-sys/src/utils.rs +89 -89
  354. data/vendor/rb-sys/src/value_type.rs +7 -7
  355. metadata +81 -22
  356. data/vendor/kreuzberg-ffi/Cargo.toml +0 -63
  357. data/vendor/kreuzberg-ffi/README.md +0 -851
  358. data/vendor/kreuzberg-ffi/build.rs +0 -176
  359. data/vendor/kreuzberg-ffi/cbindgen.toml +0 -27
  360. data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +0 -12
  361. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +0 -12
  362. data/vendor/kreuzberg-ffi/kreuzberg.h +0 -1087
  363. data/vendor/kreuzberg-ffi/src/lib.rs +0 -3616
  364. data/vendor/kreuzberg-ffi/src/panic_shield.rs +0 -247
  365. data/vendor/kreuzberg-ffi/tests.disabled/README.md +0 -48
  366. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +0 -299
  367. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +0 -346
  368. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +0 -232
  369. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +0 -470
@@ -1,902 +1,902 @@
- use crate::error::{KreuzbergError, Result};
- use crate::stopwords::STOPWORDS;
- use crate::text::token_reduction::config::TokenReductionConfig;
- use ahash::{AHashMap, AHashSet};
- use once_cell::sync::Lazy;
- use regex::Regex;
- use std::sync::Arc;
-
- static HTML_COMMENT_REGEX: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"<!--.*?-->").expect("HTML comment regex pattern is valid and should compile"));
- static EXCESSIVE_NEWLINES_REGEX: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"\n{3,}").expect("Excessive newlines regex pattern is valid and should compile"));
- static MULTIPLE_SPACES_REGEX: Lazy<Regex> =
- Lazy::new(|| Regex::new(r" {2,}").expect("Multiple spaces regex pattern is valid and should compile"));
- static MARKDOWN_CODE_BLOCK_REGEX: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"```[\s\S]*?```").expect("Markdown code block regex pattern is valid and should compile"));
- static MARKDOWN_INLINE_CODE_REGEX: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"`[^`\n]+`").expect("Markdown inline code regex pattern is valid and should compile"));
- static MARKDOWN_HEADERS_REGEX: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"^#{1,6}\s+").expect("Markdown headers regex pattern is valid and should compile"));
- static MARKDOWN_LISTS_REGEX: Lazy<Regex> =
- Lazy::new(|| Regex::new(r"^[ \t]*[-*+]\s+").expect("Markdown lists regex pattern is valid and should compile"));
-
- pub struct FilterPipeline {
- config: Arc<TokenReductionConfig>,
- stopwords: AHashSet<String>,
- preserve_patterns: Vec<Regex>,
- language: String,
- }
-
- impl FilterPipeline {
- pub fn new(config: &Arc<TokenReductionConfig>, language: &str) -> Result<Self> {
- let mut stopwords = STOPWORDS.get(language).cloned().unwrap_or_else(|| {
- STOPWORDS
- .get("en")
- .cloned()
- .expect("English stopwords must be available - indicates build failure if missing")
- });
-
- if let Some(ref custom) = config.custom_stopwords
- && let Some(custom_for_lang) = custom.get(language)
- {
- for word in custom_for_lang {
- stopwords.insert(word.to_lowercase());
- }
- }
-
- let preserve_patterns: std::result::Result<Vec<Regex>, _> = config
- .preserve_patterns
- .iter()
- .map(|pattern| Regex::new(pattern))
- .collect();
-
- let preserve_patterns =
- preserve_patterns.map_err(|e| KreuzbergError::validation(format!("Invalid regex pattern: {}", e)))?;
-
- Ok(Self {
- config: Arc::clone(config),
- stopwords,
- preserve_patterns,
- language: language.to_string(),
- })
- }
-
- pub fn apply_light_filters(&self, text: &str) -> String {
- let mut result = text.to_string();
-
- let mut preserved_blocks = AHashMap::new();
- if self.config.preserve_markdown {
- result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
- }
-
- result = HTML_COMMENT_REGEX.replace_all(&result, "").to_string();
-
- result = MULTIPLE_SPACES_REGEX.replace_all(&result, " ").to_string();
-
- result = EXCESSIVE_NEWLINES_REGEX.replace_all(&result, "\n\n").to_string();
-
- if self.config.preserve_markdown {
- result = self.preserve_markdown_structure(&result);
- }
-
- result = self.restore_preserved_blocks(&result, &preserved_blocks);
-
- result
- }
-
- pub fn apply_moderate_filters(&self, text: &str) -> String {
- let mut result = self.apply_light_filters(text);
-
- let mut preserved_blocks = AHashMap::new();
- if self.config.preserve_code {
- result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
- }
-
- if self.config.preserve_markdown {
- result = self.remove_stopwords_preserving_markdown(&result);
- } else {
- result = self.remove_stopwords(&result);
- }
-
- result = self.restore_preserved_blocks(&result, &preserved_blocks);
-
- result
- }
-
- fn remove_stopwords_preserving_markdown(&self, text: &str) -> String {
- let lines: Vec<&str> = text.lines().collect();
- let mut processed_lines = Vec::new();
-
- for line in lines {
- if MARKDOWN_HEADERS_REGEX.is_match(line) {
- processed_lines.push(line.to_string());
- continue;
- }
-
- if MARKDOWN_LISTS_REGEX.is_match(line) {
- processed_lines.push(line.to_string());
- continue;
- }
-
- if line.trim().starts_with('|') && line.trim().ends_with('|') {
- processed_lines.push(line.to_string());
- continue;
- }
-
- let processed_line = self.remove_stopwords(line);
- processed_lines.push(processed_line);
- }
-
- processed_lines.join("\n")
- }
-
- fn remove_stopwords(&self, text: &str) -> String {
- let words: Vec<&str> = text.split_whitespace().collect();
- let mut filtered_words = Vec::with_capacity(words.len());
-
- for word in words {
- if word.is_empty() {
- continue;
- }
-
- if self.should_preserve_word(word) {
- filtered_words.push(word);
- continue;
- }
-
- if word.len() > 1 && word.bytes().all(|b| b.is_ascii_uppercase() || !b.is_ascii_alphabetic()) {
- filtered_words.push(word);
- continue;
- }
-
- if word.bytes().any(|b| b.is_ascii_digit()) {
- filtered_words.push(word);
- continue;
- }
-
- let clean_word = if word.is_ascii() {
- let clean_bytes: Vec<u8> = word
- .bytes()
- .filter(|&b| b.is_ascii_alphabetic())
- .map(|b| b.to_ascii_lowercase())
- .collect();
- String::from_utf8(clean_bytes).unwrap_or_else(|_| {
- word.chars()
- .filter(|c| c.is_alphabetic())
- .collect::<String>()
- .to_lowercase()
- })
- } else {
- word.chars()
- .filter(|c| c.is_alphabetic())
- .collect::<String>()
- .to_lowercase()
- };
-
- if clean_word.is_empty() {
- filtered_words.push(word);
- continue;
- }
-
- if clean_word.len() <= 1 {
- filtered_words.push(word);
- continue;
- }
-
- if !self.stopwords.contains(&clean_word) {
- filtered_words.push(word);
- }
- }
-
- filtered_words.join(" ")
- }
-
- /// Get the language code for this filter pipeline.
- ///
- /// Primarily useful for testing and debugging to verify language configuration.
- #[cfg_attr(not(test), allow(dead_code))]
- pub fn language(&self) -> &str {
- &self.language
- }
-
- /// Check if a word should be preserved based on configured patterns.
- fn should_preserve_word(&self, word: &str) -> bool {
- self.preserve_patterns.iter().any(|pattern| pattern.is_match(word))
- }
-
- /// Split a word into prefix (non-alphanumeric), core (alphanumeric), and suffix (non-alphanumeric).
- ///
- /// This is useful for handling punctuation-wrapped words like "(hello)" or "world!".
- /// Currently used in tests; reserved for future word boundary-aware filtering.
- #[cfg_attr(not(test), allow(dead_code))]
- fn split_word_boundaries(&self, word: &str) -> (String, String, String) {
- let chars: Vec<char> = word.chars().collect();
- let mut start = 0;
- let mut end = chars.len();
-
- while start < chars.len() && !chars[start].is_alphanumeric() {
- start += 1;
- }
-
- while end > start && !chars[end - 1].is_alphanumeric() {
- end -= 1;
- }
-
- let prefix: String = chars[..start].iter().collect();
- let core: String = chars[start..end].iter().collect();
- let suffix: String = chars[end..].iter().collect();
-
- (prefix, core, suffix)
- }
-
- fn preserve_markdown_structure(&self, text: &str) -> String {
- let lines: Vec<&str> = text.lines().collect();
- let mut processed_lines = Vec::new();
-
- for line in lines {
- if MARKDOWN_HEADERS_REGEX.is_match(line) {
- processed_lines.push(line);
- continue;
- }
-
- if MARKDOWN_LISTS_REGEX.is_match(line) {
- processed_lines.push(line);
- continue;
- }
-
- processed_lines.push(line);
- }
-
- processed_lines.join("\n")
- }
-
- fn extract_and_preserve_code(&self, text: &str, preserved: &mut AHashMap<String, String>) -> String {
- let mut result = text.to_string();
- let mut code_block_id = 0;
- let mut inline_code_id = 0;
-
- result = MARKDOWN_CODE_BLOCK_REGEX
- .replace_all(&result, |caps: &regex::Captures| {
- let code_block = caps[0].to_string();
- let placeholder = format!("__CODEBLOCK_{}__", code_block_id);
- code_block_id += 1;
- preserved.insert(placeholder.clone(), code_block);
- placeholder
- })
- .to_string();
-
- result = MARKDOWN_INLINE_CODE_REGEX
- .replace_all(&result, |caps: &regex::Captures| {
- let inline_code = caps[0].to_string();
- let placeholder = format!("__INLINECODE_{}__", inline_code_id);
- inline_code_id += 1;
- preserved.insert(placeholder.clone(), inline_code);
- placeholder
- })
- .to_string();
-
- result
- }
-
- fn restore_preserved_blocks(&self, text: &str, preserved: &AHashMap<String, String>) -> String {
- let mut result = text.to_string();
-
- for (placeholder, original_content) in preserved {
- result = result.replace(placeholder, original_content);
- }
-
- result
- }
- }
-
- #[cfg(all(test, feature = "stopwords"))]
- mod tests {
- use super::*;
-
- #[test]
- fn test_stopword_removal() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The quick brown fox is jumping over the lazy dog";
- let result = pipeline.remove_stopwords(input);
-
- assert!(!result.contains(" the "));
- assert!(!result.contains(" is "));
- assert!(result.contains("quick"));
- assert!(result.contains("brown"));
- assert!(result.contains("fox"));
- }
-
- #[test]
- fn test_preserve_patterns() {
- let config = TokenReductionConfig {
- preserve_patterns: vec!["\\b[A-Z]{2,}\\b".to_string()],
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The NASA mission is a success";
- let result = pipeline.remove_stopwords(input);
-
- assert!(result.contains("NASA"));
- assert!(result.contains("mission"));
- assert!(result.contains("success"));
- }
-
- #[test]
- fn test_markdown_preservation() {
- let config = TokenReductionConfig {
- preserve_markdown: true,
- preserve_code: true,
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "# Header\nThis is `code` and ```\ncode block\n``` text";
- let result = pipeline.apply_moderate_filters(input);
-
- assert!(result.contains("# Header"));
- assert!(result.contains("`code`"));
- assert!(result.contains("```\ncode block\n```"));
- }
-
- #[test]
- fn test_apply_light_filters_removes_html_comments() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "Text before <!-- comment --> text after";
- let result = pipeline.apply_light_filters(input);
-
- assert!(!result.contains("<!-- comment -->"));
- assert!(result.contains("Text before"));
- assert!(result.contains("text after"));
- }
-
- #[test]
- fn test_apply_light_filters_normalizes_whitespace() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "Text with multiple spaces";
- let result = pipeline.apply_light_filters(input);
-
- assert!(!result.contains(" "));
- assert!(result.contains("Text with multiple spaces"));
- }
-
- #[test]
- fn test_apply_light_filters_reduces_newlines() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "Paragraph 1\n\n\n\n\nParagraph 2";
- let result = pipeline.apply_light_filters(input);
-
- assert!(!result.contains("\n\n\n"));
- assert!(result.contains("Paragraph 1"));
- assert!(result.contains("Paragraph 2"));
- }
-
- #[test]
- fn test_stopword_removal_preserves_uppercase() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The API is working WITH the SDK";
- let result = pipeline.remove_stopwords(input);
-
- assert!(result.contains("API"));
- assert!(result.contains("SDK"));
- assert!(result.contains("WITH"));
- assert!(!result.contains("The "));
- assert!(!result.contains(" is "));
- }
-
- #[test]
- fn test_stopword_removal_preserves_numbers() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The version is 3.14 and the count is 42";
- let result = pipeline.remove_stopwords(input);
-
- assert!(result.contains("3.14"));
- assert!(result.contains("42"));
- assert!(result.contains("version"));
- assert!(result.contains("count"));
- }
-
- #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
- #[test]
- fn test_stopword_removal_handles_punctuation() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "Hello, the world! This is great.";
- let result = pipeline.remove_stopwords(input);
-
- assert!(result.contains("Hello,"));
- assert!(result.contains("world!"));
- assert!(result.contains("great."));
- }
-
- #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
- #[test]
- fn test_custom_stopwords() {
- use std::collections::HashMap;
-
- let mut custom_stopwords = HashMap::new();
- custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "word".to_string()]);
-
- let config = TokenReductionConfig {
- custom_stopwords: Some(custom_stopwords),
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "This is a custom word test";
- let result = pipeline.remove_stopwords(input);
-
- assert!(!result.contains("custom"));
- assert!(!result.contains("word"));
- assert!(result.contains("test"));
- }
-
- #[test]
- fn test_spanish_stopwords() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "es").unwrap();
-
- let input = "El perro grande bonito tiene";
- let result = pipeline.remove_stopwords(input);
-
- assert!(result.contains("perro"));
- assert!(result.contains("grande"));
- assert!(result.contains("bonito"));
- let words: Vec<&str> = result.split_whitespace().collect();
- assert!(!words.contains(&"el"));
- assert!(!words.contains(&"El"));
- }
-
- #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
- #[test]
- fn test_unknown_language_fallback() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "unknown").unwrap();
-
- let input = "The quick test with unknown language";
- let result = pipeline.remove_stopwords(input);
-
- assert!(!result.contains("The "));
- assert!(result.contains("quick"));
- assert!(result.contains("test"));
- }
-
- #[test]
- fn test_markdown_header_preservation() {
- let config = TokenReductionConfig {
- preserve_markdown: true,
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "# Header 1\n## Header 2\n### Header 3\nRegular text";
- let result = pipeline.remove_stopwords_preserving_markdown(input);
-
- assert!(result.contains("# Header 1"));
- assert!(result.contains("## Header 2"));
- assert!(result.contains("### Header 3"));
- }
-
- #[test]
- fn test_markdown_list_preservation() {
- let config = TokenReductionConfig {
- preserve_markdown: true,
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "- Item 1\n* Item 2\n+ Item 3";
- let result = pipeline.remove_stopwords_preserving_markdown(input);
-
- assert!(result.contains("- Item 1"));
- assert!(result.contains("* Item 2"));
- assert!(result.contains("+ Item 3"));
- }
-
- #[test]
- fn test_markdown_table_preservation() {
- let config = TokenReductionConfig {
- preserve_markdown: true,
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "| Header 1 | Header 2 |\n|----------|----------|\n| Cell 1 | Cell 2 |";
- let result = pipeline.remove_stopwords_preserving_markdown(input);
-
- assert!(result.contains("| Header 1 | Header 2 |"));
- assert!(result.contains("|----------|----------|"));
- }
-
- #[test]
- fn test_code_block_preservation() {
- let config = Arc::new(TokenReductionConfig {
- preserve_code: true,
- ..Default::default()
- });
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let mut preserved = AHashMap::new();
- let input = "Text before\n```rust\nfn main() {}\n```\nText after";
- let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
- assert_eq!(preserved.len(), 1);
- assert!(preserved.values().any(|v| v.contains("fn main()")));
- assert!(result.contains("__CODEBLOCK_0__"));
- }
-
- #[test]
- fn test_inline_code_preservation() {
- let config = Arc::new(TokenReductionConfig {
- preserve_code: true,
- ..Default::default()
- });
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let mut preserved = AHashMap::new();
- let input = "Use the `println!` macro";
- let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
- assert_eq!(preserved.len(), 1);
- assert!(preserved.values().any(|v| v == "`println!`"));
- assert!(result.contains("__INLINECODE_0__"));
- }
-
- #[test]
- fn test_restore_preserved_blocks() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let mut preserved = AHashMap::new();
- preserved.insert("__CODEBLOCK_0__".to_string(), "```code```".to_string());
- preserved.insert("__INLINECODE_0__".to_string(), "`inline`".to_string());
- let input = "Text __CODEBLOCK_0__ and __INLINECODE_0__ here";
- let result = pipeline.restore_preserved_blocks(input, &preserved);
-
- assert!(result.contains("```code```"));
- assert!(result.contains("`inline`"));
- assert!(!result.contains("__CODEBLOCK_0__"));
- assert!(!result.contains("__INLINECODE_0__"));
- }
-
- #[test]
- fn test_apply_moderate_filters_with_stopwords() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The quick brown fox is jumping";
- let result = pipeline.apply_moderate_filters(input);
-
- assert!(!result.contains("The "));
- assert!(!result.contains(" is "));
- assert!(result.contains("quick"));
- assert!(result.contains("brown"));
- }
-
- #[test]
- fn test_invalid_regex_pattern() {
- let config = TokenReductionConfig {
- preserve_patterns: vec!["[invalid".to_string()],
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let result = FilterPipeline::new(&config, "en");
-
- assert!(result.is_err());
- if let Err(err) = result {
- assert!(matches!(err, KreuzbergError::Validation { .. }));
- }
- }
-
- #[test]
- fn test_empty_input() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let result = pipeline.apply_light_filters("");
- assert_eq!(result, "");
-
- let result = pipeline.apply_moderate_filters("");
- assert_eq!(result, "");
- }
-
- #[test]
- fn test_stopword_removal_single_letter_words() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "I a x test";
- let result = pipeline.remove_stopwords(input);
-
- assert!(result.contains("I"));
- assert!(result.contains("x"));
- }
-
- #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
- #[test]
- fn test_stopword_removal_mixed_case() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The Test Is Working";
- let result = pipeline.remove_stopwords(input);
-
- assert!(!result.contains("The"));
- assert!(!result.contains("Is"));
- assert!(result.contains("Test"));
- assert!(result.contains("Working"));
- }
-
- #[test]
- fn test_lazy_regex_initialization() {
- let _ = &*HTML_COMMENT_REGEX;
- let _ = &*EXCESSIVE_NEWLINES_REGEX;
- let _ = &*MULTIPLE_SPACES_REGEX;
- let _ = &*MARKDOWN_CODE_BLOCK_REGEX;
- let _ = &*MARKDOWN_INLINE_CODE_REGEX;
- let _ = &*MARKDOWN_HEADERS_REGEX;
- let _ = &*MARKDOWN_LISTS_REGEX;
- }
-
- #[test]
- fn test_multiple_code_blocks_hashmap_approach() {
- let config = Arc::new(TokenReductionConfig {
- preserve_code: true,
- ..Default::default()
- });
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input =
- "Start ```rust\nlet x = 1;\n``` middle `inline1` text ```python\nprint('hi')\n``` and `inline2` end";
- let mut preserved = AHashMap::new();
- let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
- assert_eq!(preserved.len(), 4);
- assert!(preserved.contains_key("__CODEBLOCK_0__"));
- assert!(preserved.contains_key("__CODEBLOCK_1__"));
- assert!(preserved.contains_key("__INLINECODE_0__"));
- assert!(preserved.contains_key("__INLINECODE_1__"));
-
- assert_eq!(preserved.get("__CODEBLOCK_0__").unwrap(), "```rust\nlet x = 1;\n```");
- assert_eq!(preserved.get("__CODEBLOCK_1__").unwrap(), "```python\nprint('hi')\n```");
- assert_eq!(preserved.get("__INLINECODE_0__").unwrap(), "`inline1`");
- assert_eq!(preserved.get("__INLINECODE_1__").unwrap(), "`inline2`");
-
- let restored = pipeline.restore_preserved_blocks(&result, &preserved);
- assert!(restored.contains("```rust\nlet x = 1;\n```"));
- assert!(restored.contains("```python\nprint('hi')\n```"));
- assert!(restored.contains("`inline1`"));
- assert!(restored.contains("`inline2`"));
- assert!(!restored.contains("__CODEBLOCK_"));
- assert!(!restored.contains("__INLINECODE_"));
- }
-
- #[test]
- fn test_hashmap_order_independence() {
- let config = Arc::new(TokenReductionConfig {
- preserve_code: true,
- ..Default::default()
- });
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "Text `a` and `b` and `c` here";
- let mut preserved = AHashMap::new();
- let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
- assert_eq!(preserved.len(), 3);
- let restored = pipeline.restore_preserved_blocks(&result, &preserved);
-
- assert!(restored.contains("`a`"));
- assert!(restored.contains("`b`"));
- assert!(restored.contains("`c`"));
- assert_eq!(restored, "Text `a` and `b` and `c` here");
- }
-
- #[test]
- fn test_preserve_patterns_regex() {
- let config = TokenReductionConfig {
- preserve_patterns: vec![
- r"\b[A-Z]{2,}\b".to_string(),
- r"\b\d+\.\d+\.\d+\b".to_string(),
- r"@\w+".to_string(),
- ],
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The NASA and HTTP protocols version 1.2.3 by @john";
- let result = pipeline.remove_stopwords(input);
-
- assert!(result.contains("NASA"));
- assert!(result.contains("HTTP"));
- assert!(result.contains("1.2.3"));
- assert!(result.contains("@john"));
-
- assert!(!result.contains(" the "));
- assert!(!result.contains(" and "));
- assert!(!result.contains(" by "));
- }
-
- #[test]
- fn test_language_specific_stopwords() {
- let config_en = Arc::new(TokenReductionConfig::default());
- let pipeline_en = FilterPipeline::new(&config_en, "en").unwrap();
- assert_eq!(pipeline_en.language(), "en");
-
- let input_en = "the quick brown fox";
- let result_en = pipeline_en.remove_stopwords(input_en);
- assert!(!result_en.contains(" the "));
-
- let config_de = Arc::new(TokenReductionConfig::default());
- let pipeline_de = FilterPipeline::new(&config_de, "de").unwrap();
- assert_eq!(pipeline_de.language(), "de");
-
- let input_de = "der schnelle braune fuchs";
- let result_de = pipeline_de.remove_stopwords(input_de);
- assert!(!result_de.contains(" der "));
- assert!(result_de.contains("schnelle"));
- }
-
- #[test]
- fn test_language_fallback_to_english() {
- let config = Arc::new(TokenReductionConfig::default());
-
- let pipeline = FilterPipeline::new(&config, "unsupported_lang").unwrap();
- assert_eq!(pipeline.language(), "unsupported_lang");
-
- let input = "the quick brown fox";
- let result = pipeline.remove_stopwords(input);
-
- assert!(!result.contains(" the "));
- assert!(result.contains("quick"));
- }
-
- #[test]
- fn test_split_word_boundaries() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let (prefix, core, suffix) = pipeline.split_word_boundaries("(hello)");
- assert_eq!(prefix, "(");
- assert_eq!(core, "hello");
- assert_eq!(suffix, ")");
-
- let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("world!");
- assert_eq!(prefix2, "");
- assert_eq!(core2, "world");
- assert_eq!(suffix2, "!");
-
- let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("'test");
- assert_eq!(prefix3, "'");
- assert_eq!(core3, "test");
- assert_eq!(suffix3, "");
-
- let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("simple");
- assert_eq!(prefix4, "");
- assert_eq!(core4, "simple");
- assert_eq!(suffix4, "");
-
- let (prefix5, core5, suffix5) = pipeline.split_word_boundaries("\"example!!!\"");
- assert_eq!(prefix5, "\"");
- assert_eq!(core5, "example");
- assert_eq!(suffix5, "!!!\"");
- }
-
- #[test]
- fn test_split_word_boundaries_edge_cases() {
- let config = Arc::new(TokenReductionConfig::default());
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let (prefix, core, suffix) = pipeline.split_word_boundaries("!!!");
- assert_eq!(prefix, "!!!");
- assert_eq!(core, "");
- assert_eq!(suffix, "");
-
- let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("");
- assert_eq!(prefix2, "");
- assert_eq!(core2, "");
- assert_eq!(suffix2, "");
-
- let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("a");
- assert_eq!(prefix3, "");
- assert_eq!(core3, "a");
- assert_eq!(suffix3, "");
-
- let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("(café)");
- assert_eq!(prefix4, "(");
- assert_eq!(core4, "café");
- assert_eq!(suffix4, ")");
- }
-
- #[test]
- fn test_custom_stopwords_with_preserve_patterns() {
- use std::collections::HashMap;
-
- let mut custom_stopwords = HashMap::new();
- custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "stopword".to_string()]);
-
- let config = TokenReductionConfig {
- custom_stopwords: Some(custom_stopwords),
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "this is a custom stopword test";
- let result = pipeline.remove_stopwords(input);
-
- assert!(!result.contains(" custom "));
- assert!(!result.contains(" stopword "));
- assert!(!result.contains(" is "));
- assert!(!result.contains(" a "));
- assert!(result.contains("test"));
- }
-
- #[test]
- fn test_preserve_patterns_empty() {
- let config = TokenReductionConfig {
- preserve_patterns: vec![],
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
- let input = "The quick brown fox";
- let result = pipeline.remove_stopwords(input);
-
- assert!(!result.contains(" The "));
- assert!(result.contains("quick"));
- }
-
- #[test]
- fn test_invalid_preserve_pattern() {
- let config = TokenReductionConfig {
- preserve_patterns: vec!["[invalid".to_string()],
- ..Default::default()
- };
-
- let config = Arc::new(config);
- let result = FilterPipeline::new(&config, "en");
-
- assert!(result.is_err());
- if let Err(e) = result {
- match e {
- KreuzbergError::Validation { message, .. } => {
- assert!(message.contains("Invalid regex pattern"));
- }
- _ => panic!("Expected ValidationError"),
- }
- }
- }
- }
+ use crate::error::{KreuzbergError, Result};
+ use crate::stopwords::STOPWORDS;
+ use crate::text::token_reduction::config::TokenReductionConfig;
+ use ahash::{AHashMap, AHashSet};
+ use once_cell::sync::Lazy;
+ use regex::Regex;
+ use std::sync::Arc;
+
+ static HTML_COMMENT_REGEX: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"<!--.*?-->").expect("HTML comment regex pattern is valid and should compile"));
+ static EXCESSIVE_NEWLINES_REGEX: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"\n{3,}").expect("Excessive newlines regex pattern is valid and should compile"));
+ static MULTIPLE_SPACES_REGEX: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r" {2,}").expect("Multiple spaces regex pattern is valid and should compile"));
+ static MARKDOWN_CODE_BLOCK_REGEX: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"```[\s\S]*?```").expect("Markdown code block regex pattern is valid and should compile"));
+ static MARKDOWN_INLINE_CODE_REGEX: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"`[^`\n]+`").expect("Markdown inline code regex pattern is valid and should compile"));
+ static MARKDOWN_HEADERS_REGEX: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"^#{1,6}\s+").expect("Markdown headers regex pattern is valid and should compile"));
+ static MARKDOWN_LISTS_REGEX: Lazy<Regex> =
+ Lazy::new(|| Regex::new(r"^[ \t]*[-*+]\s+").expect("Markdown lists regex pattern is valid and should compile"));
+
+ pub struct FilterPipeline {
+ config: Arc<TokenReductionConfig>,
+ stopwords: AHashSet<String>,
+ preserve_patterns: Vec<Regex>,
+ language: String,
+ }
+
+ impl FilterPipeline {
+ pub fn new(config: &Arc<TokenReductionConfig>, language: &str) -> Result<Self> {
+ let mut stopwords = STOPWORDS.get(language).cloned().unwrap_or_else(|| {
+ STOPWORDS
+ .get("en")
+ .cloned()
+ .expect("English stopwords must be available - indicates build failure if missing")
+ });
+
+ if let Some(ref custom) = config.custom_stopwords
+ && let Some(custom_for_lang) = custom.get(language)
+ {
+ for word in custom_for_lang {
+ stopwords.insert(word.to_lowercase());
+ }
+ }
+
+ let preserve_patterns: std::result::Result<Vec<Regex>, _> = config
+ .preserve_patterns
+ .iter()
+ .map(|pattern| Regex::new(pattern))
+ .collect();
+
+ let preserve_patterns =
+ preserve_patterns.map_err(|e| KreuzbergError::validation(format!("Invalid regex pattern: {}", e)))?;
+
+ Ok(Self {
+ config: Arc::clone(config),
+ stopwords,
+ preserve_patterns,
+ language: language.to_string(),
+ })
+ }
+
+ pub fn apply_light_filters(&self, text: &str) -> String {
+ let mut result = text.to_string();
+
+ let mut preserved_blocks = AHashMap::new();
+ if self.config.preserve_markdown {
+ result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
+ }
+
+ result = HTML_COMMENT_REGEX.replace_all(&result, "").to_string();
+
+ result = MULTIPLE_SPACES_REGEX.replace_all(&result, " ").to_string();
+
+ result = EXCESSIVE_NEWLINES_REGEX.replace_all(&result, "\n\n").to_string();
+
+ if self.config.preserve_markdown {
+ result = self.preserve_markdown_structure(&result);
+ }
+
+ result = self.restore_preserved_blocks(&result, &preserved_blocks);
+
+ result
+ }
+
+ pub fn apply_moderate_filters(&self, text: &str) -> String {
+ let mut result = self.apply_light_filters(text);
+
+ let mut preserved_blocks = AHashMap::new();
+ if self.config.preserve_code {
+ result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
+ }
+
+ if self.config.preserve_markdown {
+ result = self.remove_stopwords_preserving_markdown(&result);
+ } else {
+ result = self.remove_stopwords(&result);
+ }
+
+ result = self.restore_preserved_blocks(&result, &preserved_blocks);
+
+ result
+ }
+
+ fn remove_stopwords_preserving_markdown(&self, text: &str) -> String {
+ let lines: Vec<&str> = text.lines().collect();
+ let mut processed_lines = Vec::new();
+
+ for line in lines {
+ if MARKDOWN_HEADERS_REGEX.is_match(line) {
+ processed_lines.push(line.to_string());
+ continue;
+ }
+
+ if MARKDOWN_LISTS_REGEX.is_match(line) {
+ processed_lines.push(line.to_string());
+ continue;
+ }
+
+ if line.trim().starts_with('|') && line.trim().ends_with('|') {
+ processed_lines.push(line.to_string());
+ continue;
+ }
+
+ let processed_line = self.remove_stopwords(line);
+ processed_lines.push(processed_line);
+ }
+
+ processed_lines.join("\n")
+ }
+
+ fn remove_stopwords(&self, text: &str) -> String {
+ let words: Vec<&str> = text.split_whitespace().collect();
+ let mut filtered_words = Vec::with_capacity(words.len());
+
+ for word in words {
+ if word.is_empty() {
+ continue;
+ }
+
+ if self.should_preserve_word(word) {
+ filtered_words.push(word);
+ continue;
+ }
+
+ if word.len() > 1 && word.bytes().all(|b| b.is_ascii_uppercase() || !b.is_ascii_alphabetic()) {
+ filtered_words.push(word);
+ continue;
+ }
+
+ if word.bytes().any(|b| b.is_ascii_digit()) {
+ filtered_words.push(word);
+ continue;
+ }
+
+ let clean_word = if word.is_ascii() {
+ let clean_bytes: Vec<u8> = word
+ .bytes()
+ .filter(|&b| b.is_ascii_alphabetic())
+ .map(|b| b.to_ascii_lowercase())
+ .collect();
+ String::from_utf8(clean_bytes).unwrap_or_else(|_| {
+ word.chars()
+ .filter(|c| c.is_alphabetic())
+ .collect::<String>()
+ .to_lowercase()
+ })
+ } else {
+ word.chars()
+ .filter(|c| c.is_alphabetic())
+ .collect::<String>()
+ .to_lowercase()
+ };
+
+ if clean_word.is_empty() {
+ filtered_words.push(word);
+ continue;
+ }
+
+ if clean_word.len() <= 1 {
+ filtered_words.push(word);
+ continue;
+ }
+
+ if !self.stopwords.contains(&clean_word) {
+ filtered_words.push(word);
+ }
+ }
+
+ filtered_words.join(" ")
+ }
+
+ /// Get the language code for this filter pipeline.
+ ///
+ /// Primarily useful for testing and debugging to verify language configuration.
+ #[cfg_attr(not(test), allow(dead_code))]
+ pub fn language(&self) -> &str {
+ &self.language
+ }
+
+ /// Check if a word should be preserved based on configured patterns.
+ fn should_preserve_word(&self, word: &str) -> bool {
+ self.preserve_patterns.iter().any(|pattern| pattern.is_match(word))
+ }
+
+ /// Split a word into prefix (non-alphanumeric), core (alphanumeric), and suffix (non-alphanumeric).
+ ///
+ /// This is useful for handling punctuation-wrapped words like "(hello)" or "world!".
+ /// Currently used in tests; reserved for future word boundary-aware filtering.
212
+ #[cfg_attr(not(test), allow(dead_code))]
213
+ fn split_word_boundaries(&self, word: &str) -> (String, String, String) {
214
+ let chars: Vec<char> = word.chars().collect();
215
+ let mut start = 0;
216
+ let mut end = chars.len();
217
+
218
+ while start < chars.len() && !chars[start].is_alphanumeric() {
219
+ start += 1;
220
+ }
221
+
222
+ while end > start && !chars[end - 1].is_alphanumeric() {
223
+ end -= 1;
224
+ }
225
+
226
+ let prefix: String = chars[..start].iter().collect();
227
+ let core: String = chars[start..end].iter().collect();
228
+ let suffix: String = chars[end..].iter().collect();
229
+
230
+ (prefix, core, suffix)
231
+ }
232
+
233
+ fn preserve_markdown_structure(&self, text: &str) -> String {
234
+ let lines: Vec<&str> = text.lines().collect();
235
+ let mut processed_lines = Vec::new();
236
+
237
+ for line in lines {
238
+ if MARKDOWN_HEADERS_REGEX.is_match(line) {
239
+ processed_lines.push(line);
240
+ continue;
241
+ }
242
+
243
+ if MARKDOWN_LISTS_REGEX.is_match(line) {
244
+ processed_lines.push(line);
245
+ continue;
246
+ }
247
+
248
+ processed_lines.push(line);
249
+ }
250
+
251
+ processed_lines.join("\n")
252
+ }
253
+
254
+ fn extract_and_preserve_code(&self, text: &str, preserved: &mut AHashMap<String, String>) -> String {
255
+ let mut result = text.to_string();
256
+ let mut code_block_id = 0;
257
+ let mut inline_code_id = 0;
258
+
259
+ result = MARKDOWN_CODE_BLOCK_REGEX
260
+ .replace_all(&result, |caps: &regex::Captures| {
261
+ let code_block = caps[0].to_string();
262
+ let placeholder = format!("__CODEBLOCK_{}__", code_block_id);
263
+ code_block_id += 1;
264
+ preserved.insert(placeholder.clone(), code_block);
265
+ placeholder
266
+ })
267
+ .to_string();
268
+
269
+ result = MARKDOWN_INLINE_CODE_REGEX
270
+ .replace_all(&result, |caps: &regex::Captures| {
271
+ let inline_code = caps[0].to_string();
272
+ let placeholder = format!("__INLINECODE_{}__", inline_code_id);
273
+ inline_code_id += 1;
274
+ preserved.insert(placeholder.clone(), inline_code);
275
+ placeholder
276
+ })
277
+ .to_string();
278
+
279
+ result
280
+ }
281
+
282
+ fn restore_preserved_blocks(&self, text: &str, preserved: &AHashMap<String, String>) -> String {
283
+ let mut result = text.to_string();
284
+
285
+ for (placeholder, original_content) in preserved {
286
+ result = result.replace(placeholder, original_content);
287
+ }
288
+
289
+ result
290
+ }
291
+ }
292
+
293
+ #[cfg(all(test, feature = "stopwords"))]
294
+ mod tests {
295
+ use super::*;
296
+
297
+ #[test]
298
+ fn test_stopword_removal() {
299
+ let config = Arc::new(TokenReductionConfig::default());
300
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
301
+
302
+ let input = "The quick brown fox is jumping over the lazy dog";
303
+ let result = pipeline.remove_stopwords(input);
304
+
305
+ assert!(!result.contains(" the "));
306
+ assert!(!result.contains(" is "));
307
+ assert!(result.contains("quick"));
308
+ assert!(result.contains("brown"));
309
+ assert!(result.contains("fox"));
310
+ }
311
+
312
+ #[test]
313
+ fn test_preserve_patterns() {
314
+ let config = TokenReductionConfig {
315
+ preserve_patterns: vec!["\\b[A-Z]{2,}\\b".to_string()],
316
+ ..Default::default()
317
+ };
318
+
319
+ let config = Arc::new(config);
320
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
321
+
322
+ let input = "The NASA mission is a success";
323
+ let result = pipeline.remove_stopwords(input);
324
+
325
+ assert!(result.contains("NASA"));
326
+ assert!(result.contains("mission"));
327
+ assert!(result.contains("success"));
328
+ }
329
+
330
+ #[test]
331
+ fn test_markdown_preservation() {
332
+ let config = TokenReductionConfig {
333
+ preserve_markdown: true,
334
+ preserve_code: true,
335
+ ..Default::default()
336
+ };
337
+
338
+ let config = Arc::new(config);
339
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
340
+
341
+ let input = "# Header\nThis is `code` and ```\ncode block\n``` text";
342
+ let result = pipeline.apply_moderate_filters(input);
343
+
344
+ assert!(result.contains("# Header"));
345
+ assert!(result.contains("`code`"));
346
+ assert!(result.contains("```\ncode block\n```"));
347
+ }
348
+
349
+ #[test]
350
+ fn test_apply_light_filters_removes_html_comments() {
351
+ let config = Arc::new(TokenReductionConfig::default());
352
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
353
+
354
+ let input = "Text before <!-- comment --> text after";
355
+ let result = pipeline.apply_light_filters(input);
356
+
357
+ assert!(!result.contains("<!-- comment -->"));
358
+ assert!(result.contains("Text before"));
359
+ assert!(result.contains("text after"));
360
+ }
361
+
362
+ #[test]
363
+ fn test_apply_light_filters_normalizes_whitespace() {
364
+ let config = Arc::new(TokenReductionConfig::default());
365
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
366
+
367
+ let input = "Text with multiple spaces";
368
+ let result = pipeline.apply_light_filters(input);
369
+
370
+ assert!(!result.contains("  "));
371
+ assert!(result.contains("Text with multiple spaces"));
372
+ }
373
+
374
+ #[test]
375
+ fn test_apply_light_filters_reduces_newlines() {
376
+ let config = Arc::new(TokenReductionConfig::default());
377
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
378
+
379
+ let input = "Paragraph 1\n\n\n\n\nParagraph 2";
380
+ let result = pipeline.apply_light_filters(input);
381
+
382
+ assert!(!result.contains("\n\n\n"));
383
+ assert!(result.contains("Paragraph 1"));
384
+ assert!(result.contains("Paragraph 2"));
385
+ }
386
+
387
+ #[test]
388
+ fn test_stopword_removal_preserves_uppercase() {
389
+ let config = Arc::new(TokenReductionConfig::default());
390
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
391
+
392
+ let input = "The API is working WITH the SDK";
393
+ let result = pipeline.remove_stopwords(input);
394
+
395
+ assert!(result.contains("API"));
396
+ assert!(result.contains("SDK"));
397
+ assert!(result.contains("WITH"));
398
+ assert!(!result.contains("The "));
399
+ assert!(!result.contains(" is "));
400
+ }
401
+
402
+ #[test]
403
+ fn test_stopword_removal_preserves_numbers() {
404
+ let config = Arc::new(TokenReductionConfig::default());
405
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
406
+
407
+ let input = "The version is 3.14 and the count is 42";
408
+ let result = pipeline.remove_stopwords(input);
409
+
410
+ assert!(result.contains("3.14"));
411
+ assert!(result.contains("42"));
412
+ assert!(result.contains("version"));
413
+ assert!(result.contains("count"));
414
+ }
415
+
416
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
417
+ #[test]
418
+ fn test_stopword_removal_handles_punctuation() {
419
+ let config = Arc::new(TokenReductionConfig::default());
420
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
421
+
422
+ let input = "Hello, the world! This is great.";
423
+ let result = pipeline.remove_stopwords(input);
424
+
425
+ assert!(result.contains("Hello,"));
426
+ assert!(result.contains("world!"));
427
+ assert!(result.contains("great."));
428
+ }
429
+
430
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
431
+ #[test]
432
+ fn test_custom_stopwords() {
433
+ use std::collections::HashMap;
434
+
435
+ let mut custom_stopwords = HashMap::new();
436
+ custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "word".to_string()]);
437
+
438
+ let config = TokenReductionConfig {
439
+ custom_stopwords: Some(custom_stopwords),
440
+ ..Default::default()
441
+ };
442
+
443
+ let config = Arc::new(config);
444
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
445
+
446
+ let input = "This is a custom word test";
447
+ let result = pipeline.remove_stopwords(input);
448
+
449
+ assert!(!result.contains("custom"));
450
+ assert!(!result.contains("word"));
451
+ assert!(result.contains("test"));
452
+ }
453
+
454
+ #[test]
455
+ fn test_spanish_stopwords() {
456
+ let config = Arc::new(TokenReductionConfig::default());
457
+ let pipeline = FilterPipeline::new(&config, "es").unwrap();
458
+
459
+ let input = "El perro grande bonito tiene";
460
+ let result = pipeline.remove_stopwords(input);
461
+
462
+ assert!(result.contains("perro"));
463
+ assert!(result.contains("grande"));
464
+ assert!(result.contains("bonito"));
465
+ let words: Vec<&str> = result.split_whitespace().collect();
466
+ assert!(!words.contains(&"el"));
467
+ assert!(!words.contains(&"El"));
468
+ }
469
+
470
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
471
+ #[test]
472
+ fn test_unknown_language_fallback() {
473
+ let config = Arc::new(TokenReductionConfig::default());
474
+ let pipeline = FilterPipeline::new(&config, "unknown").unwrap();
475
+
476
+ let input = "The quick test with unknown language";
477
+ let result = pipeline.remove_stopwords(input);
478
+
479
+ assert!(!result.contains("The "));
480
+ assert!(result.contains("quick"));
481
+ assert!(result.contains("test"));
482
+ }
483
+
484
+ #[test]
485
+ fn test_markdown_header_preservation() {
486
+ let config = TokenReductionConfig {
487
+ preserve_markdown: true,
488
+ ..Default::default()
489
+ };
490
+
491
+ let config = Arc::new(config);
492
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
493
+
494
+ let input = "# Header 1\n## Header 2\n### Header 3\nRegular text";
495
+ let result = pipeline.remove_stopwords_preserving_markdown(input);
496
+
497
+ assert!(result.contains("# Header 1"));
498
+ assert!(result.contains("## Header 2"));
499
+ assert!(result.contains("### Header 3"));
500
+ }
501
+
502
+ #[test]
503
+ fn test_markdown_list_preservation() {
504
+ let config = TokenReductionConfig {
505
+ preserve_markdown: true,
506
+ ..Default::default()
507
+ };
508
+
509
+ let config = Arc::new(config);
510
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
511
+
512
+ let input = "- Item 1\n* Item 2\n+ Item 3";
513
+ let result = pipeline.remove_stopwords_preserving_markdown(input);
514
+
515
+ assert!(result.contains("- Item 1"));
516
+ assert!(result.contains("* Item 2"));
517
+ assert!(result.contains("+ Item 3"));
518
+ }
519
+
520
+ #[test]
521
+ fn test_markdown_table_preservation() {
522
+ let config = TokenReductionConfig {
523
+ preserve_markdown: true,
524
+ ..Default::default()
525
+ };
526
+
527
+ let config = Arc::new(config);
528
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
529
+
530
+ let input = "| Header 1 | Header 2 |\n|----------|----------|\n| Cell 1 | Cell 2 |";
531
+ let result = pipeline.remove_stopwords_preserving_markdown(input);
532
+
533
+ assert!(result.contains("| Header 1 | Header 2 |"));
534
+ assert!(result.contains("|----------|----------|"));
535
+ }
536
+
537
+ #[test]
538
+ fn test_code_block_preservation() {
539
+ let config = Arc::new(TokenReductionConfig {
540
+ preserve_code: true,
541
+ ..Default::default()
542
+ });
543
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
544
+
545
+ let mut preserved = AHashMap::new();
546
+ let input = "Text before\n```rust\nfn main() {}\n```\nText after";
547
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
548
+
549
+ assert_eq!(preserved.len(), 1);
550
+ assert!(preserved.values().any(|v| v.contains("fn main()")));
551
+ assert!(result.contains("__CODEBLOCK_0__"));
552
+ }
553
+
554
+ #[test]
555
+ fn test_inline_code_preservation() {
556
+ let config = Arc::new(TokenReductionConfig {
557
+ preserve_code: true,
558
+ ..Default::default()
559
+ });
560
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
561
+
562
+ let mut preserved = AHashMap::new();
563
+ let input = "Use the `println!` macro";
564
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
565
+
566
+ assert_eq!(preserved.len(), 1);
567
+ assert!(preserved.values().any(|v| v == "`println!`"));
568
+ assert!(result.contains("__INLINECODE_0__"));
569
+ }
570
+
571
+ #[test]
572
+ fn test_restore_preserved_blocks() {
573
+ let config = Arc::new(TokenReductionConfig::default());
574
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
575
+
576
+ let mut preserved = AHashMap::new();
577
+ preserved.insert("__CODEBLOCK_0__".to_string(), "```code```".to_string());
578
+ preserved.insert("__INLINECODE_0__".to_string(), "`inline`".to_string());
579
+ let input = "Text __CODEBLOCK_0__ and __INLINECODE_0__ here";
580
+ let result = pipeline.restore_preserved_blocks(input, &preserved);
581
+
582
+ assert!(result.contains("```code```"));
583
+ assert!(result.contains("`inline`"));
584
+ assert!(!result.contains("__CODEBLOCK_0__"));
585
+ assert!(!result.contains("__INLINECODE_0__"));
586
+ }
587
+
588
+ #[test]
589
+ fn test_apply_moderate_filters_with_stopwords() {
590
+ let config = Arc::new(TokenReductionConfig::default());
591
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
592
+
593
+ let input = "The quick brown fox is jumping";
594
+ let result = pipeline.apply_moderate_filters(input);
595
+
596
+ assert!(!result.contains("The "));
597
+ assert!(!result.contains(" is "));
598
+ assert!(result.contains("quick"));
599
+ assert!(result.contains("brown"));
600
+ }
601
+
602
+ #[test]
603
+ fn test_invalid_regex_pattern() {
604
+ let config = TokenReductionConfig {
605
+ preserve_patterns: vec!["[invalid".to_string()],
606
+ ..Default::default()
607
+ };
608
+
609
+ let config = Arc::new(config);
610
+ let result = FilterPipeline::new(&config, "en");
611
+
612
+ assert!(result.is_err());
613
+ if let Err(err) = result {
614
+ assert!(matches!(err, KreuzbergError::Validation { .. }));
615
+ }
616
+ }
617
+
618
+ #[test]
619
+ fn test_empty_input() {
620
+ let config = Arc::new(TokenReductionConfig::default());
621
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
622
+
623
+ let result = pipeline.apply_light_filters("");
624
+ assert_eq!(result, "");
625
+
626
+ let result = pipeline.apply_moderate_filters("");
627
+ assert_eq!(result, "");
628
+ }
629
+
630
+ #[test]
631
+ fn test_stopword_removal_single_letter_words() {
632
+ let config = Arc::new(TokenReductionConfig::default());
633
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
634
+
635
+ let input = "I a x test";
636
+ let result = pipeline.remove_stopwords(input);
637
+
638
+ assert!(result.contains("I"));
639
+ assert!(result.contains("x"));
640
+ }
641
+
642
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
643
+ #[test]
644
+ fn test_stopword_removal_mixed_case() {
645
+ let config = Arc::new(TokenReductionConfig::default());
646
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
647
+
648
+ let input = "The Test Is Working";
649
+ let result = pipeline.remove_stopwords(input);
650
+
651
+ assert!(!result.contains("The"));
652
+ assert!(!result.contains("Is"));
653
+ assert!(result.contains("Test"));
654
+ assert!(result.contains("Working"));
655
+ }
656
+
657
+ #[test]
658
+ fn test_lazy_regex_initialization() {
659
+ let _ = &*HTML_COMMENT_REGEX;
660
+ let _ = &*EXCESSIVE_NEWLINES_REGEX;
661
+ let _ = &*MULTIPLE_SPACES_REGEX;
662
+ let _ = &*MARKDOWN_CODE_BLOCK_REGEX;
663
+ let _ = &*MARKDOWN_INLINE_CODE_REGEX;
664
+ let _ = &*MARKDOWN_HEADERS_REGEX;
665
+ let _ = &*MARKDOWN_LISTS_REGEX;
666
+ }
667
+
668
+ #[test]
669
+ fn test_multiple_code_blocks_hashmap_approach() {
670
+ let config = Arc::new(TokenReductionConfig {
671
+ preserve_code: true,
672
+ ..Default::default()
673
+ });
674
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
675
+
676
+ let input =
677
+ "Start ```rust\nlet x = 1;\n``` middle `inline1` text ```python\nprint('hi')\n``` and `inline2` end";
678
+ let mut preserved = AHashMap::new();
679
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
680
+
681
+ assert_eq!(preserved.len(), 4);
682
+ assert!(preserved.contains_key("__CODEBLOCK_0__"));
683
+ assert!(preserved.contains_key("__CODEBLOCK_1__"));
684
+ assert!(preserved.contains_key("__INLINECODE_0__"));
685
+ assert!(preserved.contains_key("__INLINECODE_1__"));
686
+
687
+ assert_eq!(preserved.get("__CODEBLOCK_0__").unwrap(), "```rust\nlet x = 1;\n```");
688
+ assert_eq!(preserved.get("__CODEBLOCK_1__").unwrap(), "```python\nprint('hi')\n```");
689
+ assert_eq!(preserved.get("__INLINECODE_0__").unwrap(), "`inline1`");
690
+ assert_eq!(preserved.get("__INLINECODE_1__").unwrap(), "`inline2`");
691
+
692
+ let restored = pipeline.restore_preserved_blocks(&result, &preserved);
693
+ assert!(restored.contains("```rust\nlet x = 1;\n```"));
694
+ assert!(restored.contains("```python\nprint('hi')\n```"));
695
+ assert!(restored.contains("`inline1`"));
696
+ assert!(restored.contains("`inline2`"));
697
+ assert!(!restored.contains("__CODEBLOCK_"));
698
+ assert!(!restored.contains("__INLINECODE_"));
699
+ }
700
+
701
+ #[test]
702
+ fn test_hashmap_order_independence() {
703
+ let config = Arc::new(TokenReductionConfig {
704
+ preserve_code: true,
705
+ ..Default::default()
706
+ });
707
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
708
+
709
+ let input = "Text `a` and `b` and `c` here";
710
+ let mut preserved = AHashMap::new();
711
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
712
+
713
+ assert_eq!(preserved.len(), 3);
714
+ let restored = pipeline.restore_preserved_blocks(&result, &preserved);
715
+
716
+ assert!(restored.contains("`a`"));
717
+ assert!(restored.contains("`b`"));
718
+ assert!(restored.contains("`c`"));
719
+ assert_eq!(restored, "Text `a` and `b` and `c` here");
720
+ }
721
+
722
+ #[test]
723
+ fn test_preserve_patterns_regex() {
724
+ let config = TokenReductionConfig {
725
+ preserve_patterns: vec![
726
+ r"\b[A-Z]{2,}\b".to_string(),
727
+ r"\b\d+\.\d+\.\d+\b".to_string(),
728
+ r"@\w+".to_string(),
729
+ ],
730
+ ..Default::default()
731
+ };
732
+
733
+ let config = Arc::new(config);
734
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
735
+
736
+ let input = "The NASA and HTTP protocols version 1.2.3 by @john";
737
+ let result = pipeline.remove_stopwords(input);
738
+
739
+ assert!(result.contains("NASA"));
740
+ assert!(result.contains("HTTP"));
741
+ assert!(result.contains("1.2.3"));
742
+ assert!(result.contains("@john"));
743
+
744
+ assert!(!result.contains(" the "));
745
+ assert!(!result.contains(" and "));
746
+ assert!(!result.contains(" by "));
747
+ }
748
+
749
+ #[test]
750
+ fn test_language_specific_stopwords() {
751
+ let config_en = Arc::new(TokenReductionConfig::default());
752
+ let pipeline_en = FilterPipeline::new(&config_en, "en").unwrap();
753
+ assert_eq!(pipeline_en.language(), "en");
754
+
755
+ let input_en = "the quick brown fox";
756
+ let result_en = pipeline_en.remove_stopwords(input_en);
757
+ assert!(!result_en.contains(" the "));
758
+
759
+ let config_de = Arc::new(TokenReductionConfig::default());
760
+ let pipeline_de = FilterPipeline::new(&config_de, "de").unwrap();
761
+ assert_eq!(pipeline_de.language(), "de");
762
+
763
+ let input_de = "der schnelle braune fuchs";
764
+ let result_de = pipeline_de.remove_stopwords(input_de);
765
+ assert!(!result_de.contains(" der "));
766
+ assert!(result_de.contains("schnelle"));
767
+ }
768
+
769
+ #[test]
770
+ fn test_language_fallback_to_english() {
771
+ let config = Arc::new(TokenReductionConfig::default());
772
+
773
+ let pipeline = FilterPipeline::new(&config, "unsupported_lang").unwrap();
774
+ assert_eq!(pipeline.language(), "unsupported_lang");
775
+
776
+ let input = "the quick brown fox";
777
+ let result = pipeline.remove_stopwords(input);
778
+
779
+ assert!(!result.contains(" the "));
780
+ assert!(result.contains("quick"));
781
+ }
782
+
783
+ #[test]
784
+ fn test_split_word_boundaries() {
785
+ let config = Arc::new(TokenReductionConfig::default());
786
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
787
+
788
+ let (prefix, core, suffix) = pipeline.split_word_boundaries("(hello)");
789
+ assert_eq!(prefix, "(");
790
+ assert_eq!(core, "hello");
791
+ assert_eq!(suffix, ")");
792
+
793
+ let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("world!");
794
+ assert_eq!(prefix2, "");
795
+ assert_eq!(core2, "world");
796
+ assert_eq!(suffix2, "!");
797
+
798
+ let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("'test");
799
+ assert_eq!(prefix3, "'");
800
+ assert_eq!(core3, "test");
801
+ assert_eq!(suffix3, "");
802
+
803
+ let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("simple");
804
+ assert_eq!(prefix4, "");
805
+ assert_eq!(core4, "simple");
806
+ assert_eq!(suffix4, "");
807
+
808
+ let (prefix5, core5, suffix5) = pipeline.split_word_boundaries("\"example!!!\"");
809
+ assert_eq!(prefix5, "\"");
810
+ assert_eq!(core5, "example");
811
+ assert_eq!(suffix5, "!!!\"");
812
+ }
813
+
814
+ #[test]
815
+ fn test_split_word_boundaries_edge_cases() {
816
+ let config = Arc::new(TokenReductionConfig::default());
817
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
818
+
819
+ let (prefix, core, suffix) = pipeline.split_word_boundaries("!!!");
820
+ assert_eq!(prefix, "!!!");
821
+ assert_eq!(core, "");
822
+ assert_eq!(suffix, "");
823
+
824
+ let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("");
825
+ assert_eq!(prefix2, "");
826
+ assert_eq!(core2, "");
827
+ assert_eq!(suffix2, "");
828
+
829
+ let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("a");
830
+ assert_eq!(prefix3, "");
831
+ assert_eq!(core3, "a");
832
+ assert_eq!(suffix3, "");
833
+
834
+ let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("(café)");
835
+ assert_eq!(prefix4, "(");
836
+ assert_eq!(core4, "café");
837
+ assert_eq!(suffix4, ")");
838
+ }
839
+
840
+ #[test]
841
+ fn test_custom_stopwords_with_preserve_patterns() {
842
+ use std::collections::HashMap;
843
+
844
+ let mut custom_stopwords = HashMap::new();
845
+ custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "stopword".to_string()]);
846
+
847
+ let config = TokenReductionConfig {
848
+ custom_stopwords: Some(custom_stopwords),
849
+ ..Default::default()
850
+ };
851
+
852
+ let config = Arc::new(config);
853
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
854
+
855
+ let input = "this is a custom stopword test";
856
+ let result = pipeline.remove_stopwords(input);
857
+
858
+ assert!(!result.contains(" custom "));
859
+ assert!(!result.contains(" stopword "));
860
+ assert!(!result.contains(" is "));
861
+ assert!(!result.contains(" a "));
862
+ assert!(result.contains("test"));
863
+ }
864
+
865
+ #[test]
866
+ fn test_preserve_patterns_empty() {
867
+ let config = TokenReductionConfig {
868
+ preserve_patterns: vec![],
869
+ ..Default::default()
870
+ };
871
+
872
+ let config = Arc::new(config);
873
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
874
+
875
+ let input = "The quick brown fox";
876
+ let result = pipeline.remove_stopwords(input);
877
+
878
+ assert!(!result.contains(" The "));
879
+ assert!(result.contains("quick"));
880
+ }
881
+
882
+ #[test]
883
+ fn test_invalid_preserve_pattern() {
884
+ let config = TokenReductionConfig {
885
+ preserve_patterns: vec!["[invalid".to_string()],
886
+ ..Default::default()
887
+ };
888
+
889
+ let config = Arc::new(config);
890
+ let result = FilterPipeline::new(&config, "en");
891
+
892
+ assert!(result.is_err());
893
+ if let Err(e) = result {
894
+ match e {
895
+ KreuzbergError::Validation { message, .. } => {
896
+ assert!(message.contains("Invalid regex pattern"));
897
+ }
898
+ _ => panic!("Expected ValidationError"),
899
+ }
900
+ }
901
+ }
902
+ }
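
A minimal usage sketch of the FilterPipeline API added in this file (illustrative only, not taken from the diff; it assumes FilterPipeline and TokenReductionConfig from this module are in scope and that TokenReductionConfig implements Default, as the tests above rely on):

// Minimal sketch, assuming the items defined in this module.
use std::sync::Arc;

fn token_reduction_example() {
    // Keep markdown structure and code spans while filtering.
    let config = Arc::new(TokenReductionConfig {
        preserve_markdown: true,
        preserve_code: true,
        ..Default::default()
    });

    // Construction fails only if a `preserve_patterns` entry is not a valid regex.
    let pipeline = FilterPipeline::new(&config, "en").expect("default config has no preserve_patterns");

    // Light pass: strips HTML comments, collapses runs of spaces and blank lines.
    let light = pipeline.apply_light_filters("Intro  <!-- draft note -->  text\n\n\n\nNext paragraph");

    // Moderate pass: the light pass plus stopword removal, skipping headers,
    // lists, tables, and code placeholders.
    let moderate = pipeline.apply_moderate_filters("# Title\nThe quick brown fox is `very fast`");

    assert!(!light.contains("<!--"));
    assert!(moderate.contains("# Title"));
    assert!(moderate.contains("`very fast`"));
}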