kreuzberg 4.0.0.rc1 → 4.0.0.rc2

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (342)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -8
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -534
  5. data/.rubocop.yml +538 -0
  6. data/Gemfile +8 -9
  7. data/Gemfile.lock +9 -109
  8. data/README.md +426 -421
  9. data/Rakefile +25 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -340
  12. data/ext/kreuzberg_rb/extconf.rb +45 -35
  13. data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
  14. data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
  15. data/ext/kreuzberg_rb/native/README.md +425 -425
  16. data/ext/kreuzberg_rb/native/build.rs +15 -17
  17. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  18. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  19. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  20. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  21. data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
  22. data/extconf.rb +28 -28
  23. data/kreuzberg.gemspec +148 -105
  24. data/lib/kreuzberg/api_proxy.rb +142 -142
  25. data/lib/kreuzberg/cache_api.rb +46 -45
  26. data/lib/kreuzberg/cli.rb +55 -55
  27. data/lib/kreuzberg/cli_proxy.rb +127 -127
  28. data/lib/kreuzberg/config.rb +691 -684
  29. data/lib/kreuzberg/error_context.rb +32 -0
  30. data/lib/kreuzberg/errors.rb +118 -50
  31. data/lib/kreuzberg/extraction_api.rb +85 -84
  32. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  33. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  34. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  35. data/lib/kreuzberg/result.rb +216 -216
  36. data/lib/kreuzberg/setup_lib_path.rb +80 -79
  37. data/lib/kreuzberg/validator_protocol.rb +89 -89
  38. data/lib/kreuzberg/version.rb +5 -5
  39. data/lib/kreuzberg.rb +103 -82
  40. data/sig/kreuzberg/internal.rbs +184 -184
  41. data/sig/kreuzberg.rbs +520 -468
  42. data/spec/binding/cache_spec.rb +227 -227
  43. data/spec/binding/cli_proxy_spec.rb +85 -87
  44. data/spec/binding/cli_spec.rb +55 -54
  45. data/spec/binding/config_spec.rb +345 -345
  46. data/spec/binding/config_validation_spec.rb +283 -283
  47. data/spec/binding/error_handling_spec.rb +213 -213
  48. data/spec/binding/errors_spec.rb +66 -66
  49. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  50. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  51. data/spec/binding/plugins/validator_spec.rb +274 -274
  52. data/spec/fixtures/config.toml +39 -39
  53. data/spec/fixtures/config.yaml +41 -42
  54. data/spec/fixtures/invalid_config.toml +4 -4
  55. data/spec/smoke/package_spec.rb +178 -178
  56. data/spec/spec_helper.rb +42 -42
  57. data/vendor/kreuzberg/Cargo.toml +204 -134
  58. data/vendor/kreuzberg/README.md +175 -175
  59. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
  60. data/vendor/kreuzberg/build.rs +474 -460
  61. data/vendor/kreuzberg/src/api/error.rs +81 -81
  62. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  63. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  64. data/vendor/kreuzberg/src/api/server.rs +353 -353
  65. data/vendor/kreuzberg/src/api/types.rs +170 -170
  66. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
  67. data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
  68. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
  69. data/vendor/kreuzberg/src/core/config.rs +1032 -1032
  70. data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
  71. data/vendor/kreuzberg/src/core/io.rs +329 -327
  72. data/vendor/kreuzberg/src/core/mime.rs +605 -615
  73. data/vendor/kreuzberg/src/core/mod.rs +45 -42
  74. data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
  75. data/vendor/kreuzberg/src/embeddings.rs +432 -323
  76. data/vendor/kreuzberg/src/error.rs +431 -431
  77. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  78. data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
  79. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  80. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  81. data/vendor/kreuzberg/src/extraction/html.rs +553 -553
  82. data/vendor/kreuzberg/src/extraction/image.rs +368 -368
  83. data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
  84. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
  85. data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
  86. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  87. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  88. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  89. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
  90. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
  91. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
  92. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  93. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  94. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  95. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  96. data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
  97. data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
  98. data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
  99. data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
  100. data/vendor/kreuzberg/src/extractors/email.rs +143 -129
  101. data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
  103. data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
  104. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
  105. data/vendor/kreuzberg/src/extractors/html.rs +393 -410
  106. data/vendor/kreuzberg/src/extractors/image.rs +198 -195
  107. data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
  108. data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
  109. data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
  110. data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
  111. data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
  112. data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
  113. data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
  114. data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
  115. data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
  116. data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
  117. data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
  118. data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
  119. data/vendor/kreuzberg/src/extractors/security.rs +484 -0
  120. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
  121. data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
  122. data/vendor/kreuzberg/src/extractors/text.rs +260 -242
  123. data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
  124. data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
  125. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  126. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  127. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  128. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  129. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  130. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  131. data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
  132. data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
  133. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  134. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  135. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
  136. data/vendor/kreuzberg/src/lib.rs +105 -102
  137. data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
  138. data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
  139. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  140. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  141. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  142. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  143. data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
  144. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  145. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  146. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
  147. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  148. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  149. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  150. data/vendor/kreuzberg/src/panic_context.rs +154 -0
  151. data/vendor/kreuzberg/src/pdf/error.rs +122 -122
  152. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  153. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
  154. data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
  155. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  156. data/vendor/kreuzberg/src/pdf/table.rs +393 -420
  157. data/vendor/kreuzberg/src/pdf/text.rs +158 -161
  158. data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
  159. data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
  160. data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
  161. data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
  162. data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
  163. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  164. data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
  165. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  166. data/vendor/kreuzberg/src/text/mod.rs +19 -19
  167. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  168. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  169. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  170. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  171. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  172. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  173. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  174. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  175. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  176. data/vendor/kreuzberg/src/types.rs +903 -873
  177. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  178. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  179. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  180. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  181. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  182. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  183. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  184. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  185. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  186. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  187. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  188. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  189. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  190. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  191. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  192. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  193. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  194. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  195. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  196. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  198. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  199. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  200. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  201. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  202. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  203. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  204. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  205. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  206. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  207. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  208. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  209. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  210. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  211. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  212. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  213. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  214. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  215. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  216. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  217. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  218. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  219. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  220. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  221. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  222. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  223. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  224. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  225. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  226. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  227. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  228. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  229. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  230. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  231. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  232. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  233. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  234. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  235. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  236. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  237. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  238. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  239. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  240. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  241. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  242. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  243. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  244. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
  245. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  246. data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
  247. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
  248. data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
  249. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
  250. data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
  251. data/vendor/kreuzberg/tests/config_features.rs +598 -580
  252. data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
  253. data/vendor/kreuzberg/tests/core_integration.rs +510 -493
  254. data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
  255. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
  256. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
  257. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
  258. data/vendor/kreuzberg/tests/email_integration.rs +325 -325
  259. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
  260. data/vendor/kreuzberg/tests/error_handling.rs +393 -393
  261. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
  262. data/vendor/kreuzberg/tests/format_integration.rs +159 -159
  263. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  264. data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
  265. data/vendor/kreuzberg/tests/image_integration.rs +253 -253
  266. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
  267. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
  268. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
  269. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  270. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  271. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
  272. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
  273. data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
  274. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
  275. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
  276. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
  277. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  278. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
  279. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
  280. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
  281. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
  282. data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
  283. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
  284. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
  285. data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
  286. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  287. data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
  288. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
  289. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
  290. data/vendor/kreuzberg/tests/security_validation.rs +415 -404
  291. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  292. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
  293. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
  294. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
  295. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  296. data/vendor/rb-sys/.cargo-ok +1 -0
  297. data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
  298. data/vendor/rb-sys/Cargo.lock +393 -0
  299. data/vendor/rb-sys/Cargo.toml +70 -0
  300. data/vendor/rb-sys/Cargo.toml.orig +57 -0
  301. data/vendor/rb-sys/LICENSE-APACHE +190 -0
  302. data/vendor/rb-sys/LICENSE-MIT +21 -0
  303. data/vendor/rb-sys/bin/release.sh +21 -0
  304. data/vendor/rb-sys/build/features.rs +108 -0
  305. data/vendor/rb-sys/build/main.rs +246 -0
  306. data/vendor/rb-sys/build/stable_api_config.rs +153 -0
  307. data/vendor/rb-sys/build/version.rs +48 -0
  308. data/vendor/rb-sys/readme.md +36 -0
  309. data/vendor/rb-sys/src/bindings.rs +21 -0
  310. data/vendor/rb-sys/src/hidden.rs +11 -0
  311. data/vendor/rb-sys/src/lib.rs +34 -0
  312. data/vendor/rb-sys/src/macros.rs +371 -0
  313. data/vendor/rb-sys/src/memory.rs +53 -0
  314. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
  315. data/vendor/rb-sys/src/special_consts.rs +31 -0
  316. data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
  317. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
  318. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
  319. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
  320. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
  321. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
  322. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
  323. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
  324. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
  325. data/vendor/rb-sys/src/stable_api.rs +261 -0
  326. data/vendor/rb-sys/src/symbol.rs +31 -0
  327. data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
  328. data/vendor/rb-sys/src/utils.rs +89 -0
  329. data/vendor/rb-sys/src/value_type.rs +7 -0
  330. metadata +90 -95
  331. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  332. data/spec/examples.txt +0 -104
  333. data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
  334. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
  335. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
  336. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
  337. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
  338. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
  339. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
  340. data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
  341. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
  342. data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
@@ -1,902 +1,902 @@
- use crate::error::{KreuzbergError, Result};
- use crate::stopwords::STOPWORDS;
- use crate::text::token_reduction::config::TokenReductionConfig;
- use ahash::{AHashMap, AHashSet};
- use once_cell::sync::Lazy;
- use regex::Regex;
- use std::sync::Arc;
-
- static HTML_COMMENT_REGEX: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"<!--.*?-->").expect("HTML comment regex pattern is valid and should compile"));
- static EXCESSIVE_NEWLINES_REGEX: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"\n{3,}").expect("Excessive newlines regex pattern is valid and should compile"));
- static MULTIPLE_SPACES_REGEX: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r" {2,}").expect("Multiple spaces regex pattern is valid and should compile"));
- static MARKDOWN_CODE_BLOCK_REGEX: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"```[\s\S]*?```").expect("Markdown code block regex pattern is valid and should compile"));
- static MARKDOWN_INLINE_CODE_REGEX: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"`[^`\n]+`").expect("Markdown inline code regex pattern is valid and should compile"));
- static MARKDOWN_HEADERS_REGEX: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"^#{1,6}\s+").expect("Markdown headers regex pattern is valid and should compile"));
- static MARKDOWN_LISTS_REGEX: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"^[ \t]*[-*+]\s+").expect("Markdown lists regex pattern is valid and should compile"));
-
- pub struct FilterPipeline {
-     config: Arc<TokenReductionConfig>,
-     stopwords: AHashSet<String>,
-     preserve_patterns: Vec<Regex>,
-     language: String,
- }
-
- impl FilterPipeline {
-     pub fn new(config: &Arc<TokenReductionConfig>, language: &str) -> Result<Self> {
-         let mut stopwords = STOPWORDS.get(language).cloned().unwrap_or_else(|| {
-             STOPWORDS
-                 .get("en")
-                 .cloned()
-                 .expect("English stopwords must be available - indicates build failure if missing")
-         });
-
-         if let Some(ref custom) = config.custom_stopwords
-             && let Some(custom_for_lang) = custom.get(language)
-         {
-             for word in custom_for_lang {
-                 stopwords.insert(word.to_lowercase());
-             }
-         }
-
-         let preserve_patterns: std::result::Result<Vec<Regex>, _> = config
-             .preserve_patterns
-             .iter()
-             .map(|pattern| Regex::new(pattern))
-             .collect();
-
-         let preserve_patterns =
-             preserve_patterns.map_err(|e| KreuzbergError::validation(format!("Invalid regex pattern: {}", e)))?;
-
-         Ok(Self {
-             config: Arc::clone(config),
-             stopwords,
-             preserve_patterns,
-             language: language.to_string(),
-         })
-     }
-
-     pub fn apply_light_filters(&self, text: &str) -> String {
-         let mut result = text.to_string();
-
-         let mut preserved_blocks = AHashMap::new();
-         if self.config.preserve_markdown {
-             result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
-         }
-
-         result = HTML_COMMENT_REGEX.replace_all(&result, "").to_string();
-
-         result = MULTIPLE_SPACES_REGEX.replace_all(&result, " ").to_string();
-
-         result = EXCESSIVE_NEWLINES_REGEX.replace_all(&result, "\n\n").to_string();
-
-         if self.config.preserve_markdown {
-             result = self.preserve_markdown_structure(&result);
-         }
-
-         result = self.restore_preserved_blocks(&result, &preserved_blocks);
-
-         result
-     }
-
-     pub fn apply_moderate_filters(&self, text: &str) -> String {
-         let mut result = self.apply_light_filters(text);
-
-         let mut preserved_blocks = AHashMap::new();
-         if self.config.preserve_code {
-             result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
-         }
-
-         if self.config.preserve_markdown {
-             result = self.remove_stopwords_preserving_markdown(&result);
-         } else {
-             result = self.remove_stopwords(&result);
-         }
-
-         result = self.restore_preserved_blocks(&result, &preserved_blocks);
-
-         result
-     }
-
-     fn remove_stopwords_preserving_markdown(&self, text: &str) -> String {
-         let lines: Vec<&str> = text.lines().collect();
-         let mut processed_lines = Vec::new();
-
-         for line in lines {
-             if MARKDOWN_HEADERS_REGEX.is_match(line) {
-                 processed_lines.push(line.to_string());
-                 continue;
-             }
-
-             if MARKDOWN_LISTS_REGEX.is_match(line) {
-                 processed_lines.push(line.to_string());
-                 continue;
-             }
-
-             if line.trim().starts_with('|') && line.trim().ends_with('|') {
-                 processed_lines.push(line.to_string());
-                 continue;
-             }
-
-             let processed_line = self.remove_stopwords(line);
-             processed_lines.push(processed_line);
-         }
-
-         processed_lines.join("\n")
-     }
-
-     fn remove_stopwords(&self, text: &str) -> String {
-         let words: Vec<&str> = text.split_whitespace().collect();
-         let mut filtered_words = Vec::with_capacity(words.len());
-
-         for word in words {
-             if word.is_empty() {
-                 continue;
-             }
-
-             if self.should_preserve_word(word) {
-                 filtered_words.push(word);
-                 continue;
-             }
-
-             if word.len() > 1 && word.bytes().all(|b| b.is_ascii_uppercase() || !b.is_ascii_alphabetic()) {
-                 filtered_words.push(word);
-                 continue;
-             }
-
-             if word.bytes().any(|b| b.is_ascii_digit()) {
-                 filtered_words.push(word);
-                 continue;
-             }
-
-             let clean_word = if word.is_ascii() {
-                 let clean_bytes: Vec<u8> = word
-                     .bytes()
-                     .filter(|&b| b.is_ascii_alphabetic())
-                     .map(|b| b.to_ascii_lowercase())
-                     .collect();
-                 String::from_utf8(clean_bytes).unwrap_or_else(|_| {
-                     word.chars()
-                         .filter(|c| c.is_alphabetic())
-                         .collect::<String>()
-                         .to_lowercase()
-                 })
-             } else {
-                 word.chars()
-                     .filter(|c| c.is_alphabetic())
-                     .collect::<String>()
-                     .to_lowercase()
-             };
-
-             if clean_word.is_empty() {
-                 filtered_words.push(word);
-                 continue;
-             }
-
-             if clean_word.len() <= 1 {
-                 filtered_words.push(word);
-                 continue;
-             }
-
-             if !self.stopwords.contains(&clean_word) {
-                 filtered_words.push(word);
-             }
-         }
-
-         filtered_words.join(" ")
-     }
-
-     /// Get the language code for this filter pipeline.
-     ///
-     /// Primarily useful for testing and debugging to verify language configuration.
-     #[cfg_attr(not(test), allow(dead_code))]
-     pub fn language(&self) -> &str {
-         &self.language
-     }
-
-     /// Check if a word should be preserved based on configured patterns.
-     fn should_preserve_word(&self, word: &str) -> bool {
-         self.preserve_patterns.iter().any(|pattern| pattern.is_match(word))
-     }
-
-     /// Split a word into prefix (non-alphanumeric), core (alphanumeric), and suffix (non-alphanumeric).
-     ///
-     /// This is useful for handling punctuation-wrapped words like "(hello)" or "world!".
-     /// Currently used in tests; reserved for future word boundary-aware filtering.
-     #[cfg_attr(not(test), allow(dead_code))]
-     fn split_word_boundaries(&self, word: &str) -> (String, String, String) {
-         let chars: Vec<char> = word.chars().collect();
-         let mut start = 0;
-         let mut end = chars.len();
-
-         while start < chars.len() && !chars[start].is_alphanumeric() {
-             start += 1;
-         }
-
-         while end > start && !chars[end - 1].is_alphanumeric() {
-             end -= 1;
-         }
-
-         let prefix: String = chars[..start].iter().collect();
-         let core: String = chars[start..end].iter().collect();
-         let suffix: String = chars[end..].iter().collect();
-
-         (prefix, core, suffix)
-     }
-
-     fn preserve_markdown_structure(&self, text: &str) -> String {
-         let lines: Vec<&str> = text.lines().collect();
-         let mut processed_lines = Vec::new();
-
-         for line in lines {
-             if MARKDOWN_HEADERS_REGEX.is_match(line) {
-                 processed_lines.push(line);
-                 continue;
-             }
-
-             if MARKDOWN_LISTS_REGEX.is_match(line) {
-                 processed_lines.push(line);
-                 continue;
-             }
-
-             processed_lines.push(line);
-         }
-
-         processed_lines.join("\n")
-     }
-
-     fn extract_and_preserve_code(&self, text: &str, preserved: &mut AHashMap<String, String>) -> String {
-         let mut result = text.to_string();
-         let mut code_block_id = 0;
-         let mut inline_code_id = 0;
-
-         result = MARKDOWN_CODE_BLOCK_REGEX
-             .replace_all(&result, |caps: &regex::Captures| {
-                 let code_block = caps[0].to_string();
-                 let placeholder = format!("__CODEBLOCK_{}__", code_block_id);
-                 code_block_id += 1;
-                 preserved.insert(placeholder.clone(), code_block);
-                 placeholder
-             })
-             .to_string();
-
-         result = MARKDOWN_INLINE_CODE_REGEX
-             .replace_all(&result, |caps: &regex::Captures| {
-                 let inline_code = caps[0].to_string();
-                 let placeholder = format!("__INLINECODE_{}__", inline_code_id);
-                 inline_code_id += 1;
-                 preserved.insert(placeholder.clone(), inline_code);
-                 placeholder
-             })
-             .to_string();
-
-         result
-     }
-
-     fn restore_preserved_blocks(&self, text: &str, preserved: &AHashMap<String, String>) -> String {
-         let mut result = text.to_string();
-
-         for (placeholder, original_content) in preserved {
-             result = result.replace(placeholder, original_content);
-         }
-
-         result
-     }
- }
-
- #[cfg(all(test, feature = "stopwords"))]
- mod tests {
-     use super::*;
-
-     #[test]
-     fn test_stopword_removal() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The quick brown fox is jumping over the lazy dog";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(!result.contains(" the "));
-         assert!(!result.contains(" is "));
-         assert!(result.contains("quick"));
-         assert!(result.contains("brown"));
-         assert!(result.contains("fox"));
-     }
-
-     #[test]
-     fn test_preserve_patterns() {
-         let config = TokenReductionConfig {
-             preserve_patterns: vec!["\\b[A-Z]{2,}\\b".to_string()],
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The NASA mission is a success";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(result.contains("NASA"));
-         assert!(result.contains("mission"));
-         assert!(result.contains("success"));
-     }
-
-     #[test]
-     fn test_markdown_preservation() {
-         let config = TokenReductionConfig {
-             preserve_markdown: true,
-             preserve_code: true,
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "# Header\nThis is `code` and ```\ncode block\n``` text";
-         let result = pipeline.apply_moderate_filters(input);
-
-         assert!(result.contains("# Header"));
-         assert!(result.contains("`code`"));
-         assert!(result.contains("```\ncode block\n```"));
-     }
-
-     #[test]
-     fn test_apply_light_filters_removes_html_comments() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "Text before <!-- comment --> text after";
-         let result = pipeline.apply_light_filters(input);
-
-         assert!(!result.contains("<!-- comment -->"));
-         assert!(result.contains("Text before"));
-         assert!(result.contains("text after"));
-     }
-
-     #[test]
-     fn test_apply_light_filters_normalizes_whitespace() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "Text  with   multiple    spaces";
-         let result = pipeline.apply_light_filters(input);
-
-         assert!(!result.contains("  "));
-         assert!(result.contains("Text with multiple spaces"));
-     }
-
-     #[test]
-     fn test_apply_light_filters_reduces_newlines() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "Paragraph 1\n\n\n\n\nParagraph 2";
-         let result = pipeline.apply_light_filters(input);
-
-         assert!(!result.contains("\n\n\n"));
-         assert!(result.contains("Paragraph 1"));
-         assert!(result.contains("Paragraph 2"));
-     }
-
-     #[test]
-     fn test_stopword_removal_preserves_uppercase() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The API is working WITH the SDK";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(result.contains("API"));
-         assert!(result.contains("SDK"));
-         assert!(result.contains("WITH"));
-         assert!(!result.contains("The "));
-         assert!(!result.contains(" is "));
-     }
-
-     #[test]
-     fn test_stopword_removal_preserves_numbers() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The version is 3.14 and the count is 42";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(result.contains("3.14"));
-         assert!(result.contains("42"));
-         assert!(result.contains("version"));
-         assert!(result.contains("count"));
-     }
-
-     #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
-     #[test]
-     fn test_stopword_removal_handles_punctuation() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "Hello, the world! This is great.";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(result.contains("Hello,"));
-         assert!(result.contains("world!"));
-         assert!(result.contains("great."));
-     }
-
-     #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
-     #[test]
-     fn test_custom_stopwords() {
-         use std::collections::HashMap;
-
-         let mut custom_stopwords = HashMap::new();
-         custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "word".to_string()]);
-
-         let config = TokenReductionConfig {
-             custom_stopwords: Some(custom_stopwords),
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "This is a custom word test";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(!result.contains("custom"));
-         assert!(!result.contains("word"));
-         assert!(result.contains("test"));
-     }
-
-     #[test]
-     fn test_spanish_stopwords() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "es").unwrap();
-
-         let input = "El perro grande bonito tiene";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(result.contains("perro"));
-         assert!(result.contains("grande"));
-         assert!(result.contains("bonito"));
-         let words: Vec<&str> = result.split_whitespace().collect();
-         assert!(!words.contains(&"el"));
-         assert!(!words.contains(&"El"));
-     }
-
-     #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
-     #[test]
-     fn test_unknown_language_fallback() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "unknown").unwrap();
-
-         let input = "The quick test with unknown language";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(!result.contains("The "));
-         assert!(result.contains("quick"));
-         assert!(result.contains("test"));
-     }
-
-     #[test]
-     fn test_markdown_header_preservation() {
-         let config = TokenReductionConfig {
-             preserve_markdown: true,
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "# Header 1\n## Header 2\n### Header 3\nRegular text";
-         let result = pipeline.remove_stopwords_preserving_markdown(input);
-
-         assert!(result.contains("# Header 1"));
-         assert!(result.contains("## Header 2"));
-         assert!(result.contains("### Header 3"));
-     }
-
-     #[test]
-     fn test_markdown_list_preservation() {
-         let config = TokenReductionConfig {
-             preserve_markdown: true,
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "- Item 1\n* Item 2\n+ Item 3";
-         let result = pipeline.remove_stopwords_preserving_markdown(input);
-
-         assert!(result.contains("- Item 1"));
-         assert!(result.contains("* Item 2"));
-         assert!(result.contains("+ Item 3"));
-     }
-
-     #[test]
-     fn test_markdown_table_preservation() {
-         let config = TokenReductionConfig {
-             preserve_markdown: true,
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "| Header 1 | Header 2 |\n|----------|----------|\n| Cell 1 | Cell 2 |";
-         let result = pipeline.remove_stopwords_preserving_markdown(input);
-
-         assert!(result.contains("| Header 1 | Header 2 |"));
-         assert!(result.contains("|----------|----------|"));
-     }
-
-     #[test]
-     fn test_code_block_preservation() {
-         let config = Arc::new(TokenReductionConfig {
-             preserve_code: true,
-             ..Default::default()
-         });
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let mut preserved = AHashMap::new();
-         let input = "Text before\n```rust\nfn main() {}\n```\nText after";
-         let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
-         assert_eq!(preserved.len(), 1);
-         assert!(preserved.values().any(|v| v.contains("fn main()")));
-         assert!(result.contains("__CODEBLOCK_0__"));
-     }
-
-     #[test]
-     fn test_inline_code_preservation() {
-         let config = Arc::new(TokenReductionConfig {
-             preserve_code: true,
-             ..Default::default()
-         });
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let mut preserved = AHashMap::new();
-         let input = "Use the `println!` macro";
-         let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
-         assert_eq!(preserved.len(), 1);
-         assert!(preserved.values().any(|v| v == "`println!`"));
-         assert!(result.contains("__INLINECODE_0__"));
-     }
-
-     #[test]
-     fn test_restore_preserved_blocks() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let mut preserved = AHashMap::new();
-         preserved.insert("__CODEBLOCK_0__".to_string(), "```code```".to_string());
-         preserved.insert("__INLINECODE_0__".to_string(), "`inline`".to_string());
-         let input = "Text __CODEBLOCK_0__ and __INLINECODE_0__ here";
-         let result = pipeline.restore_preserved_blocks(input, &preserved);
-
-         assert!(result.contains("```code```"));
-         assert!(result.contains("`inline`"));
-         assert!(!result.contains("__CODEBLOCK_0__"));
-         assert!(!result.contains("__INLINECODE_0__"));
-     }
-
-     #[test]
-     fn test_apply_moderate_filters_with_stopwords() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The quick brown fox is jumping";
-         let result = pipeline.apply_moderate_filters(input);
-
-         assert!(!result.contains("The "));
-         assert!(!result.contains(" is "));
-         assert!(result.contains("quick"));
-         assert!(result.contains("brown"));
-     }
-
-     #[test]
-     fn test_invalid_regex_pattern() {
-         let config = TokenReductionConfig {
-             preserve_patterns: vec!["[invalid".to_string()],
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let result = FilterPipeline::new(&config, "en");
-
-         assert!(result.is_err());
-         if let Err(err) = result {
-             assert!(matches!(err, KreuzbergError::Validation { .. }));
-         }
-     }
-
-     #[test]
-     fn test_empty_input() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let result = pipeline.apply_light_filters("");
-         assert_eq!(result, "");
-
-         let result = pipeline.apply_moderate_filters("");
-         assert_eq!(result, "");
-     }
-
-     #[test]
-     fn test_stopword_removal_single_letter_words() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "I a x test";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(result.contains("I"));
-         assert!(result.contains("x"));
-     }
-
-     #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
-     #[test]
-     fn test_stopword_removal_mixed_case() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The Test Is Working";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(!result.contains("The"));
-         assert!(!result.contains("Is"));
-         assert!(result.contains("Test"));
-         assert!(result.contains("Working"));
-     }
-
-     #[test]
-     fn test_lazy_regex_initialization() {
-         let _ = &*HTML_COMMENT_REGEX;
-         let _ = &*EXCESSIVE_NEWLINES_REGEX;
-         let _ = &*MULTIPLE_SPACES_REGEX;
-         let _ = &*MARKDOWN_CODE_BLOCK_REGEX;
-         let _ = &*MARKDOWN_INLINE_CODE_REGEX;
-         let _ = &*MARKDOWN_HEADERS_REGEX;
-         let _ = &*MARKDOWN_LISTS_REGEX;
-     }
-
-     #[test]
-     fn test_multiple_code_blocks_hashmap_approach() {
-         let config = Arc::new(TokenReductionConfig {
-             preserve_code: true,
-             ..Default::default()
-         });
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input =
-             "Start ```rust\nlet x = 1;\n``` middle `inline1` text ```python\nprint('hi')\n``` and `inline2` end";
-         let mut preserved = AHashMap::new();
-         let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
-         assert_eq!(preserved.len(), 4);
-         assert!(preserved.contains_key("__CODEBLOCK_0__"));
-         assert!(preserved.contains_key("__CODEBLOCK_1__"));
-         assert!(preserved.contains_key("__INLINECODE_0__"));
-         assert!(preserved.contains_key("__INLINECODE_1__"));
-
-         assert_eq!(preserved.get("__CODEBLOCK_0__").unwrap(), "```rust\nlet x = 1;\n```");
-         assert_eq!(preserved.get("__CODEBLOCK_1__").unwrap(), "```python\nprint('hi')\n```");
-         assert_eq!(preserved.get("__INLINECODE_0__").unwrap(), "`inline1`");
-         assert_eq!(preserved.get("__INLINECODE_1__").unwrap(), "`inline2`");
-
-         let restored = pipeline.restore_preserved_blocks(&result, &preserved);
-         assert!(restored.contains("```rust\nlet x = 1;\n```"));
-         assert!(restored.contains("```python\nprint('hi')\n```"));
-         assert!(restored.contains("`inline1`"));
-         assert!(restored.contains("`inline2`"));
-         assert!(!restored.contains("__CODEBLOCK_"));
-         assert!(!restored.contains("__INLINECODE_"));
-     }
-
-     #[test]
-     fn test_hashmap_order_independence() {
-         let config = Arc::new(TokenReductionConfig {
-             preserve_code: true,
-             ..Default::default()
-         });
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "Text `a` and `b` and `c` here";
-         let mut preserved = AHashMap::new();
-         let result = pipeline.extract_and_preserve_code(input, &mut preserved);
-
-         assert_eq!(preserved.len(), 3);
-         let restored = pipeline.restore_preserved_blocks(&result, &preserved);
-
-         assert!(restored.contains("`a`"));
-         assert!(restored.contains("`b`"));
-         assert!(restored.contains("`c`"));
-         assert_eq!(restored, "Text `a` and `b` and `c` here");
-     }
-
-     #[test]
-     fn test_preserve_patterns_regex() {
-         let config = TokenReductionConfig {
-             preserve_patterns: vec![
-                 r"\b[A-Z]{2,}\b".to_string(),
-                 r"\b\d+\.\d+\.\d+\b".to_string(),
-                 r"@\w+".to_string(),
-             ],
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The NASA and HTTP protocols version 1.2.3 by @john";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(result.contains("NASA"));
-         assert!(result.contains("HTTP"));
-         assert!(result.contains("1.2.3"));
-         assert!(result.contains("@john"));
-
-         assert!(!result.contains(" the "));
-         assert!(!result.contains(" and "));
-         assert!(!result.contains(" by "));
-     }
-
-     #[test]
-     fn test_language_specific_stopwords() {
-         let config_en = Arc::new(TokenReductionConfig::default());
-         let pipeline_en = FilterPipeline::new(&config_en, "en").unwrap();
-         assert_eq!(pipeline_en.language(), "en");
-
-         let input_en = "the quick brown fox";
-         let result_en = pipeline_en.remove_stopwords(input_en);
-         assert!(!result_en.contains(" the "));
-
-         let config_de = Arc::new(TokenReductionConfig::default());
-         let pipeline_de = FilterPipeline::new(&config_de, "de").unwrap();
-         assert_eq!(pipeline_de.language(), "de");
-
-         let input_de = "der schnelle braune fuchs";
-         let result_de = pipeline_de.remove_stopwords(input_de);
-         assert!(!result_de.contains(" der "));
-         assert!(result_de.contains("schnelle"));
-     }
-
-     #[test]
-     fn test_language_fallback_to_english() {
-         let config = Arc::new(TokenReductionConfig::default());
-
-         let pipeline = FilterPipeline::new(&config, "unsupported_lang").unwrap();
-         assert_eq!(pipeline.language(), "unsupported_lang");
-
-         let input = "the quick brown fox";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(!result.contains(" the "));
-         assert!(result.contains("quick"));
-     }
-
-     #[test]
-     fn test_split_word_boundaries() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let (prefix, core, suffix) = pipeline.split_word_boundaries("(hello)");
-         assert_eq!(prefix, "(");
-         assert_eq!(core, "hello");
-         assert_eq!(suffix, ")");
-
-         let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("world!");
-         assert_eq!(prefix2, "");
-         assert_eq!(core2, "world");
-         assert_eq!(suffix2, "!");
-
-         let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("'test");
-         assert_eq!(prefix3, "'");
-         assert_eq!(core3, "test");
-         assert_eq!(suffix3, "");
-
-         let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("simple");
-         assert_eq!(prefix4, "");
-         assert_eq!(core4, "simple");
-         assert_eq!(suffix4, "");
-
-         let (prefix5, core5, suffix5) = pipeline.split_word_boundaries("\"example!!!\"");
-         assert_eq!(prefix5, "\"");
-         assert_eq!(core5, "example");
-         assert_eq!(suffix5, "!!!\"");
-     }
-
-     #[test]
-     fn test_split_word_boundaries_edge_cases() {
-         let config = Arc::new(TokenReductionConfig::default());
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let (prefix, core, suffix) = pipeline.split_word_boundaries("!!!");
-         assert_eq!(prefix, "!!!");
-         assert_eq!(core, "");
-         assert_eq!(suffix, "");
-
-         let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("");
-         assert_eq!(prefix2, "");
-         assert_eq!(core2, "");
-         assert_eq!(suffix2, "");
-
-         let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("a");
-         assert_eq!(prefix3, "");
-         assert_eq!(core3, "a");
-         assert_eq!(suffix3, "");
-
-         let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("(café)");
-         assert_eq!(prefix4, "(");
-         assert_eq!(core4, "café");
-         assert_eq!(suffix4, ")");
-     }
-
-     #[test]
-     fn test_custom_stopwords_with_preserve_patterns() {
-         use std::collections::HashMap;
-
-         let mut custom_stopwords = HashMap::new();
-         custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "stopword".to_string()]);
-
-         let config = TokenReductionConfig {
-             custom_stopwords: Some(custom_stopwords),
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "this is a custom stopword test";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(!result.contains(" custom "));
-         assert!(!result.contains(" stopword "));
-         assert!(!result.contains(" is "));
-         assert!(!result.contains(" a "));
-         assert!(result.contains("test"));
-     }
-
-     #[test]
-     fn test_preserve_patterns_empty() {
-         let config = TokenReductionConfig {
-             preserve_patterns: vec![],
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let pipeline = FilterPipeline::new(&config, "en").unwrap();
-
-         let input = "The quick brown fox";
-         let result = pipeline.remove_stopwords(input);
-
-         assert!(!result.contains(" The "));
-         assert!(result.contains("quick"));
-     }
-
-     #[test]
-     fn test_invalid_preserve_pattern() {
-         let config = TokenReductionConfig {
-             preserve_patterns: vec!["[invalid".to_string()],
-             ..Default::default()
-         };
-
-         let config = Arc::new(config);
-         let result = FilterPipeline::new(&config, "en");
-
-         assert!(result.is_err());
-         if let Err(e) = result {
-             match e {
-                 KreuzbergError::Validation { message, .. } => {
-                     assert!(message.contains("Invalid regex pattern"));
-                 }
-                 _ => panic!("Expected ValidationError"),
-             }
-         }
-     }
- }
+ use crate::error::{KreuzbergError, Result};
+ use crate::stopwords::STOPWORDS;
+ use crate::text::token_reduction::config::TokenReductionConfig;
+ use ahash::{AHashMap, AHashSet};
+ use once_cell::sync::Lazy;
+ use regex::Regex;
+ use std::sync::Arc;
+
+ static HTML_COMMENT_REGEX: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"<!--.*?-->").expect("HTML comment regex pattern is valid and should compile"));
+ static EXCESSIVE_NEWLINES_REGEX: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"\n{3,}").expect("Excessive newlines regex pattern is valid and should compile"));
+ static MULTIPLE_SPACES_REGEX: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r" {2,}").expect("Multiple spaces regex pattern is valid and should compile"));
+ static MARKDOWN_CODE_BLOCK_REGEX: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"```[\s\S]*?```").expect("Markdown code block regex pattern is valid and should compile"));
+ static MARKDOWN_INLINE_CODE_REGEX: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"`[^`\n]+`").expect("Markdown inline code regex pattern is valid and should compile"));
+ static MARKDOWN_HEADERS_REGEX: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"^#{1,6}\s+").expect("Markdown headers regex pattern is valid and should compile"));
+ static MARKDOWN_LISTS_REGEX: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"^[ \t]*[-*+]\s+").expect("Markdown lists regex pattern is valid and should compile"));
+
+ pub struct FilterPipeline {
+     config: Arc<TokenReductionConfig>,
+     stopwords: AHashSet<String>,
+     preserve_patterns: Vec<Regex>,
+     language: String,
+ }
+
+ impl FilterPipeline {
+     pub fn new(config: &Arc<TokenReductionConfig>, language: &str) -> Result<Self> {
+         let mut stopwords = STOPWORDS.get(language).cloned().unwrap_or_else(|| {
+             STOPWORDS
+                 .get("en")
+                 .cloned()
+                 .expect("English stopwords must be available - indicates build failure if missing")
+         });
+
+         if let Some(ref custom) = config.custom_stopwords
+             && let Some(custom_for_lang) = custom.get(language)
+         {
+             for word in custom_for_lang {
+                 stopwords.insert(word.to_lowercase());
+             }
+         }
+
+         let preserve_patterns: std::result::Result<Vec<Regex>, _> = config
+             .preserve_patterns
+             .iter()
+             .map(|pattern| Regex::new(pattern))
+             .collect();
+
+         let preserve_patterns =
+             preserve_patterns.map_err(|e| KreuzbergError::validation(format!("Invalid regex pattern: {}", e)))?;
+
+         Ok(Self {
+             config: Arc::clone(config),
+             stopwords,
+             preserve_patterns,
+             language: language.to_string(),
+         })
+     }
+
+     pub fn apply_light_filters(&self, text: &str) -> String {
+         let mut result = text.to_string();
+
+         let mut preserved_blocks = AHashMap::new();
+         if self.config.preserve_markdown {
+             result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
+         }
+
+         result = HTML_COMMENT_REGEX.replace_all(&result, "").to_string();
+
+         result = MULTIPLE_SPACES_REGEX.replace_all(&result, " ").to_string();
+
+         result = EXCESSIVE_NEWLINES_REGEX.replace_all(&result, "\n\n").to_string();
+
+         if self.config.preserve_markdown {
+             result = self.preserve_markdown_structure(&result);
+         }
+
+         result = self.restore_preserved_blocks(&result, &preserved_blocks);
+
+         result
+     }
+
+     pub fn apply_moderate_filters(&self, text: &str) -> String {
+         let mut result = self.apply_light_filters(text);
+
+         let mut preserved_blocks = AHashMap::new();
+         if self.config.preserve_code {
+             result = self.extract_and_preserve_code(&result, &mut preserved_blocks);
+         }
+
+         if self.config.preserve_markdown {
+             result = self.remove_stopwords_preserving_markdown(&result);
+         } else {
+             result = self.remove_stopwords(&result);
+         }
+
+         result = self.restore_preserved_blocks(&result, &preserved_blocks);
+
+         result
+     }
+
+     fn remove_stopwords_preserving_markdown(&self, text: &str) -> String {
+         let lines: Vec<&str> = text.lines().collect();
+         let mut processed_lines = Vec::new();
+
+         for line in lines {
+             if MARKDOWN_HEADERS_REGEX.is_match(line) {
+                 processed_lines.push(line.to_string());
+                 continue;
+             }
+
+             if MARKDOWN_LISTS_REGEX.is_match(line) {
+                 processed_lines.push(line.to_string());
+                 continue;
+             }
+
+             if line.trim().starts_with('|') && line.trim().ends_with('|') {
+                 processed_lines.push(line.to_string());
+                 continue;
+             }
+
+             let processed_line = self.remove_stopwords(line);
+             processed_lines.push(processed_line);
+         }
+
+         processed_lines.join("\n")
+     }
+
+     fn remove_stopwords(&self, text: &str) -> String {
+         let words: Vec<&str> = text.split_whitespace().collect();
+         let mut filtered_words = Vec::with_capacity(words.len());
+
+         for word in words {
+             if word.is_empty() {
+                 continue;
+             }
+
+             if self.should_preserve_word(word) {
+                 filtered_words.push(word);
+                 continue;
+             }
+
+             if word.len() > 1 && word.bytes().all(|b| b.is_ascii_uppercase() || !b.is_ascii_alphabetic()) {
+                 filtered_words.push(word);
+                 continue;
+             }
+
+             if word.bytes().any(|b| b.is_ascii_digit()) {
+                 filtered_words.push(word);
+                 continue;
+             }
+
+             let clean_word = if word.is_ascii() {
+                 let clean_bytes: Vec<u8> = word
+                     .bytes()
+                     .filter(|&b| b.is_ascii_alphabetic())
+                     .map(|b| b.to_ascii_lowercase())
+                     .collect();
+                 String::from_utf8(clean_bytes).unwrap_or_else(|_| {
+                     word.chars()
+                         .filter(|c| c.is_alphabetic())
+                         .collect::<String>()
+                         .to_lowercase()
+                 })
+             } else {
+                 word.chars()
+                     .filter(|c| c.is_alphabetic())
+                     .collect::<String>()
+                     .to_lowercase()
+             };
+
+             if clean_word.is_empty() {
+                 filtered_words.push(word);
+                 continue;
+             }
+
+             if clean_word.len() <= 1 {
+                 filtered_words.push(word);
+                 continue;
+             }
+
+             if !self.stopwords.contains(&clean_word) {
+                 filtered_words.push(word);
+             }
+         }
+
+         filtered_words.join(" ")
+     }
+
+     /// Get the language code for this filter pipeline.
+     ///
+     /// Primarily useful for testing and debugging to verify language configuration.
+     #[cfg_attr(not(test), allow(dead_code))]
+     pub fn language(&self) -> &str {
+         &self.language
+     }
+
+     /// Check if a word should be preserved based on configured patterns.
+     fn should_preserve_word(&self, word: &str) -> bool {
+         self.preserve_patterns.iter().any(|pattern| pattern.is_match(word))
+     }
+
+     /// Split a word into prefix (non-alphanumeric), core (alphanumeric), and suffix (non-alphanumeric).
+     ///
+     /// This is useful for handling punctuation-wrapped words like "(hello)" or "world!".
+     /// Currently used in tests; reserved for future word boundary-aware filtering.
+     #[cfg_attr(not(test), allow(dead_code))]
+     fn split_word_boundaries(&self, word: &str) -> (String, String, String) {
+         let chars: Vec<char> = word.chars().collect();
+         let mut start = 0;
+         let mut end = chars.len();
+
+         while start < chars.len() && !chars[start].is_alphanumeric() {
+             start += 1;
+         }
+
+         while end > start && !chars[end - 1].is_alphanumeric() {
+             end -= 1;
+         }
+
+         let prefix: String = chars[..start].iter().collect();
+         let core: String = chars[start..end].iter().collect();
+         let suffix: String = chars[end..].iter().collect();
+
+         (prefix, core, suffix)
+     }
+
+     fn preserve_markdown_structure(&self, text: &str) -> String {
+         let lines: Vec<&str> = text.lines().collect();
+         let mut processed_lines = Vec::new();
+
+         for line in lines {
+             if MARKDOWN_HEADERS_REGEX.is_match(line) {
+                 processed_lines.push(line);
+                 continue;
+             }
+
+             if MARKDOWN_LISTS_REGEX.is_match(line) {
+                 processed_lines.push(line);
+                 continue;
+             }
+
+             processed_lines.push(line);
+         }
+
+         processed_lines.join("\n")
+     }
+
+     fn extract_and_preserve_code(&self, text: &str, preserved: &mut AHashMap<String, String>) -> String {
+         let mut result = text.to_string();
+         let mut code_block_id = 0;
+         let mut inline_code_id = 0;
+
+         result = MARKDOWN_CODE_BLOCK_REGEX
+             .replace_all(&result, |caps: &regex::Captures| {
+                 let code_block = caps[0].to_string();
+                 let placeholder = format!("__CODEBLOCK_{}__", code_block_id);
+                 code_block_id += 1;
+                 preserved.insert(placeholder.clone(), code_block);
+                 placeholder
+             })
+             .to_string();
+
+         result = MARKDOWN_INLINE_CODE_REGEX
270
+ .replace_all(&result, |caps: &regex::Captures| {
271
+ let inline_code = caps[0].to_string();
272
+ let placeholder = format!("__INLINECODE_{}__", inline_code_id);
273
+ inline_code_id += 1;
274
+ preserved.insert(placeholder.clone(), inline_code);
275
+ placeholder
276
+ })
277
+ .to_string();
278
+
279
+ result
280
+ }
281
+
282
+ fn restore_preserved_blocks(&self, text: &str, preserved: &AHashMap<String, String>) -> String {
283
+ let mut result = text.to_string();
284
+
285
+ for (placeholder, original_content) in preserved {
286
+ result = result.replace(placeholder, original_content);
287
+ }
288
+
289
+ result
290
+ }
291
+ }
292
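A minimal usage sketch of the pipeline above, included as a reading aid rather than as part of the released file; it assumes only the items visible in this diff (`TokenReductionConfig`, `FilterPipeline`, and the crate's `Result` alias), and the `reduce` wrapper is hypothetical:

```rust
use std::sync::Arc;

// Sketch only: TokenReductionConfig and FilterPipeline are the types
// defined in this file; the `reduce` wrapper is illustrative.
fn reduce(text: &str) -> Result<String> {
    let config = Arc::new(TokenReductionConfig {
        preserve_markdown: true, // keep headers, lists, and table rows verbatim
        preserve_code: true,     // shield fenced and inline code behind placeholders
        ..Default::default()
    });
    let pipeline = FilterPipeline::new(&config, "en")?;
    // Light filters strip HTML comments and collapse whitespace; moderate
    // filters additionally remove stopwords outside the preserved spans.
    Ok(pipeline.apply_moderate_filters(text))
}
```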
+
+#[cfg(all(test, feature = "stopwords"))]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_stopword_removal() {
+        let config = Arc::new(TokenReductionConfig::default());
+        let pipeline = FilterPipeline::new(&config, "en").unwrap();
+
+        let input = "The quick brown fox is jumping over the lazy dog";
+        let result = pipeline.remove_stopwords(input);
+
+        assert!(!result.contains(" the "));
+        assert!(!result.contains(" is "));
+        assert!(result.contains("quick"));
+        assert!(result.contains("brown"));
+        assert!(result.contains("fox"));
+    }
+
+    #[test]
+    fn test_preserve_patterns() {
+        let config = TokenReductionConfig {
+            preserve_patterns: vec!["\\b[A-Z]{2,}\\b".to_string()],
+            ..Default::default()
+        };
+
+        let config = Arc::new(config);
+        let pipeline = FilterPipeline::new(&config, "en").unwrap();
+
+        let input = "The NASA mission is a success";
+        let result = pipeline.remove_stopwords(input);
+
+        assert!(result.contains("NASA"));
+        assert!(result.contains("mission"));
+        assert!(result.contains("success"));
+    }
+
+    #[test]
+    fn test_markdown_preservation() {
+        let config = TokenReductionConfig {
+            preserve_markdown: true,
+            preserve_code: true,
+            ..Default::default()
+        };
+
+        let config = Arc::new(config);
+        let pipeline = FilterPipeline::new(&config, "en").unwrap();
+
+        let input = "# Header\nThis is `code` and ```\ncode block\n``` text";
+        let result = pipeline.apply_moderate_filters(input);
+
+        assert!(result.contains("# Header"));
+        assert!(result.contains("`code`"));
+        assert!(result.contains("```\ncode block\n```"));
+    }
+
+    #[test]
+    fn test_apply_light_filters_removes_html_comments() {
+        let config = Arc::new(TokenReductionConfig::default());
+        let pipeline = FilterPipeline::new(&config, "en").unwrap();
+
+        let input = "Text before <!-- comment --> text after";
+        let result = pipeline.apply_light_filters(input);
+
+        assert!(!result.contains("<!-- comment -->"));
+        assert!(result.contains("Text before"));
+        assert!(result.contains("text after"));
+    }
+
+    #[test]
+    fn test_apply_light_filters_normalizes_whitespace() {
+        let config = Arc::new(TokenReductionConfig::default());
+        let pipeline = FilterPipeline::new(&config, "en").unwrap();
+
+ let input = "Text with multiple spaces";
368
+ let result = pipeline.apply_light_filters(input);
369
+
370
+ assert!(!result.contains(" "));
371
+ assert!(result.contains("Text with multiple spaces"));
372
+ }
373
+
374
+ #[test]
375
+ fn test_apply_light_filters_reduces_newlines() {
376
+ let config = Arc::new(TokenReductionConfig::default());
377
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
378
+
379
+ let input = "Paragraph 1\n\n\n\n\nParagraph 2";
380
+ let result = pipeline.apply_light_filters(input);
381
+
382
+ assert!(!result.contains("\n\n\n"));
383
+ assert!(result.contains("Paragraph 1"));
384
+ assert!(result.contains("Paragraph 2"));
385
+ }
386
+
387
+ #[test]
388
+ fn test_stopword_removal_preserves_uppercase() {
389
+ let config = Arc::new(TokenReductionConfig::default());
390
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
391
+
392
+ let input = "The API is working WITH the SDK";
393
+ let result = pipeline.remove_stopwords(input);
394
+
395
+ assert!(result.contains("API"));
396
+ assert!(result.contains("SDK"));
397
+ assert!(result.contains("WITH"));
398
+ assert!(!result.contains("The "));
399
+ assert!(!result.contains(" is "));
400
+ }
401
+
402
+ #[test]
403
+ fn test_stopword_removal_preserves_numbers() {
404
+ let config = Arc::new(TokenReductionConfig::default());
405
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
406
+
407
+ let input = "The version is 3.14 and the count is 42";
408
+ let result = pipeline.remove_stopwords(input);
409
+
410
+ assert!(result.contains("3.14"));
411
+ assert!(result.contains("42"));
412
+ assert!(result.contains("version"));
413
+ assert!(result.contains("count"));
414
+ }
415
+
416
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
417
+ #[test]
418
+ fn test_stopword_removal_handles_punctuation() {
419
+ let config = Arc::new(TokenReductionConfig::default());
420
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
421
+
422
+ let input = "Hello, the world! This is great.";
423
+ let result = pipeline.remove_stopwords(input);
424
+
425
+ assert!(result.contains("Hello,"));
426
+ assert!(result.contains("world!"));
427
+ assert!(result.contains("great."));
428
+ }
429
+
430
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
431
+ #[test]
432
+ fn test_custom_stopwords() {
433
+ use std::collections::HashMap;
434
+
435
+ let mut custom_stopwords = HashMap::new();
436
+ custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "word".to_string()]);
437
+
438
+ let config = TokenReductionConfig {
439
+ custom_stopwords: Some(custom_stopwords),
440
+ ..Default::default()
441
+ };
442
+
443
+ let config = Arc::new(config);
444
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
445
+
446
+ let input = "This is a custom word test";
447
+ let result = pipeline.remove_stopwords(input);
448
+
449
+ assert!(!result.contains("custom"));
450
+ assert!(!result.contains("word"));
451
+ assert!(result.contains("test"));
452
+ }
453
+
454
+ #[test]
455
+ fn test_spanish_stopwords() {
456
+ let config = Arc::new(TokenReductionConfig::default());
457
+ let pipeline = FilterPipeline::new(&config, "es").unwrap();
458
+
459
+ let input = "El perro grande bonito tiene";
460
+ let result = pipeline.remove_stopwords(input);
461
+
462
+ assert!(result.contains("perro"));
463
+ assert!(result.contains("grande"));
464
+ assert!(result.contains("bonito"));
465
+ let words: Vec<&str> = result.split_whitespace().collect();
466
+ assert!(!words.contains(&"el"));
467
+ assert!(!words.contains(&"El"));
468
+ }
469
+
470
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
471
+ #[test]
472
+ fn test_unknown_language_fallback() {
473
+ let config = Arc::new(TokenReductionConfig::default());
474
+ let pipeline = FilterPipeline::new(&config, "unknown").unwrap();
475
+
476
+ let input = "The quick test with unknown language";
477
+ let result = pipeline.remove_stopwords(input);
478
+
479
+ assert!(!result.contains("The "));
480
+ assert!(result.contains("quick"));
481
+ assert!(result.contains("test"));
482
+ }
483
+
484
+ #[test]
485
+ fn test_markdown_header_preservation() {
486
+ let config = TokenReductionConfig {
487
+ preserve_markdown: true,
488
+ ..Default::default()
489
+ };
490
+
491
+ let config = Arc::new(config);
492
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
493
+
494
+ let input = "# Header 1\n## Header 2\n### Header 3\nRegular text";
495
+ let result = pipeline.remove_stopwords_preserving_markdown(input);
496
+
497
+ assert!(result.contains("# Header 1"));
498
+ assert!(result.contains("## Header 2"));
499
+ assert!(result.contains("### Header 3"));
500
+ }
501
+
502
+ #[test]
503
+ fn test_markdown_list_preservation() {
504
+ let config = TokenReductionConfig {
505
+ preserve_markdown: true,
506
+ ..Default::default()
507
+ };
508
+
509
+ let config = Arc::new(config);
510
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
511
+
512
+ let input = "- Item 1\n* Item 2\n+ Item 3";
513
+ let result = pipeline.remove_stopwords_preserving_markdown(input);
514
+
515
+ assert!(result.contains("- Item 1"));
516
+ assert!(result.contains("* Item 2"));
517
+ assert!(result.contains("+ Item 3"));
518
+ }
519
+
520
+ #[test]
521
+ fn test_markdown_table_preservation() {
522
+ let config = TokenReductionConfig {
523
+ preserve_markdown: true,
524
+ ..Default::default()
525
+ };
526
+
527
+ let config = Arc::new(config);
528
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
529
+
530
+ let input = "| Header 1 | Header 2 |\n|----------|----------|\n| Cell 1 | Cell 2 |";
531
+ let result = pipeline.remove_stopwords_preserving_markdown(input);
532
+
533
+ assert!(result.contains("| Header 1 | Header 2 |"));
534
+ assert!(result.contains("|----------|----------|"));
535
+ }
536
+
537
+ #[test]
538
+ fn test_code_block_preservation() {
539
+ let config = Arc::new(TokenReductionConfig {
540
+ preserve_code: true,
541
+ ..Default::default()
542
+ });
543
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
544
+
545
+ let mut preserved = AHashMap::new();
546
+ let input = "Text before\n```rust\nfn main() {}\n```\nText after";
547
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
548
+
549
+ assert_eq!(preserved.len(), 1);
550
+ assert!(preserved.values().any(|v| v.contains("fn main()")));
551
+ assert!(result.contains("__CODEBLOCK_0__"));
552
+ }
553
+
554
+ #[test]
555
+ fn test_inline_code_preservation() {
556
+ let config = Arc::new(TokenReductionConfig {
557
+ preserve_code: true,
558
+ ..Default::default()
559
+ });
560
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
561
+
562
+ let mut preserved = AHashMap::new();
563
+ let input = "Use the `println!` macro";
564
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
565
+
566
+ assert_eq!(preserved.len(), 1);
567
+ assert!(preserved.values().any(|v| v == "`println!`"));
568
+ assert!(result.contains("__INLINECODE_0__"));
569
+ }
570
+
571
+ #[test]
572
+ fn test_restore_preserved_blocks() {
573
+ let config = Arc::new(TokenReductionConfig::default());
574
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
575
+
576
+ let mut preserved = AHashMap::new();
577
+ preserved.insert("__CODEBLOCK_0__".to_string(), "```code```".to_string());
578
+ preserved.insert("__INLINECODE_0__".to_string(), "`inline`".to_string());
579
+ let input = "Text __CODEBLOCK_0__ and __INLINECODE_0__ here";
580
+ let result = pipeline.restore_preserved_blocks(input, &preserved);
581
+
582
+ assert!(result.contains("```code```"));
583
+ assert!(result.contains("`inline`"));
584
+ assert!(!result.contains("__CODEBLOCK_0__"));
585
+ assert!(!result.contains("__INLINECODE_0__"));
586
+ }
587
+
588
+ #[test]
589
+ fn test_apply_moderate_filters_with_stopwords() {
590
+ let config = Arc::new(TokenReductionConfig::default());
591
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
592
+
593
+ let input = "The quick brown fox is jumping";
594
+ let result = pipeline.apply_moderate_filters(input);
595
+
596
+ assert!(!result.contains("The "));
597
+ assert!(!result.contains(" is "));
598
+ assert!(result.contains("quick"));
599
+ assert!(result.contains("brown"));
600
+ }
601
+
602
+ #[test]
603
+ fn test_invalid_regex_pattern() {
604
+ let config = TokenReductionConfig {
605
+ preserve_patterns: vec!["[invalid".to_string()],
606
+ ..Default::default()
607
+ };
608
+
609
+ let config = Arc::new(config);
610
+ let result = FilterPipeline::new(&config, "en");
611
+
612
+ assert!(result.is_err());
613
+ if let Err(err) = result {
614
+ assert!(matches!(err, KreuzbergError::Validation { .. }));
615
+ }
616
+ }
617
+
618
+ #[test]
619
+ fn test_empty_input() {
620
+ let config = Arc::new(TokenReductionConfig::default());
621
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
622
+
623
+ let result = pipeline.apply_light_filters("");
624
+ assert_eq!(result, "");
625
+
626
+ let result = pipeline.apply_moderate_filters("");
627
+ assert_eq!(result, "");
628
+ }
629
+
630
+ #[test]
631
+ fn test_stopword_removal_single_letter_words() {
632
+ let config = Arc::new(TokenReductionConfig::default());
633
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
634
+
635
+ let input = "I a x test";
636
+ let result = pipeline.remove_stopwords(input);
637
+
638
+ assert!(result.contains("I"));
639
+ assert!(result.contains("x"));
640
+ }
641
+
642
+ #[cfg_attr(coverage, ignore = "coverage instrumentation disables SIMD stopword paths")]
643
+ #[test]
644
+ fn test_stopword_removal_mixed_case() {
645
+ let config = Arc::new(TokenReductionConfig::default());
646
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
647
+
648
+ let input = "The Test Is Working";
649
+ let result = pipeline.remove_stopwords(input);
650
+
651
+ assert!(!result.contains("The"));
652
+ assert!(!result.contains("Is"));
653
+ assert!(result.contains("Test"));
654
+ assert!(result.contains("Working"));
655
+ }
656
+
657
+ #[test]
658
+ fn test_lazy_regex_initialization() {
659
+ let _ = &*HTML_COMMENT_REGEX;
660
+ let _ = &*EXCESSIVE_NEWLINES_REGEX;
661
+ let _ = &*MULTIPLE_SPACES_REGEX;
662
+ let _ = &*MARKDOWN_CODE_BLOCK_REGEX;
663
+ let _ = &*MARKDOWN_INLINE_CODE_REGEX;
664
+ let _ = &*MARKDOWN_HEADERS_REGEX;
665
+ let _ = &*MARKDOWN_LISTS_REGEX;
666
+ }
667
+
668
+ #[test]
669
+ fn test_multiple_code_blocks_hashmap_approach() {
670
+ let config = Arc::new(TokenReductionConfig {
671
+ preserve_code: true,
672
+ ..Default::default()
673
+ });
674
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
675
+
676
+ let input =
677
+ "Start ```rust\nlet x = 1;\n``` middle `inline1` text ```python\nprint('hi')\n``` and `inline2` end";
678
+ let mut preserved = AHashMap::new();
679
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
680
+
681
+ assert_eq!(preserved.len(), 4);
682
+ assert!(preserved.contains_key("__CODEBLOCK_0__"));
683
+ assert!(preserved.contains_key("__CODEBLOCK_1__"));
684
+ assert!(preserved.contains_key("__INLINECODE_0__"));
685
+ assert!(preserved.contains_key("__INLINECODE_1__"));
686
+
687
+ assert_eq!(preserved.get("__CODEBLOCK_0__").unwrap(), "```rust\nlet x = 1;\n```");
688
+ assert_eq!(preserved.get("__CODEBLOCK_1__").unwrap(), "```python\nprint('hi')\n```");
689
+ assert_eq!(preserved.get("__INLINECODE_0__").unwrap(), "`inline1`");
690
+ assert_eq!(preserved.get("__INLINECODE_1__").unwrap(), "`inline2`");
691
+
692
+ let restored = pipeline.restore_preserved_blocks(&result, &preserved);
693
+ assert!(restored.contains("```rust\nlet x = 1;\n```"));
694
+ assert!(restored.contains("```python\nprint('hi')\n```"));
695
+ assert!(restored.contains("`inline1`"));
696
+ assert!(restored.contains("`inline2`"));
697
+ assert!(!restored.contains("__CODEBLOCK_"));
698
+ assert!(!restored.contains("__INLINECODE_"));
699
+ }
700
+
701
+ #[test]
702
+ fn test_hashmap_order_independence() {
703
+ let config = Arc::new(TokenReductionConfig {
704
+ preserve_code: true,
705
+ ..Default::default()
706
+ });
707
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
708
+
709
+ let input = "Text `a` and `b` and `c` here";
710
+ let mut preserved = AHashMap::new();
711
+ let result = pipeline.extract_and_preserve_code(input, &mut preserved);
712
+
713
+ assert_eq!(preserved.len(), 3);
714
+ let restored = pipeline.restore_preserved_blocks(&result, &preserved);
715
+
716
+ assert!(restored.contains("`a`"));
717
+ assert!(restored.contains("`b`"));
718
+ assert!(restored.contains("`c`"));
719
+ assert_eq!(restored, "Text `a` and `b` and `c` here");
720
+ }
721
+
722
+ #[test]
723
+ fn test_preserve_patterns_regex() {
724
+ let config = TokenReductionConfig {
725
+ preserve_patterns: vec![
726
+ r"\b[A-Z]{2,}\b".to_string(),
727
+ r"\b\d+\.\d+\.\d+\b".to_string(),
728
+ r"@\w+".to_string(),
729
+ ],
730
+ ..Default::default()
731
+ };
732
+
733
+ let config = Arc::new(config);
734
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
735
+
736
+ let input = "The NASA and HTTP protocols version 1.2.3 by @john";
737
+ let result = pipeline.remove_stopwords(input);
738
+
739
+ assert!(result.contains("NASA"));
740
+ assert!(result.contains("HTTP"));
741
+ assert!(result.contains("1.2.3"));
742
+ assert!(result.contains("@john"));
743
+
744
+ assert!(!result.contains(" the "));
745
+ assert!(!result.contains(" and "));
746
+ assert!(!result.contains(" by "));
747
+ }
748
+
749
+ #[test]
750
+ fn test_language_specific_stopwords() {
751
+ let config_en = Arc::new(TokenReductionConfig::default());
752
+ let pipeline_en = FilterPipeline::new(&config_en, "en").unwrap();
753
+ assert_eq!(pipeline_en.language(), "en");
754
+
755
+ let input_en = "the quick brown fox";
756
+ let result_en = pipeline_en.remove_stopwords(input_en);
757
+ assert!(!result_en.contains(" the "));
758
+
759
+ let config_de = Arc::new(TokenReductionConfig::default());
760
+ let pipeline_de = FilterPipeline::new(&config_de, "de").unwrap();
761
+ assert_eq!(pipeline_de.language(), "de");
762
+
763
+ let input_de = "der schnelle braune fuchs";
764
+ let result_de = pipeline_de.remove_stopwords(input_de);
765
+ assert!(!result_de.contains(" der "));
766
+ assert!(result_de.contains("schnelle"));
767
+ }
768
+
769
+ #[test]
770
+ fn test_language_fallback_to_english() {
771
+ let config = Arc::new(TokenReductionConfig::default());
772
+
773
+ let pipeline = FilterPipeline::new(&config, "unsupported_lang").unwrap();
774
+ assert_eq!(pipeline.language(), "unsupported_lang");
775
+
776
+ let input = "the quick brown fox";
777
+ let result = pipeline.remove_stopwords(input);
778
+
779
+ assert!(!result.contains(" the "));
780
+ assert!(result.contains("quick"));
781
+ }
782
+
783
+ #[test]
784
+ fn test_split_word_boundaries() {
785
+ let config = Arc::new(TokenReductionConfig::default());
786
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
787
+
788
+ let (prefix, core, suffix) = pipeline.split_word_boundaries("(hello)");
789
+ assert_eq!(prefix, "(");
790
+ assert_eq!(core, "hello");
791
+ assert_eq!(suffix, ")");
792
+
793
+ let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("world!");
794
+ assert_eq!(prefix2, "");
795
+ assert_eq!(core2, "world");
796
+ assert_eq!(suffix2, "!");
797
+
798
+ let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("'test");
799
+ assert_eq!(prefix3, "'");
800
+ assert_eq!(core3, "test");
801
+ assert_eq!(suffix3, "");
802
+
803
+ let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("simple");
804
+ assert_eq!(prefix4, "");
805
+ assert_eq!(core4, "simple");
806
+ assert_eq!(suffix4, "");
807
+
808
+ let (prefix5, core5, suffix5) = pipeline.split_word_boundaries("\"example!!!\"");
809
+ assert_eq!(prefix5, "\"");
810
+ assert_eq!(core5, "example");
811
+ assert_eq!(suffix5, "!!!\"");
812
+ }
813
+
814
+ #[test]
815
+ fn test_split_word_boundaries_edge_cases() {
816
+ let config = Arc::new(TokenReductionConfig::default());
817
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
818
+
819
+ let (prefix, core, suffix) = pipeline.split_word_boundaries("!!!");
820
+ assert_eq!(prefix, "!!!");
821
+ assert_eq!(core, "");
822
+ assert_eq!(suffix, "");
823
+
824
+ let (prefix2, core2, suffix2) = pipeline.split_word_boundaries("");
825
+ assert_eq!(prefix2, "");
826
+ assert_eq!(core2, "");
827
+ assert_eq!(suffix2, "");
828
+
829
+ let (prefix3, core3, suffix3) = pipeline.split_word_boundaries("a");
830
+ assert_eq!(prefix3, "");
831
+ assert_eq!(core3, "a");
832
+ assert_eq!(suffix3, "");
833
+
834
+ let (prefix4, core4, suffix4) = pipeline.split_word_boundaries("(café)");
835
+ assert_eq!(prefix4, "(");
836
+ assert_eq!(core4, "café");
837
+ assert_eq!(suffix4, ")");
838
+ }
839
+
840
+ #[test]
841
+ fn test_custom_stopwords_with_preserve_patterns() {
842
+ use std::collections::HashMap;
843
+
844
+ let mut custom_stopwords = HashMap::new();
845
+ custom_stopwords.insert("en".to_string(), vec!["custom".to_string(), "stopword".to_string()]);
846
+
847
+ let config = TokenReductionConfig {
848
+ custom_stopwords: Some(custom_stopwords),
849
+ ..Default::default()
850
+ };
851
+
852
+ let config = Arc::new(config);
853
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
854
+
855
+ let input = "this is a custom stopword test";
856
+ let result = pipeline.remove_stopwords(input);
857
+
858
+ assert!(!result.contains(" custom "));
859
+ assert!(!result.contains(" stopword "));
860
+ assert!(!result.contains(" is "));
861
+ assert!(!result.contains(" a "));
862
+ assert!(result.contains("test"));
863
+ }
864
+
865
+ #[test]
866
+ fn test_preserve_patterns_empty() {
867
+ let config = TokenReductionConfig {
868
+ preserve_patterns: vec![],
869
+ ..Default::default()
870
+ };
871
+
872
+ let config = Arc::new(config);
873
+ let pipeline = FilterPipeline::new(&config, "en").unwrap();
874
+
875
+ let input = "The quick brown fox";
876
+ let result = pipeline.remove_stopwords(input);
877
+
878
+ assert!(!result.contains(" The "));
879
+ assert!(result.contains("quick"));
880
+ }
881
+
882
+ #[test]
883
+ fn test_invalid_preserve_pattern() {
884
+ let config = TokenReductionConfig {
885
+ preserve_patterns: vec!["[invalid".to_string()],
886
+ ..Default::default()
887
+ };
888
+
889
+ let config = Arc::new(config);
890
+ let result = FilterPipeline::new(&config, "en");
891
+
892
+ assert!(result.is_err());
893
+ if let Err(e) = result {
894
+ match e {
895
+ KreuzbergError::Validation { message, .. } => {
896
+ assert!(message.contains("Invalid regex pattern"));
897
+ }
898
+ _ => panic!("Expected ValidationError"),
899
+ }
900
+ }
901
+ }
902
+ }
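For completeness, a sketch combining the two configuration knobs these tests exercise together: custom stopwords merged into the built-in list, and preserve patterns that bypass removal. The field names are those shown in this diff; the `build_pipeline` wrapper and the example stopwords are illustrative only:

```rust
use std::{collections::HashMap, sync::Arc};

// Hypothetical wiring of the config fields exercised by the tests above.
fn build_pipeline() -> Result<FilterPipeline> {
    let mut custom_stopwords = HashMap::new();
    custom_stopwords.insert("en".to_string(), vec!["lorem".to_string(), "ipsum".to_string()]);

    let config = Arc::new(TokenReductionConfig {
        // Merged (lowercased) into the built-in English stopword set.
        custom_stopwords: Some(custom_stopwords),
        // Any word matching one of these regexes is kept unconditionally.
        preserve_patterns: vec![r"\b[A-Z]{2,}\b".to_string()],
        ..Default::default()
    });

    // An invalid pattern surfaces here as KreuzbergError::Validation.
    FilterPipeline::new(&config, "en")
}
```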