kreuzberg 4.0.0.rc1 → 4.0.0.rc2

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (342)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -8
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -534
  5. data/.rubocop.yml +538 -0
  6. data/Gemfile +8 -9
  7. data/Gemfile.lock +9 -109
  8. data/README.md +426 -421
  9. data/Rakefile +25 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -340
  12. data/ext/kreuzberg_rb/extconf.rb +45 -35
  13. data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
  14. data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
  15. data/ext/kreuzberg_rb/native/README.md +425 -425
  16. data/ext/kreuzberg_rb/native/build.rs +15 -17
  17. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  18. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  19. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  20. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  21. data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
  22. data/extconf.rb +28 -28
  23. data/kreuzberg.gemspec +148 -105
  24. data/lib/kreuzberg/api_proxy.rb +142 -142
  25. data/lib/kreuzberg/cache_api.rb +46 -45
  26. data/lib/kreuzberg/cli.rb +55 -55
  27. data/lib/kreuzberg/cli_proxy.rb +127 -127
  28. data/lib/kreuzberg/config.rb +691 -684
  29. data/lib/kreuzberg/error_context.rb +32 -0
  30. data/lib/kreuzberg/errors.rb +118 -50
  31. data/lib/kreuzberg/extraction_api.rb +85 -84
  32. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  33. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  34. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  35. data/lib/kreuzberg/result.rb +216 -216
  36. data/lib/kreuzberg/setup_lib_path.rb +80 -79
  37. data/lib/kreuzberg/validator_protocol.rb +89 -89
  38. data/lib/kreuzberg/version.rb +5 -5
  39. data/lib/kreuzberg.rb +103 -82
  40. data/sig/kreuzberg/internal.rbs +184 -184
  41. data/sig/kreuzberg.rbs +520 -468
  42. data/spec/binding/cache_spec.rb +227 -227
  43. data/spec/binding/cli_proxy_spec.rb +85 -87
  44. data/spec/binding/cli_spec.rb +55 -54
  45. data/spec/binding/config_spec.rb +345 -345
  46. data/spec/binding/config_validation_spec.rb +283 -283
  47. data/spec/binding/error_handling_spec.rb +213 -213
  48. data/spec/binding/errors_spec.rb +66 -66
  49. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  50. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  51. data/spec/binding/plugins/validator_spec.rb +274 -274
  52. data/spec/fixtures/config.toml +39 -39
  53. data/spec/fixtures/config.yaml +41 -42
  54. data/spec/fixtures/invalid_config.toml +4 -4
  55. data/spec/smoke/package_spec.rb +178 -178
  56. data/spec/spec_helper.rb +42 -42
  57. data/vendor/kreuzberg/Cargo.toml +204 -134
  58. data/vendor/kreuzberg/README.md +175 -175
  59. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
  60. data/vendor/kreuzberg/build.rs +474 -460
  61. data/vendor/kreuzberg/src/api/error.rs +81 -81
  62. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  63. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  64. data/vendor/kreuzberg/src/api/server.rs +353 -353
  65. data/vendor/kreuzberg/src/api/types.rs +170 -170
  66. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
  67. data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
  68. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
  69. data/vendor/kreuzberg/src/core/config.rs +1032 -1032
  70. data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
  71. data/vendor/kreuzberg/src/core/io.rs +329 -327
  72. data/vendor/kreuzberg/src/core/mime.rs +605 -615
  73. data/vendor/kreuzberg/src/core/mod.rs +45 -42
  74. data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
  75. data/vendor/kreuzberg/src/embeddings.rs +432 -323
  76. data/vendor/kreuzberg/src/error.rs +431 -431
  77. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  78. data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
  79. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  80. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  81. data/vendor/kreuzberg/src/extraction/html.rs +553 -553
  82. data/vendor/kreuzberg/src/extraction/image.rs +368 -368
  83. data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
  84. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
  85. data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
  86. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  87. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  88. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  89. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
  90. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
  91. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
  92. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  93. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  94. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  95. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  96. data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
  97. data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
  98. data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
  99. data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
  100. data/vendor/kreuzberg/src/extractors/email.rs +143 -129
  101. data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
  103. data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
  104. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
  105. data/vendor/kreuzberg/src/extractors/html.rs +393 -410
  106. data/vendor/kreuzberg/src/extractors/image.rs +198 -195
  107. data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
  108. data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
  109. data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
  110. data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
  111. data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
  112. data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
  113. data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
  114. data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
  115. data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
  116. data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
  117. data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
  118. data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
  119. data/vendor/kreuzberg/src/extractors/security.rs +484 -0
  120. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
  121. data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
  122. data/vendor/kreuzberg/src/extractors/text.rs +260 -242
  123. data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
  124. data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
  125. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  126. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  127. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  128. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  129. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  130. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  131. data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
  132. data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
  133. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  134. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  135. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
  136. data/vendor/kreuzberg/src/lib.rs +105 -102
  137. data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
  138. data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
  139. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  140. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  141. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  142. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  143. data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
  144. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  145. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  146. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
  147. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  148. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  149. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  150. data/vendor/kreuzberg/src/panic_context.rs +154 -0
  151. data/vendor/kreuzberg/src/pdf/error.rs +122 -122
  152. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  153. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
  154. data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
  155. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  156. data/vendor/kreuzberg/src/pdf/table.rs +393 -420
  157. data/vendor/kreuzberg/src/pdf/text.rs +158 -161
  158. data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
  159. data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
  160. data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
  161. data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
  162. data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
  163. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  164. data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
  165. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  166. data/vendor/kreuzberg/src/text/mod.rs +19 -19
  167. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  168. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  169. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  170. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  171. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  172. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  173. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  174. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  175. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  176. data/vendor/kreuzberg/src/types.rs +903 -873
  177. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  178. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  179. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  180. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  181. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  182. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  183. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  184. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  185. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  186. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  187. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  188. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  189. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  190. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  191. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  192. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  193. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  194. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  195. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  196. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  198. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  199. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  200. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  201. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  202. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  203. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  204. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  205. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  206. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  207. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  208. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  209. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  210. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  211. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  212. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  213. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  214. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  215. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  216. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  217. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  218. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  219. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  220. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  221. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  222. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  223. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  224. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  225. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  226. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  227. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  228. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  229. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  230. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  231. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  232. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  233. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  234. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  235. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  236. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  237. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  238. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  239. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  240. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  241. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  242. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  243. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  244. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
  245. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  246. data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
  247. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
  248. data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
  249. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
  250. data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
  251. data/vendor/kreuzberg/tests/config_features.rs +598 -580
  252. data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
  253. data/vendor/kreuzberg/tests/core_integration.rs +510 -493
  254. data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
  255. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
  256. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
  257. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
  258. data/vendor/kreuzberg/tests/email_integration.rs +325 -325
  259. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
  260. data/vendor/kreuzberg/tests/error_handling.rs +393 -393
  261. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
  262. data/vendor/kreuzberg/tests/format_integration.rs +159 -159
  263. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  264. data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
  265. data/vendor/kreuzberg/tests/image_integration.rs +253 -253
  266. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
  267. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
  268. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
  269. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  270. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  271. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
  272. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
  273. data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
  274. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
  275. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
  276. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
  277. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  278. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
  279. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
  280. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
  281. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
  282. data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
  283. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
  284. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
  285. data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
  286. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  287. data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
  288. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
  289. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
  290. data/vendor/kreuzberg/tests/security_validation.rs +415 -404
  291. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  292. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
  293. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
  294. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
  295. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  296. data/vendor/rb-sys/.cargo-ok +1 -0
  297. data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
  298. data/vendor/rb-sys/Cargo.lock +393 -0
  299. data/vendor/rb-sys/Cargo.toml +70 -0
  300. data/vendor/rb-sys/Cargo.toml.orig +57 -0
  301. data/vendor/rb-sys/LICENSE-APACHE +190 -0
  302. data/vendor/rb-sys/LICENSE-MIT +21 -0
  303. data/vendor/rb-sys/bin/release.sh +21 -0
  304. data/vendor/rb-sys/build/features.rs +108 -0
  305. data/vendor/rb-sys/build/main.rs +246 -0
  306. data/vendor/rb-sys/build/stable_api_config.rs +153 -0
  307. data/vendor/rb-sys/build/version.rs +48 -0
  308. data/vendor/rb-sys/readme.md +36 -0
  309. data/vendor/rb-sys/src/bindings.rs +21 -0
  310. data/vendor/rb-sys/src/hidden.rs +11 -0
  311. data/vendor/rb-sys/src/lib.rs +34 -0
  312. data/vendor/rb-sys/src/macros.rs +371 -0
  313. data/vendor/rb-sys/src/memory.rs +53 -0
  314. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
  315. data/vendor/rb-sys/src/special_consts.rs +31 -0
  316. data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
  317. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
  318. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
  319. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
  320. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
  321. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
  322. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
  323. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
  324. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
  325. data/vendor/rb-sys/src/stable_api.rs +261 -0
  326. data/vendor/rb-sys/src/symbol.rs +31 -0
  327. data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
  328. data/vendor/rb-sys/src/utils.rs +89 -0
  329. data/vendor/rb-sys/src/value_type.rs +7 -0
  330. metadata +90 -95
  331. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  332. data/spec/examples.txt +0 -104
  333. data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
  334. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
  335. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
  336. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
  337. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
  338. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
  339. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
  340. data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
  341. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
  342. data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
@@ -1,888 +1,888 @@
- //! Integration tests for stopwords with token reduction and keywords extraction.
- #![cfg(all(feature = "stopwords", feature = "quality"))]
- //!
- //! These tests verify that stopwords are properly integrated across different features:
- //! - Token reduction at all ReductionLevels
- //! - Keywords extraction (YAKE and RAKE algorithms)
- //! - CJK text processing
- //! - Multi-language documents
- //! - Language fallback mechanisms
- //! - Custom stopwords
-
- use kreuzberg::stopwords::{STOPWORDS, get_stopwords, get_stopwords_with_fallback};
- use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
-
- #[cfg(any(feature = "keywords-yake", feature = "keywords-rake"))]
- use kreuzberg::keywords::{KeywordConfig, extract_keywords};
-
- use std::collections::HashMap;
-
- fn count_stopwords(text: &str, lang: &str) -> usize {
-     let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
-     let words: Vec<&str> = text.split_whitespace().collect();
-
-     words
-         .iter()
-         .filter(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-
-             !clean.is_empty() && stopwords.contains(&clean)
-         })
-         .count()
- }
-
- fn extract_content_words(text: &str, lang: &str) -> Vec<String> {
-     let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
-     let words: Vec<&str> = text.split_whitespace().collect();
-
-     words
-         .iter()
-         .filter_map(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-
-             if !clean.is_empty() && !stopwords.contains(&clean) && clean.len() > 1 {
-                 Some(clean)
-             } else {
-                 None
-             }
-         })
-         .collect()
- }
-
- #[test]
- fn test_stopwords_removed_during_moderate_token_reduction() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         language_hint: Some("en".to_string()),
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(!result.contains(" the "), "Should remove 'the'. Result: {}", result);
-     assert!(!result.contains(" is "), "Should remove 'is'. Result: {}", result);
-     assert!(!result.contains(" and "), "Should remove 'and'. Result: {}", result);
-
-     assert!(result.contains("quick"), "Should preserve 'quick'. Result: {}", result);
-     assert!(result.contains("brown"), "Should preserve 'brown'. Result: {}", result);
-     assert!(result.contains("fox"), "Should preserve 'fox'. Result: {}", result);
-     assert!(
-         result.contains("jumping"),
-         "Should preserve 'jumping'. Result: {}",
-         result
-     );
-     assert!(result.contains("lazy"), "Should preserve 'lazy'. Result: {}", result);
-
-     let original_stopwords = count_stopwords(input, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "Result should have fewer stopwords than original. Original: {}, Result: {}",
-         original_stopwords,
-         result_stopwords
-     );
- }
-
- #[test]
- fn test_stopwords_across_reduction_levels() {
-     let text = "The machine learning model is trained on the large dataset and achieves good performance";
-
-     let light_config = TokenReductionConfig {
-         level: ReductionLevel::Light,
-         use_simd: false,
-         ..Default::default()
-     };
-     let light_result = reduce_tokens(text, &light_config, Some("en")).unwrap();
-
-     let light_stopwords = count_stopwords(&light_result, "en");
-     assert!(light_stopwords > 0, "Light reduction should preserve some stopwords");
-
-     let moderate_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let moderate_result = reduce_tokens(text, &moderate_config, Some("en")).unwrap();
-
-     let moderate_stopwords = count_stopwords(&moderate_result, "en");
-     assert!(
-         moderate_stopwords < light_stopwords,
-         "Moderate reduction should remove more stopwords than light. Light: {}, Moderate: {}",
-         light_stopwords,
-         moderate_stopwords
-     );
-
-     let aggressive_config = TokenReductionConfig {
-         level: ReductionLevel::Aggressive,
-         use_simd: false,
-         ..Default::default()
-     };
-     let aggressive_result = reduce_tokens(text, &aggressive_config, Some("en")).unwrap();
-
-     assert!(
-         aggressive_result.len() <= moderate_result.len(),
-         "Aggressive reduction should be more aggressive than moderate"
-     );
- }
-
- #[test]
- fn test_stopwords_preserve_semantic_meaning() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input =
-         "The artificial intelligence system is processing the natural language text for extracting meaningful insights";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     let content_words = extract_content_words(&result, "en");
-
-     assert!(
-         content_words.contains(&"artificial".to_string()) || result.contains("artificial"),
-         "Should preserve 'artificial'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"intelligence".to_string()) || result.contains("intelligence"),
-         "Should preserve 'intelligence'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"processing".to_string()) || result.contains("processing"),
-         "Should preserve 'processing'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"natural".to_string()) || result.contains("natural"),
-         "Should preserve 'natural'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"language".to_string()) || result.contains("language"),
-         "Should preserve 'language'. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_multiple_languages() {
-     let en_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let en_input = "The computer science program is very comprehensive and includes many courses";
-     let en_result = reduce_tokens(en_input, &en_config, Some("en")).unwrap();
-
-     let en_original_stopwords = count_stopwords(en_input, "en");
-     let en_result_stopwords = count_stopwords(&en_result, "en");
-     assert!(
-         en_result_stopwords < en_original_stopwords,
-         "English stopwords should be removed"
-     );
-
-     let es_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let es_input = "El programa de ciencias de la computación es muy completo y tiene muchos cursos";
-     let es_result = reduce_tokens(es_input, &es_config, Some("es")).unwrap();
-
-     let es_original_stopwords = count_stopwords(es_input, "es");
-     let es_result_stopwords = count_stopwords(&es_result, "es");
-     assert!(
-         es_result_stopwords < es_original_stopwords,
-         "Spanish stopwords should be removed"
-     );
-
-     assert!(
-         es_result.contains("programa") || es_result.contains("ciencias") || es_result.contains("computación"),
-         "Should preserve Spanish content words. Result: {}",
-         es_result
-     );
-
-     let de_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let de_input = "Die künstliche Intelligenz ist ein wichtiges Forschungsgebiet der Informatik";
-     let de_result = reduce_tokens(de_input, &de_config, Some("de")).unwrap();
-
-     let de_original_stopwords = count_stopwords(de_input, "de");
-     let de_result_stopwords = count_stopwords(&de_result, "de");
-     assert!(
-         de_result_stopwords < de_original_stopwords,
-         "German stopwords should be removed"
-     );
- }
-
- #[test]
- fn test_language_fallback_to_english_stopwords() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The system is processing the data with the algorithm";
-     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
-
-     let original_stopwords = count_stopwords(input, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "Should fallback to English stopwords for unsupported language"
-     );
- }
-
- #[test]
- fn test_custom_stopwords_integration() {
-     let mut custom_stopwords = HashMap::new();
-     custom_stopwords.insert(
-         "en".to_string(),
-         vec!["algorithm".to_string(), "system".to_string(), "data".to_string()],
-     );
-
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         custom_stopwords: Some(custom_stopwords),
-         ..Default::default()
-     };
-
-     let input = "The algorithm processes the data in the system efficiently";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         !result.contains("algorithm"),
-         "Should remove custom stopword 'algorithm'. Result: {}",
-         result
-     );
-     assert!(
-         !result.contains("system"),
-         "Should remove custom stopword 'system'. Result: {}",
-         result
-     );
-     assert!(
-         !result.contains("data"),
-         "Should remove custom stopword 'data'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("processes") || result.contains("efficiently"),
-         "Should preserve non-stopword content. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_chinese_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "这个人工智能系统可以处理自然语言";
-     let result = reduce_tokens(input, &config, Some("zh")).unwrap();
-
-     assert!(
-         !result.is_empty(),
-         "Chinese text should be processed. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("人工") || result.contains("智能") || result.contains("语言"),
-         "Should preserve important Chinese terms. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_mixed_cjk_english() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The machine learning model 机器学习模型 is processing data efficiently";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         !result.contains(" the ") && !result.contains("The "),
-         "Should remove English 'the'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("machine") || result.contains("learning"),
-         "Should preserve English content. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("机器") || result.contains("学习") || result.contains("模型"),
-         "Should preserve Chinese content. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_japanese_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "人工知能技術の研究開発";
-     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
-
-     assert!(
-         !result.is_empty(),
-         "Japanese text should be processed. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_korean_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "인공 지능 기술 개발";
-     let result = reduce_tokens(input, &config, Some("ko")).unwrap();
-
-     assert!(
-         !result.is_empty(),
-         "Korean text should be processed. Result: {}",
-         result
-     );
- }
-
- #[cfg(feature = "keywords-rake")]
- #[test]
- fn test_stopwords_excluded_from_rake_keywords() {
-     let text = "The machine learning model is trained on a large dataset. \
-                 The model uses neural networks and deep learning algorithms. \
-                 The training process requires significant computational resources.";
-
-     let config = KeywordConfig::rake().with_language("en").with_max_keywords(10);
-
-     let keywords = extract_keywords(text, &config).unwrap();
-
-     assert!(!keywords.is_empty(), "Should extract keywords");
-
-     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
-
-     for keyword in &keywords {
-         let words: Vec<&str> = keyword.text.split_whitespace().collect();
-
-         let all_stopwords = words.iter().all(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-             en_stopwords.contains(&clean)
-         });
-
-         assert!(
-             !all_stopwords,
-             "Keyword '{}' should not be composed entirely of stopwords",
-             keyword.text
-         );
-     }
-
-     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
-
-     assert!(
-         keyword_texts.iter().any(|k| k.contains("machine learning")
-             || k.contains("neural networks")
-             || k.contains("deep learning")
-             || k.contains("dataset")
-             || k.contains("model")
-             || k.contains("training")),
-         "Should extract meaningful technical keywords. Got: {:?}",
-         keyword_texts
-     );
- }
-
- #[cfg(feature = "keywords-yake")]
- #[test]
- fn test_stopwords_excluded_from_yake_keywords() {
-     let text = "Natural language processing enables computers to understand human language. \
-                 Deep learning models achieve state-of-the-art performance in text analysis. \
-                 These systems can extract meaningful information from large text corpora.";
-
-     let config = KeywordConfig::yake().with_language("en").with_max_keywords(10);
-
-     let keywords = extract_keywords(text, &config).unwrap();
-
-     assert!(!keywords.is_empty(), "Should extract keywords");
-
-     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
-
-     for keyword in &keywords {
-         let has_content_word = keyword.text.split_whitespace().any(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-             !clean.is_empty() && !en_stopwords.contains(&clean)
-         });
-
-         assert!(
-             has_content_word,
-             "Keyword '{}' should contain at least one content word (non-stopword)",
-             keyword.text
-         );
-     }
- }
-
- #[cfg(feature = "keywords-rake")]
- #[test]
- fn test_keywords_respect_language_specific_stopwords() {
-     let spanish_text = "El aprendizaje automático es una rama de la inteligencia artificial. \
-                         Los modelos de aprendizaje profundo logran un rendimiento excepcional. \
-                         Estos sistemas pueden procesar grandes cantidades de datos.";
-
-     let config = KeywordConfig::rake().with_language("es").with_max_keywords(8);
-
-     let keywords = extract_keywords(spanish_text, &config).unwrap();
-
-     assert!(!keywords.is_empty(), "Should extract Spanish keywords");
-
-     let es_stopwords = get_stopwords("es").expect("Spanish stopwords must exist");
-
-     for keyword in &keywords {
-         let words: Vec<&str> = keyword.text.split_whitespace().collect();
-         let all_stopwords = words.iter().all(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-             es_stopwords.contains(&clean)
-         });
-
-         assert!(
-             !all_stopwords,
-             "Spanish keyword '{}' should not be all stopwords",
-             keyword.text
-         );
-     }
-
-     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
-     assert!(
-         keyword_texts.iter().any(|k| k.contains("aprendizaje")
-             || k.contains("inteligencia")
-             || k.contains("modelos")
-             || k.contains("datos")),
-         "Should extract meaningful Spanish keywords. Got: {:?}",
-         keyword_texts
-     );
- }
-
- #[test]
- fn test_all_stopwords_text_reduction() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "the is a an and or but of to in for on at by";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         result.len() < input.len(),
-         "Text of all stopwords should be significantly reduced"
-     );
- }
-
- #[test]
- fn test_no_stopwords_text_reduction() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "PyTorch TensorFlow CUDA GPU optimization benchmark performance metrics";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     let input_words: Vec<&str> = input.split_whitespace().collect();
-     let result_lower = result.to_lowercase();
-
-     for word in input_words {
-         let word_lower = word.to_lowercase();
-         assert!(
-             result_lower.contains(&word_lower),
-             "Technical term '{}' should be preserved. Result: {}",
-             word,
-             result
-         );
-     }
- }
-
- #[test]
- fn test_mixed_case_stopwords_removal() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The SYSTEM Is Processing The DATA With The ALGORITHM";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     let result_words: Vec<&str> = result.split_whitespace().collect();
-     assert!(
-         !result_words.contains(&"the"),
-         "Should remove lowercase 'the'. Result: {}",
-         result
-     );
-     assert!(
-         !result_words.contains(&"is"),
-         "Should remove lowercase 'is'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("SYSTEM"),
-         "Should preserve 'SYSTEM'. Result: {}",
-         result
-     );
-     assert!(result.contains("DATA"), "Should preserve 'DATA'. Result: {}", result);
-     assert!(
-         result.contains("ALGORITHM"),
-         "Should preserve 'ALGORITHM'. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_reduce_tokens_function_with_stopwords() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let text = "The artificial intelligence system processes the natural language efficiently";
-     let result = reduce_tokens(text, &config, Some("en")).unwrap();
-
-     let original_stopwords = count_stopwords(text, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "reduce_tokens should remove stopwords. Original: {}, Result: {}",
-         original_stopwords,
-         result_stopwords
-     );
-
-     assert!(
-         result.contains("artificial") || result.contains("intelligence"),
-         "Should preserve content words. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_punctuation() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The system, which is processing the data, uses the algorithm.";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         !result.contains(" the ") || result.split_whitespace().filter(|w| w.contains("the")).count() < 3,
-         "Should remove most instances of 'the'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("system") || result.contains("processing") || result.contains("algorithm"),
-         "Should preserve content words. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_numbers() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The model has 100 layers and processes the data in 10 seconds";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         result.contains("100"),
-         "Should preserve number '100'. Result: {}",
-         result
-     );
-     assert!(result.contains("10"), "Should preserve number '10'. Result: {}", result);
-
-     assert!(
-         result.contains("model") || result.contains("layers") || result.contains("processes"),
-         "Should preserve content words. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_removal_consistency_across_calls() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The machine learning model is trained on the dataset";
-
-     let result1 = reduce_tokens(input, &config, Some("en")).unwrap();
-     let result2 = reduce_tokens(input, &config, Some("en")).unwrap();
-     let result3 = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert_eq!(result1, result2, "Results should be consistent across calls");
-     assert_eq!(result2, result3, "Results should be consistent across calls");
- }
-
- #[test]
- fn test_stopwords_with_long_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         enable_parallel: false,
-         ..Default::default()
-     };
-
-     let paragraph = "The machine learning model is trained on the large dataset. \
-                      The training process uses the neural network architecture. \
-                      The system processes the data efficiently and achieves the best performance. ";
-     let input = paragraph.repeat(10);
-
-     let result = reduce_tokens(&input, &config, Some("en")).unwrap();
-
-     assert!(
-         result.len() < input.len(),
-         "Long stopword-heavy text should be reduced. Input: {} chars, Result: {} chars",
-         input.len(),
-         result.len()
-     );
-
-     let original_stopwords = count_stopwords(&input, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "Should remove stopwords from long text. Original: {}, Result: {}",
-         original_stopwords,
-         result_stopwords
-     );
- }
-
- #[test]
- fn test_get_stopwords_with_fallback_in_reduction() {
-     let primary_stopwords = get_stopwords_with_fallback("xyz", "en");
-     assert!(primary_stopwords.is_some(), "Should fallback to English");
-
-     let en_stopwords = get_stopwords("en").unwrap();
-     assert_eq!(
-         primary_stopwords.unwrap().len(),
-         en_stopwords.len(),
-         "Fallback should return English stopwords"
-     );
-
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The system is processing the data";
-     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
-
-     assert!(
-         !result.contains(" the ") && !result.contains(" is "),
-         "Should use fallback stopwords. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_registry_completeness() {
-     assert_eq!(STOPWORDS.len(), 64, "Should have exactly 64 language stopword sets");
-
-     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
-     assert!(en_stopwords.len() >= 70, "English should have at least 70 stopwords");
-
-     assert!(en_stopwords.contains("the"), "Should contain 'the'");
-     assert!(en_stopwords.contains("is"), "Should contain 'is'");
-     assert!(en_stopwords.contains("and"), "Should contain 'and'");
-     assert!(en_stopwords.contains("a"), "Should contain 'a'");
-     assert!(en_stopwords.contains("an"), "Should contain 'an'");
-     assert!(en_stopwords.contains("of"), "Should contain 'of'");
-     assert!(en_stopwords.contains("to"), "Should contain 'to'");
-     assert!(en_stopwords.contains("in"), "Should contain 'in'");
-     assert!(en_stopwords.contains("for"), "Should contain 'for'");
- }
-
- #[test]
- fn test_token_reduction_handles_nan_threshold() {
-     let mut config = TokenReductionConfig {
-         level: ReductionLevel::Maximum,
-         semantic_threshold: f32::NAN,
-         enable_semantic_clustering: true,
-         target_reduction: Some(0.5),
-         ..Default::default()
-     };
-
-     config.language_hint = Some("en".to_string());
-     let input = "Critical system update highlights performance improvements across distributed modules.";
-
-     let result = reduce_tokens(input, &config, Some("en")).unwrap_or_else(|_| String::new());
-     assert!(
-         result.chars().all(|c| !c.is_control()),
-         "Result should not contain unexpected control characters"
-     );
- }
-
- #[test]
- fn test_token_reduction_handles_multibyte_utf8() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         language_hint: Some("ja".to_string()),
-         ..Default::default()
-     };
-
-     let input = "品質管理は重要です。🚀 高速抽出と漢字処理が求められています。";
-     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
-
-     assert!(
-         result.contains("品質管理") || result.contains("漢字処理"),
-         "Important multibyte terms should survive reduction: {}",
-         result
-     );
- }
-
- #[test]
- fn test_token_reduction_concurrent_access() {
-     use std::sync::Arc;
-
-     let config = Arc::new(TokenReductionConfig {
-         level: ReductionLevel::Aggressive,
-         enable_parallel: true,
-         ..Default::default()
-     });
-
-     let input = "Concurrent reduction ensures thread safety without deadlocks or panics.";
-
-     std::thread::scope(|scope| {
-         for _ in 0..8 {
-             let cfg = Arc::clone(&config);
-             scope.spawn(move || {
-                 let reduced = reduce_tokens(input, &cfg, Some("en")).unwrap();
-                 assert!(!reduced.is_empty());
-             });
-         }
-     });
- }
- #[test]
- fn demo_stopwords_effectiveness() {
-     use kreuzberg::stopwords::get_stopwords;
-     use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
-
-     let en_text = "The machine learning model is trained on the large dataset and achieves good performance";
-     let en_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let en_result = reduce_tokens(en_text, &en_config, Some("en")).unwrap();
-
-     println!("\n=== English Example ===");
-     println!("BEFORE: {} chars", en_text.len());
-     println!("{}", en_text);
-     println!(
-         "\nAFTER: {} chars ({}% reduction)",
-         en_result.len(),
-         100 - (en_result.len() * 100 / en_text.len())
-     );
-     println!("{}", en_result);
-
-     let zh_text = "这个人工智能系统可以处理自然语言";
-     let zh_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let zh_result = reduce_tokens(zh_text, &zh_config, Some("zh")).unwrap();
-
-     println!("\n=== Chinese Example ===");
-     println!("BEFORE: {}", zh_text);
-     println!("AFTER: {}", zh_result);
-
-     let text = "The artificial intelligence system processes the natural language efficiently";
-
-     println!("\n=== Reduction Level Comparison ===");
-     println!("ORIGINAL: {}", text);
-
-     for level in [
-         ReductionLevel::Light,
-         ReductionLevel::Moderate,
-         ReductionLevel::Aggressive,
-     ] {
-         let config = TokenReductionConfig {
-             level,
-             use_simd: false,
-             ..Default::default()
-         };
-         let result = reduce_tokens(text, &config, Some("en")).unwrap();
-         println!(
-             "{:?}: {} chars -> {} chars ({}% reduction)",
-             level,
-             text.len(),
-             result.len(),
-             100 - (result.len() * 100 / text.len())
-         );
-         println!("  {}", result);
-     }
-
-     let stopwords = get_stopwords("en").unwrap();
-     println!("\n=== Stopwords Stats ===");
-     println!("English stopwords: {}", stopwords.len());
-     println!("Sample stopwords: {:?}", stopwords.iter().take(10).collect::<Vec<_>>());
- }
1
+ //! Integration tests for stopwords with token reduction and keywords extraction.
2
+ #![cfg(all(feature = "stopwords", feature = "quality"))]
3
+ //!
4
+ //! These tests verify that stopwords are properly integrated across different features:
5
+ //! - Token reduction at all ReductionLevels
6
+ //! - Keywords extraction (YAKE and RAKE algorithms)
7
+ //! - CJK text processing
8
+ //! - Multi-language documents
9
+ //! - Language fallback mechanisms
10
+ //! - Custom stopwords
11
+
12
+ use kreuzberg::stopwords::{STOPWORDS, get_stopwords, get_stopwords_with_fallback};
13
+ use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
14
+
15
+ #[cfg(any(feature = "keywords-yake", feature = "keywords-rake"))]
16
+ use kreuzberg::keywords::{KeywordConfig, extract_keywords};
17
+
18
+ use std::collections::HashMap;
19
+
20
+ fn count_stopwords(text: &str, lang: &str) -> usize {
21
+ let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
22
+ let words: Vec<&str> = text.split_whitespace().collect();
23
+
24
+ words
25
+ .iter()
26
+ .filter(|word| {
27
+ let clean = word
28
+ .chars()
29
+ .filter(|c| c.is_alphabetic())
30
+ .collect::<String>()
31
+ .to_lowercase();
32
+
33
+ !clean.is_empty() && stopwords.contains(&clean)
34
+ })
35
+ .count()
36
+ }
37
+
38
+ fn extract_content_words(text: &str, lang: &str) -> Vec<String> {
39
+ let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
40
+ let words: Vec<&str> = text.split_whitespace().collect();
41
+
42
+ words
43
+ .iter()
44
+ .filter_map(|word| {
45
+ let clean = word
46
+ .chars()
47
+ .filter(|c| c.is_alphabetic())
48
+ .collect::<String>()
49
+ .to_lowercase();
50
+
51
+ if !clean.is_empty() && !stopwords.contains(&clean) && clean.len() > 1 {
52
+ Some(clean)
53
+ } else {
54
+ None
55
+ }
56
+ })
57
+ .collect()
58
+ }
59
+
60
+ #[test]
61
+ fn test_stopwords_removed_during_moderate_token_reduction() {
62
+ let config = TokenReductionConfig {
63
+ level: ReductionLevel::Moderate,
64
+ language_hint: Some("en".to_string()),
65
+ use_simd: false,
66
+ ..Default::default()
67
+ };
68
+
69
+ let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
70
+ let result = reduce_tokens(input, &config, Some("en")).unwrap();
71
+
72
+ assert!(!result.contains(" the "), "Should remove 'the'. Result: {}", result);
73
+ assert!(!result.contains(" is "), "Should remove 'is'. Result: {}", result);
74
+ assert!(!result.contains(" and "), "Should remove 'and'. Result: {}", result);
75
+
76
+ assert!(result.contains("quick"), "Should preserve 'quick'. Result: {}", result);
77
+ assert!(result.contains("brown"), "Should preserve 'brown'. Result: {}", result);
78
+ assert!(result.contains("fox"), "Should preserve 'fox'. Result: {}", result);
79
+ assert!(
80
+ result.contains("jumping"),
81
+ "Should preserve 'jumping'. Result: {}",
82
+ result
83
+ );
84
+ assert!(result.contains("lazy"), "Should preserve 'lazy'. Result: {}", result);
85
+
86
+ let original_stopwords = count_stopwords(input, "en");
87
+ let result_stopwords = count_stopwords(&result, "en");
88
+
89
+ assert!(
90
+ result_stopwords < original_stopwords,
91
+ "Result should have fewer stopwords than original. Original: {}, Result: {}",
92
+ original_stopwords,
93
+ result_stopwords
94
+ );
95
+ }
96
+ #[test]
+ fn test_stopwords_across_reduction_levels() {
+     let text = "The machine learning model is trained on the large dataset and achieves good performance";
+
+     let light_config = TokenReductionConfig {
+         level: ReductionLevel::Light,
+         use_simd: false,
+         ..Default::default()
+     };
+     let light_result = reduce_tokens(text, &light_config, Some("en")).unwrap();
+
+     let light_stopwords = count_stopwords(&light_result, "en");
+     assert!(light_stopwords > 0, "Light reduction should preserve some stopwords");
+
+     let moderate_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let moderate_result = reduce_tokens(text, &moderate_config, Some("en")).unwrap();
+
+     let moderate_stopwords = count_stopwords(&moderate_result, "en");
+     assert!(
+         moderate_stopwords < light_stopwords,
+         "Moderate reduction should remove more stopwords than light. Light: {}, Moderate: {}",
+         light_stopwords,
+         moderate_stopwords
+     );
+
+     let aggressive_config = TokenReductionConfig {
+         level: ReductionLevel::Aggressive,
+         use_simd: false,
+         ..Default::default()
+     };
+     let aggressive_result = reduce_tokens(text, &aggressive_config, Some("en")).unwrap();
+
+     assert!(
+         aggressive_result.len() <= moderate_result.len(),
+         "Aggressive reduction should shorten the text at least as much as moderate"
+     );
+ }
+
+ #[test]
+ fn test_stopwords_preserve_semantic_meaning() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input =
+         "The artificial intelligence system is processing the natural language text for extracting meaningful insights";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let content_words = extract_content_words(&result, "en");
+
+     for word in ["artificial", "intelligence", "processing", "natural", "language"] {
+         assert!(
+             content_words.contains(&word.to_string()) || result.contains(word),
+             "Should preserve '{}'. Result: {}",
+             word,
+             result
+         );
+     }
+ }
+
+ #[test]
+ fn test_stopwords_with_multiple_languages() {
+     let en_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let en_input = "The computer science program is very comprehensive and includes many courses";
+     let en_result = reduce_tokens(en_input, &en_config, Some("en")).unwrap();
+
+     let en_original_stopwords = count_stopwords(en_input, "en");
+     let en_result_stopwords = count_stopwords(&en_result, "en");
+     assert!(
+         en_result_stopwords < en_original_stopwords,
+         "English stopwords should be removed"
+     );
+
+     let es_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let es_input = "El programa de ciencias de la computación es muy completo y tiene muchos cursos";
+     let es_result = reduce_tokens(es_input, &es_config, Some("es")).unwrap();
+
+     let es_original_stopwords = count_stopwords(es_input, "es");
+     let es_result_stopwords = count_stopwords(&es_result, "es");
+     assert!(
+         es_result_stopwords < es_original_stopwords,
+         "Spanish stopwords should be removed"
+     );
+
+     assert!(
+         es_result.contains("programa") || es_result.contains("ciencias") || es_result.contains("computación"),
+         "Should preserve Spanish content words. Result: {}",
+         es_result
+     );
+
+     let de_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let de_input = "Die künstliche Intelligenz ist ein wichtiges Forschungsgebiet der Informatik";
+     let de_result = reduce_tokens(de_input, &de_config, Some("de")).unwrap();
+
+     let de_original_stopwords = count_stopwords(de_input, "de");
+     let de_result_stopwords = count_stopwords(&de_result, "de");
+     assert!(
+         de_result_stopwords < de_original_stopwords,
+         "German stopwords should be removed"
+     );
+ }
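+
+ // An unrecognized language code should fall back to the English stopword
+ // list rather than fail.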
+ #[test]
+ fn test_language_fallback_to_english_stopwords() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system is processing the data with the algorithm";
+     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
+
+     let original_stopwords = count_stopwords(input, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "Should fall back to English stopwords for unsupported language"
+     );
+ }
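+
+ // `custom_stopwords` maps a language code to additional words that are
+ // removed on top of the built-in stopword set for that language.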
+ #[test]
+ fn test_custom_stopwords_integration() {
+     let mut custom_stopwords = HashMap::new();
+     custom_stopwords.insert(
+         "en".to_string(),
+         vec!["algorithm".to_string(), "system".to_string(), "data".to_string()],
+     );
+
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         custom_stopwords: Some(custom_stopwords),
+         ..Default::default()
+     };
+
+     let input = "The algorithm processes the data in the system efficiently";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     for word in ["algorithm", "system", "data"] {
+         assert!(
+             !result.contains(word),
+             "Should remove custom stopword '{}'. Result: {}",
+             word,
+             result
+         );
+     }
+
+     assert!(
+         result.contains("processes") || result.contains("efficiently"),
+         "Should preserve non-stopword content. Result: {}",
+         result
+     );
+ }
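+
+ // CJK inputs lack whitespace word boundaries, so these tests only assert
+ // non-destructive behavior: output stays non-empty and key terms survive.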
+ #[test]
+ fn test_stopwords_with_chinese_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "这个人工智能系统可以处理自然语言";
+     let result = reduce_tokens(input, &config, Some("zh")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Chinese text should be processed. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("人工") || result.contains("智能") || result.contains("语言"),
+         "Should preserve important Chinese terms. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_mixed_cjk_english() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The machine learning model 机器学习模型 is processing data efficiently";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains(" the ") && !result.contains("The "),
+         "Should remove English 'the'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("machine") || result.contains("learning"),
+         "Should preserve English content. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("机器") || result.contains("学习") || result.contains("模型"),
+         "Should preserve Chinese content. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_japanese_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "人工知能技術の研究開発";
+     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Japanese text should be processed. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_korean_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "인공 지능 기술 개발";
+     let result = reduce_tokens(input, &config, Some("ko")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Korean text should be processed. Result: {}",
+         result
+     );
+ }
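+
+ // Keyword extraction is feature-gated (`keywords-rake` / `keywords-yake`);
+ // an extracted keyword phrase should never consist entirely of stopwords.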
+ #[cfg(feature = "keywords-rake")]
+ #[test]
+ fn test_stopwords_excluded_from_rake_keywords() {
+     let text = "The machine learning model is trained on a large dataset. \
+                 The model uses neural networks and deep learning algorithms. \
+                 The training process requires significant computational resources.";
+
+     let config = KeywordConfig::rake().with_language("en").with_max_keywords(10);
+
+     let keywords = extract_keywords(text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract keywords");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+
+     for keyword in &keywords {
+         let words: Vec<&str> = keyword.text.split_whitespace().collect();
+
+         let all_stopwords = words.iter().all(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             en_stopwords.contains(&clean)
+         });
+
+         assert!(
+             !all_stopwords,
+             "Keyword '{}' should not be composed entirely of stopwords",
+             keyword.text
+         );
+     }
+
+     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
+
+     assert!(
+         keyword_texts.iter().any(|k| k.contains("machine learning")
+             || k.contains("neural networks")
+             || k.contains("deep learning")
+             || k.contains("dataset")
+             || k.contains("model")
+             || k.contains("training")),
+         "Should extract meaningful technical keywords. Got: {:?}",
+         keyword_texts
+     );
+ }
+
+ #[cfg(feature = "keywords-yake")]
+ #[test]
+ fn test_stopwords_excluded_from_yake_keywords() {
+     let text = "Natural language processing enables computers to understand human language. \
+                 Deep learning models achieve state-of-the-art performance in text analysis. \
+                 These systems can extract meaningful information from large text corpora.";
+
+     let config = KeywordConfig::yake().with_language("en").with_max_keywords(10);
+
+     let keywords = extract_keywords(text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract keywords");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+
+     for keyword in &keywords {
+         let has_content_word = keyword.text.split_whitespace().any(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             !clean.is_empty() && !en_stopwords.contains(&clean)
+         });
+
+         assert!(
+             has_content_word,
+             "Keyword '{}' should contain at least one content word (non-stopword)",
+             keyword.text
+         );
+     }
+ }
+
+ #[cfg(feature = "keywords-rake")]
+ #[test]
+ fn test_keywords_respect_language_specific_stopwords() {
+     let spanish_text = "El aprendizaje automático es una rama de la inteligencia artificial. \
+                         Los modelos de aprendizaje profundo logran un rendimiento excepcional. \
+                         Estos sistemas pueden procesar grandes cantidades de datos.";
+
+     let config = KeywordConfig::rake().with_language("es").with_max_keywords(8);
+
+     let keywords = extract_keywords(spanish_text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract Spanish keywords");
+
+     let es_stopwords = get_stopwords("es").expect("Spanish stopwords must exist");
+
+     for keyword in &keywords {
+         let words: Vec<&str> = keyword.text.split_whitespace().collect();
+         let all_stopwords = words.iter().all(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             es_stopwords.contains(&clean)
+         });
+
+         assert!(
+             !all_stopwords,
+             "Spanish keyword '{}' should not be all stopwords",
+             keyword.text
+         );
+     }
+
+     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
+     assert!(
+         keyword_texts.iter().any(|k| k.contains("aprendizaje")
+             || k.contains("inteligencia")
+             || k.contains("modelos")
+             || k.contains("datos")),
+         "Should extract meaningful Spanish keywords. Got: {:?}",
+         keyword_texts
+     );
+ }
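+
+ // Degenerate inputs: text made only of stopwords should shrink noticeably,
+ // while text containing no stopwords should pass through intact.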
+ #[test]
+ fn test_all_stopwords_text_reduction() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "the is a an and or but of to in for on at by";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.len() < input.len(),
+         "Text of all stopwords should be significantly reduced"
+     );
+ }
+
+ #[test]
+ fn test_no_stopwords_text_reduction() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "PyTorch TensorFlow CUDA GPU optimization benchmark performance metrics";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let input_words: Vec<&str> = input.split_whitespace().collect();
+     let result_lower = result.to_lowercase();
+
+     for word in input_words {
+         let word_lower = word.to_lowercase();
+         assert!(
+             result_lower.contains(&word_lower),
+             "Technical term '{}' should be preserved. Result: {}",
+             word,
+             result
+         );
+     }
+ }
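+
+ // Stopword matching should be case-insensitive, while the casing of
+ // preserved content words is left untouched.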
+ #[test]
+ fn test_mixed_case_stopwords_removal() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The SYSTEM Is Processing The DATA With The ALGORITHM";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let result_words: Vec<&str> = result.split_whitespace().collect();
+     assert!(
+         !result_words.contains(&"the"),
+         "Should remove 'the' regardless of case. Result: {}",
+         result
+     );
+     assert!(
+         !result_words.contains(&"is"),
+         "Should remove 'is' regardless of case. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("SYSTEM"),
+         "Should preserve 'SYSTEM'. Result: {}",
+         result
+     );
+     assert!(result.contains("DATA"), "Should preserve 'DATA'. Result: {}", result);
+     assert!(
+         result.contains("ALGORITHM"),
+         "Should preserve 'ALGORITHM'. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_reduce_tokens_function_with_stopwords() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let text = "The artificial intelligence system processes the natural language efficiently";
+     let result = reduce_tokens(text, &config, Some("en")).unwrap();
+
+     let original_stopwords = count_stopwords(text, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "reduce_tokens should remove stopwords. Original: {}, Result: {}",
+         original_stopwords,
+         result_stopwords
+     );
+
+     assert!(
+         result.contains("artificial") || result.contains("intelligence"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_punctuation() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system, which is processing the data, uses the algorithm.";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains(" the ") || result.split_whitespace().filter(|w| w.contains("the")).count() < 3,
+         "Should remove most instances of 'the'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("system") || result.contains("processing") || result.contains("algorithm"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_numbers() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The model has 100 layers and processes the data in 10 seconds";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.contains("100"),
+         "Should preserve number '100'. Result: {}",
+         result
+     );
+     assert!(result.contains("10"), "Should preserve number '10'. Result: {}", result);
+
+     assert!(
+         result.contains("model") || result.contains("layers") || result.contains("processes"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_removal_consistency_across_calls() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The machine learning model is trained on the dataset";
+
+     let result1 = reduce_tokens(input, &config, Some("en")).unwrap();
+     let result2 = reduce_tokens(input, &config, Some("en")).unwrap();
+     let result3 = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert_eq!(result1, result2, "Results should be consistent across calls");
+     assert_eq!(result2, result3, "Results should be consistent across calls");
+ }
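+
+ // A stopword-heavy paragraph repeated ten times exercises the sequential
+ // path (`enable_parallel: false`) on a larger input.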
+ #[test]
+ fn test_stopwords_with_long_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         enable_parallel: false,
+         ..Default::default()
+     };
+
+     let paragraph = "The machine learning model is trained on the large dataset. \
+                      The training process uses the neural network architecture. \
+                      The system processes the data efficiently and achieves the best performance. ";
+     let input = paragraph.repeat(10);
+
+     let result = reduce_tokens(&input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.len() < input.len(),
+         "Long stopword-heavy text should be reduced. Input: {} chars, Result: {} chars",
+         input.len(),
+         result.len()
+     );
+
+     let original_stopwords = count_stopwords(&input, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "Should remove stopwords from long text. Original: {}, Result: {}",
+         original_stopwords,
+         result_stopwords
+     );
+ }
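+
+ // `get_stopwords_with_fallback(primary, fallback)` should return the
+ // fallback language's set when the primary language is not in the registry.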
+ #[test]
+ fn test_get_stopwords_with_fallback_in_reduction() {
+     let primary_stopwords = get_stopwords_with_fallback("xyz", "en");
+     assert!(primary_stopwords.is_some(), "Should fall back to English");
+
+     let en_stopwords = get_stopwords("en").unwrap();
+     assert_eq!(
+         primary_stopwords.unwrap().len(),
+         en_stopwords.len(),
+         "Fallback should return English stopwords"
+     );
+
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system is processing the data";
+     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
+
+     assert!(
+         !result.contains(" the ") && !result.contains(" is "),
+         "Should use fallback stopwords. Result: {}",
+         result
+     );
+ }
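+
+ // The stopword registry is a static table covering 64 languages; spot-check
+ // the English set for the most common function words.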
+ #[test]
+ fn test_stopwords_registry_completeness() {
+     assert_eq!(STOPWORDS.len(), 64, "Should have exactly 64 language stopword sets");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+     assert!(en_stopwords.len() >= 70, "English should have at least 70 stopwords");
+
+     for word in ["the", "is", "and", "a", "an", "of", "to", "in", "for"] {
+         assert!(en_stopwords.contains(word), "Should contain '{}'", word);
+     }
+ }
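+
+ // Robustness: a NaN semantic threshold, multibyte UTF-8 (including emoji),
+ // and concurrent use from multiple threads must not panic or corrupt output.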
+ #[test]
+ fn test_token_reduction_handles_nan_threshold() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Maximum,
+         semantic_threshold: f32::NAN,
+         enable_semantic_clustering: true,
+         target_reduction: Some(0.5),
+         language_hint: Some("en".to_string()),
+         ..Default::default()
+     };
+
+     let input = "Critical system update highlights performance improvements across distributed modules.";
+
+     let result = reduce_tokens(input, &config, Some("en")).unwrap_or_default();
+     assert!(
+         result.chars().all(|c| !c.is_control()),
+         "Result should not contain unexpected control characters"
+     );
+ }
+
+ #[test]
+ fn test_token_reduction_handles_multibyte_utf8() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         language_hint: Some("ja".to_string()),
+         ..Default::default()
+     };
+
+     let input = "品質管理は重要です。🚀 高速抽出と漢字処理が求められています。";
+     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
+
+     assert!(
+         result.contains("品質管理") || result.contains("漢字処理"),
+         "Important multibyte terms should survive reduction: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_token_reduction_concurrent_access() {
+     use std::sync::Arc;
+
+     let config = Arc::new(TokenReductionConfig {
+         level: ReductionLevel::Aggressive,
+         enable_parallel: true,
+         ..Default::default()
+     });
+
+     let input = "Concurrent reduction ensures thread safety without deadlocks or panics.";
+
+     std::thread::scope(|scope| {
+         for _ in 0..8 {
+             let cfg = Arc::clone(&config);
+             scope.spawn(move || {
+                 let reduced = reduce_tokens(input, &cfg, Some("en")).unwrap();
+                 assert!(!reduced.is_empty());
+             });
+         }
+     });
+ }
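+
+ // Demonstration rather than verification: prints before/after text and
+ // reduction percentages. Run with
+ // `cargo test demo_stopwords_effectiveness -- --nocapture` to see the output.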
+ #[test]
+ fn demo_stopwords_effectiveness() {
+     use kreuzberg::stopwords::get_stopwords;
+     use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
+
+     let en_text = "The machine learning model is trained on the large dataset and achieves good performance";
+     let en_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let en_result = reduce_tokens(en_text, &en_config, Some("en")).unwrap();
+
+     println!("\n=== English Example ===");
+     println!("BEFORE: {} chars", en_text.len());
+     println!("{}", en_text);
+     println!(
+         "\nAFTER: {} chars ({}% reduction)",
+         en_result.len(),
+         100 - (en_result.len() * 100 / en_text.len())
+     );
+     println!("{}", en_result);
+
+     let zh_text = "这个人工智能系统可以处理自然语言";
+     let zh_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let zh_result = reduce_tokens(zh_text, &zh_config, Some("zh")).unwrap();
+
+     println!("\n=== Chinese Example ===");
+     println!("BEFORE: {}", zh_text);
+     println!("AFTER: {}", zh_result);
+
+     let text = "The artificial intelligence system processes the natural language efficiently";
+
+     println!("\n=== Reduction Level Comparison ===");
+     println!("ORIGINAL: {}", text);
+
+     for level in [
+         ReductionLevel::Light,
+         ReductionLevel::Moderate,
+         ReductionLevel::Aggressive,
+     ] {
+         let config = TokenReductionConfig {
+             level,
+             use_simd: false,
+             ..Default::default()
+         };
+         let result = reduce_tokens(text, &config, Some("en")).unwrap();
+         println!(
+             "{:?}: {} chars -> {} chars ({}% reduction)",
+             level,
+             text.len(),
+             result.len(),
+             100 - (result.len() * 100 / text.len())
+         );
+         println!("    {}", result);
+     }
+
+     let stopwords = get_stopwords("en").unwrap();
+     println!("\n=== Stopwords Stats ===");
+     println!("English stopwords: {}", stopwords.len());
+     println!("Sample stopwords: {:?}", stopwords.iter().take(10).collect::<Vec<_>>());
+ }