kreuzberg 4.0.0.pre.rc.11 → 4.0.0.pre.rc.13

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (369)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +538 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +2 -105
  8. data/README.md +454 -454
  9. data/Rakefile +25 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -341
  12. data/ext/kreuzberg_rb/extconf.rb +45 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
  14. data/ext/kreuzberg_rb/native/Cargo.lock +6941 -6941
  15. data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +15 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
  23. data/extconf.rb +28 -28
  24. data/kreuzberg.gemspec +214 -214
  25. data/lib/kreuzberg/api_proxy.rb +142 -142
  26. data/lib/kreuzberg/cache_api.rb +81 -81
  27. data/lib/kreuzberg/cli.rb +55 -55
  28. data/lib/kreuzberg/cli_proxy.rb +127 -127
  29. data/lib/kreuzberg/config.rb +724 -724
  30. data/lib/kreuzberg/error_context.rb +80 -80
  31. data/lib/kreuzberg/errors.rb +118 -118
  32. data/lib/kreuzberg/extraction_api.rb +340 -340
  33. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  36. data/lib/kreuzberg/result.rb +279 -279
  37. data/lib/kreuzberg/setup_lib_path.rb +80 -80
  38. data/lib/kreuzberg/validator_protocol.rb +89 -89
  39. data/lib/kreuzberg/version.rb +5 -5
  40. data/lib/kreuzberg.rb +109 -109
  41. data/lib/{libpdfium.dylib → pdfium.dll} +0 -0
  42. data/sig/kreuzberg/internal.rbs +184 -184
  43. data/sig/kreuzberg.rbs +546 -546
  44. data/spec/binding/cache_spec.rb +227 -227
  45. data/spec/binding/cli_proxy_spec.rb +85 -85
  46. data/spec/binding/cli_spec.rb +55 -55
  47. data/spec/binding/config_spec.rb +345 -345
  48. data/spec/binding/config_validation_spec.rb +283 -283
  49. data/spec/binding/error_handling_spec.rb +213 -213
  50. data/spec/binding/errors_spec.rb +66 -66
  51. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  52. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  53. data/spec/binding/plugins/validator_spec.rb +274 -274
  54. data/spec/fixtures/config.toml +39 -39
  55. data/spec/fixtures/config.yaml +41 -41
  56. data/spec/fixtures/invalid_config.toml +4 -4
  57. data/spec/smoke/package_spec.rb +178 -178
  58. data/spec/spec_helper.rb +42 -42
  59. data/vendor/Cargo.toml +2 -1
  60. data/vendor/kreuzberg/Cargo.toml +2 -2
  61. data/vendor/kreuzberg/README.md +230 -230
  62. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
  63. data/vendor/kreuzberg/build.rs +843 -843
  64. data/vendor/kreuzberg/src/api/error.rs +81 -81
  65. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  66. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  67. data/vendor/kreuzberg/src/api/server.rs +353 -353
  68. data/vendor/kreuzberg/src/api/types.rs +170 -170
  69. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  70. data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
  71. data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
  72. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  73. data/vendor/kreuzberg/src/core/config.rs +1080 -1080
  74. data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
  75. data/vendor/kreuzberg/src/core/io.rs +329 -329
  76. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  77. data/vendor/kreuzberg/src/core/mod.rs +47 -47
  78. data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
  79. data/vendor/kreuzberg/src/embeddings.rs +500 -500
  80. data/vendor/kreuzberg/src/error.rs +431 -431
  81. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  82. data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
  83. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  84. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  85. data/vendor/kreuzberg/src/extraction/html.rs +601 -601
  86. data/vendor/kreuzberg/src/extraction/image.rs +491 -491
  87. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -562
  88. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
  89. data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
  90. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  91. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  92. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  93. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  94. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
  95. data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
  96. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  97. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  98. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  99. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  100. data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
  101. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
  102. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
  103. data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
  104. data/vendor/kreuzberg/src/extractors/email.rs +157 -157
  105. data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
  106. data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
  107. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
  108. data/vendor/kreuzberg/src/extractors/html.rs +407 -407
  109. data/vendor/kreuzberg/src/extractors/image.rs +219 -219
  110. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
  111. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
  112. data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
  113. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
  114. data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
  115. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  116. data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
  117. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
  118. data/vendor/kreuzberg/src/extractors/pdf.rs +749 -722
  119. data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
  120. data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
  121. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
  122. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  123. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  124. data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
  125. data/vendor/kreuzberg/src/extractors/text.rs +265 -265
  126. data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
  127. data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
  128. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  129. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  130. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  131. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  132. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  133. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  134. data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
  135. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  136. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  137. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  138. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
  139. data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
  140. data/vendor/kreuzberg/src/lib.rs +113 -113
  141. data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
  142. data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
  143. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  144. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  145. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  146. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  147. data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
  148. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  149. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  150. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
  151. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  152. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  153. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  154. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  155. data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
  156. data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
  157. data/vendor/kreuzberg/src/pdf/error.rs +130 -130
  158. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  159. data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
  160. data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
  161. data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
  162. data/vendor/kreuzberg/src/pdf/table.rs +420 -420
  163. data/vendor/kreuzberg/src/pdf/text.rs +240 -240
  164. data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
  165. data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
  166. data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
  167. data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
  168. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
  169. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  170. data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
  171. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  172. data/vendor/kreuzberg/src/text/mod.rs +25 -25
  173. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  174. data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
  175. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  176. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  177. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  178. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  179. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  180. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  181. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  182. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  183. data/vendor/kreuzberg/src/types.rs +1055 -1055
  184. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  185. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  186. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  187. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  188. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  189. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  190. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  191. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  192. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  193. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  194. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  195. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  196. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  198. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  199. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  200. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  201. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  202. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  203. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  204. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  205. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  206. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  207. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  208. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  209. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  210. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  211. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  212. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  213. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  214. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  215. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  216. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  217. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  218. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  219. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  220. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  221. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  222. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  223. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  224. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  225. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  226. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  227. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  228. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  229. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  230. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  231. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  232. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  233. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  234. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  235. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  236. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  237. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  238. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  239. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  240. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  241. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  242. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  243. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  244. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  245. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  246. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  247. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  248. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  249. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  250. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  251. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  252. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  253. data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
  254. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
  255. data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
  256. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  257. data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
  258. data/vendor/kreuzberg/tests/config_features.rs +612 -612
  259. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
  260. data/vendor/kreuzberg/tests/core_integration.rs +510 -510
  261. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  262. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
  263. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  264. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  265. data/vendor/kreuzberg/tests/email_integration.rs +327 -327
  266. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  267. data/vendor/kreuzberg/tests/error_handling.rs +402 -402
  268. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  269. data/vendor/kreuzberg/tests/format_integration.rs +164 -164
  270. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  271. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  272. data/vendor/kreuzberg/tests/image_integration.rs +255 -255
  273. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  274. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  275. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  276. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  277. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  278. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  279. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  280. data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
  281. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
  282. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
  283. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
  284. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  285. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
  286. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  287. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  288. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
  289. data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
  290. data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
  291. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
  292. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
  293. data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
  294. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  295. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
  296. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
  297. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
  298. data/vendor/kreuzberg/tests/security_validation.rs +416 -416
  299. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  300. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
  301. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
  302. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
  303. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  304. data/vendor/kreuzberg-ffi/Cargo.toml +3 -3
  305. data/vendor/kreuzberg-ffi/README.md +851 -851
  306. data/vendor/kreuzberg-ffi/build.rs +176 -176
  307. data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
  308. data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +12 -12
  309. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
  310. data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
  311. data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
  312. data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
  313. data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
  314. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
  315. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
  316. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
  317. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
  318. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
  319. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
  320. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
  321. data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
  322. data/vendor/kreuzberg-tesseract/LICENSE +22 -22
  323. data/vendor/kreuzberg-tesseract/README.md +399 -399
  324. data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
  325. data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
  326. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
  327. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
  328. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
  329. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
  330. data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
  331. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
  332. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
  333. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
  334. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
  335. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
  336. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
  337. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
  338. data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
  339. data/vendor/rb-sys/Cargo.lock +393 -393
  340. data/vendor/rb-sys/Cargo.toml +70 -70
  341. data/vendor/rb-sys/Cargo.toml.orig +57 -57
  342. data/vendor/rb-sys/LICENSE-APACHE +190 -190
  343. data/vendor/rb-sys/LICENSE-MIT +21 -21
  344. data/vendor/rb-sys/build/features.rs +111 -111
  345. data/vendor/rb-sys/build/main.rs +286 -286
  346. data/vendor/rb-sys/build/stable_api_config.rs +155 -155
  347. data/vendor/rb-sys/build/version.rs +50 -50
  348. data/vendor/rb-sys/readme.md +36 -36
  349. data/vendor/rb-sys/src/bindings.rs +21 -21
  350. data/vendor/rb-sys/src/hidden.rs +11 -11
  351. data/vendor/rb-sys/src/lib.rs +35 -35
  352. data/vendor/rb-sys/src/macros.rs +371 -371
  353. data/vendor/rb-sys/src/memory.rs +53 -53
  354. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
  355. data/vendor/rb-sys/src/special_consts.rs +31 -31
  356. data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
  357. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
  358. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
  359. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
  360. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
  361. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
  362. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
  363. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
  364. data/vendor/rb-sys/src/stable_api.rs +260 -260
  365. data/vendor/rb-sys/src/symbol.rs +31 -31
  366. data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
  367. data/vendor/rb-sys/src/utils.rs +89 -89
  368. data/vendor/rb-sys/src/value_type.rs +7 -7
  369. metadata +7 -80
data/vendor/kreuzberg/tests/stopwords_integration_test.rs
@@ -1,888 +1,888 @@
- //! Integration tests for stopwords with token reduction and keywords extraction.
- #![cfg(all(feature = "stopwords", feature = "quality"))]
- //!
- //! These tests verify that stopwords are properly integrated across different features:
- //! - Token reduction at all ReductionLevels
- //! - Keywords extraction (YAKE and RAKE algorithms)
- //! - CJK text processing
- //! - Multi-language documents
- //! - Language fallback mechanisms
- //! - Custom stopwords
-
- use kreuzberg::stopwords::{STOPWORDS, get_stopwords, get_stopwords_with_fallback};
- use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
-
- #[cfg(any(feature = "keywords-yake", feature = "keywords-rake"))]
- use kreuzberg::keywords::{KeywordConfig, extract_keywords};
-
- use std::collections::HashMap;
-
- fn count_stopwords(text: &str, lang: &str) -> usize {
-     let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
-     let words: Vec<&str> = text.split_whitespace().collect();
-
-     words
-         .iter()
-         .filter(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-
-             !clean.is_empty() && stopwords.contains(&clean)
-         })
-         .count()
- }
-
- fn extract_content_words(text: &str, lang: &str) -> Vec<String> {
-     let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
-     let words: Vec<&str> = text.split_whitespace().collect();
-
-     words
-         .iter()
-         .filter_map(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-
-             if !clean.is_empty() && !stopwords.contains(&clean) && clean.len() > 1 {
-                 Some(clean)
-             } else {
-                 None
-             }
-         })
-         .collect()
- }
-
- #[test]
- fn test_stopwords_removed_during_moderate_token_reduction() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         language_hint: Some("en".to_string()),
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(!result.contains(" the "), "Should remove 'the'. Result: {}", result);
-     assert!(!result.contains(" is "), "Should remove 'is'. Result: {}", result);
-     assert!(!result.contains(" and "), "Should remove 'and'. Result: {}", result);
-
-     assert!(result.contains("quick"), "Should preserve 'quick'. Result: {}", result);
-     assert!(result.contains("brown"), "Should preserve 'brown'. Result: {}", result);
-     assert!(result.contains("fox"), "Should preserve 'fox'. Result: {}", result);
-     assert!(
-         result.contains("jumping"),
-         "Should preserve 'jumping'. Result: {}",
-         result
-     );
-     assert!(result.contains("lazy"), "Should preserve 'lazy'. Result: {}", result);
-
-     let original_stopwords = count_stopwords(input, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "Result should have fewer stopwords than original. Original: {}, Result: {}",
-         original_stopwords,
-         result_stopwords
-     );
- }
-
- #[test]
- fn test_stopwords_across_reduction_levels() {
-     let text = "The machine learning model is trained on the large dataset and achieves good performance";
-
-     let light_config = TokenReductionConfig {
-         level: ReductionLevel::Light,
-         use_simd: false,
-         ..Default::default()
-     };
-     let light_result = reduce_tokens(text, &light_config, Some("en")).unwrap();
-
-     let light_stopwords = count_stopwords(&light_result, "en");
-     assert!(light_stopwords > 0, "Light reduction should preserve some stopwords");
-
-     let moderate_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let moderate_result = reduce_tokens(text, &moderate_config, Some("en")).unwrap();
-
-     let moderate_stopwords = count_stopwords(&moderate_result, "en");
-     assert!(
-         moderate_stopwords < light_stopwords,
-         "Moderate reduction should remove more stopwords than light. Light: {}, Moderate: {}",
-         light_stopwords,
-         moderate_stopwords
-     );
-
-     let aggressive_config = TokenReductionConfig {
-         level: ReductionLevel::Aggressive,
-         use_simd: false,
-         ..Default::default()
-     };
-     let aggressive_result = reduce_tokens(text, &aggressive_config, Some("en")).unwrap();
-
-     assert!(
-         aggressive_result.len() <= moderate_result.len(),
-         "Aggressive reduction should be more aggressive than moderate"
-     );
- }
-
- #[test]
- fn test_stopwords_preserve_semantic_meaning() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input =
-         "The artificial intelligence system is processing the natural language text for extracting meaningful insights";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     let content_words = extract_content_words(&result, "en");
-
-     assert!(
-         content_words.contains(&"artificial".to_string()) || result.contains("artificial"),
-         "Should preserve 'artificial'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"intelligence".to_string()) || result.contains("intelligence"),
-         "Should preserve 'intelligence'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"processing".to_string()) || result.contains("processing"),
-         "Should preserve 'processing'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"natural".to_string()) || result.contains("natural"),
-         "Should preserve 'natural'. Result: {}",
-         result
-     );
-     assert!(
-         content_words.contains(&"language".to_string()) || result.contains("language"),
-         "Should preserve 'language'. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_multiple_languages() {
-     let en_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let en_input = "The computer science program is very comprehensive and includes many courses";
-     let en_result = reduce_tokens(en_input, &en_config, Some("en")).unwrap();
-
-     let en_original_stopwords = count_stopwords(en_input, "en");
-     let en_result_stopwords = count_stopwords(&en_result, "en");
-     assert!(
-         en_result_stopwords < en_original_stopwords,
-         "English stopwords should be removed"
-     );
-
-     let es_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let es_input = "El programa de ciencias de la computación es muy completo y tiene muchos cursos";
-     let es_result = reduce_tokens(es_input, &es_config, Some("es")).unwrap();
-
-     let es_original_stopwords = count_stopwords(es_input, "es");
-     let es_result_stopwords = count_stopwords(&es_result, "es");
-     assert!(
-         es_result_stopwords < es_original_stopwords,
-         "Spanish stopwords should be removed"
-     );
-
-     assert!(
-         es_result.contains("programa") || es_result.contains("ciencias") || es_result.contains("computación"),
-         "Should preserve Spanish content words. Result: {}",
-         es_result
-     );
-
-     let de_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let de_input = "Die künstliche Intelligenz ist ein wichtiges Forschungsgebiet der Informatik";
-     let de_result = reduce_tokens(de_input, &de_config, Some("de")).unwrap();
-
-     let de_original_stopwords = count_stopwords(de_input, "de");
-     let de_result_stopwords = count_stopwords(&de_result, "de");
-     assert!(
-         de_result_stopwords < de_original_stopwords,
-         "German stopwords should be removed"
-     );
- }
-
- #[test]
- fn test_language_fallback_to_english_stopwords() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The system is processing the data with the algorithm";
-     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
-
-     let original_stopwords = count_stopwords(input, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "Should fallback to English stopwords for unsupported language"
-     );
- }
-
- #[test]
- fn test_custom_stopwords_integration() {
-     let mut custom_stopwords = HashMap::new();
-     custom_stopwords.insert(
-         "en".to_string(),
-         vec!["algorithm".to_string(), "system".to_string(), "data".to_string()],
-     );
-
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         custom_stopwords: Some(custom_stopwords),
-         ..Default::default()
-     };
-
-     let input = "The algorithm processes the data in the system efficiently";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         !result.contains("algorithm"),
-         "Should remove custom stopword 'algorithm'. Result: {}",
-         result
-     );
-     assert!(
-         !result.contains("system"),
-         "Should remove custom stopword 'system'. Result: {}",
-         result
-     );
-     assert!(
-         !result.contains("data"),
-         "Should remove custom stopword 'data'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("processes") || result.contains("efficiently"),
-         "Should preserve non-stopword content. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_chinese_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "这个人工智能系统可以处理自然语言";
-     let result = reduce_tokens(input, &config, Some("zh")).unwrap();
-
-     assert!(
-         !result.is_empty(),
-         "Chinese text should be processed. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("人工") || result.contains("智能") || result.contains("语言"),
-         "Should preserve important Chinese terms. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_mixed_cjk_english() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The machine learning model 机器学习模型 is processing data efficiently";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         !result.contains(" the ") && !result.contains("The "),
-         "Should remove English 'the'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("machine") || result.contains("learning"),
-         "Should preserve English content. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("机器") || result.contains("学习") || result.contains("模型"),
-         "Should preserve Chinese content. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_japanese_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "人工知能技術の研究開発";
-     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
-
-     assert!(
-         !result.is_empty(),
-         "Japanese text should be processed. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_korean_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "인공 지능 기술 개발";
-     let result = reduce_tokens(input, &config, Some("ko")).unwrap();
-
-     assert!(
-         !result.is_empty(),
-         "Korean text should be processed. Result: {}",
-         result
-     );
- }
-
- #[cfg(feature = "keywords-rake")]
- #[test]
- fn test_stopwords_excluded_from_rake_keywords() {
-     let text = "The machine learning model is trained on a large dataset. \
-                 The model uses neural networks and deep learning algorithms. \
-                 The training process requires significant computational resources.";
-
-     let config = KeywordConfig::rake().with_language("en").with_max_keywords(10);
-
-     let keywords = extract_keywords(text, &config).unwrap();
-
-     assert!(!keywords.is_empty(), "Should extract keywords");
-
-     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
-
-     for keyword in &keywords {
-         let words: Vec<&str> = keyword.text.split_whitespace().collect();
-
-         let all_stopwords = words.iter().all(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-             en_stopwords.contains(&clean)
-         });
-
-         assert!(
-             !all_stopwords,
-             "Keyword '{}' should not be composed entirely of stopwords",
-             keyword.text
-         );
-     }
-
-     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
-
-     assert!(
-         keyword_texts.iter().any(|k| k.contains("machine learning")
-             || k.contains("neural networks")
-             || k.contains("deep learning")
-             || k.contains("dataset")
-             || k.contains("model")
-             || k.contains("training")),
-         "Should extract meaningful technical keywords. Got: {:?}",
-         keyword_texts
-     );
- }
-
- #[cfg(feature = "keywords-yake")]
- #[test]
- fn test_stopwords_excluded_from_yake_keywords() {
-     let text = "Natural language processing enables computers to understand human language. \
-                 Deep learning models achieve state-of-the-art performance in text analysis. \
-                 These systems can extract meaningful information from large text corpora.";
-
-     let config = KeywordConfig::yake().with_language("en").with_max_keywords(10);
-
-     let keywords = extract_keywords(text, &config).unwrap();
-
-     assert!(!keywords.is_empty(), "Should extract keywords");
-
-     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
-
-     for keyword in &keywords {
-         let has_content_word = keyword.text.split_whitespace().any(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-             !clean.is_empty() && !en_stopwords.contains(&clean)
-         });
-
-         assert!(
-             has_content_word,
-             "Keyword '{}' should contain at least one content word (non-stopword)",
-             keyword.text
-         );
-     }
- }
-
- #[cfg(feature = "keywords-rake")]
- #[test]
- fn test_keywords_respect_language_specific_stopwords() {
-     let spanish_text = "El aprendizaje automático es una rama de la inteligencia artificial. \
-                         Los modelos de aprendizaje profundo logran un rendimiento excepcional. \
-                         Estos sistemas pueden procesar grandes cantidades de datos.";
-
-     let config = KeywordConfig::rake().with_language("es").with_max_keywords(8);
-
-     let keywords = extract_keywords(spanish_text, &config).unwrap();
-
-     assert!(!keywords.is_empty(), "Should extract Spanish keywords");
-
-     let es_stopwords = get_stopwords("es").expect("Spanish stopwords must exist");
-
-     for keyword in &keywords {
-         let words: Vec<&str> = keyword.text.split_whitespace().collect();
-         let all_stopwords = words.iter().all(|word| {
-             let clean = word
-                 .chars()
-                 .filter(|c| c.is_alphabetic())
-                 .collect::<String>()
-                 .to_lowercase();
-             es_stopwords.contains(&clean)
-         });
-
-         assert!(
-             !all_stopwords,
-             "Spanish keyword '{}' should not be all stopwords",
-             keyword.text
-         );
-     }
-
-     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
-     assert!(
-         keyword_texts.iter().any(|k| k.contains("aprendizaje")
-             || k.contains("inteligencia")
-             || k.contains("modelos")
-             || k.contains("datos")),
-         "Should extract meaningful Spanish keywords. Got: {:?}",
-         keyword_texts
-     );
- }
-
- #[test]
- fn test_all_stopwords_text_reduction() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "the is a an and or but of to in for on at by";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         result.len() < input.len(),
-         "Text of all stopwords should be significantly reduced"
-     );
- }
-
- #[test]
- fn test_no_stopwords_text_reduction() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "PyTorch TensorFlow CUDA GPU optimization benchmark performance metrics";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     let input_words: Vec<&str> = input.split_whitespace().collect();
-     let result_lower = result.to_lowercase();
-
-     for word in input_words {
-         let word_lower = word.to_lowercase();
-         assert!(
-             result_lower.contains(&word_lower),
-             "Technical term '{}' should be preserved. Result: {}",
-             word,
-             result
-         );
-     }
- }
-
- #[test]
- fn test_mixed_case_stopwords_removal() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The SYSTEM Is Processing The DATA With The ALGORITHM";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     let result_words: Vec<&str> = result.split_whitespace().collect();
-     assert!(
-         !result_words.contains(&"the"),
-         "Should remove lowercase 'the'. Result: {}",
-         result
-     );
-     assert!(
-         !result_words.contains(&"is"),
-         "Should remove lowercase 'is'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("SYSTEM"),
-         "Should preserve 'SYSTEM'. Result: {}",
-         result
-     );
-     assert!(result.contains("DATA"), "Should preserve 'DATA'. Result: {}", result);
-     assert!(
-         result.contains("ALGORITHM"),
-         "Should preserve 'ALGORITHM'. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_reduce_tokens_function_with_stopwords() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let text = "The artificial intelligence system processes the natural language efficiently";
-     let result = reduce_tokens(text, &config, Some("en")).unwrap();
-
-     let original_stopwords = count_stopwords(text, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "reduce_tokens should remove stopwords. Original: {}, Result: {}",
-         original_stopwords,
-         result_stopwords
-     );
-
-     assert!(
-         result.contains("artificial") || result.contains("intelligence"),
-         "Should preserve content words. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_punctuation() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The system, which is processing the data, uses the algorithm.";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         !result.contains(" the ") || result.split_whitespace().filter(|w| w.contains("the")).count() < 3,
-         "Should remove most instances of 'the'. Result: {}",
-         result
-     );
-
-     assert!(
-         result.contains("system") || result.contains("processing") || result.contains("algorithm"),
-         "Should preserve content words. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_with_numbers() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The model has 100 layers and processes the data in 10 seconds";
-     let result = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert!(
-         result.contains("100"),
-         "Should preserve number '100'. Result: {}",
-         result
-     );
-     assert!(result.contains("10"), "Should preserve number '10'. Result: {}", result);
-
-     assert!(
-         result.contains("model") || result.contains("layers") || result.contains("processes"),
-         "Should preserve content words. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_removal_consistency_across_calls() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The machine learning model is trained on the dataset";
-
-     let result1 = reduce_tokens(input, &config, Some("en")).unwrap();
-     let result2 = reduce_tokens(input, &config, Some("en")).unwrap();
-     let result3 = reduce_tokens(input, &config, Some("en")).unwrap();
-
-     assert_eq!(result1, result2, "Results should be consistent across calls");
-     assert_eq!(result2, result3, "Results should be consistent across calls");
- }
-
- #[test]
- fn test_stopwords_with_long_text() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         enable_parallel: false,
-         ..Default::default()
-     };
-
-     let paragraph = "The machine learning model is trained on the large dataset. \
-                      The training process uses the neural network architecture. \
-                      The system processes the data efficiently and achieves the best performance. ";
-     let input = paragraph.repeat(10);
-
-     let result = reduce_tokens(&input, &config, Some("en")).unwrap();
-
-     assert!(
-         result.len() < input.len(),
-         "Long stopword-heavy text should be reduced. Input: {} chars, Result: {} chars",
-         input.len(),
-         result.len()
-     );
-
-     let original_stopwords = count_stopwords(&input, "en");
-     let result_stopwords = count_stopwords(&result, "en");
-
-     assert!(
-         result_stopwords < original_stopwords,
-         "Should remove stopwords from long text. Original: {}, Result: {}",
-         original_stopwords,
-         result_stopwords
-     );
- }
-
- #[test]
- fn test_get_stopwords_with_fallback_in_reduction() {
-     let primary_stopwords = get_stopwords_with_fallback("xyz", "en");
-     assert!(primary_stopwords.is_some(), "Should fallback to English");
-
-     let en_stopwords = get_stopwords("en").unwrap();
-     assert_eq!(
-         primary_stopwords.unwrap().len(),
-         en_stopwords.len(),
-         "Fallback should return English stopwords"
-     );
-
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-
-     let input = "The system is processing the data";
-     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
-
-     assert!(
-         !result.contains(" the ") && !result.contains(" is "),
-         "Should use fallback stopwords. Result: {}",
-         result
-     );
- }
-
- #[test]
- fn test_stopwords_registry_completeness() {
-     assert_eq!(STOPWORDS.len(), 64, "Should have exactly 64 language stopword sets");
-
-     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
-     assert!(en_stopwords.len() >= 70, "English should have at least 70 stopwords");
-
-     assert!(en_stopwords.contains("the"), "Should contain 'the'");
-     assert!(en_stopwords.contains("is"), "Should contain 'is'");
-     assert!(en_stopwords.contains("and"), "Should contain 'and'");
-     assert!(en_stopwords.contains("a"), "Should contain 'a'");
-     assert!(en_stopwords.contains("an"), "Should contain 'an'");
-     assert!(en_stopwords.contains("of"), "Should contain 'of'");
-     assert!(en_stopwords.contains("to"), "Should contain 'to'");
-     assert!(en_stopwords.contains("in"), "Should contain 'in'");
-     assert!(en_stopwords.contains("for"), "Should contain 'for'");
- }
-
- #[test]
- fn test_token_reduction_handles_nan_threshold() {
-     let mut config = TokenReductionConfig {
-         level: ReductionLevel::Maximum,
-         semantic_threshold: f32::NAN,
-         enable_semantic_clustering: true,
-         target_reduction: Some(0.5),
-         ..Default::default()
-     };
-
-     config.language_hint = Some("en".to_string());
-     let input = "Critical system update highlights performance improvements across distributed modules.";
-
-     let result = reduce_tokens(input, &config, Some("en")).unwrap_or_else(|_| String::new());
-     assert!(
-         result.chars().all(|c| !c.is_control()),
-         "Result should not contain unexpected control characters"
-     );
- }
-
- #[test]
- fn test_token_reduction_handles_multibyte_utf8() {
-     let config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         language_hint: Some("ja".to_string()),
-         ..Default::default()
-     };
-
-     let input = "品質管理は重要です。🚀 高速抽出と漢字処理が求められています。";
-     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
-
-     assert!(
-         result.contains("品質管理") || result.contains("漢字処理"),
-         "Important multibyte terms should survive reduction: {}",
-         result
-     );
- }
-
- #[test]
- fn test_token_reduction_concurrent_access() {
-     use std::sync::Arc;
-
-     let config = Arc::new(TokenReductionConfig {
-         level: ReductionLevel::Aggressive,
-         enable_parallel: true,
-         ..Default::default()
-     });
-
-     let input = "Concurrent reduction ensures thread safety without deadlocks or panics.";
-
-     std::thread::scope(|scope| {
-         for _ in 0..8 {
-             let cfg = Arc::clone(&config);
-             scope.spawn(move || {
-                 let reduced = reduce_tokens(input, &cfg, Some("en")).unwrap();
-                 assert!(!reduced.is_empty());
-             });
-         }
-     });
- }
- #[test]
- fn demo_stopwords_effectiveness() {
-     use kreuzberg::stopwords::get_stopwords;
-     use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
-
-     let en_text = "The machine learning model is trained on the large dataset and achieves good performance";
-     let en_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let en_result = reduce_tokens(en_text, &en_config, Some("en")).unwrap();
-
-     println!("\n=== English Example ===");
-     println!("BEFORE: {} chars", en_text.len());
-     println!("{}", en_text);
-     println!(
-         "\nAFTER: {} chars ({}% reduction)",
-         en_result.len(),
-         100 - (en_result.len() * 100 / en_text.len())
-     );
-     println!("{}", en_result);
-
-     let zh_text = "这个人工智能系统可以处理自然语言";
-     let zh_config = TokenReductionConfig {
-         level: ReductionLevel::Moderate,
-         use_simd: false,
-         ..Default::default()
-     };
-     let zh_result = reduce_tokens(zh_text, &zh_config, Some("zh")).unwrap();
-
-     println!("\n=== Chinese Example ===");
-     println!("BEFORE: {}", zh_text);
-     println!("AFTER: {}", zh_result);
-
-     let text = "The artificial intelligence system processes the natural language efficiently";
-
-     println!("\n=== Reduction Level Comparison ===");
-     println!("ORIGINAL: {}", text);
-
-     for level in [
-         ReductionLevel::Light,
-         ReductionLevel::Moderate,
-         ReductionLevel::Aggressive,
-     ] {
-         let config = TokenReductionConfig {
-             level,
-             use_simd: false,
-             ..Default::default()
-         };
-         let result = reduce_tokens(text, &config, Some("en")).unwrap();
-         println!(
-             "{:?}: {} chars -> {} chars ({}% reduction)",
-             level,
-             text.len(),
-             result.len(),
-             100 - (result.len() * 100 / text.len())
-         );
-         println!(" {}", result);
-     }
-
-     let stopwords = get_stopwords("en").unwrap();
-     println!("\n=== Stopwords Stats ===");
-     println!("English stopwords: {}", stopwords.len());
-     println!("Sample stopwords: {:?}", stopwords.iter().take(10).collect::<Vec<_>>());
- }
1
+ //! Integration tests for stopwords with token reduction and keywords extraction.
2
+ #![cfg(all(feature = "stopwords", feature = "quality"))]
3
+ //!
4
+ //! These tests verify that stopwords are properly integrated across different features:
5
+ //! - Token reduction at all ReductionLevels
6
+ //! - Keywords extraction (YAKE and RAKE algorithms)
7
+ //! - CJK text processing
8
+ //! - Multi-language documents
9
+ //! - Language fallback mechanisms
10
+ //! - Custom stopwords
11
+
12
+ use kreuzberg::stopwords::{STOPWORDS, get_stopwords, get_stopwords_with_fallback};
13
+ use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
14
+
15
+ #[cfg(any(feature = "keywords-yake", feature = "keywords-rake"))]
16
+ use kreuzberg::keywords::{KeywordConfig, extract_keywords};
17
+
18
+ use std::collections::HashMap;
19
+
20
+ fn count_stopwords(text: &str, lang: &str) -> usize {
21
+ let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
22
+ let words: Vec<&str> = text.split_whitespace().collect();
23
+
24
+ words
25
+ .iter()
26
+ .filter(|word| {
27
+ let clean = word
28
+ .chars()
29
+ .filter(|c| c.is_alphabetic())
30
+ .collect::<String>()
31
+ .to_lowercase();
32
+
33
+ !clean.is_empty() && stopwords.contains(&clean)
34
+ })
35
+ .count()
36
+ }
37
+
38
+ fn extract_content_words(text: &str, lang: &str) -> Vec<String> {
39
+ let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
40
+ let words: Vec<&str> = text.split_whitespace().collect();
41
+
42
+ words
43
+ .iter()
44
+ .filter_map(|word| {
45
+ let clean = word
46
+ .chars()
47
+ .filter(|c| c.is_alphabetic())
48
+ .collect::<String>()
49
+ .to_lowercase();
50
+
51
+ if !clean.is_empty() && !stopwords.contains(&clean) && clean.len() > 1 {
52
+ Some(clean)
53
+ } else {
54
+ None
55
+ }
56
+ })
57
+ .collect()
58
+ }
59
+
60
+ #[test]
61
+ fn test_stopwords_removed_during_moderate_token_reduction() {
62
+ let config = TokenReductionConfig {
63
+ level: ReductionLevel::Moderate,
64
+ language_hint: Some("en".to_string()),
65
+ use_simd: false,
66
+ ..Default::default()
67
+ };
68
+
69
+ let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
70
+ let result = reduce_tokens(input, &config, Some("en")).unwrap();
71
+
72
+ assert!(!result.contains(" the "), "Should remove 'the'. Result: {}", result);
73
+ assert!(!result.contains(" is "), "Should remove 'is'. Result: {}", result);
74
+ assert!(!result.contains(" and "), "Should remove 'and'. Result: {}", result);
75
+
76
+ assert!(result.contains("quick"), "Should preserve 'quick'. Result: {}", result);
77
+ assert!(result.contains("brown"), "Should preserve 'brown'. Result: {}", result);
78
+ assert!(result.contains("fox"), "Should preserve 'fox'. Result: {}", result);
79
+ assert!(
80
+ result.contains("jumping"),
81
+ "Should preserve 'jumping'. Result: {}",
82
+ result
83
+ );
84
+ assert!(result.contains("lazy"), "Should preserve 'lazy'. Result: {}", result);
85
+
86
+ let original_stopwords = count_stopwords(input, "en");
87
+ let result_stopwords = count_stopwords(&result, "en");
88
+
89
+ assert!(
90
+ result_stopwords < original_stopwords,
91
+ "Result should have fewer stopwords than original. Original: {}, Result: {}",
92
+ original_stopwords,
93
+ result_stopwords
94
+ );
95
+ }
96
+
97
+ #[test]
98
+ fn test_stopwords_across_reduction_levels() {
99
+ let text = "The machine learning model is trained on the large dataset and achieves good performance";
100
+
101
+ let light_config = TokenReductionConfig {
102
+ level: ReductionLevel::Light,
103
+ use_simd: false,
104
+ ..Default::default()
105
+ };
106
+ let light_result = reduce_tokens(text, &light_config, Some("en")).unwrap();
107
+
108
+ let light_stopwords = count_stopwords(&light_result, "en");
109
+ assert!(light_stopwords > 0, "Light reduction should preserve some stopwords");
110
+
111
+ let moderate_config = TokenReductionConfig {
112
+ level: ReductionLevel::Moderate,
113
+ use_simd: false,
114
+ ..Default::default()
115
+ };
116
+ let moderate_result = reduce_tokens(text, &moderate_config, Some("en")).unwrap();
117
+
118
+ let moderate_stopwords = count_stopwords(&moderate_result, "en");
119
+ assert!(
120
+ moderate_stopwords < light_stopwords,
121
+ "Moderate reduction should remove more stopwords than light. Light: {}, Moderate: {}",
122
+ light_stopwords,
123
+ moderate_stopwords
124
+ );
125
+
126
+ let aggressive_config = TokenReductionConfig {
127
+ level: ReductionLevel::Aggressive,
128
+ use_simd: false,
129
+ ..Default::default()
130
+ };
131
+ let aggressive_result = reduce_tokens(text, &aggressive_config, Some("en")).unwrap();
132
+
+     assert!(
+         aggressive_result.len() <= moderate_result.len(),
+         "Aggressive reduction should reduce the text at least as much as moderate"
+     );
+ }
+
+ #[test]
+ fn test_stopwords_preserve_semantic_meaning() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input =
+         "The artificial intelligence system is processing the natural language text for extracting meaningful insights";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let content_words = extract_content_words(&result, "en");
+
+     assert!(
+         content_words.contains(&"artificial".to_string()) || result.contains("artificial"),
+         "Should preserve 'artificial'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"intelligence".to_string()) || result.contains("intelligence"),
+         "Should preserve 'intelligence'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"processing".to_string()) || result.contains("processing"),
+         "Should preserve 'processing'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"natural".to_string()) || result.contains("natural"),
+         "Should preserve 'natural'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"language".to_string()) || result.contains("language"),
+         "Should preserve 'language'. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_multiple_languages() {
+     let en_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let en_input = "The computer science program is very comprehensive and includes many courses";
+     let en_result = reduce_tokens(en_input, &en_config, Some("en")).unwrap();
+
+     let en_original_stopwords = count_stopwords(en_input, "en");
+     let en_result_stopwords = count_stopwords(&en_result, "en");
+     assert!(
+         en_result_stopwords < en_original_stopwords,
+         "English stopwords should be removed"
+     );
+
+     let es_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let es_input = "El programa de ciencias de la computación es muy completo y tiene muchos cursos";
+     let es_result = reduce_tokens(es_input, &es_config, Some("es")).unwrap();
+
+     let es_original_stopwords = count_stopwords(es_input, "es");
+     let es_result_stopwords = count_stopwords(&es_result, "es");
+     assert!(
+         es_result_stopwords < es_original_stopwords,
+         "Spanish stopwords should be removed"
+     );
+
+     assert!(
+         es_result.contains("programa") || es_result.contains("ciencias") || es_result.contains("computación"),
+         "Should preserve Spanish content words. Result: {}",
+         es_result
+     );
+
+     let de_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let de_input = "Die künstliche Intelligenz ist ein wichtiges Forschungsgebiet der Informatik";
+     let de_result = reduce_tokens(de_input, &de_config, Some("de")).unwrap();
+
+     let de_original_stopwords = count_stopwords(de_input, "de");
+     let de_result_stopwords = count_stopwords(&de_result, "de");
+     assert!(
+         de_result_stopwords < de_original_stopwords,
+         "German stopwords should be removed"
+     );
+ }
+
+ #[test]
+ fn test_language_fallback_to_english_stopwords() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system is processing the data with the algorithm";
+     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
+
+     let original_stopwords = count_stopwords(input, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "Should fall back to English stopwords for an unsupported language"
+     );
+ }
+
+ #[test]
+ fn test_custom_stopwords_integration() {
+     let mut custom_stopwords = HashMap::new();
+     custom_stopwords.insert(
+         "en".to_string(),
+         vec!["algorithm".to_string(), "system".to_string(), "data".to_string()],
+     );
+
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         custom_stopwords: Some(custom_stopwords),
+         ..Default::default()
+     };
+
+     let input = "The algorithm processes the data in the system efficiently";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains("algorithm"),
+         "Should remove custom stopword 'algorithm'. Result: {}",
+         result
+     );
+     assert!(
+         !result.contains("system"),
+         "Should remove custom stopword 'system'. Result: {}",
+         result
+     );
+     assert!(
+         !result.contains("data"),
+         "Should remove custom stopword 'data'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("processes") || result.contains("efficiently"),
+         "Should preserve non-stopword content. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_chinese_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "这个人工智能系统可以处理自然语言";
+     let result = reduce_tokens(input, &config, Some("zh")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Chinese text should be processed. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("人工") || result.contains("智能") || result.contains("语言"),
+         "Should preserve important Chinese terms. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_mixed_cjk_english() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The machine learning model 机器学习模型 is processing data efficiently";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains(" the ") && !result.contains("The "),
+         "Should remove English 'the'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("machine") || result.contains("learning"),
+         "Should preserve English content. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("机器") || result.contains("学习") || result.contains("模型"),
+         "Should preserve Chinese content. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_japanese_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "人工知能技術の研究開発";
+     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Japanese text should be processed. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_korean_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "인공 지능 기술 개발";
+     let result = reduce_tokens(input, &config, Some("ko")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Korean text should be processed. Result: {}",
+         result
+     );
+ }
+
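+ // Feature-gated keyword tests: extracted RAKE/YAKE phrases should never consist
+ // entirely of stopwords for the configured language.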
+ #[cfg(feature = "keywords-rake")]
+ #[test]
+ fn test_stopwords_excluded_from_rake_keywords() {
+     let text = "The machine learning model is trained on a large dataset. \
+                 The model uses neural networks and deep learning algorithms. \
+                 The training process requires significant computational resources.";
+
+     let config = KeywordConfig::rake().with_language("en").with_max_keywords(10);
+
+     let keywords = extract_keywords(text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract keywords");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+
+     for keyword in &keywords {
+         let words: Vec<&str> = keyword.text.split_whitespace().collect();
+
+         let all_stopwords = words.iter().all(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             en_stopwords.contains(&clean)
+         });
+
+         assert!(
+             !all_stopwords,
+             "Keyword '{}' should not be composed entirely of stopwords",
+             keyword.text
+         );
+     }
+
+     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
+
+     assert!(
+         keyword_texts.iter().any(|k| k.contains("machine learning")
+             || k.contains("neural networks")
+             || k.contains("deep learning")
+             || k.contains("dataset")
+             || k.contains("model")
+             || k.contains("training")),
+         "Should extract meaningful technical keywords. Got: {:?}",
+         keyword_texts
+     );
+ }
+
+ #[cfg(feature = "keywords-yake")]
+ #[test]
+ fn test_stopwords_excluded_from_yake_keywords() {
+     let text = "Natural language processing enables computers to understand human language. \
+                 Deep learning models achieve state-of-the-art performance in text analysis. \
+                 These systems can extract meaningful information from large text corpora.";
+
+     let config = KeywordConfig::yake().with_language("en").with_max_keywords(10);
+
+     let keywords = extract_keywords(text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract keywords");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+
+     for keyword in &keywords {
+         let has_content_word = keyword.text.split_whitespace().any(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             !clean.is_empty() && !en_stopwords.contains(&clean)
+         });
+
+         assert!(
+             has_content_word,
+             "Keyword '{}' should contain at least one content word (non-stopword)",
+             keyword.text
+         );
+     }
+ }
+
+ #[cfg(feature = "keywords-rake")]
+ #[test]
+ fn test_keywords_respect_language_specific_stopwords() {
+     let spanish_text = "El aprendizaje automático es una rama de la inteligencia artificial. \
+                         Los modelos de aprendizaje profundo logran un rendimiento excepcional. \
+                         Estos sistemas pueden procesar grandes cantidades de datos.";
+
+     let config = KeywordConfig::rake().with_language("es").with_max_keywords(8);
+
+     let keywords = extract_keywords(spanish_text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract Spanish keywords");
+
+     let es_stopwords = get_stopwords("es").expect("Spanish stopwords must exist");
+
+     for keyword in &keywords {
+         let words: Vec<&str> = keyword.text.split_whitespace().collect();
+         let all_stopwords = words.iter().all(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             es_stopwords.contains(&clean)
+         });
+
+         assert!(
+             !all_stopwords,
+             "Spanish keyword '{}' should not be all stopwords",
+             keyword.text
+         );
+     }
+
+     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
+     assert!(
+         keyword_texts.iter().any(|k| k.contains("aprendizaje")
+             || k.contains("inteligencia")
+             || k.contains("modelos")
+             || k.contains("datos")),
+         "Should extract meaningful Spanish keywords. Got: {:?}",
+         keyword_texts
+     );
+ }
+
+ #[test]
+ fn test_all_stopwords_text_reduction() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "the is a an and or but of to in for on at by";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.len() < input.len(),
+         "Text of all stopwords should be significantly reduced"
+     );
+ }
+
+ #[test]
+ fn test_no_stopwords_text_reduction() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "PyTorch TensorFlow CUDA GPU optimization benchmark performance metrics";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let input_words: Vec<&str> = input.split_whitespace().collect();
+     let result_lower = result.to_lowercase();
+
+     for word in input_words {
+         let word_lower = word.to_lowercase();
+         assert!(
+             result_lower.contains(&word_lower),
+             "Technical term '{}' should be preserved. Result: {}",
+             word,
+             result
+         );
+     }
+ }
+
+ #[test]
+ fn test_mixed_case_stopwords_removal() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The SYSTEM Is Processing The DATA With The ALGORITHM";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let result_words: Vec<&str> = result.split_whitespace().collect();
+     assert!(
+         !result_words.contains(&"the"),
+         "Should remove lowercase 'the'. Result: {}",
+         result
+     );
+     assert!(
+         !result_words.contains(&"is"),
+         "Should remove lowercase 'is'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("SYSTEM"),
+         "Should preserve 'SYSTEM'. Result: {}",
+         result
+     );
+     assert!(result.contains("DATA"), "Should preserve 'DATA'. Result: {}", result);
+     assert!(
+         result.contains("ALGORITHM"),
+         "Should preserve 'ALGORITHM'. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_reduce_tokens_function_with_stopwords() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let text = "The artificial intelligence system processes the natural language efficiently";
+     let result = reduce_tokens(text, &config, Some("en")).unwrap();
+
+     let original_stopwords = count_stopwords(text, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "reduce_tokens should remove stopwords. Original: {}, Result: {}",
+         original_stopwords,
+         result_stopwords
+     );
+
+     assert!(
+         result.contains("artificial") || result.contains("intelligence"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_punctuation() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system, which is processing the data, uses the algorithm.";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains(" the ") || result.split_whitespace().filter(|w| w.contains("the")).count() < 3,
+         "Should remove most instances of 'the'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("system") || result.contains("processing") || result.contains("algorithm"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_numbers() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The model has 100 layers and processes the data in 10 seconds";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.contains("100"),
+         "Should preserve number '100'. Result: {}",
+         result
+     );
+     assert!(result.contains("10"), "Should preserve number '10'. Result: {}", result);
+
+     assert!(
+         result.contains("model") || result.contains("layers") || result.contains("processes"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_removal_consistency_across_calls() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The machine learning model is trained on the dataset";
+
+     let result1 = reduce_tokens(input, &config, Some("en")).unwrap();
+     let result2 = reduce_tokens(input, &config, Some("en")).unwrap();
+     let result3 = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert_eq!(result1, result2, "Results should be consistent across calls");
+     assert_eq!(result2, result3, "Results should be consistent across calls");
+ }
+
+ #[test]
+ fn test_stopwords_with_long_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         enable_parallel: false,
+         ..Default::default()
+     };
+
+     let paragraph = "The machine learning model is trained on the large dataset. \
+                      The training process uses the neural network architecture. \
+                      The system processes the data efficiently and achieves the best performance. ";
+     let input = paragraph.repeat(10);
+
+     let result = reduce_tokens(&input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.len() < input.len(),
+         "Long stopword-heavy text should be reduced. Input: {} chars, Result: {} chars",
+         input.len(),
+         result.len()
+     );
+
+     let original_stopwords = count_stopwords(&input, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "Should remove stopwords from long text. Original: {}, Result: {}",
+         original_stopwords,
+         result_stopwords
+     );
+ }
+
+ #[test]
+ fn test_get_stopwords_with_fallback_in_reduction() {
+     let primary_stopwords = get_stopwords_with_fallback("xyz", "en");
+     assert!(primary_stopwords.is_some(), "Should fall back to English");
+
+     let en_stopwords = get_stopwords("en").unwrap();
+     assert_eq!(
+         primary_stopwords.unwrap().len(),
+         en_stopwords.len(),
+         "Fallback should return English stopwords"
+     );
+
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system is processing the data";
+     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
+
+     assert!(
+         !result.contains(" the ") && !result.contains(" is "),
+         "Should use fallback stopwords. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_registry_completeness() {
+     assert_eq!(STOPWORDS.len(), 64, "Should have exactly 64 language stopword sets");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+     assert!(en_stopwords.len() >= 70, "English should have at least 70 stopwords");
+
+     assert!(en_stopwords.contains("the"), "Should contain 'the'");
+     assert!(en_stopwords.contains("is"), "Should contain 'is'");
+     assert!(en_stopwords.contains("and"), "Should contain 'and'");
+     assert!(en_stopwords.contains("a"), "Should contain 'a'");
+     assert!(en_stopwords.contains("an"), "Should contain 'an'");
+     assert!(en_stopwords.contains("of"), "Should contain 'of'");
+     assert!(en_stopwords.contains("to"), "Should contain 'to'");
+     assert!(en_stopwords.contains("in"), "Should contain 'in'");
+     assert!(en_stopwords.contains("for"), "Should contain 'for'");
+ }
+
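+ // Edge case: a NaN semantic threshold must not panic; a failed reduction is
+ // tolerated here and mapped to an empty string.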
+ #[test]
+ fn test_token_reduction_handles_nan_threshold() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Maximum,
+         semantic_threshold: f32::NAN,
+         enable_semantic_clustering: true,
+         target_reduction: Some(0.5),
+         language_hint: Some("en".to_string()),
+         ..Default::default()
+     };
+
+     let input = "Critical system update highlights performance improvements across distributed modules.";
+
+     let result = reduce_tokens(input, &config, Some("en")).unwrap_or_else(|_| String::new());
+     assert!(
+         result.chars().all(|c| !c.is_control()),
+         "Result should not contain unexpected control characters"
+     );
+ }
+
+ #[test]
+ fn test_token_reduction_handles_multibyte_utf8() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         language_hint: Some("ja".to_string()),
+         ..Default::default()
+     };
+
+     let input = "品質管理は重要です。🚀 高速抽出と漢字処理が求められています。";
+     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
+
+     assert!(
+         result.contains("品質管理") || result.contains("漢字処理"),
+         "Important multibyte terms should survive reduction: {}",
+         result
+     );
+ }
+
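+ // Concurrency check: eight scoped threads share one Arc-wrapped config to verify
+ // that reduce_tokens is safe to call in parallel.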
+ #[test]
+ fn test_token_reduction_concurrent_access() {
+     use std::sync::Arc;
+
+     let config = Arc::new(TokenReductionConfig {
+         level: ReductionLevel::Aggressive,
+         enable_parallel: true,
+         ..Default::default()
+     });
+
+     let input = "Concurrent reduction ensures thread safety without deadlocks or panics.";
+
+     std::thread::scope(|scope| {
+         for _ in 0..8 {
+             let cfg = Arc::clone(&config);
+             scope.spawn(move || {
+                 let reduced = reduce_tokens(input, &cfg, Some("en")).unwrap();
+                 assert!(!reduced.is_empty());
+             });
+         }
+     });
+ }
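+
+ /// Demonstration rather than a strict assertion test: prints before/after samples
+ /// for several languages and reduction levels (run with `--nocapture` to see them).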
+ #[test]
+ fn demo_stopwords_effectiveness() {
+     use kreuzberg::stopwords::get_stopwords;
+     use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
+
+     let en_text = "The machine learning model is trained on the large dataset and achieves good performance";
+     let en_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let en_result = reduce_tokens(en_text, &en_config, Some("en")).unwrap();
+
+     println!("\n=== English Example ===");
+     println!("BEFORE: {} chars", en_text.len());
+     println!("{}", en_text);
+     println!(
+         "\nAFTER: {} chars ({}% reduction)",
+         en_result.len(),
+         100 - (en_result.len() * 100 / en_text.len())
+     );
+     println!("{}", en_result);
+
+     let zh_text = "这个人工智能系统可以处理自然语言";
+     let zh_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let zh_result = reduce_tokens(zh_text, &zh_config, Some("zh")).unwrap();
+
+     println!("\n=== Chinese Example ===");
+     println!("BEFORE: {}", zh_text);
+     println!("AFTER: {}", zh_result);
+
+     let text = "The artificial intelligence system processes the natural language efficiently";
+
+     println!("\n=== Reduction Level Comparison ===");
+     println!("ORIGINAL: {}", text);
+
+     for level in [
+         ReductionLevel::Light,
+         ReductionLevel::Moderate,
+         ReductionLevel::Aggressive,
+     ] {
+         let config = TokenReductionConfig {
+             level,
+             use_simd: false,
+             ..Default::default()
+         };
+         let result = reduce_tokens(text, &config, Some("en")).unwrap();
+         println!(
+             "{:?}: {} chars -> {} chars ({}% reduction)",
+             level,
+             text.len(),
+             result.len(),
+             100 - (result.len() * 100 / text.len())
+         );
+         println!(" {}", result);
+     }
+
+     let stopwords = get_stopwords("en").unwrap();
+     println!("\n=== Stopwords Stats ===");
+     println!("English stopwords: {}", stopwords.len());
+     println!("Sample stopwords: {:?}", stopwords.iter().take(10).collect::<Vec<_>>());
+ }