kreuzberg 4.0.0.rc2 → 4.0.0

This diff shows the changes between publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (446)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +543 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +194 -6
  8. data/README.md +396 -426
  9. data/Rakefile +34 -25
  10. data/Steepfile +51 -47
  11. data/examples/async_patterns.rb +283 -341
  12. data/ext/kreuzberg_rb/extconf.rb +65 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +23 -0
  14. data/ext/kreuzberg_rb/native/Cargo.lock +7619 -6535
  15. data/ext/kreuzberg_rb/native/Cargo.toml +75 -44
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +15 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3802 -2998
  23. data/extconf.rb +60 -28
  24. data/kreuzberg.gemspec +199 -148
  25. data/lib/kreuzberg/api_proxy.rb +126 -142
  26. data/lib/kreuzberg/cache_api.rb +67 -46
  27. data/lib/kreuzberg/cli.rb +47 -55
  28. data/lib/kreuzberg/cli_proxy.rb +117 -127
  29. data/lib/kreuzberg/config.rb +936 -691
  30. data/lib/kreuzberg/error_context.rb +136 -32
  31. data/lib/kreuzberg/errors.rb +116 -118
  32. data/lib/kreuzberg/extraction_api.rb +313 -85
  33. data/lib/kreuzberg/mcp_proxy.rb +177 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +40 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +15 -86
  36. data/lib/kreuzberg/result.rb +334 -216
  37. data/lib/kreuzberg/setup_lib_path.rb +99 -80
  38. data/lib/kreuzberg/types.rb +170 -0
  39. data/lib/kreuzberg/validator_protocol.rb +16 -89
  40. data/lib/kreuzberg/version.rb +5 -5
  41. data/lib/kreuzberg.rb +96 -103
  42. data/lib/libpdfium.so +0 -0
  43. data/sig/kreuzberg/internal.rbs +184 -184
  44. data/sig/kreuzberg.rbs +561 -520
  45. data/spec/binding/async_operations_spec.rb +473 -0
  46. data/spec/binding/batch_operations_spec.rb +595 -0
  47. data/spec/binding/batch_spec.rb +359 -0
  48. data/spec/binding/cache_spec.rb +227 -227
  49. data/spec/binding/cli_proxy_spec.rb +85 -85
  50. data/spec/binding/cli_spec.rb +55 -55
  51. data/spec/binding/config_result_spec.rb +377 -0
  52. data/spec/binding/config_spec.rb +419 -345
  53. data/spec/binding/config_validation_spec.rb +377 -283
  54. data/spec/binding/embeddings_spec.rb +816 -0
  55. data/spec/binding/error_handling_spec.rb +399 -213
  56. data/spec/binding/error_recovery_spec.rb +488 -0
  57. data/spec/binding/errors_spec.rb +66 -66
  58. data/spec/binding/font_config_spec.rb +220 -0
  59. data/spec/binding/images_spec.rb +738 -0
  60. data/spec/binding/keywords_extraction_spec.rb +600 -0
  61. data/spec/binding/metadata_types_spec.rb +1228 -0
  62. data/spec/binding/pages_extraction_spec.rb +471 -0
  63. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  64. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  65. data/spec/binding/plugins/validator_spec.rb +273 -274
  66. data/spec/binding/tables_spec.rb +641 -0
  67. data/spec/fixtures/config.toml +38 -39
  68. data/spec/fixtures/config.yaml +41 -41
  69. data/spec/fixtures/invalid_config.toml +3 -4
  70. data/spec/smoke/package_spec.rb +177 -178
  71. data/spec/spec_helper.rb +40 -42
  72. data/spec/unit/config/chunking_config_spec.rb +213 -0
  73. data/spec/unit/config/embedding_config_spec.rb +343 -0
  74. data/spec/unit/config/extraction_config_spec.rb +438 -0
  75. data/spec/unit/config/font_config_spec.rb +285 -0
  76. data/spec/unit/config/hierarchy_config_spec.rb +314 -0
  77. data/spec/unit/config/image_extraction_config_spec.rb +209 -0
  78. data/spec/unit/config/image_preprocessing_config_spec.rb +249 -0
  79. data/spec/unit/config/keyword_config_spec.rb +229 -0
  80. data/spec/unit/config/language_detection_config_spec.rb +258 -0
  81. data/spec/unit/config/ocr_config_spec.rb +171 -0
  82. data/spec/unit/config/page_config_spec.rb +221 -0
  83. data/spec/unit/config/pdf_config_spec.rb +267 -0
  84. data/spec/unit/config/postprocessor_config_spec.rb +290 -0
  85. data/spec/unit/config/tesseract_config_spec.rb +181 -0
  86. data/spec/unit/config/token_reduction_config_spec.rb +251 -0
  87. data/test/metadata_types_test.rb +959 -0
  88. data/vendor/Cargo.toml +61 -0
  89. data/vendor/kreuzberg/Cargo.toml +259 -204
  90. data/vendor/kreuzberg/README.md +263 -175
  91. data/vendor/kreuzberg/build.rs +782 -474
  92. data/vendor/kreuzberg/examples/bench_fixes.rs +71 -0
  93. data/vendor/kreuzberg/examples/test_pdfium_fork.rs +62 -0
  94. data/vendor/kreuzberg/src/api/error.rs +81 -81
  95. data/vendor/kreuzberg/src/api/handlers.rs +320 -199
  96. data/vendor/kreuzberg/src/api/mod.rs +94 -79
  97. data/vendor/kreuzberg/src/api/server.rs +518 -353
  98. data/vendor/kreuzberg/src/api/types.rs +206 -170
  99. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  100. data/vendor/kreuzberg/src/chunking/mod.rs +2303 -677
  101. data/vendor/kreuzberg/src/chunking/processor.rs +219 -0
  102. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  103. data/vendor/kreuzberg/src/core/batch_optimizations.rs +385 -0
  104. data/vendor/kreuzberg/src/core/config.rs +1914 -1032
  105. data/vendor/kreuzberg/src/core/config_validation.rs +949 -0
  106. data/vendor/kreuzberg/src/core/extractor.rs +1200 -1024
  107. data/vendor/kreuzberg/src/core/formats.rs +235 -0
  108. data/vendor/kreuzberg/src/core/io.rs +329 -329
  109. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  110. data/vendor/kreuzberg/src/core/mod.rs +61 -45
  111. data/vendor/kreuzberg/src/core/pipeline.rs +1223 -984
  112. data/vendor/kreuzberg/src/core/server_config.rs +1220 -0
  113. data/vendor/kreuzberg/src/embeddings.rs +471 -432
  114. data/vendor/kreuzberg/src/error.rs +431 -431
  115. data/vendor/kreuzberg/src/extraction/archive.rs +959 -954
  116. data/vendor/kreuzberg/src/extraction/capacity.rs +263 -0
  117. data/vendor/kreuzberg/src/extraction/docx.rs +404 -40
  118. data/vendor/kreuzberg/src/extraction/email.rs +855 -854
  119. data/vendor/kreuzberg/src/extraction/excel.rs +697 -688
  120. data/vendor/kreuzberg/src/extraction/html.rs +1830 -553
  121. data/vendor/kreuzberg/src/extraction/image.rs +492 -368
  122. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -563
  123. data/vendor/kreuzberg/src/extraction/markdown.rs +216 -213
  124. data/vendor/kreuzberg/src/extraction/mod.rs +93 -81
  125. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  126. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  127. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  128. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  129. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -287
  130. data/vendor/kreuzberg/src/extraction/pptx.rs +3102 -3000
  131. data/vendor/kreuzberg/src/extraction/structured.rs +491 -490
  132. data/vendor/kreuzberg/src/extraction/table.rs +329 -328
  133. data/vendor/kreuzberg/src/extraction/text.rs +277 -269
  134. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  135. data/vendor/kreuzberg/src/extractors/archive.rs +447 -446
  136. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -469
  137. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -502
  138. data/vendor/kreuzberg/src/extractors/docx.rs +400 -367
  139. data/vendor/kreuzberg/src/extractors/email.rs +157 -143
  140. data/vendor/kreuzberg/src/extractors/epub.rs +696 -707
  141. data/vendor/kreuzberg/src/extractors/excel.rs +385 -343
  142. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -491
  143. data/vendor/kreuzberg/src/extractors/html.rs +419 -393
  144. data/vendor/kreuzberg/src/extractors/image.rs +219 -198
  145. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1051
  146. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -367
  147. data/vendor/kreuzberg/src/extractors/latex.rs +653 -652
  148. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -700
  149. data/vendor/kreuzberg/src/extractors/mod.rs +429 -365
  150. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  151. data/vendor/kreuzberg/src/extractors/opml.rs +635 -634
  152. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -528
  153. data/vendor/kreuzberg/src/extractors/pdf.rs +761 -493
  154. data/vendor/kreuzberg/src/extractors/pptx.rs +279 -248
  155. data/vendor/kreuzberg/src/extractors/rst.rs +577 -576
  156. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -810
  157. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  158. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  159. data/vendor/kreuzberg/src/extractors/structured.rs +142 -140
  160. data/vendor/kreuzberg/src/extractors/text.rs +265 -260
  161. data/vendor/kreuzberg/src/extractors/typst.rs +651 -650
  162. data/vendor/kreuzberg/src/extractors/xml.rs +147 -135
  163. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  164. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  165. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  166. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  167. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  168. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  169. data/vendor/kreuzberg/src/keywords/processor.rs +275 -267
  170. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  171. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  172. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  173. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -942
  174. data/vendor/kreuzberg/src/language_detection/processor.rs +218 -0
  175. data/vendor/kreuzberg/src/lib.rs +114 -105
  176. data/vendor/kreuzberg/src/mcp/mod.rs +35 -32
  177. data/vendor/kreuzberg/src/mcp/server.rs +2090 -1968
  178. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  179. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  180. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  181. data/vendor/kreuzberg/src/ocr/language_registry.rs +520 -0
  182. data/vendor/kreuzberg/src/ocr/mod.rs +60 -58
  183. data/vendor/kreuzberg/src/ocr/processor.rs +858 -863
  184. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  185. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  186. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +456 -450
  187. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  188. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  189. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  190. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  191. data/vendor/kreuzberg/src/pdf/bindings.rs +306 -0
  192. data/vendor/kreuzberg/src/pdf/bundled.rs +408 -0
  193. data/vendor/kreuzberg/src/pdf/error.rs +214 -122
  194. data/vendor/kreuzberg/src/pdf/fonts.rs +358 -0
  195. data/vendor/kreuzberg/src/pdf/hierarchy.rs +903 -0
  196. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  197. data/vendor/kreuzberg/src/pdf/metadata.rs +509 -346
  198. data/vendor/kreuzberg/src/pdf/mod.rs +81 -50
  199. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  200. data/vendor/kreuzberg/src/pdf/table.rs +417 -393
  201. data/vendor/kreuzberg/src/pdf/text.rs +553 -158
  202. data/vendor/kreuzberg/src/plugins/extractor.rs +1042 -1013
  203. data/vendor/kreuzberg/src/plugins/mod.rs +212 -209
  204. data/vendor/kreuzberg/src/plugins/ocr.rs +637 -620
  205. data/vendor/kreuzberg/src/plugins/processor.rs +650 -642
  206. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1337
  207. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  208. data/vendor/kreuzberg/src/plugins/validator.rs +967 -956
  209. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  210. data/vendor/kreuzberg/src/text/mod.rs +27 -19
  211. data/vendor/kreuzberg/src/text/quality.rs +710 -697
  212. data/vendor/kreuzberg/src/text/quality_processor.rs +231 -0
  213. data/vendor/kreuzberg/src/text/string_utils.rs +229 -217
  214. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  215. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  216. data/vendor/kreuzberg/src/text/token_reduction/core.rs +832 -796
  217. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +923 -902
  218. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  219. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  220. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +148 -147
  221. data/vendor/kreuzberg/src/text/utf8_validation.rs +193 -0
  222. data/vendor/kreuzberg/src/types.rs +1713 -903
  223. data/vendor/kreuzberg/src/utils/mod.rs +31 -17
  224. data/vendor/kreuzberg/src/utils/pool.rs +503 -0
  225. data/vendor/kreuzberg/src/utils/pool_sizing.rs +364 -0
  226. data/vendor/kreuzberg/src/utils/quality.rs +968 -959
  227. data/vendor/kreuzberg/src/utils/string_pool.rs +761 -0
  228. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  229. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  230. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  231. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  232. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  233. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  234. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  235. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  236. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  237. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  238. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  239. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  240. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  241. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  242. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  243. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  244. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  245. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  246. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  247. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  248. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  249. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  250. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  251. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  252. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  253. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  254. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  255. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  256. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  257. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  258. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  259. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  260. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  261. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  262. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  263. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  264. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  265. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  266. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  267. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  268. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  269. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  270. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  271. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  272. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  273. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  274. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  275. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  276. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  277. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  278. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  279. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  280. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  281. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  282. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  283. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  284. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  285. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  286. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  287. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  288. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  289. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  290. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  291. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  292. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  293. data/vendor/kreuzberg/tests/api_embed.rs +360 -0
  294. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  295. data/vendor/kreuzberg/tests/api_large_pdf_extraction.rs +471 -0
  296. data/vendor/kreuzberg/tests/api_large_pdf_extraction_diagnostics.rs +289 -0
  297. data/vendor/kreuzberg/tests/api_tests.rs +1472 -966
  298. data/vendor/kreuzberg/tests/archive_integration.rs +545 -543
  299. data/vendor/kreuzberg/tests/batch_orchestration.rs +587 -556
  300. data/vendor/kreuzberg/tests/batch_pooling_benchmark.rs +154 -0
  301. data/vendor/kreuzberg/tests/batch_processing.rs +328 -316
  302. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  303. data/vendor/kreuzberg/tests/concurrency_stress.rs +541 -525
  304. data/vendor/kreuzberg/tests/config_features.rs +612 -598
  305. data/vendor/kreuzberg/tests/config_integration_test.rs +753 -0
  306. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -415
  307. data/vendor/kreuzberg/tests/core_integration.rs +519 -510
  308. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  309. data/vendor/kreuzberg/tests/data/hierarchy_ground_truth.json +294 -0
  310. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -498
  311. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  312. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  313. data/vendor/kreuzberg/tests/email_integration.rs +327 -325
  314. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  315. data/vendor/kreuzberg/tests/error_handling.rs +402 -393
  316. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  317. data/vendor/kreuzberg/tests/format_integration.rs +165 -159
  318. data/vendor/kreuzberg/tests/helpers/mod.rs +202 -142
  319. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  320. data/vendor/kreuzberg/tests/image_integration.rs +255 -253
  321. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  322. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  323. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  324. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  325. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  326. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  327. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  328. data/vendor/kreuzberg/tests/mime_detection.rs +429 -428
  329. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -510
  330. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -676
  331. data/vendor/kreuzberg/tests/ocr_language_registry.rs +191 -0
  332. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -627
  333. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  334. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -695
  335. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  336. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  337. data/vendor/kreuzberg/tests/page_markers.rs +297 -0
  338. data/vendor/kreuzberg/tests/pdf_hierarchy_detection.rs +301 -0
  339. data/vendor/kreuzberg/tests/pdf_hierarchy_quality.rs +589 -0
  340. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -43
  341. data/vendor/kreuzberg/tests/pdf_ocr_triggering.rs +301 -0
  342. data/vendor/kreuzberg/tests/pdf_text_merging.rs +475 -0
  343. data/vendor/kreuzberg/tests/pdfium_linking.rs +340 -0
  344. data/vendor/kreuzberg/tests/pipeline_integration.rs +1446 -1411
  345. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -771
  346. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +577 -560
  347. data/vendor/kreuzberg/tests/plugin_system.rs +927 -921
  348. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  349. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -586
  350. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -692
  351. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -776
  352. data/vendor/kreuzberg/tests/security_validation.rs +416 -415
  353. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  354. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -609
  355. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1259
  356. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -647
  357. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  358. data/vendor/kreuzberg-ffi/Cargo.toml +67 -0
  359. data/vendor/kreuzberg-ffi/README.md +851 -0
  360. data/vendor/kreuzberg-ffi/benches/result_view_benchmark.rs +227 -0
  361. data/vendor/kreuzberg-ffi/build.rs +168 -0
  362. data/vendor/kreuzberg-ffi/cbindgen.toml +37 -0
  363. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -0
  364. data/vendor/kreuzberg-ffi/kreuzberg.h +3012 -0
  365. data/vendor/kreuzberg-ffi/src/batch_streaming.rs +588 -0
  366. data/vendor/kreuzberg-ffi/src/config.rs +1341 -0
  367. data/vendor/kreuzberg-ffi/src/error.rs +901 -0
  368. data/vendor/kreuzberg-ffi/src/extraction.rs +555 -0
  369. data/vendor/kreuzberg-ffi/src/helpers.rs +879 -0
  370. data/vendor/kreuzberg-ffi/src/lib.rs +977 -0
  371. data/vendor/kreuzberg-ffi/src/memory.rs +493 -0
  372. data/vendor/kreuzberg-ffi/src/mime.rs +329 -0
  373. data/vendor/kreuzberg-ffi/src/panic_shield.rs +265 -0
  374. data/vendor/kreuzberg-ffi/src/plugins/document_extractor.rs +442 -0
  375. data/vendor/kreuzberg-ffi/src/plugins/mod.rs +14 -0
  376. data/vendor/kreuzberg-ffi/src/plugins/ocr_backend.rs +628 -0
  377. data/vendor/kreuzberg-ffi/src/plugins/post_processor.rs +438 -0
  378. data/vendor/kreuzberg-ffi/src/plugins/validator.rs +329 -0
  379. data/vendor/kreuzberg-ffi/src/result.rs +510 -0
  380. data/vendor/kreuzberg-ffi/src/result_pool.rs +639 -0
  381. data/vendor/kreuzberg-ffi/src/result_view.rs +773 -0
  382. data/vendor/kreuzberg-ffi/src/string_intern.rs +568 -0
  383. data/vendor/kreuzberg-ffi/src/types.rs +363 -0
  384. data/vendor/kreuzberg-ffi/src/util.rs +210 -0
  385. data/vendor/kreuzberg-ffi/src/validation.rs +848 -0
  386. data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -0
  387. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -0
  388. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -0
  389. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -0
  390. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -0
  391. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -0
  392. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -0
  393. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -0
  394. data/vendor/kreuzberg-tesseract/Cargo.toml +57 -0
  395. data/vendor/{rb-sys/LICENSE-MIT → kreuzberg-tesseract/LICENSE} +22 -21
  396. data/vendor/kreuzberg-tesseract/README.md +399 -0
  397. data/vendor/kreuzberg-tesseract/build.rs +1127 -0
  398. data/vendor/kreuzberg-tesseract/patches/README.md +71 -0
  399. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -0
  400. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -0
  401. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -0
  402. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -0
  403. data/vendor/kreuzberg-tesseract/src/error.rs +81 -0
  404. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -0
  405. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -0
  406. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -0
  407. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -0
  408. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -0
  409. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -0
  410. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -0
  411. metadata +196 -45
  412. data/vendor/kreuzberg/benches/otel_overhead.rs +0 -48
  413. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +0 -738
  414. data/vendor/rb-sys/.cargo-ok +0 -1
  415. data/vendor/rb-sys/.cargo_vcs_info.json +0 -6
  416. data/vendor/rb-sys/Cargo.lock +0 -393
  417. data/vendor/rb-sys/Cargo.toml +0 -70
  418. data/vendor/rb-sys/Cargo.toml.orig +0 -57
  419. data/vendor/rb-sys/LICENSE-APACHE +0 -190
  420. data/vendor/rb-sys/bin/release.sh +0 -21
  421. data/vendor/rb-sys/build/features.rs +0 -108
  422. data/vendor/rb-sys/build/main.rs +0 -246
  423. data/vendor/rb-sys/build/stable_api_config.rs +0 -153
  424. data/vendor/rb-sys/build/version.rs +0 -48
  425. data/vendor/rb-sys/readme.md +0 -36
  426. data/vendor/rb-sys/src/bindings.rs +0 -21
  427. data/vendor/rb-sys/src/hidden.rs +0 -11
  428. data/vendor/rb-sys/src/lib.rs +0 -34
  429. data/vendor/rb-sys/src/macros.rs +0 -371
  430. data/vendor/rb-sys/src/memory.rs +0 -53
  431. data/vendor/rb-sys/src/ruby_abi_version.rs +0 -38
  432. data/vendor/rb-sys/src/special_consts.rs +0 -31
  433. data/vendor/rb-sys/src/stable_api/compiled.c +0 -179
  434. data/vendor/rb-sys/src/stable_api/compiled.rs +0 -257
  435. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +0 -316
  436. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +0 -316
  437. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +0 -324
  438. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +0 -317
  439. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +0 -315
  440. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +0 -326
  441. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +0 -327
  442. data/vendor/rb-sys/src/stable_api.rs +0 -261
  443. data/vendor/rb-sys/src/symbol.rs +0 -31
  444. data/vendor/rb-sys/src/tracking_allocator.rs +0 -332
  445. data/vendor/rb-sys/src/utils.rs +0 -89
  446. data/vendor/rb-sys/src/value_type.rs +0 -7
data/vendor/kreuzberg/tests/stopwords_integration_test.rs
@@ -1,888 +1,888 @@
1
- //! Integration tests for stopwords with token reduction and keywords extraction.
2
- #![cfg(all(feature = "stopwords", feature = "quality"))]
3
- //!
4
- //! These tests verify that stopwords are properly integrated across different features:
5
- //! - Token reduction at all ReductionLevels
6
- //! - Keywords extraction (YAKE and RAKE algorithms)
7
- //! - CJK text processing
8
- //! - Multi-language documents
9
- //! - Language fallback mechanisms
10
- //! - Custom stopwords
11
-
12
- use kreuzberg::stopwords::{STOPWORDS, get_stopwords, get_stopwords_with_fallback};
13
- use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
14
-
15
- #[cfg(any(feature = "keywords-yake", feature = "keywords-rake"))]
16
- use kreuzberg::keywords::{KeywordConfig, extract_keywords};
17
-
18
- use std::collections::HashMap;
19
-
20
- fn count_stopwords(text: &str, lang: &str) -> usize {
21
- let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
22
- let words: Vec<&str> = text.split_whitespace().collect();
23
-
24
- words
25
- .iter()
26
- .filter(|word| {
27
- let clean = word
28
- .chars()
29
- .filter(|c| c.is_alphabetic())
30
- .collect::<String>()
31
- .to_lowercase();
32
-
33
- !clean.is_empty() && stopwords.contains(&clean)
34
- })
35
- .count()
36
- }
37
-
38
- fn extract_content_words(text: &str, lang: &str) -> Vec<String> {
39
- let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
40
- let words: Vec<&str> = text.split_whitespace().collect();
41
-
42
- words
43
- .iter()
44
- .filter_map(|word| {
45
- let clean = word
46
- .chars()
47
- .filter(|c| c.is_alphabetic())
48
- .collect::<String>()
49
- .to_lowercase();
50
-
51
- if !clean.is_empty() && !stopwords.contains(&clean) && clean.len() > 1 {
52
- Some(clean)
53
- } else {
54
- None
55
- }
56
- })
57
- .collect()
58
- }
59
-
60
- #[test]
61
- fn test_stopwords_removed_during_moderate_token_reduction() {
62
- let config = TokenReductionConfig {
63
- level: ReductionLevel::Moderate,
64
- language_hint: Some("en".to_string()),
65
- use_simd: false,
66
- ..Default::default()
67
- };
68
-
69
- let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
70
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
71
-
72
- assert!(!result.contains(" the "), "Should remove 'the'. Result: {}", result);
73
- assert!(!result.contains(" is "), "Should remove 'is'. Result: {}", result);
74
- assert!(!result.contains(" and "), "Should remove 'and'. Result: {}", result);
75
-
76
- assert!(result.contains("quick"), "Should preserve 'quick'. Result: {}", result);
77
- assert!(result.contains("brown"), "Should preserve 'brown'. Result: {}", result);
78
- assert!(result.contains("fox"), "Should preserve 'fox'. Result: {}", result);
79
- assert!(
80
- result.contains("jumping"),
81
- "Should preserve 'jumping'. Result: {}",
82
- result
83
- );
84
- assert!(result.contains("lazy"), "Should preserve 'lazy'. Result: {}", result);
85
-
86
- let original_stopwords = count_stopwords(input, "en");
87
- let result_stopwords = count_stopwords(&result, "en");
88
-
89
- assert!(
90
- result_stopwords < original_stopwords,
91
- "Result should have fewer stopwords than original. Original: {}, Result: {}",
92
- original_stopwords,
93
- result_stopwords
94
- );
95
- }
96
-
97
- #[test]
98
- fn test_stopwords_across_reduction_levels() {
99
- let text = "The machine learning model is trained on the large dataset and achieves good performance";
100
-
101
- let light_config = TokenReductionConfig {
102
- level: ReductionLevel::Light,
103
- use_simd: false,
104
- ..Default::default()
105
- };
106
- let light_result = reduce_tokens(text, &light_config, Some("en")).unwrap();
107
-
108
- let light_stopwords = count_stopwords(&light_result, "en");
109
- assert!(light_stopwords > 0, "Light reduction should preserve some stopwords");
110
-
111
- let moderate_config = TokenReductionConfig {
112
- level: ReductionLevel::Moderate,
113
- use_simd: false,
114
- ..Default::default()
115
- };
116
- let moderate_result = reduce_tokens(text, &moderate_config, Some("en")).unwrap();
117
-
118
- let moderate_stopwords = count_stopwords(&moderate_result, "en");
119
- assert!(
120
- moderate_stopwords < light_stopwords,
121
- "Moderate reduction should remove more stopwords than light. Light: {}, Moderate: {}",
122
- light_stopwords,
123
- moderate_stopwords
124
- );
125
-
126
- let aggressive_config = TokenReductionConfig {
127
- level: ReductionLevel::Aggressive,
128
- use_simd: false,
129
- ..Default::default()
130
- };
131
- let aggressive_result = reduce_tokens(text, &aggressive_config, Some("en")).unwrap();
132
-
133
- assert!(
134
- aggressive_result.len() <= moderate_result.len(),
135
- "Aggressive reduction should be more aggressive than moderate"
136
- );
137
- }
138
-
139
- #[test]
140
- fn test_stopwords_preserve_semantic_meaning() {
141
- let config = TokenReductionConfig {
142
- level: ReductionLevel::Moderate,
143
- use_simd: false,
144
- ..Default::default()
145
- };
146
-
147
- let input =
148
- "The artificial intelligence system is processing the natural language text for extracting meaningful insights";
149
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
150
-
151
- let content_words = extract_content_words(&result, "en");
152
-
153
- assert!(
154
- content_words.contains(&"artificial".to_string()) || result.contains("artificial"),
155
- "Should preserve 'artificial'. Result: {}",
156
- result
157
- );
158
- assert!(
159
- content_words.contains(&"intelligence".to_string()) || result.contains("intelligence"),
160
- "Should preserve 'intelligence'. Result: {}",
161
- result
162
- );
163
- assert!(
164
- content_words.contains(&"processing".to_string()) || result.contains("processing"),
165
- "Should preserve 'processing'. Result: {}",
166
- result
167
- );
168
- assert!(
169
- content_words.contains(&"natural".to_string()) || result.contains("natural"),
170
- "Should preserve 'natural'. Result: {}",
171
- result
172
- );
173
- assert!(
174
- content_words.contains(&"language".to_string()) || result.contains("language"),
175
- "Should preserve 'language'. Result: {}",
176
- result
177
- );
178
- }
179
-
180
- #[test]
181
- fn test_stopwords_with_multiple_languages() {
182
- let en_config = TokenReductionConfig {
183
- level: ReductionLevel::Moderate,
184
- use_simd: false,
185
- ..Default::default()
186
- };
187
- let en_input = "The computer science program is very comprehensive and includes many courses";
188
- let en_result = reduce_tokens(en_input, &en_config, Some("en")).unwrap();
189
-
190
- let en_original_stopwords = count_stopwords(en_input, "en");
191
- let en_result_stopwords = count_stopwords(&en_result, "en");
192
- assert!(
193
- en_result_stopwords < en_original_stopwords,
194
- "English stopwords should be removed"
195
- );
196
-
197
- let es_config = TokenReductionConfig {
198
- level: ReductionLevel::Moderate,
199
- use_simd: false,
200
- ..Default::default()
201
- };
202
- let es_input = "El programa de ciencias de la computación es muy completo y tiene muchos cursos";
203
- let es_result = reduce_tokens(es_input, &es_config, Some("es")).unwrap();
204
-
205
- let es_original_stopwords = count_stopwords(es_input, "es");
206
- let es_result_stopwords = count_stopwords(&es_result, "es");
207
- assert!(
208
- es_result_stopwords < es_original_stopwords,
209
- "Spanish stopwords should be removed"
210
- );
211
-
212
- assert!(
213
- es_result.contains("programa") || es_result.contains("ciencias") || es_result.contains("computación"),
214
- "Should preserve Spanish content words. Result: {}",
215
- es_result
216
- );
217
-
218
- let de_config = TokenReductionConfig {
219
- level: ReductionLevel::Moderate,
220
- use_simd: false,
221
- ..Default::default()
222
- };
223
- let de_input = "Die künstliche Intelligenz ist ein wichtiges Forschungsgebiet der Informatik";
224
- let de_result = reduce_tokens(de_input, &de_config, Some("de")).unwrap();
225
-
226
- let de_original_stopwords = count_stopwords(de_input, "de");
227
- let de_result_stopwords = count_stopwords(&de_result, "de");
228
- assert!(
229
- de_result_stopwords < de_original_stopwords,
230
- "German stopwords should be removed"
231
- );
232
- }
233
-
234
- #[test]
235
- fn test_language_fallback_to_english_stopwords() {
236
- let config = TokenReductionConfig {
237
- level: ReductionLevel::Moderate,
238
- use_simd: false,
239
- ..Default::default()
240
- };
241
-
242
- let input = "The system is processing the data with the algorithm";
243
- let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
244
-
245
- let original_stopwords = count_stopwords(input, "en");
246
- let result_stopwords = count_stopwords(&result, "en");
247
-
248
- assert!(
249
- result_stopwords < original_stopwords,
250
- "Should fallback to English stopwords for unsupported language"
251
- );
252
- }
253
-
254
- #[test]
255
- fn test_custom_stopwords_integration() {
256
- let mut custom_stopwords = HashMap::new();
257
- custom_stopwords.insert(
258
- "en".to_string(),
259
- vec!["algorithm".to_string(), "system".to_string(), "data".to_string()],
260
- );
261
-
262
- let config = TokenReductionConfig {
263
- level: ReductionLevel::Moderate,
264
- use_simd: false,
265
- custom_stopwords: Some(custom_stopwords),
266
- ..Default::default()
267
- };
268
-
269
- let input = "The algorithm processes the data in the system efficiently";
270
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
271
-
272
- assert!(
273
- !result.contains("algorithm"),
274
- "Should remove custom stopword 'algorithm'. Result: {}",
275
- result
276
- );
277
- assert!(
278
- !result.contains("system"),
279
- "Should remove custom stopword 'system'. Result: {}",
280
- result
281
- );
282
- assert!(
283
- !result.contains("data"),
284
- "Should remove custom stopword 'data'. Result: {}",
285
- result
286
- );
287
-
288
- assert!(
289
- result.contains("processes") || result.contains("efficiently"),
290
- "Should preserve non-stopword content. Result: {}",
291
- result
292
- );
293
- }
294
-
295
- #[test]
296
- fn test_stopwords_with_chinese_text() {
297
- let config = TokenReductionConfig {
298
- level: ReductionLevel::Moderate,
299
- use_simd: false,
300
- ..Default::default()
301
- };
302
-
303
- let input = "这个人工智能系统可以处理自然语言";
304
- let result = reduce_tokens(input, &config, Some("zh")).unwrap();
305
-
306
- assert!(
307
- !result.is_empty(),
308
- "Chinese text should be processed. Result: {}",
309
- result
310
- );
311
-
312
- assert!(
313
- result.contains("人工") || result.contains("智能") || result.contains("语言"),
314
- "Should preserve important Chinese terms. Result: {}",
315
- result
316
- );
317
- }
318
-
319
- #[test]
320
- fn test_stopwords_with_mixed_cjk_english() {
321
- let config = TokenReductionConfig {
322
- level: ReductionLevel::Moderate,
323
- use_simd: false,
324
- ..Default::default()
325
- };
326
-
327
- let input = "The machine learning model 机器学习模型 is processing data efficiently";
328
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
329
-
330
- assert!(
331
- !result.contains(" the ") && !result.contains("The "),
332
- "Should remove English 'the'. Result: {}",
333
- result
334
- );
335
-
336
- assert!(
337
- result.contains("machine") || result.contains("learning"),
338
- "Should preserve English content. Result: {}",
339
- result
340
- );
341
-
342
- assert!(
343
- result.contains("机器") || result.contains("学习") || result.contains("模型"),
344
- "Should preserve Chinese content. Result: {}",
345
- result
346
- );
347
- }
348
-
349
- #[test]
350
- fn test_stopwords_with_japanese_text() {
351
- let config = TokenReductionConfig {
352
- level: ReductionLevel::Moderate,
353
- use_simd: false,
354
- ..Default::default()
355
- };
356
-
357
- let input = "人工知能技術の研究開発";
358
- let result = reduce_tokens(input, &config, Some("ja")).unwrap();
359
-
360
- assert!(
361
- !result.is_empty(),
362
- "Japanese text should be processed. Result: {}",
363
- result
364
- );
365
- }
366
-
367
- #[test]
368
- fn test_stopwords_with_korean_text() {
369
- let config = TokenReductionConfig {
370
- level: ReductionLevel::Moderate,
371
- use_simd: false,
372
- ..Default::default()
373
- };
374
-
375
- let input = "인공 지능 기술 개발";
376
- let result = reduce_tokens(input, &config, Some("ko")).unwrap();
377
-
378
- assert!(
379
- !result.is_empty(),
380
- "Korean text should be processed. Result: {}",
381
- result
382
- );
383
- }
384
-
385
- #[cfg(feature = "keywords-rake")]
386
- #[test]
387
- fn test_stopwords_excluded_from_rake_keywords() {
388
- let text = "The machine learning model is trained on a large dataset. \
389
- The model uses neural networks and deep learning algorithms. \
390
- The training process requires significant computational resources.";
391
-
392
- let config = KeywordConfig::rake().with_language("en").with_max_keywords(10);
393
-
394
- let keywords = extract_keywords(text, &config).unwrap();
395
-
396
- assert!(!keywords.is_empty(), "Should extract keywords");
397
-
398
- let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
399
-
400
- for keyword in &keywords {
401
- let words: Vec<&str> = keyword.text.split_whitespace().collect();
402
-
403
- let all_stopwords = words.iter().all(|word| {
404
- let clean = word
405
- .chars()
406
- .filter(|c| c.is_alphabetic())
407
- .collect::<String>()
408
- .to_lowercase();
409
- en_stopwords.contains(&clean)
410
- });
411
-
412
- assert!(
413
- !all_stopwords,
414
- "Keyword '{}' should not be composed entirely of stopwords",
415
- keyword.text
416
- );
417
- }
418
-
419
- let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
420
-
421
- assert!(
422
- keyword_texts.iter().any(|k| k.contains("machine learning")
423
- || k.contains("neural networks")
424
- || k.contains("deep learning")
425
- || k.contains("dataset")
426
- || k.contains("model")
427
- || k.contains("training")),
428
- "Should extract meaningful technical keywords. Got: {:?}",
429
- keyword_texts
430
- );
431
- }
432
-
433
- #[cfg(feature = "keywords-yake")]
434
- #[test]
435
- fn test_stopwords_excluded_from_yake_keywords() {
436
- let text = "Natural language processing enables computers to understand human language. \
437
- Deep learning models achieve state-of-the-art performance in text analysis. \
438
- These systems can extract meaningful information from large text corpora.";
439
-
440
- let config = KeywordConfig::yake().with_language("en").with_max_keywords(10);
441
-
442
- let keywords = extract_keywords(text, &config).unwrap();
443
-
444
- assert!(!keywords.is_empty(), "Should extract keywords");
445
-
446
- let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
447
-
448
- for keyword in &keywords {
449
- let has_content_word = keyword.text.split_whitespace().any(|word| {
450
- let clean = word
451
- .chars()
452
- .filter(|c| c.is_alphabetic())
453
- .collect::<String>()
454
- .to_lowercase();
455
- !clean.is_empty() && !en_stopwords.contains(&clean)
456
- });
457
-
458
- assert!(
459
- has_content_word,
460
- "Keyword '{}' should contain at least one content word (non-stopword)",
461
- keyword.text
462
- );
463
- }
464
- }
465
-
466
- #[cfg(feature = "keywords-rake")]
467
- #[test]
468
- fn test_keywords_respect_language_specific_stopwords() {
469
- let spanish_text = "El aprendizaje automático es una rama de la inteligencia artificial. \
470
- Los modelos de aprendizaje profundo logran un rendimiento excepcional. \
471
- Estos sistemas pueden procesar grandes cantidades de datos.";
472
-
473
- let config = KeywordConfig::rake().with_language("es").with_max_keywords(8);
474
-
475
- let keywords = extract_keywords(spanish_text, &config).unwrap();
476
-
477
- assert!(!keywords.is_empty(), "Should extract Spanish keywords");
478
-
479
- let es_stopwords = get_stopwords("es").expect("Spanish stopwords must exist");
480
-
481
- for keyword in &keywords {
482
- let words: Vec<&str> = keyword.text.split_whitespace().collect();
483
- let all_stopwords = words.iter().all(|word| {
484
- let clean = word
485
- .chars()
486
- .filter(|c| c.is_alphabetic())
487
- .collect::<String>()
488
- .to_lowercase();
489
- es_stopwords.contains(&clean)
490
- });
491
-
492
- assert!(
493
- !all_stopwords,
494
- "Spanish keyword '{}' should not be all stopwords",
495
- keyword.text
496
- );
497
- }
498
-
499
- let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
500
- assert!(
501
- keyword_texts.iter().any(|k| k.contains("aprendizaje")
502
- || k.contains("inteligencia")
503
- || k.contains("modelos")
504
- || k.contains("datos")),
505
- "Should extract meaningful Spanish keywords. Got: {:?}",
506
- keyword_texts
507
- );
508
- }
509
-
510
- #[test]
511
- fn test_all_stopwords_text_reduction() {
512
- let config = TokenReductionConfig {
513
- level: ReductionLevel::Moderate,
514
- use_simd: false,
515
- ..Default::default()
516
- };
517
-
518
- let input = "the is a an and or but of to in for on at by";
519
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
520
-
521
- assert!(
522
- result.len() < input.len(),
523
- "Text of all stopwords should be significantly reduced"
524
- );
525
- }
526
-
527
- #[test]
528
- fn test_no_stopwords_text_reduction() {
529
- let config = TokenReductionConfig {
530
- level: ReductionLevel::Moderate,
531
- use_simd: false,
532
- ..Default::default()
533
- };
534
-
535
- let input = "PyTorch TensorFlow CUDA GPU optimization benchmark performance metrics";
536
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
537
-
538
- let input_words: Vec<&str> = input.split_whitespace().collect();
539
- let result_lower = result.to_lowercase();
540
-
541
- for word in input_words {
542
- let word_lower = word.to_lowercase();
543
- assert!(
544
- result_lower.contains(&word_lower),
545
- "Technical term '{}' should be preserved. Result: {}",
546
- word,
547
- result
548
- );
549
- }
550
- }
551
-
552
- #[test]
553
- fn test_mixed_case_stopwords_removal() {
554
- let config = TokenReductionConfig {
555
- level: ReductionLevel::Moderate,
556
- use_simd: false,
557
- ..Default::default()
558
- };
559
-
560
- let input = "The SYSTEM Is Processing The DATA With The ALGORITHM";
561
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
562
-
563
- let result_words: Vec<&str> = result.split_whitespace().collect();
564
- assert!(
565
- !result_words.contains(&"the"),
566
- "Should remove lowercase 'the'. Result: {}",
567
- result
568
- );
569
- assert!(
570
- !result_words.contains(&"is"),
571
- "Should remove lowercase 'is'. Result: {}",
572
- result
573
- );
574
-
575
- assert!(
576
- result.contains("SYSTEM"),
577
- "Should preserve 'SYSTEM'. Result: {}",
578
- result
579
- );
580
- assert!(result.contains("DATA"), "Should preserve 'DATA'. Result: {}", result);
581
- assert!(
582
- result.contains("ALGORITHM"),
583
- "Should preserve 'ALGORITHM'. Result: {}",
584
- result
585
- );
586
- }
587
-
588
- #[test]
589
- fn test_reduce_tokens_function_with_stopwords() {
590
- let config = TokenReductionConfig {
591
- level: ReductionLevel::Moderate,
592
- use_simd: false,
593
- ..Default::default()
594
- };
595
-
596
- let text = "The artificial intelligence system processes the natural language efficiently";
597
- let result = reduce_tokens(text, &config, Some("en")).unwrap();
598
-
599
- let original_stopwords = count_stopwords(text, "en");
600
- let result_stopwords = count_stopwords(&result, "en");
601
-
602
- assert!(
603
- result_stopwords < original_stopwords,
604
- "reduce_tokens should remove stopwords. Original: {}, Result: {}",
605
- original_stopwords,
606
- result_stopwords
607
- );
608
-
609
- assert!(
610
- result.contains("artificial") || result.contains("intelligence"),
611
- "Should preserve content words. Result: {}",
612
- result
613
- );
614
- }
615
-
616
- #[test]
617
- fn test_stopwords_with_punctuation() {
618
- let config = TokenReductionConfig {
619
- level: ReductionLevel::Moderate,
620
- use_simd: false,
621
- ..Default::default()
622
- };
623
-
624
- let input = "The system, which is processing the data, uses the algorithm.";
625
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
626
-
627
- assert!(
628
- !result.contains(" the ") || result.split_whitespace().filter(|w| w.contains("the")).count() < 3,
629
- "Should remove most instances of 'the'. Result: {}",
630
- result
631
- );
632
-
633
- assert!(
634
- result.contains("system") || result.contains("processing") || result.contains("algorithm"),
635
- "Should preserve content words. Result: {}",
636
- result
637
- );
638
- }
639
-
640
- #[test]
641
- fn test_stopwords_with_numbers() {
642
- let config = TokenReductionConfig {
643
- level: ReductionLevel::Moderate,
644
- use_simd: false,
645
- ..Default::default()
646
- };
647
-
648
- let input = "The model has 100 layers and processes the data in 10 seconds";
649
- let result = reduce_tokens(input, &config, Some("en")).unwrap();
650
-
651
- assert!(
652
- result.contains("100"),
653
- "Should preserve number '100'. Result: {}",
654
- result
655
- );
656
- assert!(result.contains("10"), "Should preserve number '10'. Result: {}", result);
657
-
658
- assert!(
659
- result.contains("model") || result.contains("layers") || result.contains("processes"),
660
- "Should preserve content words. Result: {}",
661
- result
662
- );
663
- }
664
-
665
- #[test]
666
- fn test_stopwords_removal_consistency_across_calls() {
667
- let config = TokenReductionConfig {
668
- level: ReductionLevel::Moderate,
669
- use_simd: false,
670
- ..Default::default()
671
- };
672
-
673
- let input = "The machine learning model is trained on the dataset";
674
-
675
- let result1 = reduce_tokens(input, &config, Some("en")).unwrap();
676
- let result2 = reduce_tokens(input, &config, Some("en")).unwrap();
677
- let result3 = reduce_tokens(input, &config, Some("en")).unwrap();
678
-
679
- assert_eq!(result1, result2, "Results should be consistent across calls");
680
- assert_eq!(result2, result3, "Results should be consistent across calls");
681
- }
682
-
683
- #[test]
684
- fn test_stopwords_with_long_text() {
685
- let config = TokenReductionConfig {
686
- level: ReductionLevel::Moderate,
687
- use_simd: false,
688
- enable_parallel: false,
689
- ..Default::default()
690
- };
691
-
692
- let paragraph = "The machine learning model is trained on the large dataset. \
693
- The training process uses the neural network architecture. \
694
- The system processes the data efficiently and achieves the best performance. ";
695
- let input = paragraph.repeat(10);
696
-
697
- let result = reduce_tokens(&input, &config, Some("en")).unwrap();
698
-
699
- assert!(
700
- result.len() < input.len(),
701
- "Long stopword-heavy text should be reduced. Input: {} chars, Result: {} chars",
702
- input.len(),
703
- result.len()
704
- );
705
-
706
- let original_stopwords = count_stopwords(&input, "en");
707
- let result_stopwords = count_stopwords(&result, "en");
708
-
709
- assert!(
710
- result_stopwords < original_stopwords,
711
- "Should remove stopwords from long text. Original: {}, Result: {}",
712
- original_stopwords,
713
- result_stopwords
714
- );
715
- }
716
-
717
- #[test]
718
- fn test_get_stopwords_with_fallback_in_reduction() {
719
- let primary_stopwords = get_stopwords_with_fallback("xyz", "en");
720
- assert!(primary_stopwords.is_some(), "Should fallback to English");
721
-
722
- let en_stopwords = get_stopwords("en").unwrap();
723
- assert_eq!(
724
- primary_stopwords.unwrap().len(),
725
- en_stopwords.len(),
726
- "Fallback should return English stopwords"
727
- );
728
-
729
- let config = TokenReductionConfig {
730
- level: ReductionLevel::Moderate,
731
- use_simd: false,
732
- ..Default::default()
733
- };
734
-
735
- let input = "The system is processing the data";
736
- let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
737
-
738
- assert!(
739
- !result.contains(" the ") && !result.contains(" is "),
740
- "Should use fallback stopwords. Result: {}",
741
- result
742
- );
743
- }
744
-
745
- #[test]
746
- fn test_stopwords_registry_completeness() {
747
- assert_eq!(STOPWORDS.len(), 64, "Should have exactly 64 language stopword sets");
748
-
749
- let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
750
- assert!(en_stopwords.len() >= 70, "English should have at least 70 stopwords");
751
-
752
- assert!(en_stopwords.contains("the"), "Should contain 'the'");
753
- assert!(en_stopwords.contains("is"), "Should contain 'is'");
754
- assert!(en_stopwords.contains("and"), "Should contain 'and'");
755
- assert!(en_stopwords.contains("a"), "Should contain 'a'");
756
- assert!(en_stopwords.contains("an"), "Should contain 'an'");
757
- assert!(en_stopwords.contains("of"), "Should contain 'of'");
758
- assert!(en_stopwords.contains("to"), "Should contain 'to'");
759
- assert!(en_stopwords.contains("in"), "Should contain 'in'");
760
- assert!(en_stopwords.contains("for"), "Should contain 'for'");
761
- }
762
-
763
- #[test]
764
- fn test_token_reduction_handles_nan_threshold() {
765
- let mut config = TokenReductionConfig {
766
- level: ReductionLevel::Maximum,
767
- semantic_threshold: f32::NAN,
768
- enable_semantic_clustering: true,
769
- target_reduction: Some(0.5),
770
- ..Default::default()
771
- };
772
-
773
- config.language_hint = Some("en".to_string());
774
- let input = "Critical system update highlights performance improvements across distributed modules.";
775
-
776
- let result = reduce_tokens(input, &config, Some("en")).unwrap_or_else(|_| String::new());
777
- assert!(
778
- result.chars().all(|c| !c.is_control()),
779
- "Result should not contain unexpected control characters"
780
- );
781
- }
782
-
783
- #[test]
784
- fn test_token_reduction_handles_multibyte_utf8() {
785
- let config = TokenReductionConfig {
786
- level: ReductionLevel::Moderate,
787
- language_hint: Some("ja".to_string()),
788
- ..Default::default()
789
- };
790
-
791
- let input = "品質管理は重要です。🚀 高速抽出と漢字処理が求められています。";
792
- let result = reduce_tokens(input, &config, Some("ja")).unwrap();
793
-
794
- assert!(
795
- result.contains("品質管理") || result.contains("漢字処理"),
796
- "Important multibyte terms should survive reduction: {}",
797
- result
798
- );
799
- }
800
-
801
- #[test]
802
- fn test_token_reduction_concurrent_access() {
803
- use std::sync::Arc;
804
-
805
- let config = Arc::new(TokenReductionConfig {
806
- level: ReductionLevel::Aggressive,
807
- enable_parallel: true,
808
- ..Default::default()
809
- });
810
-
811
- let input = "Concurrent reduction ensures thread safety without deadlocks or panics.";
812
-
813
- std::thread::scope(|scope| {
814
- for _ in 0..8 {
815
- let cfg = Arc::clone(&config);
816
- scope.spawn(move || {
817
- let reduced = reduce_tokens(input, &cfg, Some("en")).unwrap();
818
- assert!(!reduced.is_empty());
819
- });
820
- }
821
- });
822
- }
823
- #[test]
824
- fn demo_stopwords_effectiveness() {
825
- use kreuzberg::stopwords::get_stopwords;
826
- use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
827
-
828
- let en_text = "The machine learning model is trained on the large dataset and achieves good performance";
829
- let en_config = TokenReductionConfig {
830
- level: ReductionLevel::Moderate,
831
- use_simd: false,
832
- ..Default::default()
833
- };
834
- let en_result = reduce_tokens(en_text, &en_config, Some("en")).unwrap();
835
-
836
- println!("\n=== English Example ===");
837
- println!("BEFORE: {} chars", en_text.len());
838
- println!("{}", en_text);
839
- println!(
840
- "\nAFTER: {} chars ({}% reduction)",
841
- en_result.len(),
842
- 100 - (en_result.len() * 100 / en_text.len())
843
- );
844
- println!("{}", en_result);
845
-
846
- let zh_text = "这个人工智能系统可以处理自然语言";
847
- let zh_config = TokenReductionConfig {
848
- level: ReductionLevel::Moderate,
849
- use_simd: false,
850
- ..Default::default()
851
- };
852
- let zh_result = reduce_tokens(zh_text, &zh_config, Some("zh")).unwrap();
853
-
854
- println!("\n=== Chinese Example ===");
855
- println!("BEFORE: {}", zh_text);
856
- println!("AFTER: {}", zh_result);
857
-
858
- let text = "The artificial intelligence system processes the natural language efficiently";
859
-
860
- println!("\n=== Reduction Level Comparison ===");
861
- println!("ORIGINAL: {}", text);
862
-
863
- for level in [
864
- ReductionLevel::Light,
865
- ReductionLevel::Moderate,
866
- ReductionLevel::Aggressive,
867
- ] {
868
- let config = TokenReductionConfig {
869
- level,
870
- use_simd: false,
871
- ..Default::default()
872
- };
873
- let result = reduce_tokens(text, &config, Some("en")).unwrap();
874
- println!(
875
- "{:?}: {} chars -> {} chars ({}% reduction)",
876
- level,
877
- text.len(),
878
- result.len(),
879
- 100 - (result.len() * 100 / text.len())
880
- );
881
- println!(" {}", result);
882
- }
883
-
884
- let stopwords = get_stopwords("en").unwrap();
885
- println!("\n=== Stopwords Stats ===");
886
- println!("English stopwords: {}", stopwords.len());
887
- println!("Sample stopwords: {:?}", stopwords.iter().take(10).collect::<Vec<_>>());
888
- }
+ //! Integration tests for stopwords with token reduction and keywords extraction.
+ //!
+ //! These tests verify that stopwords are properly integrated across different features:
+ //! - Token reduction at all ReductionLevels
+ //! - Keywords extraction (YAKE and RAKE algorithms)
+ //! - CJK text processing
+ //! - Multi-language documents
+ //! - Language fallback mechanisms
+ //! - Custom stopwords
+ #![cfg(all(feature = "stopwords", feature = "quality"))]
+
+ use kreuzberg::stopwords::{STOPWORDS, get_stopwords, get_stopwords_with_fallback};
+ use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
+
+ #[cfg(any(feature = "keywords-yake", feature = "keywords-rake"))]
+ use kreuzberg::keywords::{KeywordConfig, extract_keywords};
+
+ use std::collections::HashMap;
+
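For orientation, the smallest end-to-end call this suite exercises looks roughly like the sketch below. It assumes a build with the `stopwords` and `quality` features enabled; the sample sentence is illustrative, not taken from the tests.

use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};

fn main() {
    // Moderate reduction with an explicit language hint; remaining fields default.
    let config = TokenReductionConfig {
        level: ReductionLevel::Moderate,
        language_hint: Some("en".to_string()),
        ..Default::default()
    };

    // Stopwords such as "the" and "is" are dropped; content words survive.
    let reduced = reduce_tokens("The parser is reading the config file", &config, Some("en"))
        .expect("reduction should succeed for supported languages");
    println!("{reduced}");
}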
+ fn count_stopwords(text: &str, lang: &str) -> usize {
+     let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
+     let words: Vec<&str> = text.split_whitespace().collect();
+
+     words
+         .iter()
+         .filter(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+
+             !clean.is_empty() && stopwords.contains(&clean)
+         })
+         .count()
+ }
+
+ fn extract_content_words(text: &str, lang: &str) -> Vec<String> {
+     let stopwords = get_stopwords(lang).expect("Stopwords must exist for language");
+     let words: Vec<&str> = text.split_whitespace().collect();
+
+     words
+         .iter()
+         .filter_map(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+
+             if !clean.is_empty() && !stopwords.contains(&clean) && clean.len() > 1 {
+                 Some(clean)
+             } else {
+                 None
+             }
+         })
+         .collect()
+ }
+
+ #[test]
+ fn test_stopwords_removed_during_moderate_token_reduction() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         language_hint: Some("en".to_string()),
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(!result.contains(" the "), "Should remove 'the'. Result: {}", result);
+     assert!(!result.contains(" is "), "Should remove 'is'. Result: {}", result);
+     assert!(!result.contains(" and "), "Should remove 'and'. Result: {}", result);
+
+     assert!(result.contains("quick"), "Should preserve 'quick'. Result: {}", result);
+     assert!(result.contains("brown"), "Should preserve 'brown'. Result: {}", result);
+     assert!(result.contains("fox"), "Should preserve 'fox'. Result: {}", result);
+     assert!(
+         result.contains("jumping"),
+         "Should preserve 'jumping'. Result: {}",
+         result
+     );
+     assert!(result.contains("lazy"), "Should preserve 'lazy'. Result: {}", result);
+
+     let original_stopwords = count_stopwords(input, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "Result should have fewer stopwords than original. Original: {}, Result: {}",
+         original_stopwords,
+         result_stopwords
+     );
+ }
+
+ #[test]
+ fn test_stopwords_across_reduction_levels() {
+     let text = "The machine learning model is trained on the large dataset and achieves good performance";
+
+     let light_config = TokenReductionConfig {
+         level: ReductionLevel::Light,
+         use_simd: false,
+         ..Default::default()
+     };
+     let light_result = reduce_tokens(text, &light_config, Some("en")).unwrap();
+
+     let light_stopwords = count_stopwords(&light_result, "en");
+     assert!(light_stopwords > 0, "Light reduction should preserve some stopwords");
+
+     let moderate_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let moderate_result = reduce_tokens(text, &moderate_config, Some("en")).unwrap();
+
+     let moderate_stopwords = count_stopwords(&moderate_result, "en");
+     assert!(
+         moderate_stopwords < light_stopwords,
+         "Moderate reduction should remove more stopwords than light. Light: {}, Moderate: {}",
+         light_stopwords,
+         moderate_stopwords
+     );
+
+     let aggressive_config = TokenReductionConfig {
+         level: ReductionLevel::Aggressive,
+         use_simd: false,
+         ..Default::default()
+     };
+     let aggressive_result = reduce_tokens(text, &aggressive_config, Some("en")).unwrap();
+
+     assert!(
+         aggressive_result.len() <= moderate_result.len(),
+         "Aggressive reduction should reduce the text at least as much as moderate"
+     );
+ }
+
+ #[test]
+ fn test_stopwords_preserve_semantic_meaning() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input =
+         "The artificial intelligence system is processing the natural language text for extracting meaningful insights";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let content_words = extract_content_words(&result, "en");
+
+     assert!(
+         content_words.contains(&"artificial".to_string()) || result.contains("artificial"),
+         "Should preserve 'artificial'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"intelligence".to_string()) || result.contains("intelligence"),
+         "Should preserve 'intelligence'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"processing".to_string()) || result.contains("processing"),
+         "Should preserve 'processing'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"natural".to_string()) || result.contains("natural"),
+         "Should preserve 'natural'. Result: {}",
+         result
+     );
+     assert!(
+         content_words.contains(&"language".to_string()) || result.contains("language"),
+         "Should preserve 'language'. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_multiple_languages() {
+     let en_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let en_input = "The computer science program is very comprehensive and includes many courses";
+     let en_result = reduce_tokens(en_input, &en_config, Some("en")).unwrap();
+
+     let en_original_stopwords = count_stopwords(en_input, "en");
+     let en_result_stopwords = count_stopwords(&en_result, "en");
+     assert!(
+         en_result_stopwords < en_original_stopwords,
+         "English stopwords should be removed"
+     );
+
+     let es_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let es_input = "El programa de ciencias de la computación es muy completo y tiene muchos cursos";
+     let es_result = reduce_tokens(es_input, &es_config, Some("es")).unwrap();
+
+     let es_original_stopwords = count_stopwords(es_input, "es");
+     let es_result_stopwords = count_stopwords(&es_result, "es");
+     assert!(
+         es_result_stopwords < es_original_stopwords,
+         "Spanish stopwords should be removed"
+     );
+
+     assert!(
+         es_result.contains("programa") || es_result.contains("ciencias") || es_result.contains("computación"),
+         "Should preserve Spanish content words. Result: {}",
+         es_result
+     );
+
+     let de_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let de_input = "Die künstliche Intelligenz ist ein wichtiges Forschungsgebiet der Informatik";
+     let de_result = reduce_tokens(de_input, &de_config, Some("de")).unwrap();
+
+     let de_original_stopwords = count_stopwords(de_input, "de");
+     let de_result_stopwords = count_stopwords(&de_result, "de");
+     assert!(
+         de_result_stopwords < de_original_stopwords,
+         "German stopwords should be removed"
+     );
+ }
+
+ #[test]
+ fn test_language_fallback_to_english_stopwords() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system is processing the data with the algorithm";
+     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
+
+     let original_stopwords = count_stopwords(input, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "Should fall back to English stopwords for an unsupported language"
+     );
+ }
+
+ #[test]
+ fn test_custom_stopwords_integration() {
+     let mut custom_stopwords = HashMap::new();
+     custom_stopwords.insert(
+         "en".to_string(),
+         vec!["algorithm".to_string(), "system".to_string(), "data".to_string()],
+     );
+
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         custom_stopwords: Some(custom_stopwords),
+         ..Default::default()
+     };
+
+     let input = "The algorithm processes the data in the system efficiently";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains("algorithm"),
+         "Should remove custom stopword 'algorithm'. Result: {}",
+         result
+     );
+     assert!(
+         !result.contains("system"),
+         "Should remove custom stopword 'system'. Result: {}",
+         result
+     );
+     assert!(
+         !result.contains("data"),
+         "Should remove custom stopword 'data'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("processes") || result.contains("efficiently"),
+         "Should preserve non-stopword content. Result: {}",
+         result
+     );
+ }
+
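A caller-side sketch of wiring a domain lexicon through `custom_stopwords`, keyed by language code as in the test above. The `jargon` terms are hypothetical, and treating custom entries as additive to the built-in English set is inferred from the assertions rather than from documented behavior.

use std::collections::HashMap;

use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};

fn reduce_with_domain_terms(text: &str) -> String {
    // Hypothetical domain jargon to drop alongside the built-in English stopwords.
    let jargon = vec!["widget".to_string(), "sprocket".to_string()];

    let mut custom = HashMap::new();
    custom.insert("en".to_string(), jargon);

    let config = TokenReductionConfig {
        level: ReductionLevel::Moderate,
        custom_stopwords: Some(custom),
        ..Default::default()
    };

    reduce_tokens(text, &config, Some("en")).unwrap()
}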
+ #[test]
+ fn test_stopwords_with_chinese_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "这个人工智能系统可以处理自然语言";
+     let result = reduce_tokens(input, &config, Some("zh")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Chinese text should be processed. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("人工") || result.contains("智能") || result.contains("语言"),
+         "Should preserve important Chinese terms. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_mixed_cjk_english() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The machine learning model 机器学习模型 is processing data efficiently";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains(" the ") && !result.contains("The "),
+         "Should remove English 'the'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("machine") || result.contains("learning"),
+         "Should preserve English content. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("机器") || result.contains("学习") || result.contains("模型"),
+         "Should preserve Chinese content. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_japanese_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "人工知能技術の研究開発";
+     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Japanese text should be processed. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_korean_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "인공 지능 기술 개발";
+     let result = reduce_tokens(input, &config, Some("ko")).unwrap();
+
+     assert!(
+         !result.is_empty(),
+         "Korean text should be processed. Result: {}",
+         result
+     );
+ }
+
+ #[cfg(feature = "keywords-rake")]
+ #[test]
+ fn test_stopwords_excluded_from_rake_keywords() {
+     let text = "The machine learning model is trained on a large dataset. \
+                 The model uses neural networks and deep learning algorithms. \
+                 The training process requires significant computational resources.";
+
+     let config = KeywordConfig::rake().with_language("en").with_max_keywords(10);
+
+     let keywords = extract_keywords(text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract keywords");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+
+     for keyword in &keywords {
+         let words: Vec<&str> = keyword.text.split_whitespace().collect();
+
+         let all_stopwords = words.iter().all(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             en_stopwords.contains(&clean)
+         });
+
+         assert!(
+             !all_stopwords,
+             "Keyword '{}' should not be composed entirely of stopwords",
+             keyword.text
+         );
+     }
+
+     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
+
+     assert!(
+         keyword_texts.iter().any(|k| k.contains("machine learning")
+             || k.contains("neural networks")
+             || k.contains("deep learning")
+             || k.contains("dataset")
+             || k.contains("model")
+             || k.contains("training")),
+         "Should extract meaningful technical keywords. Got: {:?}",
+         keyword_texts
+     );
+ }
+
+ #[cfg(feature = "keywords-yake")]
+ #[test]
+ fn test_stopwords_excluded_from_yake_keywords() {
+     let text = "Natural language processing enables computers to understand human language. \
+                 Deep learning models achieve state-of-the-art performance in text analysis. \
+                 These systems can extract meaningful information from large text corpora.";
+
+     let config = KeywordConfig::yake().with_language("en").with_max_keywords(10);
+
+     let keywords = extract_keywords(text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract keywords");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+
+     for keyword in &keywords {
+         let has_content_word = keyword.text.split_whitespace().any(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             !clean.is_empty() && !en_stopwords.contains(&clean)
+         });
+
+         assert!(
+             has_content_word,
+             "Keyword '{}' should contain at least one content word (non-stopword)",
+             keyword.text
+         );
+     }
+ }
+
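The suite checks token reduction and keyword extraction separately; the sketch below chains them by feeding reduced text into RAKE. This composition is illustrative only (RAKE uses stopwords as phrase boundaries, so pre-reducing the input changes its phrasing) and assumes a build with the `keywords-rake` feature.

use kreuzberg::keywords::{KeywordConfig, extract_keywords};
use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};

fn keywords_from_reduced(text: &str) -> Vec<String> {
    // First pass: drop stopwords so the scorer sees mostly content words.
    let reduction = TokenReductionConfig {
        level: ReductionLevel::Moderate,
        ..Default::default()
    };
    let reduced = reduce_tokens(text, &reduction, Some("en")).unwrap();

    // Second pass: RAKE keyword extraction over the reduced text.
    let config = KeywordConfig::rake().with_language("en").with_max_keywords(5);
    extract_keywords(&reduced, &config)
        .unwrap()
        .into_iter()
        .map(|k| k.text.to_string())
        .collect()
}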
+ #[cfg(feature = "keywords-rake")]
+ #[test]
+ fn test_keywords_respect_language_specific_stopwords() {
+     let spanish_text = "El aprendizaje automático es una rama de la inteligencia artificial. \
+                        Los modelos de aprendizaje profundo logran un rendimiento excepcional. \
+                        Estos sistemas pueden procesar grandes cantidades de datos.";
+
+     let config = KeywordConfig::rake().with_language("es").with_max_keywords(8);
+
+     let keywords = extract_keywords(spanish_text, &config).unwrap();
+
+     assert!(!keywords.is_empty(), "Should extract Spanish keywords");
+
+     let es_stopwords = get_stopwords("es").expect("Spanish stopwords must exist");
+
+     for keyword in &keywords {
+         let words: Vec<&str> = keyword.text.split_whitespace().collect();
+         let all_stopwords = words.iter().all(|word| {
+             let clean = word
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             es_stopwords.contains(&clean)
+         });
+
+         assert!(
+             !all_stopwords,
+             "Spanish keyword '{}' should not be all stopwords",
+             keyword.text
+         );
+     }
+
+     let keyword_texts: Vec<String> = keywords.iter().map(|k| k.text.to_lowercase()).collect();
+     assert!(
+         keyword_texts.iter().any(|k| k.contains("aprendizaje")
+             || k.contains("inteligencia")
+             || k.contains("modelos")
+             || k.contains("datos")),
+         "Should extract meaningful Spanish keywords. Got: {:?}",
+         keyword_texts
+     );
+ }
+
+ #[test]
+ fn test_all_stopwords_text_reduction() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "the is a an and or but of to in for on at by";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.len() < input.len(),
+         "Text of all stopwords should be significantly reduced"
+     );
+ }
+
+ #[test]
+ fn test_no_stopwords_text_reduction() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "PyTorch TensorFlow CUDA GPU optimization benchmark performance metrics";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let input_words: Vec<&str> = input.split_whitespace().collect();
+     let result_lower = result.to_lowercase();
+
+     for word in input_words {
+         let word_lower = word.to_lowercase();
+         assert!(
+             result_lower.contains(&word_lower),
+             "Technical term '{}' should be preserved. Result: {}",
+             word,
+             result
+         );
+     }
+ }
+
+ #[test]
+ fn test_mixed_case_stopwords_removal() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The SYSTEM Is Processing The DATA With The ALGORITHM";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     let result_words: Vec<&str> = result.split_whitespace().collect();
+     assert!(
+         !result_words.contains(&"the"),
+         "Should remove lowercase 'the'. Result: {}",
+         result
+     );
+     assert!(
+         !result_words.contains(&"is"),
+         "Should remove lowercase 'is'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("SYSTEM"),
+         "Should preserve 'SYSTEM'. Result: {}",
+         result
+     );
+     assert!(result.contains("DATA"), "Should preserve 'DATA'. Result: {}", result);
+     assert!(
+         result.contains("ALGORITHM"),
+         "Should preserve 'ALGORITHM'. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_reduce_tokens_function_with_stopwords() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let text = "The artificial intelligence system processes the natural language efficiently";
+     let result = reduce_tokens(text, &config, Some("en")).unwrap();
+
+     let original_stopwords = count_stopwords(text, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "reduce_tokens should remove stopwords. Original: {}, Result: {}",
+         original_stopwords,
+         result_stopwords
+     );
+
+     assert!(
+         result.contains("artificial") || result.contains("intelligence"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_punctuation() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system, which is processing the data, uses the algorithm.";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         !result.contains(" the ") || result.split_whitespace().filter(|w| w.contains("the")).count() < 3,
+         "Should remove most instances of 'the'. Result: {}",
+         result
+     );
+
+     assert!(
+         result.contains("system") || result.contains("processing") || result.contains("algorithm"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_with_numbers() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The model has 100 layers and processes the data in 10 seconds";
+     let result = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.contains("100"),
+         "Should preserve number '100'. Result: {}",
+         result
+     );
+     assert!(result.contains("10"), "Should preserve number '10'. Result: {}", result);
+
+     assert!(
+         result.contains("model") || result.contains("layers") || result.contains("processes"),
+         "Should preserve content words. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_removal_consistency_across_calls() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The machine learning model is trained on the dataset";
+
+     let result1 = reduce_tokens(input, &config, Some("en")).unwrap();
+     let result2 = reduce_tokens(input, &config, Some("en")).unwrap();
+     let result3 = reduce_tokens(input, &config, Some("en")).unwrap();
+
+     assert_eq!(result1, result2, "Results should be consistent across calls");
+     assert_eq!(result2, result3, "Results should be consistent across calls");
+ }
+
+ #[test]
+ fn test_stopwords_with_long_text() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         enable_parallel: false,
+         ..Default::default()
+     };
+
+     let paragraph = "The machine learning model is trained on the large dataset. \
+                      The training process uses the neural network architecture. \
+                      The system processes the data efficiently and achieves the best performance. ";
+     let input = paragraph.repeat(10);
+
+     let result = reduce_tokens(&input, &config, Some("en")).unwrap();
+
+     assert!(
+         result.len() < input.len(),
+         "Long stopword-heavy text should be reduced. Input: {} chars, Result: {} chars",
+         input.len(),
+         result.len()
+     );
+
+     let original_stopwords = count_stopwords(&input, "en");
+     let result_stopwords = count_stopwords(&result, "en");
+
+     assert!(
+         result_stopwords < original_stopwords,
+         "Should remove stopwords from long text. Original: {}, Result: {}",
+         original_stopwords,
+         result_stopwords
+     );
+ }
+
+ #[test]
+ fn test_get_stopwords_with_fallback_in_reduction() {
+     let primary_stopwords = get_stopwords_with_fallback("xyz", "en");
+     assert!(primary_stopwords.is_some(), "Should fall back to English");
+
+     let en_stopwords = get_stopwords("en").unwrap();
+     assert_eq!(
+         primary_stopwords.unwrap().len(),
+         en_stopwords.len(),
+         "Fallback should return English stopwords"
+     );
+
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+
+     let input = "The system is processing the data";
+     let result = reduce_tokens(input, &config, Some("xyz")).unwrap();
+
+     assert!(
+         !result.contains(" the ") && !result.contains(" is "),
+         "Should use fallback stopwords. Result: {}",
+         result
+     );
+ }
+
+ #[test]
+ fn test_stopwords_registry_completeness() {
+     assert_eq!(STOPWORDS.len(), 64, "Should have exactly 64 language stopword sets");
+
+     let en_stopwords = get_stopwords("en").expect("English stopwords must exist");
+     assert!(en_stopwords.len() >= 70, "English should have at least 70 stopwords");
+
+     assert!(en_stopwords.contains("the"), "Should contain 'the'");
+     assert!(en_stopwords.contains("is"), "Should contain 'is'");
+     assert!(en_stopwords.contains("and"), "Should contain 'and'");
+     assert!(en_stopwords.contains("a"), "Should contain 'a'");
+     assert!(en_stopwords.contains("an"), "Should contain 'an'");
+     assert!(en_stopwords.contains("of"), "Should contain 'of'");
+     assert!(en_stopwords.contains("to"), "Should contain 'to'");
+     assert!(en_stopwords.contains("in"), "Should contain 'in'");
+     assert!(en_stopwords.contains("for"), "Should contain 'for'");
+ }
+
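Beyond the fixed registry count asserted above, application code can probe language coverage with the same lookup functions the tests use; the language sample below is arbitrary.

use kreuzberg::stopwords::{get_stopwords, get_stopwords_with_fallback};

fn main() {
    // Unsupported codes return None from get_stopwords.
    for lang in ["en", "es", "de", "zh", "xx"] {
        match get_stopwords(lang) {
            Some(set) => println!("{lang}: {} stopwords", set.len()),
            None => println!("{lang}: unsupported"),
        }
    }

    // Fall back to English instead of skipping filtering entirely.
    let set = get_stopwords_with_fallback("xx", "en").expect("English fallback must exist");
    assert!(set.len() >= 70);
}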
+ #[test]
+ fn test_token_reduction_handles_nan_threshold() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Maximum,
+         semantic_threshold: f32::NAN,
+         enable_semantic_clustering: true,
+         target_reduction: Some(0.5),
+         language_hint: Some("en".to_string()),
+         ..Default::default()
+     };
+
+     let input = "Critical system update highlights performance improvements across distributed modules.";
+
+     let result = reduce_tokens(input, &config, Some("en")).unwrap_or_else(|_| String::new());
+     assert!(
+         result.chars().all(|c| !c.is_control()),
+         "Result should not contain unexpected control characters"
+     );
+ }
+
+ #[test]
+ fn test_token_reduction_handles_multibyte_utf8() {
+     let config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         language_hint: Some("ja".to_string()),
+         ..Default::default()
+     };
+
+     let input = "品質管理は重要です。🚀 高速抽出と漢字処理が求められています。";
+     let result = reduce_tokens(input, &config, Some("ja")).unwrap();
+
+     assert!(
+         result.contains("品質管理") || result.contains("漢字処理"),
+         "Important multibyte terms should survive reduction: {}",
+         result
+     );
+ }
+
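One caveat when reasoning about reduction ratios on multibyte input like the Japanese sample above: `str::len` counts UTF-8 bytes, not characters, so byte-based percentages (as printed by the demo test at the end of this file) are only character-accurate for ASCII. A plain-std illustration:

fn main() {
    let ascii = "quality control";
    let cjk = "品質管理";

    // For ASCII, bytes == chars; each of these CJK characters is 3 bytes in UTF-8.
    assert_eq!(ascii.len(), ascii.chars().count());
    assert_eq!(cjk.len(), 12);
    assert_eq!(cjk.chars().count(), 4);
}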
+ #[test]
+ fn test_token_reduction_concurrent_access() {
+     use std::sync::Arc;
+
+     let config = Arc::new(TokenReductionConfig {
+         level: ReductionLevel::Aggressive,
+         enable_parallel: true,
+         ..Default::default()
+     });
+
+     let input = "Concurrent reduction ensures thread safety without deadlocks or panics.";
+
+     std::thread::scope(|scope| {
+         for _ in 0..8 {
+             let cfg = Arc::clone(&config);
+             scope.spawn(move || {
+                 let reduced = reduce_tokens(input, &cfg, Some("en")).unwrap();
+                 assert!(!reduced.is_empty());
+             });
+         }
+     });
+ }
+
+ #[test]
+ fn demo_stopwords_effectiveness() {
+     use kreuzberg::stopwords::get_stopwords;
+     use kreuzberg::text::token_reduction::{ReductionLevel, TokenReductionConfig, reduce_tokens};
+
+     let en_text = "The machine learning model is trained on the large dataset and achieves good performance";
+     let en_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let en_result = reduce_tokens(en_text, &en_config, Some("en")).unwrap();
+
+     println!("\n=== English Example ===");
+     println!("BEFORE: {} chars", en_text.len());
+     println!("{}", en_text);
+     println!(
+         "\nAFTER: {} chars ({}% reduction)",
+         en_result.len(),
+         100 - (en_result.len() * 100 / en_text.len())
+     );
+     println!("{}", en_result);
+
+     let zh_text = "这个人工智能系统可以处理自然语言";
+     let zh_config = TokenReductionConfig {
+         level: ReductionLevel::Moderate,
+         use_simd: false,
+         ..Default::default()
+     };
+     let zh_result = reduce_tokens(zh_text, &zh_config, Some("zh")).unwrap();
+
+     println!("\n=== Chinese Example ===");
+     println!("BEFORE: {}", zh_text);
+     println!("AFTER: {}", zh_result);
+
+     let text = "The artificial intelligence system processes the natural language efficiently";
+
+     println!("\n=== Reduction Level Comparison ===");
+     println!("ORIGINAL: {}", text);
+
+     for level in [
+         ReductionLevel::Light,
+         ReductionLevel::Moderate,
+         ReductionLevel::Aggressive,
+     ] {
+         let config = TokenReductionConfig {
+             level,
+             use_simd: false,
+             ..Default::default()
+         };
+         let result = reduce_tokens(text, &config, Some("en")).unwrap();
+         println!(
+             "{:?}: {} chars -> {} chars ({}% reduction)",
+             level,
+             text.len(),
+             result.len(),
+             100 - (result.len() * 100 / text.len())
+         );
+         println!(" {}", result);
+     }
+
+     let stopwords = get_stopwords("en").unwrap();
+     println!("\n=== Stopwords Stats ===");
+     println!("English stopwords: {}", stopwords.len());
+     println!("Sample stopwords: {:?}", stopwords.iter().take(10).collect::<Vec<_>>());
+ }