kreuzberg 4.0.0.rc1 → 4.0.0.rc2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (342)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -8
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -534
  5. data/.rubocop.yml +538 -0
  6. data/Gemfile +8 -9
  7. data/Gemfile.lock +9 -109
  8. data/README.md +426 -421
  9. data/Rakefile +25 -25
  10. data/Steepfile +47 -47
  11. data/examples/async_patterns.rb +341 -340
  12. data/ext/kreuzberg_rb/extconf.rb +45 -35
  13. data/ext/kreuzberg_rb/native/Cargo.lock +6535 -0
  14. data/ext/kreuzberg_rb/native/Cargo.toml +44 -36
  15. data/ext/kreuzberg_rb/native/README.md +425 -425
  16. data/ext/kreuzberg_rb/native/build.rs +15 -17
  17. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  18. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  19. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  20. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  21. data/ext/kreuzberg_rb/native/src/lib.rs +2998 -2939
  22. data/extconf.rb +28 -28
  23. data/kreuzberg.gemspec +148 -105
  24. data/lib/kreuzberg/api_proxy.rb +142 -142
  25. data/lib/kreuzberg/cache_api.rb +46 -45
  26. data/lib/kreuzberg/cli.rb +55 -55
  27. data/lib/kreuzberg/cli_proxy.rb +127 -127
  28. data/lib/kreuzberg/config.rb +691 -684
  29. data/lib/kreuzberg/error_context.rb +32 -0
  30. data/lib/kreuzberg/errors.rb +118 -50
  31. data/lib/kreuzberg/extraction_api.rb +85 -84
  32. data/lib/kreuzberg/mcp_proxy.rb +186 -186
  33. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
  34. data/lib/kreuzberg/post_processor_protocol.rb +86 -86
  35. data/lib/kreuzberg/result.rb +216 -216
  36. data/lib/kreuzberg/setup_lib_path.rb +80 -79
  37. data/lib/kreuzberg/validator_protocol.rb +89 -89
  38. data/lib/kreuzberg/version.rb +5 -5
  39. data/lib/kreuzberg.rb +103 -82
  40. data/sig/kreuzberg/internal.rbs +184 -184
  41. data/sig/kreuzberg.rbs +520 -468
  42. data/spec/binding/cache_spec.rb +227 -227
  43. data/spec/binding/cli_proxy_spec.rb +85 -87
  44. data/spec/binding/cli_spec.rb +55 -54
  45. data/spec/binding/config_spec.rb +345 -345
  46. data/spec/binding/config_validation_spec.rb +283 -283
  47. data/spec/binding/error_handling_spec.rb +213 -213
  48. data/spec/binding/errors_spec.rb +66 -66
  49. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  50. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  51. data/spec/binding/plugins/validator_spec.rb +274 -274
  52. data/spec/fixtures/config.toml +39 -39
  53. data/spec/fixtures/config.yaml +41 -42
  54. data/spec/fixtures/invalid_config.toml +4 -4
  55. data/spec/smoke/package_spec.rb +178 -178
  56. data/spec/spec_helper.rb +42 -42
  57. data/vendor/kreuzberg/Cargo.toml +204 -134
  58. data/vendor/kreuzberg/README.md +175 -175
  59. data/vendor/kreuzberg/benches/otel_overhead.rs +48 -0
  60. data/vendor/kreuzberg/build.rs +474 -460
  61. data/vendor/kreuzberg/src/api/error.rs +81 -81
  62. data/vendor/kreuzberg/src/api/handlers.rs +199 -199
  63. data/vendor/kreuzberg/src/api/mod.rs +79 -79
  64. data/vendor/kreuzberg/src/api/server.rs +353 -353
  65. data/vendor/kreuzberg/src/api/types.rs +170 -170
  66. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1143
  67. data/vendor/kreuzberg/src/chunking/mod.rs +677 -677
  68. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -35
  69. data/vendor/kreuzberg/src/core/config.rs +1032 -1032
  70. data/vendor/kreuzberg/src/core/extractor.rs +1024 -903
  71. data/vendor/kreuzberg/src/core/io.rs +329 -327
  72. data/vendor/kreuzberg/src/core/mime.rs +605 -615
  73. data/vendor/kreuzberg/src/core/mod.rs +45 -42
  74. data/vendor/kreuzberg/src/core/pipeline.rs +984 -906
  75. data/vendor/kreuzberg/src/embeddings.rs +432 -323
  76. data/vendor/kreuzberg/src/error.rs +431 -431
  77. data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
  78. data/vendor/kreuzberg/src/extraction/docx.rs +40 -40
  79. data/vendor/kreuzberg/src/extraction/email.rs +854 -854
  80. data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
  81. data/vendor/kreuzberg/src/extraction/html.rs +553 -553
  82. data/vendor/kreuzberg/src/extraction/image.rs +368 -368
  83. data/vendor/kreuzberg/src/extraction/libreoffice.rs +563 -564
  84. data/vendor/kreuzberg/src/extraction/markdown.rs +213 -0
  85. data/vendor/kreuzberg/src/extraction/mod.rs +81 -77
  86. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  87. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  88. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  89. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -128
  90. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +287 -0
  91. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -3000
  92. data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
  93. data/vendor/kreuzberg/src/extraction/table.rs +328 -328
  94. data/vendor/kreuzberg/src/extraction/text.rs +269 -269
  95. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  96. data/vendor/kreuzberg/src/extractors/archive.rs +446 -425
  97. data/vendor/kreuzberg/src/extractors/bibtex.rs +469 -0
  98. data/vendor/kreuzberg/src/extractors/docbook.rs +502 -0
  99. data/vendor/kreuzberg/src/extractors/docx.rs +367 -479
  100. data/vendor/kreuzberg/src/extractors/email.rs +143 -129
  101. data/vendor/kreuzberg/src/extractors/epub.rs +707 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +343 -344
  103. data/vendor/kreuzberg/src/extractors/fictionbook.rs +491 -0
  104. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +738 -0
  105. data/vendor/kreuzberg/src/extractors/html.rs +393 -410
  106. data/vendor/kreuzberg/src/extractors/image.rs +198 -195
  107. data/vendor/kreuzberg/src/extractors/jats.rs +1051 -0
  108. data/vendor/kreuzberg/src/extractors/jupyter.rs +367 -0
  109. data/vendor/kreuzberg/src/extractors/latex.rs +652 -0
  110. data/vendor/kreuzberg/src/extractors/markdown.rs +700 -0
  111. data/vendor/kreuzberg/src/extractors/mod.rs +365 -268
  112. data/vendor/kreuzberg/src/extractors/odt.rs +628 -0
  113. data/vendor/kreuzberg/src/extractors/opml.rs +634 -0
  114. data/vendor/kreuzberg/src/extractors/orgmode.rs +528 -0
  115. data/vendor/kreuzberg/src/extractors/pdf.rs +493 -496
  116. data/vendor/kreuzberg/src/extractors/pptx.rs +248 -234
  117. data/vendor/kreuzberg/src/extractors/rst.rs +576 -0
  118. data/vendor/kreuzberg/src/extractors/rtf.rs +810 -0
  119. data/vendor/kreuzberg/src/extractors/security.rs +484 -0
  120. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -0
  121. data/vendor/kreuzberg/src/extractors/structured.rs +140 -126
  122. data/vendor/kreuzberg/src/extractors/text.rs +260 -242
  123. data/vendor/kreuzberg/src/extractors/typst.rs +650 -0
  124. data/vendor/kreuzberg/src/extractors/xml.rs +135 -128
  125. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  126. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  127. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  128. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  129. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  130. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  131. data/vendor/kreuzberg/src/keywords/processor.rs +267 -267
  132. data/vendor/kreuzberg/src/keywords/rake.rs +293 -294
  133. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  134. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  135. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -942
  136. data/vendor/kreuzberg/src/lib.rs +105 -102
  137. data/vendor/kreuzberg/src/mcp/mod.rs +32 -32
  138. data/vendor/kreuzberg/src/mcp/server.rs +1968 -1966
  139. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  140. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  141. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  142. data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
  143. data/vendor/kreuzberg/src/ocr/processor.rs +863 -847
  144. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  145. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  146. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -450
  147. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  148. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  149. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  150. data/vendor/kreuzberg/src/panic_context.rs +154 -0
  151. data/vendor/kreuzberg/src/pdf/error.rs +122 -122
  152. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  153. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -346
  154. data/vendor/kreuzberg/src/pdf/mod.rs +50 -50
  155. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  156. data/vendor/kreuzberg/src/pdf/table.rs +393 -420
  157. data/vendor/kreuzberg/src/pdf/text.rs +158 -161
  158. data/vendor/kreuzberg/src/plugins/extractor.rs +1013 -1010
  159. data/vendor/kreuzberg/src/plugins/mod.rs +209 -209
  160. data/vendor/kreuzberg/src/plugins/ocr.rs +620 -629
  161. data/vendor/kreuzberg/src/plugins/processor.rs +642 -641
  162. data/vendor/kreuzberg/src/plugins/registry.rs +1337 -1324
  163. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  164. data/vendor/kreuzberg/src/plugins/validator.rs +956 -955
  165. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  166. data/vendor/kreuzberg/src/text/mod.rs +19 -19
  167. data/vendor/kreuzberg/src/text/quality.rs +697 -697
  168. data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
  169. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  170. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  171. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
  172. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
  173. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  174. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  175. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
  176. data/vendor/kreuzberg/src/types.rs +903 -873
  177. data/vendor/kreuzberg/src/utils/mod.rs +17 -17
  178. data/vendor/kreuzberg/src/utils/quality.rs +959 -959
  179. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  180. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  181. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  182. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  183. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  184. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  185. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  186. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  187. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  188. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  189. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  190. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  191. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  192. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  193. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  194. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  195. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  196. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  197. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  198. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  199. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  200. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  201. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  202. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  203. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  204. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  205. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  206. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  207. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  208. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  209. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  210. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  211. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  212. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  213. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  214. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  215. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  216. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  217. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  218. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  219. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  220. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  221. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  222. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  223. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  224. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  225. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  226. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  227. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  228. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  229. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  230. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  231. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  232. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  233. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  234. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  235. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  236. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  237. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  238. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  239. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  240. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  241. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  242. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  243. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  244. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -0
  245. data/vendor/kreuzberg/tests/api_tests.rs +966 -966
  246. data/vendor/kreuzberg/tests/archive_integration.rs +543 -543
  247. data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -542
  248. data/vendor/kreuzberg/tests/batch_processing.rs +316 -304
  249. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -0
  250. data/vendor/kreuzberg/tests/concurrency_stress.rs +525 -509
  251. data/vendor/kreuzberg/tests/config_features.rs +598 -580
  252. data/vendor/kreuzberg/tests/config_loading_tests.rs +415 -439
  253. data/vendor/kreuzberg/tests/core_integration.rs +510 -493
  254. data/vendor/kreuzberg/tests/csv_integration.rs +414 -424
  255. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +498 -0
  256. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -124
  257. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -0
  258. data/vendor/kreuzberg/tests/email_integration.rs +325 -325
  259. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -0
  260. data/vendor/kreuzberg/tests/error_handling.rs +393 -393
  261. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -0
  262. data/vendor/kreuzberg/tests/format_integration.rs +159 -159
  263. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
  264. data/vendor/kreuzberg/tests/html_table_test.rs +551 -0
  265. data/vendor/kreuzberg/tests/image_integration.rs +253 -253
  266. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -0
  267. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -0
  268. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -0
  269. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  270. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  271. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -0
  272. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -0
  273. data/vendor/kreuzberg/tests/mime_detection.rs +428 -428
  274. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -510
  275. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -676
  276. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -627
  277. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  278. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +695 -0
  279. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -0
  280. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -0
  281. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -43
  282. data/vendor/kreuzberg/tests/pipeline_integration.rs +1411 -1412
  283. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -771
  284. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -561
  285. data/vendor/kreuzberg/tests/plugin_system.rs +921 -921
  286. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  287. data/vendor/kreuzberg/tests/registry_integration_tests.rs +586 -607
  288. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +692 -0
  289. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +776 -0
  290. data/vendor/kreuzberg/tests/security_validation.rs +415 -404
  291. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  292. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -609
  293. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1259 -0
  294. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +647 -0
  295. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  296. data/vendor/rb-sys/.cargo-ok +1 -0
  297. data/vendor/rb-sys/.cargo_vcs_info.json +6 -0
  298. data/vendor/rb-sys/Cargo.lock +393 -0
  299. data/vendor/rb-sys/Cargo.toml +70 -0
  300. data/vendor/rb-sys/Cargo.toml.orig +57 -0
  301. data/vendor/rb-sys/LICENSE-APACHE +190 -0
  302. data/vendor/rb-sys/LICENSE-MIT +21 -0
  303. data/vendor/rb-sys/bin/release.sh +21 -0
  304. data/vendor/rb-sys/build/features.rs +108 -0
  305. data/vendor/rb-sys/build/main.rs +246 -0
  306. data/vendor/rb-sys/build/stable_api_config.rs +153 -0
  307. data/vendor/rb-sys/build/version.rs +48 -0
  308. data/vendor/rb-sys/readme.md +36 -0
  309. data/vendor/rb-sys/src/bindings.rs +21 -0
  310. data/vendor/rb-sys/src/hidden.rs +11 -0
  311. data/vendor/rb-sys/src/lib.rs +34 -0
  312. data/vendor/rb-sys/src/macros.rs +371 -0
  313. data/vendor/rb-sys/src/memory.rs +53 -0
  314. data/vendor/rb-sys/src/ruby_abi_version.rs +38 -0
  315. data/vendor/rb-sys/src/special_consts.rs +31 -0
  316. data/vendor/rb-sys/src/stable_api/compiled.c +179 -0
  317. data/vendor/rb-sys/src/stable_api/compiled.rs +257 -0
  318. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +316 -0
  319. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +316 -0
  320. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +324 -0
  321. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +317 -0
  322. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +315 -0
  323. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +326 -0
  324. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +327 -0
  325. data/vendor/rb-sys/src/stable_api.rs +261 -0
  326. data/vendor/rb-sys/src/symbol.rs +31 -0
  327. data/vendor/rb-sys/src/tracking_allocator.rs +332 -0
  328. data/vendor/rb-sys/src/utils.rs +89 -0
  329. data/vendor/rb-sys/src/value_type.rs +7 -0
  330. metadata +90 -95
  331. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  332. data/spec/examples.txt +0 -104
  333. data/vendor/kreuzberg/src/bin/profile_extract.rs +0 -455
  334. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +0 -275
  335. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +0 -178
  336. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +0 -491
  337. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +0 -496
  338. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +0 -1188
  339. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +0 -162
  340. data/vendor/kreuzberg/src/extractors/pandoc.rs +0 -201
  341. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +0 -92
  342. data/vendor/kreuzberg/tests/pandoc_integration.rs +0 -503
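data/vendor/kreuzberg/src/text/token_reduction/core.rs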
@@ -1,796 +1,796 @@
1
- use crate::error::Result;
2
- use crate::text::token_reduction::{
3
- cjk_utils::CjkTokenizer,
4
- config::{ReductionLevel, TokenReductionConfig},
5
- filters::FilterPipeline,
6
- semantic::SemanticAnalyzer,
7
- simd_text::{SimdTextProcessor, chunk_text_for_parallel},
8
- };
9
- use once_cell::sync::Lazy;
10
- use rayon::prelude::*;
11
- use regex::Regex;
12
- use std::sync::Arc;
13
- use unicode_normalization::UnicodeNormalization;
14
-
15
- static REPEATED_EXCLAMATION: Lazy<Regex> =
16
- Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
17
- static REPEATED_QUESTION: Lazy<Regex> =
18
- Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
19
- static REPEATED_COMMA: Lazy<Regex> =
20
- Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
21
-
22
- /// Bonus added for sentences at the beginning or end of the document
23
- const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
24
-
25
- /// Bonus added for sentences with ideal word count (neither too short nor too long)
26
- const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
27
-
28
- /// Minimum word count for ideal sentence length
29
- const MIN_IDEAL_WORD_COUNT: usize = 3;
30
-
31
- /// Maximum word count for ideal sentence length
32
- const MAX_IDEAL_WORD_COUNT: usize = 25;
33
-
34
- /// Weight multiplier for numeric content density in sentences
35
- const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
36
-
37
- /// Weight multiplier for capitalized/acronym word density in sentences
38
- const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
39
-
40
- /// Weight multiplier for long word density in sentences
41
- const LONG_WORD_WEIGHT: f32 = 0.2;
42
-
43
- /// Minimum character length for a word to be considered "long"
44
- const LONG_WORD_THRESHOLD: usize = 8;
45
-
46
- /// Weight multiplier for punctuation density in sentences
47
- const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
48
-
49
- /// Weight multiplier for word diversity ratio (unique words / total words)
50
- const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
51
-
52
- /// Weight multiplier for character entropy (measure of text randomness/information)
53
- const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
54
-
55
- pub struct TokenReducer {
56
- config: Arc<TokenReductionConfig>,
57
- text_processor: SimdTextProcessor,
58
- filter_pipeline: FilterPipeline,
59
- semantic_analyzer: Option<SemanticAnalyzer>,
60
- cjk_tokenizer: CjkTokenizer,
61
- language: String,
62
- }
63
-
64
- impl TokenReducer {
65
- pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
66
- let config = Arc::new(config.clone());
67
- let language = language_hint
68
- .or(config.language_hint.as_deref())
69
- .unwrap_or("en")
70
- .to_string();
71
-
72
- let text_processor = SimdTextProcessor::new();
73
- let filter_pipeline = FilterPipeline::new(&config, &language)?;
74
-
75
- let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
76
- Some(SemanticAnalyzer::new(&language))
77
- } else {
78
- None
79
- };
80
-
81
- Ok(Self {
82
- config,
83
- text_processor,
84
- filter_pipeline,
85
- semantic_analyzer,
86
- cjk_tokenizer: CjkTokenizer::new(),
87
- language,
88
- })
89
- }
90
-
91
- /// Get the language code being used for stopwords and semantic analysis.
92
- pub fn language(&self) -> &str {
93
- &self.language
94
- }
95
-
96
- pub fn reduce(&self, text: &str) -> String {
97
- if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
98
- return text.to_string();
99
- }
100
-
101
- let working_text = if text.is_ascii() {
102
- text
103
- } else {
104
- &text.nfc().collect::<String>()
105
- };
106
-
107
- match self.config.level {
108
- ReductionLevel::Off => working_text.to_string(),
109
- ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
110
- ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
111
- ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
112
- ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
113
- }
114
- }
115
-
116
- pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
117
- if !self.config.enable_parallel || texts.len() < 2 {
118
- return texts.iter().map(|text| self.reduce(text)).collect();
119
- }
120
-
121
- texts.par_iter().map(|text| self.reduce(text)).collect()
122
- }
123
-
124
- fn apply_light_reduction_optimized(&self, text: &str) -> String {
125
- let mut result = if self.config.use_simd {
126
- self.text_processor.clean_punctuation(text)
127
- } else {
128
- self.clean_punctuation_optimized(text)
129
- };
130
-
131
- result = self.filter_pipeline.apply_light_filters(&result);
132
- result.trim().to_string()
133
- }
134
-
135
- fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
136
- let mut result = self.apply_light_reduction_optimized(text);
137
-
138
- result = if self.config.enable_parallel && text.len() > 1000 {
139
- self.apply_parallel_moderate_reduction(&result)
140
- } else {
141
- self.filter_pipeline.apply_moderate_filters(&result)
142
- };
143
-
144
- result
145
- }
146
-
147
- fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
148
- let mut result = self.apply_moderate_reduction_optimized(text);
149
-
150
- result = self.remove_additional_common_words(&result);
151
- result = self.apply_sentence_selection(&result);
152
-
153
- if let Some(ref analyzer) = self.semantic_analyzer {
154
- result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
155
- }
156
-
157
- result
158
- }
159
-
160
- fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
161
- let mut result = self.apply_aggressive_reduction_optimized(text);
162
-
163
- if let Some(ref analyzer) = self.semantic_analyzer
164
- && self.config.enable_semantic_clustering
165
- {
166
- result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
167
- }
168
-
169
- result
170
- }
171
-
172
- fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
173
- let num_threads = rayon::current_num_threads();
174
- let chunks = chunk_text_for_parallel(text, num_threads);
175
-
176
- let processed_chunks: Vec<String> = chunks
177
- .par_iter()
178
- .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
179
- .collect();
180
-
181
- processed_chunks.join(" ")
182
- }
183
-
184
- fn clean_punctuation_optimized(&self, text: &str) -> String {
185
- let mut result = text.to_string();
186
-
187
- result = REPEATED_EXCLAMATION.replace_all(&result, "!").to_string();
188
- result = REPEATED_QUESTION.replace_all(&result, "?").to_string();
189
- result = REPEATED_COMMA.replace_all(&result, ",").to_string();
190
-
191
- result
192
- }
193
-
194
- fn remove_additional_common_words(&self, text: &str) -> String {
195
- let words = self.universal_tokenize(text);
196
-
197
- if words.len() < 4 {
198
- return text.to_string();
199
- }
200
-
201
- let mut word_freq = std::collections::HashMap::new();
202
- let mut word_lengths = Vec::new();
203
-
204
- for word in &words {
205
- let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
206
- word.to_lowercase()
207
- } else {
208
- word.chars()
209
- .filter(|c| c.is_alphabetic())
210
- .collect::<String>()
211
- .to_lowercase()
212
- };
213
-
214
- if !clean_word.is_empty() {
215
- *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
216
- word_lengths.push(clean_word.chars().count());
217
- }
218
- }
219
-
220
- let avg_length = if !word_lengths.is_empty() {
221
- word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
222
- } else {
223
- 5.0
224
- };
225
-
226
- let original_count = words.len();
227
-
228
- let filtered_words: Vec<String> = words
229
- .iter()
230
- .filter(|word| {
231
- let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
232
- word.to_lowercase()
233
- } else {
234
- word.chars()
235
- .filter(|c| c.is_alphabetic())
236
- .collect::<String>()
237
- .to_lowercase()
238
- };
239
-
240
- if clean_word.is_empty() {
241
- return true;
242
- }
243
-
244
- let freq = word_freq.get(&clean_word).unwrap_or(&0);
245
- let word_len = clean_word.chars().count() as f32;
246
-
247
- self.has_important_characteristics(word)
248
- || (*freq <= 2 && word_len >= avg_length * 0.8)
249
- || (word_len >= avg_length * 1.5)
250
- })
251
- .cloned()
252
- .collect();
253
-
254
- let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
255
- let fallback_threshold = if has_cjk_content {
256
- original_count / 5
257
- } else {
258
- original_count / 3
259
- };
260
-
261
- if filtered_words.len() < fallback_threshold {
262
- let fallback_words: Vec<String> = words
263
- .iter()
264
- .filter(|word| {
265
- let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
266
- (*word).clone()
267
- } else {
268
- word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
269
- };
270
-
271
- clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
272
- })
273
- .cloned()
274
- .collect();
275
- self.smart_join(&fallback_words, has_cjk_content)
276
- } else {
277
- self.smart_join(&filtered_words, has_cjk_content)
278
- }
279
- }
280
-
281
- fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
282
- if has_cjk_content {
283
- tokens.join("")
284
- } else {
285
- tokens.join(" ")
286
- }
287
- }
288
-
289
- fn has_important_characteristics(&self, word: &str) -> bool {
290
- if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
291
- return true;
292
- }
293
-
294
- if word.chars().any(|c| c.is_numeric()) {
295
- return true;
296
- }
297
-
298
- if word.len() > 10 {
299
- return true;
300
- }
301
-
302
- let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
303
- if uppercase_count > 1 && uppercase_count < word.len() {
304
- return true;
305
- }
306
-
307
- if self.has_cjk_importance(word) {
308
- return true;
309
- }
310
-
311
- false
312
- }
313
-
314
- fn has_cjk_importance(&self, word: &str) -> bool {
315
- let chars: Vec<char> = word.chars().collect();
316
-
317
- let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
318
- if !has_cjk {
319
- return false;
320
- }
321
-
322
- let important_radicals = [
323
- '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
324
- '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
325
- '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
326
- '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
327
- ];
328
-
329
- for &char in &chars {
330
- if important_radicals.contains(&char) {
331
- return true;
332
- }
333
- }
334
-
335
- if chars.len() == 2 && has_cjk {
336
- let has_technical = chars.iter().any(|&c| {
337
- let code = c as u32;
338
- (0x4E00..=0x4FFF).contains(&code)
339
- || (0x5000..=0x51FF).contains(&code)
340
- || (0x6700..=0x68FF).contains(&code)
341
- || (0x7500..=0x76FF).contains(&code)
342
- });
343
-
344
- if has_technical {
345
- return true;
346
- }
347
- }
348
-
349
- false
350
- }
351
-
352
- fn apply_sentence_selection(&self, text: &str) -> String {
353
- let sentences: Vec<&str> = text
354
- .split(['.', '!', '?'])
355
- .map(|s| s.trim())
356
- .filter(|s| !s.is_empty())
357
- .collect();
358
-
359
- if sentences.len() <= 2 {
360
- return text.to_string();
361
- }
362
-
363
- let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
364
- .iter()
365
- .enumerate()
366
- .map(|(i, sentence)| {
367
- let score = self.score_sentence_importance(sentence, i, sentences.len());
368
- (i, score, *sentence)
369
- })
370
- .collect();
371
-
372
- scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
373
-
374
- let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
375
- let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();
376
-
377
- selected_indices.sort();
378
-
379
- let selected_sentences: Vec<&str> = selected_indices
380
- .iter()
381
- .filter_map(|&i| sentences.get(i))
382
- .copied()
383
- .collect();
384
-
385
- if selected_sentences.is_empty() {
386
- text.to_string()
387
- } else {
388
- selected_sentences.join(". ")
389
- }
390
- }
391
-
392
- fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
393
- let mut score = 0.0;
394
-
395
- if position == 0 || position == total_sentences - 1 {
396
- score += SENTENCE_EDGE_POSITION_BONUS;
397
- }
398
-
399
- let words: Vec<&str> = sentence.split_whitespace().collect();
400
- if words.is_empty() {
401
- return score;
402
- }
403
-
404
- let word_count = words.len();
405
- if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
406
- score += IDEAL_WORD_COUNT_BONUS;
407
- }
408
-
409
- let mut numeric_count = 0;
410
- let mut caps_count = 0;
411
- let mut long_word_count = 0;
412
- let mut punct_density = 0;
413
-
414
- for word in &words {
415
- if word.chars().any(|c| c.is_numeric()) {
416
- numeric_count += 1;
417
- }
418
-
419
- if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
420
- caps_count += 1;
421
- }
422
-
423
- if word.len() > LONG_WORD_THRESHOLD {
424
- long_word_count += 1;
425
- }
426
-
427
- punct_density += word.chars().filter(|c| c.is_ascii_punctuation()).count();
428
- }
429
-
430
- score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
431
- score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
432
- score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
433
- score += (punct_density as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;
434
-
435
- let unique_words: std::collections::HashSet<_> = words
436
- .iter()
437
- .map(|w| {
438
- w.chars()
439
- .filter(|c| c.is_alphabetic())
440
- .collect::<String>()
441
- .to_lowercase()
442
- })
443
- .collect();
444
- let diversity_ratio = unique_words.len() as f32 / words.len() as f32;
445
- score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;
446
-
447
- let char_entropy = self.calculate_char_entropy(sentence);
448
- score += char_entropy * CHAR_ENTROPY_WEIGHT;
449
-
450
- score
451
- }
452
-
453
- fn universal_tokenize(&self, text: &str) -> Vec<String> {
454
- self.cjk_tokenizer.tokenize_mixed_text(text)
455
- }
456
-
457
- fn calculate_char_entropy(&self, text: &str) -> f32 {
458
- let chars: Vec<char> = text.chars().collect();
459
- if chars.is_empty() {
460
- return 0.0;
461
- }
462
-
463
- let mut char_freq = std::collections::HashMap::new();
464
- for &ch in &chars {
465
- let lowercase_ch = ch
466
- .to_lowercase()
467
- .next()
468
- .expect("to_lowercase() must yield at least one character for valid Unicode");
469
- *char_freq.entry(lowercase_ch).or_insert(0) += 1;
470
- }
471
-
472
- let total_chars = chars.len() as f32;
473
- char_freq
474
- .values()
475
- .map(|&freq| {
476
- let p = freq as f32 / total_chars;
477
- if p > 0.0 { -p * p.log2() } else { 0.0 }
478
- })
479
- .sum::<f32>()
480
- .min(5.0)
481
- }
482
- }
483
-
484
- #[cfg(test)]
485
- mod tests {
486
- use super::*;
487
-
488
- #[test]
489
- fn test_light_reduction() {
490
- let config = TokenReductionConfig {
491
- level: ReductionLevel::Light,
492
- use_simd: false,
493
- ..Default::default()
494
- };
495
-
496
- let reducer = TokenReducer::new(&config, None).unwrap();
497
- let input = "Hello world!!! How are you???";
498
- let result = reducer.reduce(input);
499
-
500
- assert!(result.len() < input.len());
501
- assert!(!result.contains(" "));
502
- }
503
-
504
- #[test]
505
- fn test_moderate_reduction() {
506
- let config = TokenReductionConfig {
507
- level: ReductionLevel::Moderate,
508
- use_simd: false,
509
- ..Default::default()
510
- };
511
-
512
- let reducer = TokenReducer::new(&config, Some("en")).unwrap();
513
- let input = "The quick brown fox is jumping over the lazy dog";
514
- let result = reducer.reduce(input);
515
-
516
- assert!(result.len() < input.len());
517
- assert!(result.contains("quick"));
518
- assert!(result.contains("brown"));
519
- assert!(result.contains("fox"));
520
- }
521
-
522
- #[test]
523
- fn test_batch_processing() {
524
- let config = TokenReductionConfig {
525
- level: ReductionLevel::Light,
526
- enable_parallel: false,
527
- ..Default::default()
528
- };
529
-
530
- let reducer = TokenReducer::new(&config, None).unwrap();
531
- let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
532
- let results = reducer.batch_reduce(&inputs);
533
-
534
- assert_eq!(results.len(), inputs.len());
535
- for result in &results {
536
- assert!(!result.contains(" "));
537
- }
538
- }
539
-
540
- #[test]
541
- fn test_aggressive_reduction() {
542
- let config = TokenReductionConfig {
543
- level: ReductionLevel::Aggressive,
544
- use_simd: false,
545
- ..Default::default()
546
- };
547
-
548
- let reducer = TokenReducer::new(&config, Some("en")).unwrap();
549
- let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
550
- let result = reducer.reduce(input);
551
-
552
- assert!(result.len() < input.len());
553
- assert!(!result.is_empty());
554
- }
555
-
556
- #[test]
557
- fn test_maximum_reduction() {
558
- let config = TokenReductionConfig {
559
- level: ReductionLevel::Maximum,
560
- use_simd: false,
561
- enable_semantic_clustering: true,
562
- ..Default::default()
563
- };
564
-
565
- let reducer = TokenReducer::new(&config, Some("en")).unwrap();
566
- let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
567
- let result = reducer.reduce(input);
568
-
569
- assert!(result.len() < input.len());
570
- assert!(!result.is_empty());
571
- }
572
-
573
- #[test]
574
- fn test_empty_text_handling() {
575
- let config = TokenReductionConfig {
576
- level: ReductionLevel::Moderate,
577
- ..Default::default()
578
- };
579
-
580
- let reducer = TokenReducer::new(&config, None).unwrap();
581
- assert_eq!(reducer.reduce(""), "");
582
- let result = reducer.reduce(" ");
583
- assert!(result == " " || result.is_empty());
584
- }
585
-
586
- #[test]
587
- fn test_off_mode_preserves_text() {
588
- let config = TokenReductionConfig {
589
- level: ReductionLevel::Off,
590
- ..Default::default()
591
- };
592
-
593
- let reducer = TokenReducer::new(&config, None).unwrap();
594
- let input = "Text with multiple spaces!!!";
595
- assert_eq!(reducer.reduce(input), input);
596
- }
597
-
598
- #[test]
599
- fn test_parallel_batch_processing() {
600
- let config = TokenReductionConfig {
601
- level: ReductionLevel::Light,
602
- enable_parallel: true,
603
- ..Default::default()
604
- };
605
-
606
- let reducer = TokenReducer::new(&config, None).unwrap();
607
- let inputs = vec![
608
- "First text with spaces",
609
- "Second text with spaces",
610
- "Third text with spaces",
611
- ];
612
- let results = reducer.batch_reduce(&inputs);
613
-
614
- assert_eq!(results.len(), inputs.len());
615
- for result in &results {
616
- assert!(!result.contains(" "));
617
- }
618
- }
619
-
620
- #[test]
621
- fn test_cjk_text_handling() {
622
- let config = TokenReductionConfig {
623
- level: ReductionLevel::Moderate,
624
- ..Default::default()
625
- };
626
-
627
- let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
628
- let input = "这是中文文本测试";
629
- let result = reducer.reduce(input);
630
-
631
- assert!(!result.is_empty());
632
- }
633
-
634
- #[test]
635
- fn test_mixed_language_text() {
636
- let config = TokenReductionConfig {
637
- level: ReductionLevel::Moderate,
638
- ..Default::default()
639
- };
640
-
641
- let reducer = TokenReducer::new(&config, None).unwrap();
642
- let input = "This is English text 这是中文 and some more English";
643
- let result = reducer.reduce(input);
644
-
645
- assert!(!result.is_empty());
646
- assert!(result.contains("English") || result.contains("中"));
647
- }
648
-
649
- #[test]
650
- fn test_punctuation_normalization() {
651
- let config = TokenReductionConfig {
652
- level: ReductionLevel::Light,
653
- ..Default::default()
654
- };
655
-
656
- let reducer = TokenReducer::new(&config, None).unwrap();
657
- let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
658
- let result = reducer.reduce(input);
659
-
660
- assert!(!result.contains("!!!!!!"));
661
- assert!(!result.contains("?????"));
662
- assert!(!result.contains(",,,,,,"));
663
- }
664
-
665
- #[test]
666
- fn test_sentence_selection() {
667
- let config = TokenReductionConfig {
668
- level: ReductionLevel::Aggressive,
669
- ..Default::default()
670
- };
671
-
672
- let reducer = TokenReducer::new(&config, None).unwrap();
673
- let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
674
- let result = reducer.reduce(input);
675
-
676
- assert!(result.len() < input.len());
677
- assert!(result.split(". ").count() < 4);
678
- }
679
-
680
- #[test]
681
- fn test_unicode_normalization_ascii() {
682
- let config = TokenReductionConfig {
683
- level: ReductionLevel::Light,
684
- ..Default::default()
685
- };
686
-
687
- let reducer = TokenReducer::new(&config, None).unwrap();
688
- let input = "Pure ASCII text without special characters";
689
- let result = reducer.reduce(input);
690
-
691
- assert!(result.contains("ASCII"));
692
- }
693
-
694
- #[test]
695
- fn test_unicode_normalization_non_ascii() {
696
- let config = TokenReductionConfig {
697
- level: ReductionLevel::Light,
698
- ..Default::default()
699
- };
700
-
701
- let reducer = TokenReducer::new(&config, None).unwrap();
702
- let input = "Café naïve résumé";
703
- let result = reducer.reduce(input);
704
-
705
- assert!(result.contains("Café") || result.contains("Cafe"));
706
- }
707
-
708
- #[test]
709
- fn test_single_text_vs_batch() {
710
- let config = TokenReductionConfig {
711
- level: ReductionLevel::Moderate,
712
- ..Default::default()
713
- };
714
-
715
- let reducer = TokenReducer::new(&config, None).unwrap();
716
- let text = "The quick brown fox jumps over the lazy dog";
717
-
718
- let single_result = reducer.reduce(text);
719
- let batch_results = reducer.batch_reduce(&[text]);
720
-
721
- assert_eq!(single_result, batch_results[0]);
722
- }
723
-
724
- #[test]
725
- fn test_important_word_preservation() {
726
- let config = TokenReductionConfig {
727
- level: ReductionLevel::Aggressive,
728
- ..Default::default()
729
- };
730
-
731
- let reducer = TokenReducer::new(&config, None).unwrap();
732
- let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
733
- let result = reducer.reduce(input);
734
-
735
- assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
736
- }
737
-
738
- #[test]
739
- fn test_technical_terms_preservation() {
740
- let config = TokenReductionConfig {
741
- level: ReductionLevel::Aggressive,
742
- ..Default::default()
743
- };
744
-
745
- let reducer = TokenReducer::new(&config, None).unwrap();
746
- let input = "The implementation uses PyTorch and TensorFlow frameworks";
747
- let result = reducer.reduce(input);
748
-
749
- assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
750
- }
751
-
752
- #[test]
753
- fn test_calculate_char_entropy() {
754
- let config = TokenReductionConfig::default();
755
- let reducer = TokenReducer::new(&config, None).unwrap();
756
-
757
- let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
758
- assert!(low_entropy < 1.0);
759
-
760
- let high_entropy = reducer.calculate_char_entropy("abcdefg123");
761
- assert!(high_entropy > low_entropy);
762
- }
763
-
764
- #[test]
765
- fn test_universal_tokenize_english() {
766
- let config = TokenReductionConfig::default();
767
- let reducer = TokenReducer::new(&config, None).unwrap();
768
-
769
- let tokens = reducer.universal_tokenize("hello world test");
770
- assert_eq!(tokens, vec!["hello", "world", "test"]);
771
- }
772
-
773
- #[test]
774
- fn test_universal_tokenize_cjk() {
775
- let config = TokenReductionConfig::default();
776
- let reducer = TokenReducer::new(&config, None).unwrap();
777
-
778
- let tokens = reducer.universal_tokenize("中文");
779
- assert!(!tokens.is_empty());
780
- }
781
-
782
- #[test]
783
- fn test_fallback_threshold() {
784
- let config = TokenReductionConfig {
785
- level: ReductionLevel::Aggressive,
786
- ..Default::default()
787
- };
788
-
789
- let reducer = TokenReducer::new(&config, None).unwrap();
790
-
791
- let input = "a the is of to in for on at by";
792
- let result = reducer.reduce(input);
793
-
794
- assert!(!result.is_empty());
795
- }
796
- }
1
+ use crate::error::Result;
2
+ use crate::text::token_reduction::{
3
+ cjk_utils::CjkTokenizer,
4
+ config::{ReductionLevel, TokenReductionConfig},
5
+ filters::FilterPipeline,
6
+ semantic::SemanticAnalyzer,
7
+ simd_text::{SimdTextProcessor, chunk_text_for_parallel},
8
+ };
9
+ use once_cell::sync::Lazy;
10
+ use rayon::prelude::*;
11
+ use regex::Regex;
12
+ use std::sync::Arc;
13
+ use unicode_normalization::UnicodeNormalization;
14
+
15
+ static REPEATED_EXCLAMATION: Lazy<Regex> =
16
+ Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
17
+ static REPEATED_QUESTION: Lazy<Regex> =
18
+ Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
19
+ static REPEATED_COMMA: Lazy<Regex> =
20
+ Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
21
+
22
+ /// Bonus added for sentences at the beginning or end of the document
23
+ const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
24
+
25
+ /// Bonus added for sentences with ideal word count (neither too short nor too long)
26
+ const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
27
+
28
+ /// Minimum word count for ideal sentence length
29
+ const MIN_IDEAL_WORD_COUNT: usize = 3;
30
+
31
+ /// Maximum word count for ideal sentence length
32
+ const MAX_IDEAL_WORD_COUNT: usize = 25;
33
+
34
+ /// Weight multiplier for numeric content density in sentences
35
+ const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
36
+
37
+ /// Weight multiplier for capitalized/acronym word density in sentences
38
+ const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
39
+
40
+ /// Weight multiplier for long word density in sentences
41
+ const LONG_WORD_WEIGHT: f32 = 0.2;
42
+
43
+ /// Minimum character length for a word to be considered "long"
44
+ const LONG_WORD_THRESHOLD: usize = 8;
45
+
46
+ /// Weight multiplier for punctuation density in sentences
47
+ const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
48
+
49
+ /// Weight multiplier for word diversity ratio (unique words / total words)
50
+ const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
51
+
52
+ /// Weight multiplier for character entropy (measure of text randomness/information)
53
+ const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
54
+
55
+ pub struct TokenReducer {
56
+ config: Arc<TokenReductionConfig>,
57
+ text_processor: SimdTextProcessor,
58
+ filter_pipeline: FilterPipeline,
59
+ semantic_analyzer: Option<SemanticAnalyzer>,
60
+ cjk_tokenizer: CjkTokenizer,
61
+ language: String,
62
+ }
63
+
64
+ impl TokenReducer {
65
+ pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
66
+ let config = Arc::new(config.clone());
67
+ let language = language_hint
68
+ .or(config.language_hint.as_deref())
69
+ .unwrap_or("en")
70
+ .to_string();
71
+
72
+ let text_processor = SimdTextProcessor::new();
73
+ let filter_pipeline = FilterPipeline::new(&config, &language)?;
74
+
75
+ let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
76
+ Some(SemanticAnalyzer::new(&language))
77
+ } else {
78
+ None
79
+ };
80
+
81
+ Ok(Self {
82
+ config,
83
+ text_processor,
84
+ filter_pipeline,
85
+ semantic_analyzer,
86
+ cjk_tokenizer: CjkTokenizer::new(),
87
+ language,
88
+ })
89
+ }
90
+
91
+ /// Get the language code being used for stopwords and semantic analysis.
92
+ pub fn language(&self) -> &str {
93
+ &self.language
94
+ }
95
+
96
+ pub fn reduce(&self, text: &str) -> String {
97
+ if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
98
+ return text.to_string();
99
+ }
100
+
101
+ let working_text = if text.is_ascii() {
102
+ text
103
+ } else {
104
+ &text.nfc().collect::<String>()
105
+ };
106
+
107
+ match self.config.level {
108
+ ReductionLevel::Off => working_text.to_string(),
109
+ ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
110
+ ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
111
+ ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
112
+ ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
113
+ }
114
+ }
115
+
116
+ pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
117
+ if !self.config.enable_parallel || texts.len() < 2 {
118
+ return texts.iter().map(|text| self.reduce(text)).collect();
119
+ }
120
+
121
+ texts.par_iter().map(|text| self.reduce(text)).collect()
122
+ }
123
+
124
+ fn apply_light_reduction_optimized(&self, text: &str) -> String {
125
+ let mut result = if self.config.use_simd {
126
+ self.text_processor.clean_punctuation(text)
127
+ } else {
128
+ self.clean_punctuation_optimized(text)
129
+ };
130
+
131
+ result = self.filter_pipeline.apply_light_filters(&result);
132
+ result.trim().to_string()
133
+ }
134
+
135
+ fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
136
+ let mut result = self.apply_light_reduction_optimized(text);
137
+
138
+ result = if self.config.enable_parallel && text.len() > 1000 {
139
+ self.apply_parallel_moderate_reduction(&result)
140
+ } else {
141
+ self.filter_pipeline.apply_moderate_filters(&result)
142
+ };
143
+
144
+ result
145
+ }
146
+
147
+ fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
148
+ let mut result = self.apply_moderate_reduction_optimized(text);
149
+
150
+ result = self.remove_additional_common_words(&result);
151
+ result = self.apply_sentence_selection(&result);
152
+
153
+ if let Some(ref analyzer) = self.semantic_analyzer {
154
+ result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
155
+ }
156
+
157
+ result
158
+ }
159
+
160
+ fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
161
+ let mut result = self.apply_aggressive_reduction_optimized(text);
162
+
163
+ if let Some(ref analyzer) = self.semantic_analyzer
164
+ && self.config.enable_semantic_clustering
165
+ {
166
+ result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
167
+ }
168
+
169
+ result
170
+ }
171
+
172
+ fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
173
+ let num_threads = rayon::current_num_threads();
174
+ let chunks = chunk_text_for_parallel(text, num_threads);
175
+
176
+ let processed_chunks: Vec<String> = chunks
177
+ .par_iter()
178
+ .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
179
+ .collect();
180
+
181
+ processed_chunks.join(" ")
182
+ }
183
+
184
+ fn clean_punctuation_optimized(&self, text: &str) -> String {
185
+ let mut result = text.to_string();
186
+
187
+ result = REPEATED_EXCLAMATION.replace_all(&result, "!").to_string();
188
+ result = REPEATED_QUESTION.replace_all(&result, "?").to_string();
189
+ result = REPEATED_COMMA.replace_all(&result, ",").to_string();
190
+
191
+ result
192
+ }
193
+
194
+ fn remove_additional_common_words(&self, text: &str) -> String {
195
+ let words = self.universal_tokenize(text);
196
+
197
+ if words.len() < 4 {
198
+ return text.to_string();
199
+ }
200
+
201
+ let mut word_freq = std::collections::HashMap::new();
202
+ let mut word_lengths = Vec::new();
203
+
204
+ for word in &words {
205
+ let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
206
+ word.to_lowercase()
207
+ } else {
208
+ word.chars()
209
+ .filter(|c| c.is_alphabetic())
210
+ .collect::<String>()
211
+ .to_lowercase()
212
+ };
213
+
214
+ if !clean_word.is_empty() {
215
+ *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
216
+ word_lengths.push(clean_word.chars().count());
217
+ }
218
+ }
219
+
220
+ let avg_length = if !word_lengths.is_empty() {
221
+ word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
222
+ } else {
223
+ 5.0
224
+ };
225
+
226
+ let original_count = words.len();
227
+
228
+ let filtered_words: Vec<String> = words
229
+ .iter()
230
+ .filter(|word| {
231
+ let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
232
+ word.to_lowercase()
233
+ } else {
234
+ word.chars()
235
+ .filter(|c| c.is_alphabetic())
236
+ .collect::<String>()
237
+ .to_lowercase()
238
+ };
239
+
240
+ if clean_word.is_empty() {
241
+ return true;
242
+ }
243
+
244
+ let freq = word_freq.get(&clean_word).unwrap_or(&0);
245
+ let word_len = clean_word.chars().count() as f32;
246
+
247
+ self.has_important_characteristics(word)
248
+ || (*freq <= 2 && word_len >= avg_length * 0.8)
249
+ || (word_len >= avg_length * 1.5)
250
+ })
251
+ .cloned()
252
+ .collect();
253
+
254
+ let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
255
+ let fallback_threshold = if has_cjk_content {
256
+ original_count / 5
257
+ } else {
258
+ original_count / 3
259
+ };
260
+
261
+ if filtered_words.len() < fallback_threshold {
262
+ let fallback_words: Vec<String> = words
263
+ .iter()
264
+ .filter(|word| {
265
+ let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
266
+ (*word).clone()
267
+ } else {
268
+ word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
269
+ };
270
+
271
+ clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
272
+ })
273
+ .cloned()
274
+ .collect();
275
+ self.smart_join(&fallback_words, has_cjk_content)
276
+ } else {
277
+ self.smart_join(&filtered_words, has_cjk_content)
278
+ }
279
+ }
280
+
281
+ fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
282
+ if has_cjk_content {
283
+ tokens.join("")
284
+ } else {
285
+ tokens.join(" ")
286
+ }
287
+ }
288
+
289
+ fn has_important_characteristics(&self, word: &str) -> bool {
290
+ if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
291
+ return true;
292
+ }
293
+
294
+ if word.chars().any(|c| c.is_numeric()) {
295
+ return true;
296
+ }
297
+
298
+ if word.len() > 10 {
299
+ return true;
300
+ }
301
+
302
+ let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
303
+ if uppercase_count > 1 && uppercase_count < word.len() {
304
+ return true;
305
+ }
306
+
307
+ if self.has_cjk_importance(word) {
308
+ return true;
309
+ }
310
+
311
+ false
312
+ }
313
+
+    fn has_cjk_importance(&self, word: &str) -> bool {
+        let chars: Vec<char> = word.chars().collect();
+
+        let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
+        if !has_cjk {
+            return false;
+        }
+
+        let important_radicals = [
+            '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
+            '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
+            '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
+            '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
+        ];
+
+        for &ch in &chars {
+            if important_radicals.contains(&ch) {
+                return true;
+            }
+        }
+
+        if chars.len() == 2 {
+            let has_technical = chars.iter().any(|&c| {
+                let code = c as u32;
+                (0x4E00..=0x4FFF).contains(&code)
+                    || (0x5000..=0x51FF).contains(&code)
+                    || (0x6700..=0x68FF).contains(&code)
+                    || (0x7500..=0x76FF).contains(&code)
+            });
+
+            if has_technical {
+                return true;
+            }
+        }
+
+        false
+    }
+
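+    /// Extractive sentence selection: splits on sentence-ending
+    /// punctuation, scores each sentence, keeps the top 40% (at least
+    /// one) and re-emits them in their original order. Texts of two
+    /// sentences or fewer are returned unchanged.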
+    fn apply_sentence_selection(&self, text: &str) -> String {
+        let sentences: Vec<&str> = text
+            .split(['.', '!', '?'])
+            .map(|s| s.trim())
+            .filter(|s| !s.is_empty())
+            .collect();
+
+        if sentences.len() <= 2 {
+            return text.to_string();
+        }
+
+        let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
+            .iter()
+            .enumerate()
+            .map(|(i, sentence)| {
+                let score = self.score_sentence_importance(sentence, i, sentences.len());
+                (i, score, *sentence)
+            })
+            .collect();
+
+        scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
+
+        let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
+        let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();
+
+        selected_indices.sort();
+
+        let selected_sentences: Vec<&str> = selected_indices
+            .iter()
+            .filter_map(|&i| sentences.get(i))
+            .copied()
+            .collect();
+
+        if selected_sentences.is_empty() {
+            text.to_string()
+        } else {
+            selected_sentences.join(". ")
+        }
+    }
+
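+    /// Scores a sentence with weighted heuristics: edge position
+    /// (first/last sentence), ideal word count, density of numeric
+    /// tokens, acronyms, long words and punctuation, lexical diversity,
+    /// and character-level entropy.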
+    fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
+        let mut score = 0.0;
+
+        if position == 0 || position == total_sentences - 1 {
+            score += SENTENCE_EDGE_POSITION_BONUS;
+        }
+
+        let words: Vec<&str> = sentence.split_whitespace().collect();
+        if words.is_empty() {
+            return score;
+        }
+
+        let word_count = words.len();
+        if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
+            score += IDEAL_WORD_COUNT_BONUS;
+        }
+
+        let mut numeric_count = 0;
+        let mut caps_count = 0;
+        let mut long_word_count = 0;
+        let mut punct_count = 0;
+
+        for word in &words {
+            if word.chars().any(|c| c.is_numeric()) {
+                numeric_count += 1;
+            }
+
+            if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
+                caps_count += 1;
+            }
+
+            if word.len() > LONG_WORD_THRESHOLD {
+                long_word_count += 1;
+            }
+
+            punct_count += word.chars().filter(|c| c.is_ascii_punctuation()).count();
+        }
+
+        score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
+        score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
+        score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
+        score += (punct_count as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;
+
+        let unique_words: std::collections::HashSet<_> = words
+            .iter()
+            .map(|w| {
+                w.chars()
+                    .filter(|c| c.is_alphabetic())
+                    .collect::<String>()
+                    .to_lowercase()
+            })
+            .collect();
+        let diversity_ratio = unique_words.len() as f32 / words.len() as f32;
+        score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;
+
+        let char_entropy = self.calculate_char_entropy(sentence);
+        score += char_entropy * CHAR_ENTROPY_WEIGHT;
+
+        score
+    }
+
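+    /// Tokenizes mixed CJK/non-CJK text by delegating to the CJK-aware
+    /// tokenizer.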
+    fn universal_tokenize(&self, text: &str) -> Vec<String> {
+        self.cjk_tokenizer.tokenize_mixed_text(text)
+    }
+
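+    /// Shannon entropy H = -Σ p·log2(p) over lowercased characters,
+    /// capped at 5.0 bits. Repetitive text such as "aaaaaaa" scores 0.0;
+    /// varied text scores higher.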
+    fn calculate_char_entropy(&self, text: &str) -> f32 {
+        let chars: Vec<char> = text.chars().collect();
+        if chars.is_empty() {
+            return 0.0;
+        }
+
+        let mut char_freq = std::collections::HashMap::new();
+        for &ch in &chars {
+            let lowercase_ch = ch
+                .to_lowercase()
+                .next()
+                .expect("to_lowercase() must yield at least one character for valid Unicode");
+            *char_freq.entry(lowercase_ch).or_insert(0) += 1;
+        }
+
+        let total_chars = chars.len() as f32;
+        char_freq
+            .values()
+            .map(|&freq| {
+                let p = freq as f32 / total_chars;
+                if p > 0.0 { -p * p.log2() } else { 0.0 }
+            })
+            .sum::<f32>()
+            .min(5.0)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_light_reduction() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Light,
+            use_simd: false,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "Hello world!!! How are you???";
+        let result = reducer.reduce(input);
+
+        assert!(result.len() < input.len());
+        assert!(!result.contains("  "));
+    }
+
+    #[test]
+    fn test_moderate_reduction() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Moderate,
+            use_simd: false,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+        let input = "The quick brown fox is jumping over the lazy dog";
+        let result = reducer.reduce(input);
+
+        assert!(result.len() < input.len());
+        assert!(result.contains("quick"));
+        assert!(result.contains("brown"));
+        assert!(result.contains("fox"));
+    }
+
+    #[test]
+    fn test_batch_processing() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Light,
+            enable_parallel: false,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
+        let results = reducer.batch_reduce(&inputs);
+
+        assert_eq!(results.len(), inputs.len());
+        for result in &results {
+            assert!(!result.contains("  "));
+        }
+    }
+
+    #[test]
+    fn test_aggressive_reduction() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Aggressive,
+            use_simd: false,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
+        let result = reducer.reduce(input);
+
+        assert!(result.len() < input.len());
+        assert!(!result.is_empty());
+    }
+
+    #[test]
+    fn test_maximum_reduction() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Maximum,
+            use_simd: false,
+            enable_semantic_clustering: true,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
+        let result = reducer.reduce(input);
+
+        assert!(result.len() < input.len());
+        assert!(!result.is_empty());
+    }
+
+    #[test]
+    fn test_empty_text_handling() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Moderate,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        assert_eq!(reducer.reduce(""), "");
+        let result = reducer.reduce(" ");
+        assert!(result == " " || result.is_empty());
+    }
+
+    #[test]
+    fn test_off_mode_preserves_text() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Off,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "Text  with  multiple  spaces!!!";
+        assert_eq!(reducer.reduce(input), input);
+    }
+
+    #[test]
+    fn test_parallel_batch_processing() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Light,
+            enable_parallel: true,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let inputs = vec![
+            "First text with spaces",
+            "Second text with spaces",
+            "Third text with spaces",
+        ];
+        let results = reducer.batch_reduce(&inputs);
+
+        assert_eq!(results.len(), inputs.len());
+        for result in &results {
+            assert!(!result.contains("  "));
+        }
+    }
+
+    #[test]
+    fn test_cjk_text_handling() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Moderate,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
+        let input = "这是中文文本测试";
+        let result = reducer.reduce(input);
+
+        assert!(!result.is_empty());
+    }
+
+    #[test]
+    fn test_mixed_language_text() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Moderate,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "This is English text 这是中文 and some more English";
+        let result = reducer.reduce(input);
+
+        assert!(!result.is_empty());
+        assert!(result.contains("English") || result.contains("中"));
+    }
+
+    #[test]
+    fn test_punctuation_normalization() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Light,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
+        let result = reducer.reduce(input);
+
+        assert!(!result.contains("!!!!!!"));
+        assert!(!result.contains("?????"));
+        assert!(!result.contains(",,,,,,"));
+    }
+
+    #[test]
+    fn test_sentence_selection() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Aggressive,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
+        let result = reducer.reduce(input);
+
+        assert!(result.len() < input.len());
+        assert!(result.split(". ").count() < 4);
+    }
+
+    #[test]
+    fn test_unicode_normalization_ascii() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Light,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "Pure ASCII text without special characters";
+        let result = reducer.reduce(input);
+
+        assert!(result.contains("ASCII"));
+    }
+
+    #[test]
+    fn test_unicode_normalization_non_ascii() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Light,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "Café naïve résumé";
+        let result = reducer.reduce(input);
+
+        assert!(result.contains("Café") || result.contains("Cafe"));
+    }
+
+    #[test]
+    fn test_single_text_vs_batch() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Moderate,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let text = "The quick brown fox jumps over the lazy dog";
+
+        let single_result = reducer.reduce(text);
+        let batch_results = reducer.batch_reduce(&[text]);
+
+        assert_eq!(single_result, batch_results[0]);
+    }
+
+    #[test]
+    fn test_important_word_preservation() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Aggressive,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
+        let result = reducer.reduce(input);
+
+        assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
+    }
+
+    #[test]
+    fn test_technical_terms_preservation() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Aggressive,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+        let input = "The implementation uses PyTorch and TensorFlow frameworks";
+        let result = reducer.reduce(input);
+
+        assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
+    }
+
+    #[test]
+    fn test_calculate_char_entropy() {
+        let config = TokenReductionConfig::default();
+        let reducer = TokenReducer::new(&config, None).unwrap();
+
+        let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
+        assert!(low_entropy < 1.0);
+
+        let high_entropy = reducer.calculate_char_entropy("abcdefg123");
+        assert!(high_entropy > low_entropy);
+    }
+
+    #[test]
+    fn test_universal_tokenize_english() {
+        let config = TokenReductionConfig::default();
+        let reducer = TokenReducer::new(&config, None).unwrap();
+
+        let tokens = reducer.universal_tokenize("hello world test");
+        assert_eq!(tokens, vec!["hello", "world", "test"]);
+    }
+
+    #[test]
+    fn test_universal_tokenize_cjk() {
+        let config = TokenReductionConfig::default();
+        let reducer = TokenReducer::new(&config, None).unwrap();
+
+        let tokens = reducer.universal_tokenize("中文");
+        assert!(!tokens.is_empty());
+    }
+
+    #[test]
+    fn test_fallback_threshold() {
+        let config = TokenReductionConfig {
+            level: ReductionLevel::Aggressive,
+            ..Default::default()
+        };
+
+        let reducer = TokenReducer::new(&config, None).unwrap();
+
+        let input = "a the is of to in for on at by";
+        let result = reducer.reduce(input);
+
+        assert!(!result.is_empty());
+    }
+}