kreuzberg 4.0.0.rc2 → 4.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (446)
  1. checksums.yaml +4 -4
  2. data/.gitignore +14 -14
  3. data/.rspec +3 -3
  4. data/.rubocop.yaml +1 -1
  5. data/.rubocop.yml +543 -538
  6. data/Gemfile +8 -8
  7. data/Gemfile.lock +194 -6
  8. data/README.md +391 -426
  9. data/Rakefile +34 -25
  10. data/Steepfile +51 -47
  11. data/examples/async_patterns.rb +283 -341
  12. data/ext/kreuzberg_rb/extconf.rb +65 -45
  13. data/ext/kreuzberg_rb/native/.cargo/config.toml +23 -0
  14. data/ext/kreuzberg_rb/native/Cargo.lock +7619 -6535
  15. data/ext/kreuzberg_rb/native/Cargo.toml +75 -44
  16. data/ext/kreuzberg_rb/native/README.md +425 -425
  17. data/ext/kreuzberg_rb/native/build.rs +15 -15
  18. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
  19. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
  20. data/ext/kreuzberg_rb/native/include/strings.h +20 -20
  21. data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
  22. data/ext/kreuzberg_rb/native/src/lib.rs +3802 -2998
  23. data/extconf.rb +60 -28
  24. data/kreuzberg.gemspec +199 -148
  25. data/lib/kreuzberg/api_proxy.rb +126 -142
  26. data/lib/kreuzberg/cache_api.rb +67 -46
  27. data/lib/kreuzberg/cli.rb +47 -55
  28. data/lib/kreuzberg/cli_proxy.rb +117 -127
  29. data/lib/kreuzberg/config.rb +936 -691
  30. data/lib/kreuzberg/error_context.rb +136 -32
  31. data/lib/kreuzberg/errors.rb +116 -118
  32. data/lib/kreuzberg/extraction_api.rb +313 -85
  33. data/lib/kreuzberg/mcp_proxy.rb +177 -186
  34. data/lib/kreuzberg/ocr_backend_protocol.rb +40 -113
  35. data/lib/kreuzberg/post_processor_protocol.rb +15 -86
  36. data/lib/kreuzberg/result.rb +334 -216
  37. data/lib/kreuzberg/setup_lib_path.rb +99 -80
  38. data/lib/kreuzberg/types.rb +170 -0
  39. data/lib/kreuzberg/validator_protocol.rb +16 -89
  40. data/lib/kreuzberg/version.rb +5 -5
  41. data/lib/kreuzberg.rb +96 -103
  42. data/lib/libpdfium.so +0 -0
  43. data/sig/kreuzberg/internal.rbs +184 -184
  44. data/sig/kreuzberg.rbs +561 -520
  45. data/spec/binding/async_operations_spec.rb +473 -0
  46. data/spec/binding/batch_operations_spec.rb +595 -0
  47. data/spec/binding/batch_spec.rb +359 -0
  48. data/spec/binding/cache_spec.rb +227 -227
  49. data/spec/binding/cli_proxy_spec.rb +85 -85
  50. data/spec/binding/cli_spec.rb +55 -55
  51. data/spec/binding/config_result_spec.rb +377 -0
  52. data/spec/binding/config_spec.rb +419 -345
  53. data/spec/binding/config_validation_spec.rb +377 -283
  54. data/spec/binding/embeddings_spec.rb +816 -0
  55. data/spec/binding/error_handling_spec.rb +399 -213
  56. data/spec/binding/error_recovery_spec.rb +488 -0
  57. data/spec/binding/errors_spec.rb +66 -66
  58. data/spec/binding/font_config_spec.rb +220 -0
  59. data/spec/binding/images_spec.rb +738 -0
  60. data/spec/binding/keywords_extraction_spec.rb +600 -0
  61. data/spec/binding/metadata_types_spec.rb +1228 -0
  62. data/spec/binding/pages_extraction_spec.rb +471 -0
  63. data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
  64. data/spec/binding/plugins/postprocessor_spec.rb +269 -269
  65. data/spec/binding/plugins/validator_spec.rb +273 -274
  66. data/spec/binding/tables_spec.rb +641 -0
  67. data/spec/fixtures/config.toml +38 -39
  68. data/spec/fixtures/config.yaml +41 -41
  69. data/spec/fixtures/invalid_config.toml +3 -4
  70. data/spec/smoke/package_spec.rb +177 -178
  71. data/spec/spec_helper.rb +40 -42
  72. data/spec/unit/config/chunking_config_spec.rb +213 -0
  73. data/spec/unit/config/embedding_config_spec.rb +343 -0
  74. data/spec/unit/config/extraction_config_spec.rb +438 -0
  75. data/spec/unit/config/font_config_spec.rb +285 -0
  76. data/spec/unit/config/hierarchy_config_spec.rb +314 -0
  77. data/spec/unit/config/image_extraction_config_spec.rb +209 -0
  78. data/spec/unit/config/image_preprocessing_config_spec.rb +249 -0
  79. data/spec/unit/config/keyword_config_spec.rb +229 -0
  80. data/spec/unit/config/language_detection_config_spec.rb +258 -0
  81. data/spec/unit/config/ocr_config_spec.rb +171 -0
  82. data/spec/unit/config/page_config_spec.rb +221 -0
  83. data/spec/unit/config/pdf_config_spec.rb +267 -0
  84. data/spec/unit/config/postprocessor_config_spec.rb +290 -0
  85. data/spec/unit/config/tesseract_config_spec.rb +181 -0
  86. data/spec/unit/config/token_reduction_config_spec.rb +251 -0
  87. data/test/metadata_types_test.rb +959 -0
  88. data/vendor/Cargo.toml +61 -0
  89. data/vendor/kreuzberg/Cargo.toml +259 -204
  90. data/vendor/kreuzberg/README.md +263 -175
  91. data/vendor/kreuzberg/build.rs +782 -474
  92. data/vendor/kreuzberg/examples/bench_fixes.rs +71 -0
  93. data/vendor/kreuzberg/examples/test_pdfium_fork.rs +62 -0
  94. data/vendor/kreuzberg/src/api/error.rs +81 -81
  95. data/vendor/kreuzberg/src/api/handlers.rs +320 -199
  96. data/vendor/kreuzberg/src/api/mod.rs +94 -79
  97. data/vendor/kreuzberg/src/api/server.rs +518 -353
  98. data/vendor/kreuzberg/src/api/types.rs +206 -170
  99. data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
  100. data/vendor/kreuzberg/src/chunking/mod.rs +2303 -677
  101. data/vendor/kreuzberg/src/chunking/processor.rs +219 -0
  102. data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
  103. data/vendor/kreuzberg/src/core/batch_optimizations.rs +385 -0
  104. data/vendor/kreuzberg/src/core/config.rs +1914 -1032
  105. data/vendor/kreuzberg/src/core/config_validation.rs +949 -0
  106. data/vendor/kreuzberg/src/core/extractor.rs +1200 -1024
  107. data/vendor/kreuzberg/src/core/formats.rs +235 -0
  108. data/vendor/kreuzberg/src/core/io.rs +329 -329
  109. data/vendor/kreuzberg/src/core/mime.rs +605 -605
  110. data/vendor/kreuzberg/src/core/mod.rs +61 -45
  111. data/vendor/kreuzberg/src/core/pipeline.rs +1223 -984
  112. data/vendor/kreuzberg/src/core/server_config.rs +1220 -0
  113. data/vendor/kreuzberg/src/embeddings.rs +471 -432
  114. data/vendor/kreuzberg/src/error.rs +431 -431
  115. data/vendor/kreuzberg/src/extraction/archive.rs +959 -954
  116. data/vendor/kreuzberg/src/extraction/capacity.rs +263 -0
  117. data/vendor/kreuzberg/src/extraction/docx.rs +404 -40
  118. data/vendor/kreuzberg/src/extraction/email.rs +855 -854
  119. data/vendor/kreuzberg/src/extraction/excel.rs +697 -688
  120. data/vendor/kreuzberg/src/extraction/html.rs +1830 -553
  121. data/vendor/kreuzberg/src/extraction/image.rs +492 -368
  122. data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -563
  123. data/vendor/kreuzberg/src/extraction/markdown.rs +216 -213
  124. data/vendor/kreuzberg/src/extraction/mod.rs +93 -81
  125. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
  126. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
  127. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
  128. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
  129. data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -287
  130. data/vendor/kreuzberg/src/extraction/pptx.rs +3102 -3000
  131. data/vendor/kreuzberg/src/extraction/structured.rs +491 -490
  132. data/vendor/kreuzberg/src/extraction/table.rs +329 -328
  133. data/vendor/kreuzberg/src/extraction/text.rs +277 -269
  134. data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
  135. data/vendor/kreuzberg/src/extractors/archive.rs +447 -446
  136. data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -469
  137. data/vendor/kreuzberg/src/extractors/docbook.rs +504 -502
  138. data/vendor/kreuzberg/src/extractors/docx.rs +400 -367
  139. data/vendor/kreuzberg/src/extractors/email.rs +157 -143
  140. data/vendor/kreuzberg/src/extractors/epub.rs +696 -707
  141. data/vendor/kreuzberg/src/extractors/excel.rs +385 -343
  142. data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -491
  143. data/vendor/kreuzberg/src/extractors/html.rs +419 -393
  144. data/vendor/kreuzberg/src/extractors/image.rs +219 -198
  145. data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1051
  146. data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -367
  147. data/vendor/kreuzberg/src/extractors/latex.rs +653 -652
  148. data/vendor/kreuzberg/src/extractors/markdown.rs +701 -700
  149. data/vendor/kreuzberg/src/extractors/mod.rs +429 -365
  150. data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
  151. data/vendor/kreuzberg/src/extractors/opml.rs +635 -634
  152. data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -528
  153. data/vendor/kreuzberg/src/extractors/pdf.rs +761 -493
  154. data/vendor/kreuzberg/src/extractors/pptx.rs +279 -248
  155. data/vendor/kreuzberg/src/extractors/rst.rs +577 -576
  156. data/vendor/kreuzberg/src/extractors/rtf.rs +809 -810
  157. data/vendor/kreuzberg/src/extractors/security.rs +484 -484
  158. data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
  159. data/vendor/kreuzberg/src/extractors/structured.rs +142 -140
  160. data/vendor/kreuzberg/src/extractors/text.rs +265 -260
  161. data/vendor/kreuzberg/src/extractors/typst.rs +651 -650
  162. data/vendor/kreuzberg/src/extractors/xml.rs +147 -135
  163. data/vendor/kreuzberg/src/image/dpi.rs +164 -164
  164. data/vendor/kreuzberg/src/image/mod.rs +6 -6
  165. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
  166. data/vendor/kreuzberg/src/image/resize.rs +89 -89
  167. data/vendor/kreuzberg/src/keywords/config.rs +154 -154
  168. data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
  169. data/vendor/kreuzberg/src/keywords/processor.rs +275 -267
  170. data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
  171. data/vendor/kreuzberg/src/keywords/types.rs +68 -68
  172. data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
  173. data/vendor/kreuzberg/src/language_detection/mod.rs +985 -942
  174. data/vendor/kreuzberg/src/language_detection/processor.rs +218 -0
  175. data/vendor/kreuzberg/src/lib.rs +114 -105
  176. data/vendor/kreuzberg/src/mcp/mod.rs +35 -32
  177. data/vendor/kreuzberg/src/mcp/server.rs +2090 -1968
  178. data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
  179. data/vendor/kreuzberg/src/ocr/error.rs +37 -37
  180. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
  181. data/vendor/kreuzberg/src/ocr/language_registry.rs +520 -0
  182. data/vendor/kreuzberg/src/ocr/mod.rs +60 -58
  183. data/vendor/kreuzberg/src/ocr/processor.rs +858 -863
  184. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
  185. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
  186. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +456 -450
  187. data/vendor/kreuzberg/src/ocr/types.rs +393 -393
  188. data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
  189. data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
  190. data/vendor/kreuzberg/src/panic_context.rs +154 -154
  191. data/vendor/kreuzberg/src/pdf/bindings.rs +306 -0
  192. data/vendor/kreuzberg/src/pdf/bundled.rs +408 -0
  193. data/vendor/kreuzberg/src/pdf/error.rs +214 -122
  194. data/vendor/kreuzberg/src/pdf/fonts.rs +358 -0
  195. data/vendor/kreuzberg/src/pdf/hierarchy.rs +903 -0
  196. data/vendor/kreuzberg/src/pdf/images.rs +139 -139
  197. data/vendor/kreuzberg/src/pdf/metadata.rs +509 -346
  198. data/vendor/kreuzberg/src/pdf/mod.rs +81 -50
  199. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -369
  200. data/vendor/kreuzberg/src/pdf/table.rs +417 -393
  201. data/vendor/kreuzberg/src/pdf/text.rs +553 -158
  202. data/vendor/kreuzberg/src/plugins/extractor.rs +1042 -1013
  203. data/vendor/kreuzberg/src/plugins/mod.rs +212 -209
  204. data/vendor/kreuzberg/src/plugins/ocr.rs +637 -620
  205. data/vendor/kreuzberg/src/plugins/processor.rs +650 -642
  206. data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1337
  207. data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
  208. data/vendor/kreuzberg/src/plugins/validator.rs +967 -956
  209. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
  210. data/vendor/kreuzberg/src/text/mod.rs +27 -19
  211. data/vendor/kreuzberg/src/text/quality.rs +710 -697
  212. data/vendor/kreuzberg/src/text/quality_processor.rs +231 -0
  213. data/vendor/kreuzberg/src/text/string_utils.rs +229 -217
  214. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
  215. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
  216. data/vendor/kreuzberg/src/text/token_reduction/core.rs +832 -796
  217. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +923 -902
  218. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
  219. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
  220. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +148 -147
  221. data/vendor/kreuzberg/src/text/utf8_validation.rs +193 -0
  222. data/vendor/kreuzberg/src/types.rs +1713 -903
  223. data/vendor/kreuzberg/src/utils/mod.rs +31 -17
  224. data/vendor/kreuzberg/src/utils/pool.rs +503 -0
  225. data/vendor/kreuzberg/src/utils/pool_sizing.rs +364 -0
  226. data/vendor/kreuzberg/src/utils/quality.rs +968 -959
  227. data/vendor/kreuzberg/src/utils/string_pool.rs +761 -0
  228. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
  229. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
  230. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
  231. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
  232. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
  233. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
  234. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
  235. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
  236. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
  237. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
  238. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
  239. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
  240. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
  241. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
  242. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
  243. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
  244. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
  245. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
  246. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
  247. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
  248. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
  249. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
  250. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
  251. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
  252. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
  253. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
  254. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
  255. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
  256. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
  257. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
  258. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
  259. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
  260. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
  261. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
  262. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
  263. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
  264. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
  265. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
  266. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
  267. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
  268. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
  269. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
  270. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
  271. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
  272. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
  273. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
  274. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
  275. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
  276. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
  277. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
  278. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
  279. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
  280. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
  281. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
  282. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
  283. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
  284. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
  285. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
  286. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
  287. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
  288. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
  289. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
  290. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
  291. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
  292. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
  293. data/vendor/kreuzberg/tests/api_embed.rs +360 -0
  294. data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
  295. data/vendor/kreuzberg/tests/api_large_pdf_extraction.rs +471 -0
  296. data/vendor/kreuzberg/tests/api_large_pdf_extraction_diagnostics.rs +289 -0
  297. data/vendor/kreuzberg/tests/api_tests.rs +1472 -966
  298. data/vendor/kreuzberg/tests/archive_integration.rs +545 -543
  299. data/vendor/kreuzberg/tests/batch_orchestration.rs +587 -556
  300. data/vendor/kreuzberg/tests/batch_pooling_benchmark.rs +154 -0
  301. data/vendor/kreuzberg/tests/batch_processing.rs +328 -316
  302. data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
  303. data/vendor/kreuzberg/tests/concurrency_stress.rs +541 -525
  304. data/vendor/kreuzberg/tests/config_features.rs +612 -598
  305. data/vendor/kreuzberg/tests/config_integration_test.rs +753 -0
  306. data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -415
  307. data/vendor/kreuzberg/tests/core_integration.rs +519 -510
  308. data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
  309. data/vendor/kreuzberg/tests/data/hierarchy_ground_truth.json +294 -0
  310. data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -498
  311. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
  312. data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
  313. data/vendor/kreuzberg/tests/email_integration.rs +327 -325
  314. data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
  315. data/vendor/kreuzberg/tests/error_handling.rs +402 -393
  316. data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
  317. data/vendor/kreuzberg/tests/format_integration.rs +165 -159
  318. data/vendor/kreuzberg/tests/helpers/mod.rs +202 -142
  319. data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
  320. data/vendor/kreuzberg/tests/image_integration.rs +255 -253
  321. data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
  322. data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
  323. data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
  324. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
  325. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
  326. data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
  327. data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
  328. data/vendor/kreuzberg/tests/mime_detection.rs +429 -428
  329. data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -510
  330. data/vendor/kreuzberg/tests/ocr_errors.rs +698 -676
  331. data/vendor/kreuzberg/tests/ocr_language_registry.rs +191 -0
  332. data/vendor/kreuzberg/tests/ocr_quality.rs +629 -627
  333. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
  334. data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -695
  335. data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
  336. data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
  337. data/vendor/kreuzberg/tests/page_markers.rs +297 -0
  338. data/vendor/kreuzberg/tests/pdf_hierarchy_detection.rs +301 -0
  339. data/vendor/kreuzberg/tests/pdf_hierarchy_quality.rs +589 -0
  340. data/vendor/kreuzberg/tests/pdf_integration.rs +45 -43
  341. data/vendor/kreuzberg/tests/pdf_ocr_triggering.rs +301 -0
  342. data/vendor/kreuzberg/tests/pdf_text_merging.rs +475 -0
  343. data/vendor/kreuzberg/tests/pdfium_linking.rs +340 -0
  344. data/vendor/kreuzberg/tests/pipeline_integration.rs +1446 -1411
  345. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -771
  346. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +577 -560
  347. data/vendor/kreuzberg/tests/plugin_system.rs +927 -921
  348. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
  349. data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -586
  350. data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -692
  351. data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -776
  352. data/vendor/kreuzberg/tests/security_validation.rs +416 -415
  353. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
  354. data/vendor/kreuzberg/tests/test_fastembed.rs +631 -609
  355. data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1259
  356. data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -647
  357. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
  358. data/vendor/kreuzberg-ffi/Cargo.toml +67 -0
  359. data/vendor/kreuzberg-ffi/README.md +851 -0
  360. data/vendor/kreuzberg-ffi/benches/result_view_benchmark.rs +227 -0
  361. data/vendor/kreuzberg-ffi/build.rs +168 -0
  362. data/vendor/kreuzberg-ffi/cbindgen.toml +37 -0
  363. data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -0
  364. data/vendor/kreuzberg-ffi/kreuzberg.h +3012 -0
  365. data/vendor/kreuzberg-ffi/src/batch_streaming.rs +588 -0
  366. data/vendor/kreuzberg-ffi/src/config.rs +1341 -0
  367. data/vendor/kreuzberg-ffi/src/error.rs +901 -0
  368. data/vendor/kreuzberg-ffi/src/extraction.rs +555 -0
  369. data/vendor/kreuzberg-ffi/src/helpers.rs +879 -0
  370. data/vendor/kreuzberg-ffi/src/lib.rs +977 -0
  371. data/vendor/kreuzberg-ffi/src/memory.rs +493 -0
  372. data/vendor/kreuzberg-ffi/src/mime.rs +329 -0
  373. data/vendor/kreuzberg-ffi/src/panic_shield.rs +265 -0
  374. data/vendor/kreuzberg-ffi/src/plugins/document_extractor.rs +442 -0
  375. data/vendor/kreuzberg-ffi/src/plugins/mod.rs +14 -0
  376. data/vendor/kreuzberg-ffi/src/plugins/ocr_backend.rs +628 -0
  377. data/vendor/kreuzberg-ffi/src/plugins/post_processor.rs +438 -0
  378. data/vendor/kreuzberg-ffi/src/plugins/validator.rs +329 -0
  379. data/vendor/kreuzberg-ffi/src/result.rs +510 -0
  380. data/vendor/kreuzberg-ffi/src/result_pool.rs +639 -0
  381. data/vendor/kreuzberg-ffi/src/result_view.rs +773 -0
  382. data/vendor/kreuzberg-ffi/src/string_intern.rs +568 -0
  383. data/vendor/kreuzberg-ffi/src/types.rs +363 -0
  384. data/vendor/kreuzberg-ffi/src/util.rs +210 -0
  385. data/vendor/kreuzberg-ffi/src/validation.rs +848 -0
  386. data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -0
  387. data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -0
  388. data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -0
  389. data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -0
  390. data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -0
  391. data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -0
  392. data/vendor/kreuzberg-tesseract/.crate-ignore +2 -0
  393. data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -0
  394. data/vendor/kreuzberg-tesseract/Cargo.toml +57 -0
  395. data/vendor/{rb-sys/LICENSE-MIT → kreuzberg-tesseract/LICENSE} +22 -21
  396. data/vendor/kreuzberg-tesseract/README.md +399 -0
  397. data/vendor/kreuzberg-tesseract/build.rs +1127 -0
  398. data/vendor/kreuzberg-tesseract/patches/README.md +71 -0
  399. data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -0
  400. data/vendor/kreuzberg-tesseract/src/api.rs +1371 -0
  401. data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -0
  402. data/vendor/kreuzberg-tesseract/src/enums.rs +297 -0
  403. data/vendor/kreuzberg-tesseract/src/error.rs +81 -0
  404. data/vendor/kreuzberg-tesseract/src/lib.rs +145 -0
  405. data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -0
  406. data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -0
  407. data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -0
  408. data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -0
  409. data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -0
  410. data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -0
  411. metadata +196 -45
  412. data/vendor/kreuzberg/benches/otel_overhead.rs +0 -48
  413. data/vendor/kreuzberg/src/extractors/fictionbook.rs.backup2 +0 -738
  414. data/vendor/rb-sys/.cargo-ok +0 -1
  415. data/vendor/rb-sys/.cargo_vcs_info.json +0 -6
  416. data/vendor/rb-sys/Cargo.lock +0 -393
  417. data/vendor/rb-sys/Cargo.toml +0 -70
  418. data/vendor/rb-sys/Cargo.toml.orig +0 -57
  419. data/vendor/rb-sys/LICENSE-APACHE +0 -190
  420. data/vendor/rb-sys/bin/release.sh +0 -21
  421. data/vendor/rb-sys/build/features.rs +0 -108
  422. data/vendor/rb-sys/build/main.rs +0 -246
  423. data/vendor/rb-sys/build/stable_api_config.rs +0 -153
  424. data/vendor/rb-sys/build/version.rs +0 -48
  425. data/vendor/rb-sys/readme.md +0 -36
  426. data/vendor/rb-sys/src/bindings.rs +0 -21
  427. data/vendor/rb-sys/src/hidden.rs +0 -11
  428. data/vendor/rb-sys/src/lib.rs +0 -34
  429. data/vendor/rb-sys/src/macros.rs +0 -371
  430. data/vendor/rb-sys/src/memory.rs +0 -53
  431. data/vendor/rb-sys/src/ruby_abi_version.rs +0 -38
  432. data/vendor/rb-sys/src/special_consts.rs +0 -31
  433. data/vendor/rb-sys/src/stable_api/compiled.c +0 -179
  434. data/vendor/rb-sys/src/stable_api/compiled.rs +0 -257
  435. data/vendor/rb-sys/src/stable_api/ruby_2_6.rs +0 -316
  436. data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +0 -316
  437. data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +0 -324
  438. data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +0 -317
  439. data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +0 -315
  440. data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +0 -326
  441. data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +0 -327
  442. data/vendor/rb-sys/src/stable_api.rs +0 -261
  443. data/vendor/rb-sys/src/symbol.rs +0 -31
  444. data/vendor/rb-sys/src/tracking_allocator.rs +0 -332
  445. data/vendor/rb-sys/src/utils.rs +0 -89
  446. data/vendor/rb-sys/src/value_type.rs +0 -7
data/vendor/kreuzberg/src/text/token_reduction/core.rs
@@ -1,796 +1,832 @@
- use crate::error::Result;
- use crate::text::token_reduction::{
-     cjk_utils::CjkTokenizer,
-     config::{ReductionLevel, TokenReductionConfig},
-     filters::FilterPipeline,
-     semantic::SemanticAnalyzer,
-     simd_text::{SimdTextProcessor, chunk_text_for_parallel},
- };
- use once_cell::sync::Lazy;
- use rayon::prelude::*;
- use regex::Regex;
- use std::sync::Arc;
- use unicode_normalization::UnicodeNormalization;
-
- static REPEATED_EXCLAMATION: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
- static REPEATED_QUESTION: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
- static REPEATED_COMMA: Lazy<Regex> =
-     Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
-
- /// Bonus added for sentences at the beginning or end of the document
- const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
-
- /// Bonus added for sentences with ideal word count (neither too short nor too long)
- const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
-
- /// Minimum word count for ideal sentence length
- const MIN_IDEAL_WORD_COUNT: usize = 3;
-
- /// Maximum word count for ideal sentence length
- const MAX_IDEAL_WORD_COUNT: usize = 25;
-
- /// Weight multiplier for numeric content density in sentences
- const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
-
- /// Weight multiplier for capitalized/acronym word density in sentences
- const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
-
- /// Weight multiplier for long word density in sentences
- const LONG_WORD_WEIGHT: f32 = 0.2;
-
- /// Minimum character length for a word to be considered "long"
- const LONG_WORD_THRESHOLD: usize = 8;
-
- /// Weight multiplier for punctuation density in sentences
- const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
-
- /// Weight multiplier for word diversity ratio (unique words / total words)
- const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
-
- /// Weight multiplier for character entropy (measure of text randomness/information)
- const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
-
- pub struct TokenReducer {
-     config: Arc<TokenReductionConfig>,
-     text_processor: SimdTextProcessor,
-     filter_pipeline: FilterPipeline,
-     semantic_analyzer: Option<SemanticAnalyzer>,
-     cjk_tokenizer: CjkTokenizer,
-     language: String,
- }
-
- impl TokenReducer {
-     pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
-         let config = Arc::new(config.clone());
-         let language = language_hint
-             .or(config.language_hint.as_deref())
-             .unwrap_or("en")
-             .to_string();
-
-         let text_processor = SimdTextProcessor::new();
-         let filter_pipeline = FilterPipeline::new(&config, &language)?;
-
-         let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
-             Some(SemanticAnalyzer::new(&language))
-         } else {
-             None
-         };
-
-         Ok(Self {
-             config,
-             text_processor,
-             filter_pipeline,
-             semantic_analyzer,
-             cjk_tokenizer: CjkTokenizer::new(),
-             language,
-         })
-     }
-
-     /// Get the language code being used for stopwords and semantic analysis.
-     pub fn language(&self) -> &str {
-         &self.language
-     }
-
-     pub fn reduce(&self, text: &str) -> String {
-         if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
-             return text.to_string();
-         }
-
-         let working_text = if text.is_ascii() {
-             text
-         } else {
-             &text.nfc().collect::<String>()
-         };
-
-         match self.config.level {
-             ReductionLevel::Off => working_text.to_string(),
-             ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
-             ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
-             ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
-             ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
-         }
-     }
-
-     pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
-         if !self.config.enable_parallel || texts.len() < 2 {
-             return texts.iter().map(|text| self.reduce(text)).collect();
-         }
-
-         texts.par_iter().map(|text| self.reduce(text)).collect()
-     }
-
-     fn apply_light_reduction_optimized(&self, text: &str) -> String {
-         let mut result = if self.config.use_simd {
-             self.text_processor.clean_punctuation(text)
-         } else {
-             self.clean_punctuation_optimized(text)
-         };
-
-         result = self.filter_pipeline.apply_light_filters(&result);
-         result.trim().to_string()
-     }
-
-     fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
-         let mut result = self.apply_light_reduction_optimized(text);
-
-         result = if self.config.enable_parallel && text.len() > 1000 {
-             self.apply_parallel_moderate_reduction(&result)
-         } else {
-             self.filter_pipeline.apply_moderate_filters(&result)
-         };
-
-         result
-     }
-
-     fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
-         let mut result = self.apply_moderate_reduction_optimized(text);
-
-         result = self.remove_additional_common_words(&result);
-         result = self.apply_sentence_selection(&result);
-
-         if let Some(ref analyzer) = self.semantic_analyzer {
-             result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
-         }
-
-         result
-     }
-
-     fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
-         let mut result = self.apply_aggressive_reduction_optimized(text);
-
-         if let Some(ref analyzer) = self.semantic_analyzer
-             && self.config.enable_semantic_clustering
-         {
-             result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
-         }
-
-         result
-     }
-
-     fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
-         let num_threads = rayon::current_num_threads();
-         let chunks = chunk_text_for_parallel(text, num_threads);
-
-         let processed_chunks: Vec<String> = chunks
-             .par_iter()
-             .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
-             .collect();
-
-         processed_chunks.join(" ")
-     }
-
-     fn clean_punctuation_optimized(&self, text: &str) -> String {
-         let mut result = text.to_string();
-
-         result = REPEATED_EXCLAMATION.replace_all(&result, "!").to_string();
-         result = REPEATED_QUESTION.replace_all(&result, "?").to_string();
-         result = REPEATED_COMMA.replace_all(&result, ",").to_string();
-
-         result
-     }
-
-     fn remove_additional_common_words(&self, text: &str) -> String {
-         let words = self.universal_tokenize(text);
-
-         if words.len() < 4 {
-             return text.to_string();
-         }
-
-         let mut word_freq = std::collections::HashMap::new();
-         let mut word_lengths = Vec::new();
-
-         for word in &words {
-             let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                 word.to_lowercase()
-             } else {
-                 word.chars()
-                     .filter(|c| c.is_alphabetic())
-                     .collect::<String>()
-                     .to_lowercase()
-             };
-
-             if !clean_word.is_empty() {
-                 *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
-                 word_lengths.push(clean_word.chars().count());
-             }
-         }
-
-         let avg_length = if !word_lengths.is_empty() {
-             word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
-         } else {
-             5.0
-         };
-
-         let original_count = words.len();
-
-         let filtered_words: Vec<String> = words
-             .iter()
-             .filter(|word| {
-                 let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                     word.to_lowercase()
-                 } else {
-                     word.chars()
-                         .filter(|c| c.is_alphabetic())
-                         .collect::<String>()
-                         .to_lowercase()
-                 };
-
-                 if clean_word.is_empty() {
-                     return true;
-                 }
-
-                 let freq = word_freq.get(&clean_word).unwrap_or(&0);
-                 let word_len = clean_word.chars().count() as f32;
-
-                 self.has_important_characteristics(word)
-                     || (*freq <= 2 && word_len >= avg_length * 0.8)
-                     || (word_len >= avg_length * 1.5)
-             })
-             .cloned()
-             .collect();
-
-         let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
-         let fallback_threshold = if has_cjk_content {
-             original_count / 5
-         } else {
-             original_count / 3
-         };
-
-         if filtered_words.len() < fallback_threshold {
-             let fallback_words: Vec<String> = words
-                 .iter()
-                 .filter(|word| {
-                     let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                         (*word).clone()
-                     } else {
-                         word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
-                     };
-
-                     clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
-                 })
-                 .cloned()
-                 .collect();
-             self.smart_join(&fallback_words, has_cjk_content)
-         } else {
-             self.smart_join(&filtered_words, has_cjk_content)
-         }
-     }
-
-     fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
-         if has_cjk_content {
-             tokens.join("")
-         } else {
-             tokens.join(" ")
-         }
-     }
-
-     fn has_important_characteristics(&self, word: &str) -> bool {
-         if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
-             return true;
-         }
-
-         if word.chars().any(|c| c.is_numeric()) {
-             return true;
-         }
-
-         if word.len() > 10 {
-             return true;
-         }
-
-         let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
-         if uppercase_count > 1 && uppercase_count < word.len() {
-             return true;
-         }
-
-         if self.has_cjk_importance(word) {
-             return true;
-         }
-
-         false
-     }
-
-     fn has_cjk_importance(&self, word: &str) -> bool {
-         let chars: Vec<char> = word.chars().collect();
-
-         let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
-         if !has_cjk {
-             return false;
-         }
-
-         let important_radicals = [
-             '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
-             '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
-             '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
-             '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
-         ];
-
-         for &char in &chars {
-             if important_radicals.contains(&char) {
-                 return true;
-             }
-         }
-
-         if chars.len() == 2 && has_cjk {
-             let has_technical = chars.iter().any(|&c| {
-                 let code = c as u32;
-                 (0x4E00..=0x4FFF).contains(&code)
-                     || (0x5000..=0x51FF).contains(&code)
-                     || (0x6700..=0x68FF).contains(&code)
-                     || (0x7500..=0x76FF).contains(&code)
-             });
-
-             if has_technical {
-                 return true;
-             }
-         }
-
-         false
-     }
-
-     fn apply_sentence_selection(&self, text: &str) -> String {
-         let sentences: Vec<&str> = text
-             .split(['.', '!', '?'])
-             .map(|s| s.trim())
-             .filter(|s| !s.is_empty())
-             .collect();
-
-         if sentences.len() <= 2 {
-             return text.to_string();
-         }
-
-         let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
-             .iter()
-             .enumerate()
-             .map(|(i, sentence)| {
-                 let score = self.score_sentence_importance(sentence, i, sentences.len());
-                 (i, score, *sentence)
-             })
-             .collect();
-
-         scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
-
-         let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
-         let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();
-
-         selected_indices.sort();
-
-         let selected_sentences: Vec<&str> = selected_indices
-             .iter()
-             .filter_map(|&i| sentences.get(i))
-             .copied()
-             .collect();
-
-         if selected_sentences.is_empty() {
-             text.to_string()
-         } else {
-             selected_sentences.join(". ")
-         }
-     }
-
-     fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
-         let mut score = 0.0;
-
-         if position == 0 || position == total_sentences - 1 {
-             score += SENTENCE_EDGE_POSITION_BONUS;
-         }
-
-         let words: Vec<&str> = sentence.split_whitespace().collect();
-         if words.is_empty() {
-             return score;
-         }
-
-         let word_count = words.len();
-         if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
-             score += IDEAL_WORD_COUNT_BONUS;
-         }
-
-         let mut numeric_count = 0;
-         let mut caps_count = 0;
-         let mut long_word_count = 0;
-         let mut punct_density = 0;
-
-         for word in &words {
-             if word.chars().any(|c| c.is_numeric()) {
-                 numeric_count += 1;
-             }
-
-             if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
-                 caps_count += 1;
-             }
-
-             if word.len() > LONG_WORD_THRESHOLD {
-                 long_word_count += 1;
-             }
-
-             punct_density += word.chars().filter(|c| c.is_ascii_punctuation()).count();
-         }
-
-         score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
-         score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
-         score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
-         score += (punct_density as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;
-
-         let unique_words: std::collections::HashSet<_> = words
-             .iter()
-             .map(|w| {
-                 w.chars()
-                     .filter(|c| c.is_alphabetic())
-                     .collect::<String>()
-                     .to_lowercase()
-             })
-             .collect();
-         let diversity_ratio = unique_words.len() as f32 / words.len() as f32;
-         score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;
-
-         let char_entropy = self.calculate_char_entropy(sentence);
-         score += char_entropy * CHAR_ENTROPY_WEIGHT;
-
-         score
-     }
-
-     fn universal_tokenize(&self, text: &str) -> Vec<String> {
-         self.cjk_tokenizer.tokenize_mixed_text(text)
-     }
-
-     fn calculate_char_entropy(&self, text: &str) -> f32 {
-         let chars: Vec<char> = text.chars().collect();
-         if chars.is_empty() {
-             return 0.0;
-         }
-
-         let mut char_freq = std::collections::HashMap::new();
-         for &ch in &chars {
-             let lowercase_ch = ch
-                 .to_lowercase()
-                 .next()
-                 .expect("to_lowercase() must yield at least one character for valid Unicode");
-             *char_freq.entry(lowercase_ch).or_insert(0) += 1;
-         }
-
-         let total_chars = chars.len() as f32;
-         char_freq
-             .values()
-             .map(|&freq| {
-                 let p = freq as f32 / total_chars;
-                 if p > 0.0 { -p * p.log2() } else { 0.0 }
-             })
-             .sum::<f32>()
-             .min(5.0)
-     }
- }
-
- #[cfg(test)]
- mod tests {
-     use super::*;
-
-     #[test]
-     fn test_light_reduction() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Light,
-             use_simd: false,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "Hello world!!! How are you???";
-         let result = reducer.reduce(input);
-
-         assert!(result.len() < input.len());
-         assert!(!result.contains("  "));
-     }
-
-     #[test]
-     fn test_moderate_reduction() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Moderate,
-             use_simd: false,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-         let input = "The quick brown fox is jumping over the lazy dog";
-         let result = reducer.reduce(input);
-
-         assert!(result.len() < input.len());
-         assert!(result.contains("quick"));
-         assert!(result.contains("brown"));
-         assert!(result.contains("fox"));
-     }
-
-     #[test]
-     fn test_batch_processing() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Light,
-             enable_parallel: false,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
-         let results = reducer.batch_reduce(&inputs);
-
-         assert_eq!(results.len(), inputs.len());
-         for result in &results {
-             assert!(!result.contains("  "));
-         }
-     }
-
-     #[test]
-     fn test_aggressive_reduction() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Aggressive,
-             use_simd: false,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-         let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-         let result = reducer.reduce(input);
-
-         assert!(result.len() < input.len());
-         assert!(!result.is_empty());
-     }
-
-     #[test]
-     fn test_maximum_reduction() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Maximum,
-             use_simd: false,
-             enable_semantic_clustering: true,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-         let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-         let result = reducer.reduce(input);
-
-         assert!(result.len() < input.len());
-         assert!(!result.is_empty());
-     }
-
-     #[test]
-     fn test_empty_text_handling() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Moderate,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         assert_eq!(reducer.reduce(""), "");
-         let result = reducer.reduce(" ");
-         assert!(result == " " || result.is_empty());
-     }
-
-     #[test]
-     fn test_off_mode_preserves_text() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Off,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "Text with multiple spaces!!!";
-         assert_eq!(reducer.reduce(input), input);
-     }
-
-     #[test]
-     fn test_parallel_batch_processing() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Light,
-             enable_parallel: true,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let inputs = vec![
-             "First text with spaces",
-             "Second text with spaces",
-             "Third text with spaces",
-         ];
-         let results = reducer.batch_reduce(&inputs);
-
-         assert_eq!(results.len(), inputs.len());
-         for result in &results {
-             assert!(!result.contains("  "));
-         }
-     }
-
-     #[test]
-     fn test_cjk_text_handling() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Moderate,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
-         let input = "这是中文文本测试";
-         let result = reducer.reduce(input);
-
-         assert!(!result.is_empty());
-     }
-
-     #[test]
-     fn test_mixed_language_text() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Moderate,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "This is English text 这是中文 and some more English";
-         let result = reducer.reduce(input);
-
-         assert!(!result.is_empty());
-         assert!(result.contains("English") || result.contains("中"));
-     }
-
-     #[test]
-     fn test_punctuation_normalization() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Light,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
-         let result = reducer.reduce(input);
-
-         assert!(!result.contains("!!!!!!"));
-         assert!(!result.contains("?????"));
-         assert!(!result.contains(",,,,,,"));
-     }
-
-     #[test]
-     fn test_sentence_selection() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Aggressive,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
-         let result = reducer.reduce(input);
-
-         assert!(result.len() < input.len());
-         assert!(result.split(". ").count() < 4);
-     }
-
-     #[test]
-     fn test_unicode_normalization_ascii() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Light,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "Pure ASCII text without special characters";
-         let result = reducer.reduce(input);
-
-         assert!(result.contains("ASCII"));
-     }
-
-     #[test]
-     fn test_unicode_normalization_non_ascii() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Light,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "Café naïve résumé";
-         let result = reducer.reduce(input);
-
-         assert!(result.contains("Café") || result.contains("Cafe"));
-     }
-
-     #[test]
-     fn test_single_text_vs_batch() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Moderate,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let text = "The quick brown fox jumps over the lazy dog";
-
-         let single_result = reducer.reduce(text);
-         let batch_results = reducer.batch_reduce(&[text]);
-
-         assert_eq!(single_result, batch_results[0]);
-     }
-
-     #[test]
-     fn test_important_word_preservation() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Aggressive,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
-         let result = reducer.reduce(input);
-
-         assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
-     }
-
-     #[test]
-     fn test_technical_terms_preservation() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Aggressive,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-         let input = "The implementation uses PyTorch and TensorFlow frameworks";
-         let result = reducer.reduce(input);
-
-         assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
-     }
-
-     #[test]
-     fn test_calculate_char_entropy() {
-         let config = TokenReductionConfig::default();
-         let reducer = TokenReducer::new(&config, None).unwrap();
-
-         let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
-         assert!(low_entropy < 1.0);
-
-         let high_entropy = reducer.calculate_char_entropy("abcdefg123");
-         assert!(high_entropy > low_entropy);
-     }
-
-     #[test]
-     fn test_universal_tokenize_english() {
-         let config = TokenReductionConfig::default();
-         let reducer = TokenReducer::new(&config, None).unwrap();
-
-         let tokens = reducer.universal_tokenize("hello world test");
-         assert_eq!(tokens, vec!["hello", "world", "test"]);
-     }
-
-     #[test]
-     fn test_universal_tokenize_cjk() {
-         let config = TokenReductionConfig::default();
-         let reducer = TokenReducer::new(&config, None).unwrap();
-
-         let tokens = reducer.universal_tokenize("中文");
-         assert!(!tokens.is_empty());
-     }
-
-     #[test]
-     fn test_fallback_threshold() {
-         let config = TokenReductionConfig {
-             level: ReductionLevel::Aggressive,
-             ..Default::default()
-         };
-
-         let reducer = TokenReducer::new(&config, None).unwrap();
-
-         let input = "a the is of to in for on at by";
-         let result = reducer.reduce(input);
-
-         assert!(!result.is_empty());
-     }
- }
1
+ use crate::error::Result;
2
+ use crate::text::token_reduction::{
3
+ cjk_utils::CjkTokenizer,
4
+ config::{ReductionLevel, TokenReductionConfig},
5
+ filters::FilterPipeline,
6
+ semantic::SemanticAnalyzer,
7
+ simd_text::{SimdTextProcessor, chunk_text_for_parallel},
8
+ };
9
+ use ahash::AHashMap;
10
+ use once_cell::sync::Lazy;
11
+ use rayon::prelude::*;
12
+ use regex::Regex;
13
+ use std::sync::Arc;
14
+ use unicode_normalization::UnicodeNormalization;
15
+
16
+ static REPEATED_EXCLAMATION: Lazy<Regex> =
17
+ Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
18
+ static REPEATED_QUESTION: Lazy<Regex> =
19
+ Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
20
+ static REPEATED_COMMA: Lazy<Regex> =
21
+ Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
22
+
23
+ /// Bonus added for sentences at the beginning or end of the document
24
+ const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
25
+
26
+ /// Bonus added for sentences with ideal word count (neither too short nor too long)
27
+ const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
28
+
29
+ /// Minimum word count for ideal sentence length
30
+ const MIN_IDEAL_WORD_COUNT: usize = 3;
31
+
32
+ /// Maximum word count for ideal sentence length
33
+ const MAX_IDEAL_WORD_COUNT: usize = 25;
34
+
35
+ /// Weight multiplier for numeric content density in sentences
36
+ const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
37
+
38
+ /// Weight multiplier for capitalized/acronym word density in sentences
39
+ const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
40
+
41
+ /// Weight multiplier for long word density in sentences
42
+ const LONG_WORD_WEIGHT: f32 = 0.2;
43
+
44
+ /// Minimum character length for a word to be considered "long"
45
+ const LONG_WORD_THRESHOLD: usize = 8;
46
+
47
+ /// Weight multiplier for punctuation density in sentences
48
+ const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
49
+
50
+ /// Weight multiplier for word diversity ratio (unique words / total words)
51
+ const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
52
+
53
+ /// Weight multiplier for character entropy (measure of text randomness/information)
54
+ const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
55
+
56
+ pub struct TokenReducer {
57
+ config: Arc<TokenReductionConfig>,
58
+ text_processor: SimdTextProcessor,
59
+ filter_pipeline: FilterPipeline,
60
+ semantic_analyzer: Option<SemanticAnalyzer>,
61
+ cjk_tokenizer: CjkTokenizer,
62
+ language: String,
63
+ }
64
+
65
+ impl TokenReducer {
66
+ pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
67
+ let config = Arc::new(config.clone());
68
+ let language = language_hint
69
+ .or(config.language_hint.as_deref())
70
+ .unwrap_or("en")
71
+ .to_string();
72
+
73
+ let text_processor = SimdTextProcessor::new();
74
+ let filter_pipeline = FilterPipeline::new(&config, &language)?;
75
+
76
+ let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
77
+ Some(SemanticAnalyzer::new(&language))
78
+ } else {
79
+ None
80
+ };
81
+
82
+ Ok(Self {
83
+ config,
84
+ text_processor,
85
+ filter_pipeline,
86
+ semantic_analyzer,
87
+ cjk_tokenizer: CjkTokenizer::new(),
88
+ language,
89
+ })
90
+ }
91
+
92
+ /// Get the language code being used for stopwords and semantic analysis.
93
+ pub fn language(&self) -> &str {
94
+ &self.language
95
+ }
96
+
97
+ pub fn reduce(&self, text: &str) -> String {
98
+ if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
99
+ return text.to_string();
100
+ }
101
+
102
+ let nfc_string;
103
+ let working_text = if text.is_ascii() {
104
+ text
105
+ } else {
106
+ nfc_string = text.nfc().collect::<String>();
107
+ &nfc_string
108
+ };
109
+
110
+ match self.config.level {
111
+ ReductionLevel::Off => working_text.to_string(),
112
+ ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
113
+ ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
114
+ ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
115
+ ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
116
+ }
117
+ }
118
+
119
+ pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
120
+ if !self.config.enable_parallel || texts.len() < 2 {
121
+ return texts.iter().map(|text| self.reduce(text)).collect();
122
+ }
123
+
124
+ texts.par_iter().map(|text| self.reduce(text)).collect()
125
+ }
126
+
+     fn apply_light_reduction_optimized(&self, text: &str) -> String {
+         let mut result = if self.config.use_simd {
+             self.text_processor.clean_punctuation(text)
+         } else {
+             self.clean_punctuation_optimized(text)
+         };
+
+         result = self.filter_pipeline.apply_light_filters(&result);
+         result.trim().to_string()
+     }
+
+     fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
+         let mut result = self.apply_light_reduction_optimized(text);
+
+         result = if self.config.enable_parallel && text.len() > 1000 {
+             self.apply_parallel_moderate_reduction(&result)
+         } else {
+             self.filter_pipeline.apply_moderate_filters(&result)
+         };
+
+         result
+     }
+
+     fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
+         let mut result = self.apply_moderate_reduction_optimized(text);
+
+         result = self.remove_additional_common_words(&result);
+         result = self.apply_sentence_selection(&result);
+
+         if let Some(ref analyzer) = self.semantic_analyzer {
+             result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
+         }
+
+         result
+     }
+
+     fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
+         let mut result = self.apply_aggressive_reduction_optimized(text);
+
+         if let Some(ref analyzer) = self.semantic_analyzer
+             && self.config.enable_semantic_clustering
+         {
+             result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
+         }
+
+         result
+     }
+
+     fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
+         let num_threads = rayon::current_num_threads();
+         let chunks = chunk_text_for_parallel(text, num_threads);
+
+         let processed_chunks: Vec<String> = chunks
+             .par_iter()
+             .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
+             .collect();
+
+         processed_chunks.join(" ")
+     }
+
+     fn clean_punctuation_optimized(&self, text: &str) -> String {
+         use std::borrow::Cow;
+
+         let mut result = Cow::Borrowed(text);
+
+         if REPEATED_EXCLAMATION.is_match(&result) {
+             result = Cow::Owned(REPEATED_EXCLAMATION.replace_all(&result, "!").into_owned());
+         }
+         if REPEATED_QUESTION.is_match(&result) {
+             result = Cow::Owned(REPEATED_QUESTION.replace_all(&result, "?").into_owned());
+         }
+         if REPEATED_COMMA.is_match(&result) {
+             result = Cow::Owned(REPEATED_COMMA.replace_all(&result, ",").into_owned());
+         }
+
+         result.into_owned()
+     }
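The scalar fallback above leans on `Cow` so that clean text passes through without allocating. A self-contained sketch of the pattern; the regex here is an illustrative stand-in for the lazily initialized `REPEATED_*` statics:

```rust
use std::borrow::Cow;
use regex::Regex;

// Collapse a repeated-punctuation run to a single character.
fn collapse(pattern: &Regex, replacement: &str, text: &str) -> String {
    let mut result = Cow::Borrowed(text);
    if pattern.is_match(&result) {
        // Only allocate a new String when a replacement actually occurs.
        result = Cow::Owned(pattern.replace_all(&result, replacement).into_owned());
    }
    result.into_owned()
}

fn main() {
    let repeated_exclamation = Regex::new(r"!{2,}").unwrap();
    assert_eq!(collapse(&repeated_exclamation, "!", "wow!!!"), "wow!");
    assert_eq!(collapse(&repeated_exclamation, "!", "calm"), "calm"); // borrow-only path
}
```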
204
+
+     fn remove_additional_common_words(&self, text: &str) -> String {
+         let words = self.universal_tokenize(text);
+
+         if words.len() < 4 {
+             return text.to_string();
+         }
+
+         // First pass: collect per-word frequencies and length statistics.
+         let estimated_unique = (words.len() as f32 * 0.7).ceil() as usize;
+         let mut word_freq = AHashMap::with_capacity(estimated_unique);
+
+         let mut word_lengths = Vec::with_capacity(words.len());
+
+         for word in &words {
+             let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
+                 word.to_lowercase()
+             } else {
+                 word.chars()
+                     .filter(|c| c.is_alphabetic())
+                     .collect::<String>()
+                     .to_lowercase()
+             };
+
+             if !clean_word.is_empty() {
+                 *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
+                 word_lengths.push(clean_word.chars().count());
+             }
+         }
+
+         let avg_length = if !word_lengths.is_empty() {
+             word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
+         } else {
+             5.0
+         };
+
+         let original_count = words.len();
+         let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
+
+         // Second pass: keep words that look important, infrequent, or unusually long.
+         let mut filtered_words = Vec::with_capacity(words.len());
+         for word in &words {
+             let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
+                 word.to_lowercase()
+             } else {
+                 word.chars()
+                     .filter(|c| c.is_alphabetic())
+                     .collect::<String>()
+                     .to_lowercase()
+             };
+
+             if clean_word.is_empty() {
+                 filtered_words.push(word.clone());
+             } else {
+                 let freq = word_freq.get(&clean_word).unwrap_or(&0);
+                 let word_len = clean_word.chars().count() as f32;
+
+                 if self.has_important_characteristics(word)
+                     || (*freq <= 2 && word_len >= avg_length * 0.8)
+                     || (word_len >= avg_length * 1.5)
+                 {
+                     filtered_words.push(word.clone());
+                 }
+             }
+         }
+
+         // Fallback: if filtering removed too much, re-filter with relaxed criteria.
+         let fallback_threshold = if has_cjk_content {
+             original_count / 5
+         } else {
+             original_count / 3
+         };
+
+         if filtered_words.len() < fallback_threshold {
+             let mut fallback_words = Vec::with_capacity(words.len());
+             for word in &words {
+                 let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
+                     word.to_lowercase()
+                 } else {
+                     word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
+                 };
+
+                 if clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
+                 {
+                     fallback_words.push(word.clone());
+                 }
+             }
+             self.smart_join(&fallback_words, has_cjk_content)
+         } else {
+             self.smart_join(&filtered_words, has_cjk_content)
+         }
+     }
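For intuition, with an average cleaned-word length of 5.0: a word survives the second pass if it has important characteristics (all caps, digits, mixed case, more than 10 characters, or CJK importance), or it occurs at most twice and has at least 4 characters (0.8 × 5.0), or it has 8 or more characters (at least 1.5 × 5.0). If fewer than a third of the words survive (a fifth for CJK text, where tokens run shorter), the relaxed fallback instead keeps every word of 3 or more characters.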
293
+
+     fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
+         if has_cjk_content {
+             tokens.join("")
+         } else {
+             tokens.join(" ")
+         }
+     }
+
+     fn has_important_characteristics(&self, word: &str) -> bool {
+         if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
+             return true;
+         }
+
+         if word.chars().any(|c| c.is_numeric()) {
+             return true;
+         }
+
+         if word.len() > 10 {
+             return true;
+         }
+
+         let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
+         if uppercase_count > 1 && uppercase_count < word.len() {
+             return true;
+         }
+
+         if self.has_cjk_importance(word) {
+             return true;
+         }
+
+         false
+     }
326
+
+     fn has_cjk_importance(&self, word: &str) -> bool {
+         let chars: Vec<char> = word.chars().collect();
+
+         // 0x4E00..=0x9FFF is the CJK Unified Ideographs block.
+         let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
+         if !has_cjk {
+             return false;
+         }
+
+         // Ideographs common in technical and scientific vocabulary.
+         let important_radicals = [
+             '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
+             '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
+             '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
+             '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
+         ];
+
+         for &char in &chars {
+             if important_radicals.contains(&char) {
+                 return true;
+             }
+         }
+
+         // Two-character CJK words drawn from sub-ranges that skew technical.
+         if chars.len() == 2 && has_cjk {
+             let has_technical = chars.iter().any(|&c| {
+                 let code = c as u32;
+                 (0x4E00..=0x4FFF).contains(&code)
+                     || (0x5000..=0x51FF).contains(&code)
+                     || (0x6700..=0x68FF).contains(&code)
+                     || (0x7500..=0x76FF).contains(&code)
+             });
+
+             if has_technical {
+                 return true;
+             }
+         }
+
+         false
+     }
364
+
+     fn apply_sentence_selection(&self, text: &str) -> String {
+         let sentences: Vec<&str> = text
+             .split(['.', '!', '?'])
+             .map(|s| s.trim())
+             .filter(|s| !s.is_empty())
+             .collect();
+
+         if sentences.len() <= 2 {
+             return text.to_string();
+         }
+
+         let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
+             .iter()
+             .enumerate()
+             .map(|(i, sentence)| {
+                 let score = self.score_sentence_importance(sentence, i, sentences.len());
+                 (i, score, *sentence)
+             })
+             .collect();
+
+         scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
+
+         let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
+         let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();
+
+         selected_indices.sort();
+
+         let selected_sentences: Vec<&str> = selected_indices
+             .iter()
+             .filter_map(|&i| sentences.get(i))
+             .copied()
+             .collect();
+
+         if selected_sentences.is_empty() {
+             text.to_string()
+         } else {
+             selected_sentences.join(". ")
+         }
+     }
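The selection budget keeps the top 40% of sentences by score, rounded up and never zero; original order is restored before joining. A quick check of the arithmetic:

```rust
// Mirrors keep_count above: ceil(0.4 * n), clamped to at least 1.
fn keep_count(n_sentences: usize) -> usize {
    ((n_sentences as f32 * 0.4).ceil() as usize).max(1)
}

fn main() {
    assert_eq!(keep_count(3), 2); // smallest input that reaches selection
    assert_eq!(keep_count(4), 2); // matches test_sentence_selection below
    assert_eq!(keep_count(10), 4);
}
```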
404
+
+     fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
+         let mut score = 0.0;
+
+         if position == 0 || position == total_sentences - 1 {
+             score += SENTENCE_EDGE_POSITION_BONUS;
+         }
+
+         let words: Vec<&str> = sentence.split_whitespace().collect();
+         if words.is_empty() {
+             return score;
+         }
+
+         let word_count = words.len();
+         if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
+             score += IDEAL_WORD_COUNT_BONUS;
+         }
+
+         let mut numeric_count = 0;
+         let mut caps_count = 0;
+         let mut long_word_count = 0;
+         let mut punct_density = 0;
+
+         for word in &words {
+             if word.chars().any(|c| c.is_numeric()) {
+                 numeric_count += 1;
+             }
+
+             if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
+                 caps_count += 1;
+             }
+
+             if word.len() > LONG_WORD_THRESHOLD {
+                 long_word_count += 1;
+             }
+
+             punct_density += word.chars().filter(|c| c.is_ascii_punctuation()).count();
+         }
+
+         score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
+         score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
+         score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
+         score += (punct_density as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;
+
+         // Count unique words with an early exit once the estimate is reached;
+         // finish the full count only if the estimate was not hit.
+         let estimated_unique = (words.len() as f32 * 0.6).ceil() as usize;
+         let mut unique_words: ahash::AHashSet<String> = ahash::AHashSet::with_capacity(estimated_unique.max(10));
+
+         for w in &words {
+             let clean = w
+                 .chars()
+                 .filter(|c| c.is_alphabetic())
+                 .collect::<String>()
+                 .to_lowercase();
+             unique_words.insert(clean);
+
+             if unique_words.len() >= estimated_unique {
+                 break;
+             }
+         }
+
+         let final_unique_count = if unique_words.len() >= estimated_unique {
+             unique_words.len()
+         } else {
+             for w in &words {
+                 let clean = w
+                     .chars()
+                     .filter(|c| c.is_alphabetic())
+                     .collect::<String>()
+                     .to_lowercase();
+                 unique_words.insert(clean);
+             }
+             unique_words.len()
+         };
+
+         let diversity_ratio = final_unique_count as f32 / words.len() as f32;
+         score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;
+
+         let char_entropy = self.calculate_char_entropy(sentence);
+         score += char_entropy * CHAR_ENTROPY_WEIGHT;
+
+         score
+     }
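Putting the pieces together, each sentence's score is effectively:

score = edge_bonus + length_bonus + 0.3 · (numeric words / words) + 0.25 · (all-caps words / words) + 0.2 · (long words / words) + 0.15 · (punctuation chars / sentence bytes) + 0.15 · (unique words / words) + 0.1 · min(entropy, 5.0)

where edge_bonus (SENTENCE_EDGE_POSITION_BONUS, defined earlier in the file) applies only to the first and last sentence, and length_bonus is 0.2 for sentences of 3 to 25 words. Note that the entropy term can dominate: at the 5.0 cap it contributes 0.5, more than any single density term.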
486
+
+     fn universal_tokenize(&self, text: &str) -> Vec<String> {
+         self.cjk_tokenizer.tokenize_mixed_text(text)
+     }
+
+     fn calculate_char_entropy(&self, text: &str) -> f32 {
+         let chars: Vec<char> = text.chars().collect();
+         if chars.is_empty() {
+             return 0.0;
+         }
+
+         let estimated_unique = (chars.len() as f32 * 0.1).ceil() as usize;
+         let mut char_freq = AHashMap::with_capacity(estimated_unique.max(26));
+
+         for &ch in &chars {
+             let lowercase_ch = ch
+                 .to_lowercase()
+                 .next()
+                 .expect("to_lowercase() must yield at least one character for valid Unicode");
+             *char_freq.entry(lowercase_ch).or_insert(0) += 1;
+         }
+
+         let total_chars = chars.len() as f32;
+         char_freq
+             .values()
+             .map(|&freq| {
+                 let p = freq as f32 / total_chars;
+                 if p > 0.0 { -p * p.log2() } else { 0.0 }
+             })
+             .sum::<f32>()
+             .min(5.0)
+     }
+ }
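`calculate_char_entropy` is Shannon entropy over case-folded character frequencies, H = −Σ pᵢ · log₂(pᵢ), capped at 5.0 bits. A single repeated character ("aaaaaaa") scores 0.0, two alternating characters ("abab") score exactly 1.0, and text with many distinct characters approaches the cap; test_calculate_char_entropy below relies only on these relative orderings.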
519
+
+ #[cfg(test)]
+ mod tests {
+     use super::*;
+
+     #[test]
+     fn test_light_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             use_simd: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Hello world!!! How are you???";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(!result.contains("  "));
+     }
539
+
+     #[test]
+     fn test_moderate_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             use_simd: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+         let input = "The quick brown fox is jumping over the lazy dog";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(result.contains("quick"));
+         assert!(result.contains("brown"));
+         assert!(result.contains("fox"));
+     }
557
+
+     #[test]
+     fn test_batch_processing() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             enable_parallel: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
+         let results = reducer.batch_reduce(&inputs);
+
+         assert_eq!(results.len(), inputs.len());
+         for result in &results {
+             assert!(!result.contains("  "));
+         }
+     }
575
+
+     #[test]
+     fn test_aggressive_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             use_simd: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+         let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(!result.is_empty());
+     }
+
+     #[test]
+     fn test_maximum_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Maximum,
+             use_simd: false,
+             enable_semantic_clustering: true,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+         let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(!result.is_empty());
+     }
608
+
+     #[test]
+     fn test_empty_text_handling() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         assert_eq!(reducer.reduce(""), "");
+         let result = reducer.reduce(" ");
+         assert!(result == " " || result.is_empty());
+     }
621
+
+     #[test]
+     fn test_off_mode_preserves_text() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Off,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Text  with  multiple  spaces!!!";
+         assert_eq!(reducer.reduce(input), input);
+     }
633
+
+     #[test]
+     fn test_parallel_batch_processing() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             enable_parallel: true,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let inputs = vec![
+             "First  text  with  spaces",
+             "Second  text  with  spaces",
+             "Third  text  with  spaces",
+         ];
+         let results = reducer.batch_reduce(&inputs);
+
+         assert_eq!(results.len(), inputs.len());
+         for result in &results {
+             assert!(!result.contains("  "));
+         }
+     }
655
+
+     #[test]
+     fn test_cjk_text_handling() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
+         let input = "这是中文文本测试";
+         let result = reducer.reduce(input);
+
+         assert!(!result.is_empty());
+     }
+
+     #[test]
+     fn test_mixed_language_text() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "This is English text 这是中文 and some more English";
+         let result = reducer.reduce(input);
+
+         assert!(!result.is_empty());
+         assert!(result.contains("English") || result.contains("中"));
+     }
+
+     #[test]
+     fn test_punctuation_normalization() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
+         let result = reducer.reduce(input);
+
+         assert!(!result.contains("!!!!!!"));
+         assert!(!result.contains("?????"));
+         assert!(!result.contains(",,,,,,"));
+     }
+
+     #[test]
+     fn test_sentence_selection() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(result.split(". ").count() < 4);
+     }
+
+     #[test]
+     fn test_unicode_normalization_ascii() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Pure ASCII text without special characters";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("ASCII"));
+     }
+
+     #[test]
+     fn test_unicode_normalization_non_ascii() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Café naïve résumé";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("Café") || result.contains("Cafe"));
+     }
+
+     #[test]
+     fn test_single_text_vs_batch() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let text = "The quick brown fox jumps over the lazy dog";
+
+         let single_result = reducer.reduce(text);
+         let batch_results = reducer.batch_reduce(&[text]);
+
+         assert_eq!(single_result, batch_results[0]);
+     }
+
+     #[test]
+     fn test_important_word_preservation() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
+     }
+
+     #[test]
+     fn test_technical_terms_preservation() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "The implementation uses PyTorch and TensorFlow frameworks";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
+     }
+
+     #[test]
+     fn test_calculate_char_entropy() {
+         let config = TokenReductionConfig::default();
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
+         assert!(low_entropy < 1.0);
+
+         let high_entropy = reducer.calculate_char_entropy("abcdefg123");
+         assert!(high_entropy > low_entropy);
+     }
+
+     #[test]
+     fn test_universal_tokenize_english() {
+         let config = TokenReductionConfig::default();
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let tokens = reducer.universal_tokenize("hello world test");
+         assert_eq!(tokens, vec!["hello", "world", "test"]);
+     }
+
+     #[test]
+     fn test_universal_tokenize_cjk() {
+         let config = TokenReductionConfig::default();
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let tokens = reducer.universal_tokenize("中文");
+         assert!(!tokens.is_empty());
+     }
+
+     #[test]
+     fn test_fallback_threshold() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let input = "a the is of to in for on at by";
+         let result = reducer.reduce(input);
+
+         assert!(!result.is_empty());
+     }
+ }