kreuzberg 4.0.0.rc1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (265)
  1. checksums.yaml +7 -0
  2. data/.gitignore +8 -0
  3. data/.rspec +3 -0
  4. data/.rubocop.yaml +534 -0
  5. data/Gemfile +9 -0
  6. data/Gemfile.lock +157 -0
  7. data/README.md +421 -0
  8. data/Rakefile +25 -0
  9. data/Steepfile +47 -0
  10. data/examples/async_patterns.rb +340 -0
  11. data/ext/kreuzberg_rb/extconf.rb +35 -0
  12. data/ext/kreuzberg_rb/native/Cargo.toml +36 -0
  13. data/ext/kreuzberg_rb/native/README.md +425 -0
  14. data/ext/kreuzberg_rb/native/build.rs +17 -0
  15. data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -0
  16. data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -0
  17. data/ext/kreuzberg_rb/native/include/strings.h +20 -0
  18. data/ext/kreuzberg_rb/native/include/unistd.h +47 -0
  19. data/ext/kreuzberg_rb/native/src/lib.rs +2939 -0
  20. data/extconf.rb +28 -0
  21. data/kreuzberg.gemspec +105 -0
  22. data/lib/kreuzberg/api_proxy.rb +142 -0
  23. data/lib/kreuzberg/cache_api.rb +45 -0
  24. data/lib/kreuzberg/cli.rb +55 -0
  25. data/lib/kreuzberg/cli_proxy.rb +127 -0
  26. data/lib/kreuzberg/config.rb +684 -0
  27. data/lib/kreuzberg/errors.rb +50 -0
  28. data/lib/kreuzberg/extraction_api.rb +84 -0
  29. data/lib/kreuzberg/mcp_proxy.rb +186 -0
  30. data/lib/kreuzberg/ocr_backend_protocol.rb +113 -0
  31. data/lib/kreuzberg/post_processor_protocol.rb +86 -0
  32. data/lib/kreuzberg/result.rb +216 -0
  33. data/lib/kreuzberg/setup_lib_path.rb +79 -0
  34. data/lib/kreuzberg/validator_protocol.rb +89 -0
  35. data/lib/kreuzberg/version.rb +5 -0
  36. data/lib/kreuzberg.rb +82 -0
  37. data/pkg/kreuzberg-4.0.0.rc1.gem +0 -0
  38. data/sig/kreuzberg/internal.rbs +184 -0
  39. data/sig/kreuzberg.rbs +468 -0
  40. data/spec/binding/cache_spec.rb +227 -0
  41. data/spec/binding/cli_proxy_spec.rb +87 -0
  42. data/spec/binding/cli_spec.rb +54 -0
  43. data/spec/binding/config_spec.rb +345 -0
  44. data/spec/binding/config_validation_spec.rb +283 -0
  45. data/spec/binding/error_handling_spec.rb +213 -0
  46. data/spec/binding/errors_spec.rb +66 -0
  47. data/spec/binding/plugins/ocr_backend_spec.rb +307 -0
  48. data/spec/binding/plugins/postprocessor_spec.rb +269 -0
  49. data/spec/binding/plugins/validator_spec.rb +274 -0
  50. data/spec/examples.txt +104 -0
  51. data/spec/fixtures/config.toml +39 -0
  52. data/spec/fixtures/config.yaml +42 -0
  53. data/spec/fixtures/invalid_config.toml +4 -0
  54. data/spec/smoke/package_spec.rb +178 -0
  55. data/spec/spec_helper.rb +42 -0
  56. data/vendor/kreuzberg/Cargo.toml +134 -0
  57. data/vendor/kreuzberg/README.md +175 -0
  58. data/vendor/kreuzberg/build.rs +460 -0
  59. data/vendor/kreuzberg/src/api/error.rs +81 -0
  60. data/vendor/kreuzberg/src/api/handlers.rs +199 -0
  61. data/vendor/kreuzberg/src/api/mod.rs +79 -0
  62. data/vendor/kreuzberg/src/api/server.rs +353 -0
  63. data/vendor/kreuzberg/src/api/types.rs +170 -0
  64. data/vendor/kreuzberg/src/bin/profile_extract.rs +455 -0
  65. data/vendor/kreuzberg/src/cache/mod.rs +1143 -0
  66. data/vendor/kreuzberg/src/chunking/mod.rs +677 -0
  67. data/vendor/kreuzberg/src/core/batch_mode.rs +35 -0
  68. data/vendor/kreuzberg/src/core/config.rs +1032 -0
  69. data/vendor/kreuzberg/src/core/extractor.rs +903 -0
  70. data/vendor/kreuzberg/src/core/io.rs +327 -0
  71. data/vendor/kreuzberg/src/core/mime.rs +615 -0
  72. data/vendor/kreuzberg/src/core/mod.rs +42 -0
  73. data/vendor/kreuzberg/src/core/pipeline.rs +906 -0
  74. data/vendor/kreuzberg/src/embeddings.rs +323 -0
  75. data/vendor/kreuzberg/src/error.rs +431 -0
  76. data/vendor/kreuzberg/src/extraction/archive.rs +954 -0
  77. data/vendor/kreuzberg/src/extraction/docx.rs +40 -0
  78. data/vendor/kreuzberg/src/extraction/email.rs +854 -0
  79. data/vendor/kreuzberg/src/extraction/excel.rs +688 -0
  80. data/vendor/kreuzberg/src/extraction/html.rs +553 -0
  81. data/vendor/kreuzberg/src/extraction/image.rs +368 -0
  82. data/vendor/kreuzberg/src/extraction/libreoffice.rs +564 -0
  83. data/vendor/kreuzberg/src/extraction/mod.rs +77 -0
  84. data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -0
  85. data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -0
  86. data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -0
  87. data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +128 -0
  88. data/vendor/kreuzberg/src/extraction/pandoc/batch.rs +275 -0
  89. data/vendor/kreuzberg/src/extraction/pandoc/mime_types.rs +178 -0
  90. data/vendor/kreuzberg/src/extraction/pandoc/mod.rs +491 -0
  91. data/vendor/kreuzberg/src/extraction/pandoc/server.rs +496 -0
  92. data/vendor/kreuzberg/src/extraction/pandoc/subprocess.rs +1188 -0
  93. data/vendor/kreuzberg/src/extraction/pandoc/version.rs +162 -0
  94. data/vendor/kreuzberg/src/extraction/pptx.rs +3000 -0
  95. data/vendor/kreuzberg/src/extraction/structured.rs +490 -0
  96. data/vendor/kreuzberg/src/extraction/table.rs +328 -0
  97. data/vendor/kreuzberg/src/extraction/text.rs +269 -0
  98. data/vendor/kreuzberg/src/extraction/xml.rs +333 -0
  99. data/vendor/kreuzberg/src/extractors/archive.rs +425 -0
  100. data/vendor/kreuzberg/src/extractors/docx.rs +479 -0
  101. data/vendor/kreuzberg/src/extractors/email.rs +129 -0
  102. data/vendor/kreuzberg/src/extractors/excel.rs +344 -0
  103. data/vendor/kreuzberg/src/extractors/html.rs +410 -0
  104. data/vendor/kreuzberg/src/extractors/image.rs +195 -0
  105. data/vendor/kreuzberg/src/extractors/mod.rs +268 -0
  106. data/vendor/kreuzberg/src/extractors/pandoc.rs +201 -0
  107. data/vendor/kreuzberg/src/extractors/pdf.rs +496 -0
  108. data/vendor/kreuzberg/src/extractors/pptx.rs +234 -0
  109. data/vendor/kreuzberg/src/extractors/structured.rs +126 -0
  110. data/vendor/kreuzberg/src/extractors/text.rs +242 -0
  111. data/vendor/kreuzberg/src/extractors/xml.rs +128 -0
  112. data/vendor/kreuzberg/src/image/dpi.rs +164 -0
  113. data/vendor/kreuzberg/src/image/mod.rs +6 -0
  114. data/vendor/kreuzberg/src/image/preprocessing.rs +417 -0
  115. data/vendor/kreuzberg/src/image/resize.rs +89 -0
  116. data/vendor/kreuzberg/src/keywords/config.rs +154 -0
  117. data/vendor/kreuzberg/src/keywords/mod.rs +237 -0
  118. data/vendor/kreuzberg/src/keywords/processor.rs +267 -0
  119. data/vendor/kreuzberg/src/keywords/rake.rs +294 -0
  120. data/vendor/kreuzberg/src/keywords/types.rs +68 -0
  121. data/vendor/kreuzberg/src/keywords/yake.rs +163 -0
  122. data/vendor/kreuzberg/src/language_detection/mod.rs +942 -0
  123. data/vendor/kreuzberg/src/lib.rs +102 -0
  124. data/vendor/kreuzberg/src/mcp/mod.rs +32 -0
  125. data/vendor/kreuzberg/src/mcp/server.rs +1966 -0
  126. data/vendor/kreuzberg/src/ocr/cache.rs +469 -0
  127. data/vendor/kreuzberg/src/ocr/error.rs +37 -0
  128. data/vendor/kreuzberg/src/ocr/hocr.rs +216 -0
  129. data/vendor/kreuzberg/src/ocr/mod.rs +58 -0
  130. data/vendor/kreuzberg/src/ocr/processor.rs +847 -0
  131. data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -0
  132. data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -0
  133. data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +450 -0
  134. data/vendor/kreuzberg/src/ocr/types.rs +393 -0
  135. data/vendor/kreuzberg/src/ocr/utils.rs +47 -0
  136. data/vendor/kreuzberg/src/ocr/validation.rs +206 -0
  137. data/vendor/kreuzberg/src/pdf/error.rs +122 -0
  138. data/vendor/kreuzberg/src/pdf/images.rs +139 -0
  139. data/vendor/kreuzberg/src/pdf/metadata.rs +346 -0
  140. data/vendor/kreuzberg/src/pdf/mod.rs +50 -0
  141. data/vendor/kreuzberg/src/pdf/rendering.rs +369 -0
  142. data/vendor/kreuzberg/src/pdf/table.rs +420 -0
  143. data/vendor/kreuzberg/src/pdf/text.rs +161 -0
  144. data/vendor/kreuzberg/src/plugins/extractor.rs +1010 -0
  145. data/vendor/kreuzberg/src/plugins/mod.rs +209 -0
  146. data/vendor/kreuzberg/src/plugins/ocr.rs +629 -0
  147. data/vendor/kreuzberg/src/plugins/processor.rs +641 -0
  148. data/vendor/kreuzberg/src/plugins/registry.rs +1324 -0
  149. data/vendor/kreuzberg/src/plugins/traits.rs +258 -0
  150. data/vendor/kreuzberg/src/plugins/validator.rs +955 -0
  151. data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -0
  152. data/vendor/kreuzberg/src/text/mod.rs +19 -0
  153. data/vendor/kreuzberg/src/text/quality.rs +697 -0
  154. data/vendor/kreuzberg/src/text/string_utils.rs +217 -0
  155. data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -0
  156. data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -0
  157. data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -0
  158. data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -0
  159. data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -0
  160. data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -0
  161. data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -0
  162. data/vendor/kreuzberg/src/types.rs +873 -0
  163. data/vendor/kreuzberg/src/utils/mod.rs +17 -0
  164. data/vendor/kreuzberg/src/utils/quality.rs +959 -0
  165. data/vendor/kreuzberg/src/utils/string_utils.rs +381 -0
  166. data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -0
  167. data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -0
  168. data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -0
  169. data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -0
  170. data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -0
  171. data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -0
  172. data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -0
  173. data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -0
  174. data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -0
  175. data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -0
  176. data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -0
  177. data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -0
  178. data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -0
  179. data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -0
  180. data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -0
  181. data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -0
  182. data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -0
  183. data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -0
  184. data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -0
  185. data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -0
  186. data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -0
  187. data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -0
  188. data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -0
  189. data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -0
  190. data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -0
  191. data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -0
  192. data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -0
  193. data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -0
  194. data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -0
  195. data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -0
  196. data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -0
  197. data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -0
  198. data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -0
  199. data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -0
  200. data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -0
  201. data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -0
  202. data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -0
  203. data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -0
  204. data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -0
  205. data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -0
  206. data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -0
  207. data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -0
  208. data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -0
  209. data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -0
  210. data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -0
  211. data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -0
  212. data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -0
  213. data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -0
  214. data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -0
  215. data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -0
  216. data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -0
  217. data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -0
  218. data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -0
  219. data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -0
  220. data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -0
  221. data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -0
  222. data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -0
  223. data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -0
  224. data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -0
  225. data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -0
  226. data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -0
  227. data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -0
  228. data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -0
  229. data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -0
  230. data/vendor/kreuzberg/tests/api_tests.rs +966 -0
  231. data/vendor/kreuzberg/tests/archive_integration.rs +543 -0
  232. data/vendor/kreuzberg/tests/batch_orchestration.rs +542 -0
  233. data/vendor/kreuzberg/tests/batch_processing.rs +304 -0
  234. data/vendor/kreuzberg/tests/chunking_offset_demo.rs +92 -0
  235. data/vendor/kreuzberg/tests/concurrency_stress.rs +509 -0
  236. data/vendor/kreuzberg/tests/config_features.rs +580 -0
  237. data/vendor/kreuzberg/tests/config_loading_tests.rs +439 -0
  238. data/vendor/kreuzberg/tests/core_integration.rs +493 -0
  239. data/vendor/kreuzberg/tests/csv_integration.rs +424 -0
  240. data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +124 -0
  241. data/vendor/kreuzberg/tests/email_integration.rs +325 -0
  242. data/vendor/kreuzberg/tests/error_handling.rs +393 -0
  243. data/vendor/kreuzberg/tests/format_integration.rs +159 -0
  244. data/vendor/kreuzberg/tests/helpers/mod.rs +142 -0
  245. data/vendor/kreuzberg/tests/image_integration.rs +253 -0
  246. data/vendor/kreuzberg/tests/keywords_integration.rs +479 -0
  247. data/vendor/kreuzberg/tests/keywords_quality.rs +509 -0
  248. data/vendor/kreuzberg/tests/mime_detection.rs +428 -0
  249. data/vendor/kreuzberg/tests/ocr_configuration.rs +510 -0
  250. data/vendor/kreuzberg/tests/ocr_errors.rs +676 -0
  251. data/vendor/kreuzberg/tests/ocr_quality.rs +627 -0
  252. data/vendor/kreuzberg/tests/ocr_stress.rs +469 -0
  253. data/vendor/kreuzberg/tests/pandoc_integration.rs +503 -0
  254. data/vendor/kreuzberg/tests/pdf_integration.rs +43 -0
  255. data/vendor/kreuzberg/tests/pipeline_integration.rs +1412 -0
  256. data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +771 -0
  257. data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +561 -0
  258. data/vendor/kreuzberg/tests/plugin_system.rs +921 -0
  259. data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -0
  260. data/vendor/kreuzberg/tests/registry_integration_tests.rs +607 -0
  261. data/vendor/kreuzberg/tests/security_validation.rs +404 -0
  262. data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -0
  263. data/vendor/kreuzberg/tests/test_fastembed.rs +609 -0
  264. data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -0
  265. metadata +471 -0
data/vendor/kreuzberg/src/text/token_reduction/core.rs
@@ -0,0 +1,796 @@
+ use crate::error::Result;
+ use crate::text::token_reduction::{
+     cjk_utils::CjkTokenizer,
+     config::{ReductionLevel, TokenReductionConfig},
+     filters::FilterPipeline,
+     semantic::SemanticAnalyzer,
+     simd_text::{SimdTextProcessor, chunk_text_for_parallel},
+ };
+ use once_cell::sync::Lazy;
+ use rayon::prelude::*;
+ use regex::Regex;
+ use std::sync::Arc;
+ use unicode_normalization::UnicodeNormalization;
+
+ static REPEATED_EXCLAMATION: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
+ static REPEATED_QUESTION: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
+ static REPEATED_COMMA: Lazy<Regex> =
+     Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
+
+ /// Bonus added for sentences at the beginning or end of the document
+ const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
+
+ /// Bonus added for sentences with ideal word count (neither too short nor too long)
+ const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
+
+ /// Minimum word count for ideal sentence length
+ const MIN_IDEAL_WORD_COUNT: usize = 3;
+
+ /// Maximum word count for ideal sentence length
+ const MAX_IDEAL_WORD_COUNT: usize = 25;
+
+ /// Weight multiplier for numeric content density in sentences
+ const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
+
+ /// Weight multiplier for capitalized/acronym word density in sentences
+ const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
+
+ /// Weight multiplier for long word density in sentences
+ const LONG_WORD_WEIGHT: f32 = 0.2;
+
+ /// Minimum character length for a word to be considered "long"
+ const LONG_WORD_THRESHOLD: usize = 8;
+
+ /// Weight multiplier for punctuation density in sentences
+ const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
+
+ /// Weight multiplier for word diversity ratio (unique words / total words)
+ const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
+
+ /// Weight multiplier for character entropy (measure of text randomness/information)
+ const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
+
+ pub struct TokenReducer {
+     config: Arc<TokenReductionConfig>,
+     text_processor: SimdTextProcessor,
+     filter_pipeline: FilterPipeline,
+     semantic_analyzer: Option<SemanticAnalyzer>,
+     cjk_tokenizer: CjkTokenizer,
+     language: String,
+ }
+
+ impl TokenReducer {
+     pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
+         let config = Arc::new(config.clone());
+         let language = language_hint
+             .or(config.language_hint.as_deref())
+             .unwrap_or("en")
+             .to_string();
+
+         let text_processor = SimdTextProcessor::new();
+         let filter_pipeline = FilterPipeline::new(&config, &language)?;
+
+         let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
+             Some(SemanticAnalyzer::new(&language))
+         } else {
+             None
+         };
+
+         Ok(Self {
+             config,
+             text_processor,
+             filter_pipeline,
+             semantic_analyzer,
+             cjk_tokenizer: CjkTokenizer::new(),
+             language,
+         })
+     }
+
+     /// Get the language code being used for stopwords and semantic analysis.
+     pub fn language(&self) -> &str {
+         &self.language
+     }
+
+     pub fn reduce(&self, text: &str) -> String {
+         if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
+             return text.to_string();
+         }
+
+         let working_text = if text.is_ascii() {
+             text
+         } else {
+             &text.nfc().collect::<String>()
+         };
+
+         match self.config.level {
+             ReductionLevel::Off => working_text.to_string(),
+             ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
+             ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
+             ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
+             ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
+         }
+     }
+
+     pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
+         if !self.config.enable_parallel || texts.len() < 2 {
+             return texts.iter().map(|text| self.reduce(text)).collect();
+         }
+
+         texts.par_iter().map(|text| self.reduce(text)).collect()
+     }
+
+     fn apply_light_reduction_optimized(&self, text: &str) -> String {
+         let mut result = if self.config.use_simd {
+             self.text_processor.clean_punctuation(text)
+         } else {
+             self.clean_punctuation_optimized(text)
+         };
+
+         result = self.filter_pipeline.apply_light_filters(&result);
+         result.trim().to_string()
+     }
+
+     fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
+         let mut result = self.apply_light_reduction_optimized(text);
+
+         result = if self.config.enable_parallel && text.len() > 1000 {
+             self.apply_parallel_moderate_reduction(&result)
+         } else {
+             self.filter_pipeline.apply_moderate_filters(&result)
+         };
+
+         result
+     }
+
+     fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
+         let mut result = self.apply_moderate_reduction_optimized(text);
+
+         result = self.remove_additional_common_words(&result);
+         result = self.apply_sentence_selection(&result);
+
+         if let Some(ref analyzer) = self.semantic_analyzer {
+             result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
+         }
+
+         result
+     }
+
+     fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
+         let mut result = self.apply_aggressive_reduction_optimized(text);
+
+         if let Some(ref analyzer) = self.semantic_analyzer
+             && self.config.enable_semantic_clustering
+         {
+             result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
+         }
+
+         result
+     }
+
+     fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
+         let num_threads = rayon::current_num_threads();
+         let chunks = chunk_text_for_parallel(text, num_threads);
+
+         let processed_chunks: Vec<String> = chunks
+             .par_iter()
+             .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
+             .collect();
+
+         processed_chunks.join(" ")
+     }
+
+     fn clean_punctuation_optimized(&self, text: &str) -> String {
+         let mut result = text.to_string();
+
+         result = REPEATED_EXCLAMATION.replace_all(&result, "!").to_string();
+         result = REPEATED_QUESTION.replace_all(&result, "?").to_string();
+         result = REPEATED_COMMA.replace_all(&result, ",").to_string();
+
+         result
+     }
+
+     fn remove_additional_common_words(&self, text: &str) -> String {
+         let words = self.universal_tokenize(text);
+
+         if words.len() < 4 {
+             return text.to_string();
+         }
+
+         let mut word_freq = std::collections::HashMap::new();
+         let mut word_lengths = Vec::new();
+
+         for word in &words {
+             let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
+                 word.to_lowercase()
+             } else {
+                 word.chars()
+                     .filter(|c| c.is_alphabetic())
+                     .collect::<String>()
+                     .to_lowercase()
+             };
+
+             if !clean_word.is_empty() {
+                 *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
+                 word_lengths.push(clean_word.chars().count());
+             }
+         }
+
+         let avg_length = if !word_lengths.is_empty() {
+             word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
+         } else {
+             5.0
+         };
+
+         let original_count = words.len();
+
+         let filtered_words: Vec<String> = words
+             .iter()
+             .filter(|word| {
+                 let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
+                     word.to_lowercase()
+                 } else {
+                     word.chars()
+                         .filter(|c| c.is_alphabetic())
+                         .collect::<String>()
+                         .to_lowercase()
+                 };
+
+                 if clean_word.is_empty() {
+                     return true;
+                 }
+
+                 let freq = word_freq.get(&clean_word).unwrap_or(&0);
+                 let word_len = clean_word.chars().count() as f32;
+
+                 self.has_important_characteristics(word)
+                     || (*freq <= 2 && word_len >= avg_length * 0.8)
+                     || (word_len >= avg_length * 1.5)
+             })
+             .cloned()
+             .collect();
+
+         let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
+         let fallback_threshold = if has_cjk_content {
+             original_count / 5
+         } else {
+             original_count / 3
+         };
+
+         if filtered_words.len() < fallback_threshold {
+             let fallback_words: Vec<String> = words
+                 .iter()
+                 .filter(|word| {
+                     let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
+                         (*word).clone()
+                     } else {
+                         word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
+                     };
+
+                     clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
+                 })
+                 .cloned()
+                 .collect();
+             self.smart_join(&fallback_words, has_cjk_content)
+         } else {
+             self.smart_join(&filtered_words, has_cjk_content)
+         }
+     }
+
+     fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
+         if has_cjk_content {
+             tokens.join("")
+         } else {
+             tokens.join(" ")
+         }
+     }
+
+     fn has_important_characteristics(&self, word: &str) -> bool {
+         if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
+             return true;
+         }
+
+         if word.chars().any(|c| c.is_numeric()) {
+             return true;
+         }
+
+         if word.len() > 10 {
+             return true;
+         }
+
+         let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
+         if uppercase_count > 1 && uppercase_count < word.len() {
+             return true;
+         }
+
+         if self.has_cjk_importance(word) {
+             return true;
+         }
+
+         false
+     }
+
+     fn has_cjk_importance(&self, word: &str) -> bool {
+         let chars: Vec<char> = word.chars().collect();
+
+         let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
+         if !has_cjk {
+             return false;
+         }
+
+         let important_radicals = [
+             '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
+             '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
+             '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
+             '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
+         ];
+
+         for &char in &chars {
+             if important_radicals.contains(&char) {
+                 return true;
+             }
+         }
+
+         if chars.len() == 2 && has_cjk {
+             let has_technical = chars.iter().any(|&c| {
+                 let code = c as u32;
+                 (0x4E00..=0x4FFF).contains(&code)
+                     || (0x5000..=0x51FF).contains(&code)
+                     || (0x6700..=0x68FF).contains(&code)
+                     || (0x7500..=0x76FF).contains(&code)
+             });
+
+             if has_technical {
+                 return true;
+             }
+         }
+
+         false
+     }
+
+     fn apply_sentence_selection(&self, text: &str) -> String {
+         let sentences: Vec<&str> = text
+             .split(['.', '!', '?'])
+             .map(|s| s.trim())
+             .filter(|s| !s.is_empty())
+             .collect();
+
+         if sentences.len() <= 2 {
+             return text.to_string();
+         }
+
+         let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
+             .iter()
+             .enumerate()
+             .map(|(i, sentence)| {
+                 let score = self.score_sentence_importance(sentence, i, sentences.len());
+                 (i, score, *sentence)
+             })
+             .collect();
+
+         scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
+
+         let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
+         let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();
+
+         selected_indices.sort();
+
+         let selected_sentences: Vec<&str> = selected_indices
+             .iter()
+             .filter_map(|&i| sentences.get(i))
+             .copied()
+             .collect();
+
+         if selected_sentences.is_empty() {
+             text.to_string()
+         } else {
+             selected_sentences.join(". ")
+         }
+     }
+
+     fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
+         let mut score = 0.0;
+
+         if position == 0 || position == total_sentences - 1 {
+             score += SENTENCE_EDGE_POSITION_BONUS;
+         }
+
+         let words: Vec<&str> = sentence.split_whitespace().collect();
+         if words.is_empty() {
+             return score;
+         }
+
+         let word_count = words.len();
+         if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
+             score += IDEAL_WORD_COUNT_BONUS;
+         }
+
+         let mut numeric_count = 0;
+         let mut caps_count = 0;
+         let mut long_word_count = 0;
+         let mut punct_density = 0;
+
+         for word in &words {
+             if word.chars().any(|c| c.is_numeric()) {
+                 numeric_count += 1;
+             }
+
+             if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
+                 caps_count += 1;
+             }
+
+             if word.len() > LONG_WORD_THRESHOLD {
+                 long_word_count += 1;
+             }
+
+             punct_density += word.chars().filter(|c| c.is_ascii_punctuation()).count();
+         }
+
+         score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
+         score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
+         score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
+         score += (punct_density as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;
+
+         let unique_words: std::collections::HashSet<_> = words
+             .iter()
+             .map(|w| {
+                 w.chars()
+                     .filter(|c| c.is_alphabetic())
+                     .collect::<String>()
+                     .to_lowercase()
+             })
+             .collect();
+         let diversity_ratio = unique_words.len() as f32 / words.len() as f32;
+         score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;
+
+         let char_entropy = self.calculate_char_entropy(sentence);
+         score += char_entropy * CHAR_ENTROPY_WEIGHT;
+
+         score
+     }
+
+     fn universal_tokenize(&self, text: &str) -> Vec<String> {
+         self.cjk_tokenizer.tokenize_mixed_text(text)
+     }
+
+     fn calculate_char_entropy(&self, text: &str) -> f32 {
+         let chars: Vec<char> = text.chars().collect();
+         if chars.is_empty() {
+             return 0.0;
+         }
+
+         let mut char_freq = std::collections::HashMap::new();
+         for &ch in &chars {
+             let lowercase_ch = ch
+                 .to_lowercase()
+                 .next()
+                 .expect("to_lowercase() must yield at least one character for valid Unicode");
+             *char_freq.entry(lowercase_ch).or_insert(0) += 1;
+         }
+
+         let total_chars = chars.len() as f32;
+         char_freq
+             .values()
+             .map(|&freq| {
+                 let p = freq as f32 / total_chars;
+                 if p > 0.0 { -p * p.log2() } else { 0.0 }
+             })
+             .sum::<f32>()
+             .min(5.0)
+     }
+ }
+
+ #[cfg(test)]
+ mod tests {
+     use super::*;
+
+     #[test]
+     fn test_light_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             use_simd: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Hello world!!! How are you???";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(!result.contains("  "));
+     }
+
+     #[test]
+     fn test_moderate_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             use_simd: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+         let input = "The quick brown fox is jumping over the lazy dog";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(result.contains("quick"));
+         assert!(result.contains("brown"));
+         assert!(result.contains("fox"));
+     }
+
+     #[test]
+     fn test_batch_processing() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             enable_parallel: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
+         let results = reducer.batch_reduce(&inputs);
+
+         assert_eq!(results.len(), inputs.len());
+         for result in &results {
+             assert!(!result.contains("  "));
+         }
+     }
+
+     #[test]
+     fn test_aggressive_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             use_simd: false,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+         let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(!result.is_empty());
+     }
+
+     #[test]
+     fn test_maximum_reduction() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Maximum,
+             use_simd: false,
+             enable_semantic_clustering: true,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("en")).unwrap();
+         let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(!result.is_empty());
+     }
+
+     #[test]
+     fn test_empty_text_handling() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         assert_eq!(reducer.reduce(""), "");
+         let result = reducer.reduce(" ");
+         assert!(result == " " || result.is_empty());
+     }
+
+     #[test]
+     fn test_off_mode_preserves_text() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Off,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Text with multiple spaces!!!";
+         assert_eq!(reducer.reduce(input), input);
+     }
+
+     #[test]
+     fn test_parallel_batch_processing() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             enable_parallel: true,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let inputs = vec![
+             "First text with spaces",
+             "Second text with spaces",
+             "Third text with spaces",
+         ];
+         let results = reducer.batch_reduce(&inputs);
+
+         assert_eq!(results.len(), inputs.len());
+         for result in &results {
+             assert!(!result.contains("  "));
+         }
+     }
+
+     #[test]
+     fn test_cjk_text_handling() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
+         let input = "这是中文文本测试";
+         let result = reducer.reduce(input);
+
+         assert!(!result.is_empty());
+     }
+
+     #[test]
+     fn test_mixed_language_text() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "This is English text 这是中文 and some more English";
+         let result = reducer.reduce(input);
+
+         assert!(!result.is_empty());
+         assert!(result.contains("English") || result.contains("中"));
+     }
+
+     #[test]
+     fn test_punctuation_normalization() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
+         let result = reducer.reduce(input);
+
+         assert!(!result.contains("!!!!!!"));
+         assert!(!result.contains("?????"));
+         assert!(!result.contains(",,,,,,"));
+     }
+
+     #[test]
+     fn test_sentence_selection() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
+         let result = reducer.reduce(input);
+
+         assert!(result.len() < input.len());
+         assert!(result.split(". ").count() < 4);
+     }
+
+     #[test]
+     fn test_unicode_normalization_ascii() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Pure ASCII text without special characters";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("ASCII"));
+     }
+
+     #[test]
+     fn test_unicode_normalization_non_ascii() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Light,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "Café naïve résumé";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("Café") || result.contains("Cafe"));
+     }
+
+     #[test]
+     fn test_single_text_vs_batch() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Moderate,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let text = "The quick brown fox jumps over the lazy dog";
+
+         let single_result = reducer.reduce(text);
+         let batch_results = reducer.batch_reduce(&[text]);
+
+         assert_eq!(single_result, batch_results[0]);
+     }
+
+     #[test]
+     fn test_important_word_preservation() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
+     }
+
+     #[test]
+     fn test_technical_terms_preservation() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+         let input = "The implementation uses PyTorch and TensorFlow frameworks";
+         let result = reducer.reduce(input);
+
+         assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
+     }
+
+     #[test]
+     fn test_calculate_char_entropy() {
+         let config = TokenReductionConfig::default();
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
+         assert!(low_entropy < 1.0);
+
+         let high_entropy = reducer.calculate_char_entropy("abcdefg123");
+         assert!(high_entropy > low_entropy);
+     }
+
+     #[test]
+     fn test_universal_tokenize_english() {
+         let config = TokenReductionConfig::default();
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let tokens = reducer.universal_tokenize("hello world test");
+         assert_eq!(tokens, vec!["hello", "world", "test"]);
+     }
+
+     #[test]
+     fn test_universal_tokenize_cjk() {
+         let config = TokenReductionConfig::default();
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let tokens = reducer.universal_tokenize("中文");
+         assert!(!tokens.is_empty());
+     }
+
+     #[test]
+     fn test_fallback_threshold() {
+         let config = TokenReductionConfig {
+             level: ReductionLevel::Aggressive,
+             ..Default::default()
+         };
+
+         let reducer = TokenReducer::new(&config, None).unwrap();
+
+         let input = "a the is of to in for on at by";
+         let result = reducer.reduce(input);
+
+         assert!(!result.is_empty());
+     }
+ }
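
For orientation, here is a minimal usage sketch of the API shown in the hunk above. It uses only names visible in the diff (TokenReductionConfig, ReductionLevel, TokenReducer, reduce, batch_reduce); the external import path kreuzberg::text::token_reduction is an assumption about how the vendored crate re-exports this module, and Default for TokenReductionConfig is inferred from its use in the tests.

// Sketch only: import path and Default impl are assumptions, not confirmed
// by the diff; inside the crate this module lives at
// crate::text::token_reduction.
use kreuzberg::text::token_reduction::{ReductionLevel, TokenReducer, TokenReductionConfig};

fn main() {
    let config = TokenReductionConfig {
        level: ReductionLevel::Moderate,
        ..Default::default()
    };

    // The language hint falls back to config.language_hint, then "en".
    let reducer = TokenReducer::new(&config, Some("en"))
        .expect("filter pipeline should initialize for a known language");

    // Single document: Light cleanup plus stopword-style moderate filters.
    let reduced = reducer.reduce("The quick brown fox is jumping over the lazy dog");
    println!("{reduced}");

    // Batch: runs in parallel via rayon when enable_parallel is set
    // and there are at least two inputs.
    let batch = reducer.batch_reduce(&["First document", "Second document"]);
    assert_eq!(batch.len(), 2);
}

As a sanity check on the scoring math: in calculate_char_entropy, a string such as "aaaaaaa" has a single symbol with probability 1, so the Shannon entropy sum is 0, which is why test_calculate_char_entropy expects a value below 1.0, while a varied string like "abcdefg123" scores strictly higher (capped at 5.0 bits).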