kreuzberg 4.0.0.pre.rc.11 → 4.0.0.pre.rc.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +14 -14
- data/.rspec +3 -3
- data/.rubocop.yaml +1 -1
- data/.rubocop.yml +538 -538
- data/Gemfile +8 -8
- data/Gemfile.lock +2 -105
- data/README.md +454 -454
- data/Rakefile +25 -25
- data/Steepfile +47 -47
- data/examples/async_patterns.rb +341 -341
- data/ext/kreuzberg_rb/extconf.rb +45 -45
- data/ext/kreuzberg_rb/native/.cargo/config.toml +2 -2
- data/ext/kreuzberg_rb/native/Cargo.lock +6941 -6941
- data/ext/kreuzberg_rb/native/Cargo.toml +54 -54
- data/ext/kreuzberg_rb/native/README.md +425 -425
- data/ext/kreuzberg_rb/native/build.rs +15 -15
- data/ext/kreuzberg_rb/native/include/ieeefp.h +11 -11
- data/ext/kreuzberg_rb/native/include/msvc_compat/strings.h +14 -14
- data/ext/kreuzberg_rb/native/include/strings.h +20 -20
- data/ext/kreuzberg_rb/native/include/unistd.h +47 -47
- data/ext/kreuzberg_rb/native/src/lib.rs +3158 -3158
- data/extconf.rb +28 -28
- data/kreuzberg.gemspec +214 -214
- data/lib/kreuzberg/api_proxy.rb +142 -142
- data/lib/kreuzberg/cache_api.rb +81 -81
- data/lib/kreuzberg/cli.rb +55 -55
- data/lib/kreuzberg/cli_proxy.rb +127 -127
- data/lib/kreuzberg/config.rb +724 -724
- data/lib/kreuzberg/error_context.rb +80 -80
- data/lib/kreuzberg/errors.rb +118 -118
- data/lib/kreuzberg/extraction_api.rb +340 -340
- data/lib/kreuzberg/mcp_proxy.rb +186 -186
- data/lib/kreuzberg/ocr_backend_protocol.rb +113 -113
- data/lib/kreuzberg/post_processor_protocol.rb +86 -86
- data/lib/kreuzberg/result.rb +279 -279
- data/lib/kreuzberg/setup_lib_path.rb +80 -80
- data/lib/kreuzberg/validator_protocol.rb +89 -89
- data/lib/kreuzberg/version.rb +5 -5
- data/lib/kreuzberg.rb +109 -109
- data/lib/{libpdfium.dylib → pdfium.dll} +0 -0
- data/sig/kreuzberg/internal.rbs +184 -184
- data/sig/kreuzberg.rbs +546 -546
- data/spec/binding/cache_spec.rb +227 -227
- data/spec/binding/cli_proxy_spec.rb +85 -85
- data/spec/binding/cli_spec.rb +55 -55
- data/spec/binding/config_spec.rb +345 -345
- data/spec/binding/config_validation_spec.rb +283 -283
- data/spec/binding/error_handling_spec.rb +213 -213
- data/spec/binding/errors_spec.rb +66 -66
- data/spec/binding/plugins/ocr_backend_spec.rb +307 -307
- data/spec/binding/plugins/postprocessor_spec.rb +269 -269
- data/spec/binding/plugins/validator_spec.rb +274 -274
- data/spec/fixtures/config.toml +39 -39
- data/spec/fixtures/config.yaml +41 -41
- data/spec/fixtures/invalid_config.toml +4 -4
- data/spec/smoke/package_spec.rb +178 -178
- data/spec/spec_helper.rb +42 -42
- data/vendor/Cargo.toml +2 -1
- data/vendor/kreuzberg/Cargo.toml +2 -2
- data/vendor/kreuzberg/README.md +230 -230
- data/vendor/kreuzberg/benches/otel_overhead.rs +48 -48
- data/vendor/kreuzberg/build.rs +843 -843
- data/vendor/kreuzberg/src/api/error.rs +81 -81
- data/vendor/kreuzberg/src/api/handlers.rs +199 -199
- data/vendor/kreuzberg/src/api/mod.rs +79 -79
- data/vendor/kreuzberg/src/api/server.rs +353 -353
- data/vendor/kreuzberg/src/api/types.rs +170 -170
- data/vendor/kreuzberg/src/cache/mod.rs +1167 -1167
- data/vendor/kreuzberg/src/chunking/mod.rs +1877 -1877
- data/vendor/kreuzberg/src/chunking/processor.rs +220 -220
- data/vendor/kreuzberg/src/core/batch_mode.rs +95 -95
- data/vendor/kreuzberg/src/core/config.rs +1080 -1080
- data/vendor/kreuzberg/src/core/extractor.rs +1156 -1156
- data/vendor/kreuzberg/src/core/io.rs +329 -329
- data/vendor/kreuzberg/src/core/mime.rs +605 -605
- data/vendor/kreuzberg/src/core/mod.rs +47 -47
- data/vendor/kreuzberg/src/core/pipeline.rs +1184 -1184
- data/vendor/kreuzberg/src/embeddings.rs +500 -500
- data/vendor/kreuzberg/src/error.rs +431 -431
- data/vendor/kreuzberg/src/extraction/archive.rs +954 -954
- data/vendor/kreuzberg/src/extraction/docx.rs +398 -398
- data/vendor/kreuzberg/src/extraction/email.rs +854 -854
- data/vendor/kreuzberg/src/extraction/excel.rs +688 -688
- data/vendor/kreuzberg/src/extraction/html.rs +601 -601
- data/vendor/kreuzberg/src/extraction/image.rs +491 -491
- data/vendor/kreuzberg/src/extraction/libreoffice.rs +574 -562
- data/vendor/kreuzberg/src/extraction/markdown.rs +213 -213
- data/vendor/kreuzberg/src/extraction/mod.rs +81 -81
- data/vendor/kreuzberg/src/extraction/office_metadata/app_properties.rs +398 -398
- data/vendor/kreuzberg/src/extraction/office_metadata/core_properties.rs +247 -247
- data/vendor/kreuzberg/src/extraction/office_metadata/custom_properties.rs +240 -240
- data/vendor/kreuzberg/src/extraction/office_metadata/mod.rs +130 -130
- data/vendor/kreuzberg/src/extraction/office_metadata/odt_properties.rs +284 -284
- data/vendor/kreuzberg/src/extraction/pptx.rs +3100 -3100
- data/vendor/kreuzberg/src/extraction/structured.rs +490 -490
- data/vendor/kreuzberg/src/extraction/table.rs +328 -328
- data/vendor/kreuzberg/src/extraction/text.rs +269 -269
- data/vendor/kreuzberg/src/extraction/xml.rs +333 -333
- data/vendor/kreuzberg/src/extractors/archive.rs +447 -447
- data/vendor/kreuzberg/src/extractors/bibtex.rs +470 -470
- data/vendor/kreuzberg/src/extractors/docbook.rs +504 -504
- data/vendor/kreuzberg/src/extractors/docx.rs +400 -400
- data/vendor/kreuzberg/src/extractors/email.rs +157 -157
- data/vendor/kreuzberg/src/extractors/epub.rs +708 -708
- data/vendor/kreuzberg/src/extractors/excel.rs +345 -345
- data/vendor/kreuzberg/src/extractors/fictionbook.rs +492 -492
- data/vendor/kreuzberg/src/extractors/html.rs +407 -407
- data/vendor/kreuzberg/src/extractors/image.rs +219 -219
- data/vendor/kreuzberg/src/extractors/jats.rs +1054 -1054
- data/vendor/kreuzberg/src/extractors/jupyter.rs +368 -368
- data/vendor/kreuzberg/src/extractors/latex.rs +653 -653
- data/vendor/kreuzberg/src/extractors/markdown.rs +701 -701
- data/vendor/kreuzberg/src/extractors/mod.rs +429 -429
- data/vendor/kreuzberg/src/extractors/odt.rs +628 -628
- data/vendor/kreuzberg/src/extractors/opml.rs +635 -635
- data/vendor/kreuzberg/src/extractors/orgmode.rs +529 -529
- data/vendor/kreuzberg/src/extractors/pdf.rs +749 -722
- data/vendor/kreuzberg/src/extractors/pptx.rs +267 -267
- data/vendor/kreuzberg/src/extractors/rst.rs +577 -577
- data/vendor/kreuzberg/src/extractors/rtf.rs +809 -809
- data/vendor/kreuzberg/src/extractors/security.rs +484 -484
- data/vendor/kreuzberg/src/extractors/security_tests.rs +367 -367
- data/vendor/kreuzberg/src/extractors/structured.rs +142 -142
- data/vendor/kreuzberg/src/extractors/text.rs +265 -265
- data/vendor/kreuzberg/src/extractors/typst.rs +651 -651
- data/vendor/kreuzberg/src/extractors/xml.rs +147 -147
- data/vendor/kreuzberg/src/image/dpi.rs +164 -164
- data/vendor/kreuzberg/src/image/mod.rs +6 -6
- data/vendor/kreuzberg/src/image/preprocessing.rs +417 -417
- data/vendor/kreuzberg/src/image/resize.rs +89 -89
- data/vendor/kreuzberg/src/keywords/config.rs +154 -154
- data/vendor/kreuzberg/src/keywords/mod.rs +237 -237
- data/vendor/kreuzberg/src/keywords/processor.rs +275 -275
- data/vendor/kreuzberg/src/keywords/rake.rs +293 -293
- data/vendor/kreuzberg/src/keywords/types.rs +68 -68
- data/vendor/kreuzberg/src/keywords/yake.rs +163 -163
- data/vendor/kreuzberg/src/language_detection/mod.rs +985 -985
- data/vendor/kreuzberg/src/language_detection/processor.rs +219 -219
- data/vendor/kreuzberg/src/lib.rs +113 -113
- data/vendor/kreuzberg/src/mcp/mod.rs +35 -35
- data/vendor/kreuzberg/src/mcp/server.rs +2076 -2076
- data/vendor/kreuzberg/src/ocr/cache.rs +469 -469
- data/vendor/kreuzberg/src/ocr/error.rs +37 -37
- data/vendor/kreuzberg/src/ocr/hocr.rs +216 -216
- data/vendor/kreuzberg/src/ocr/mod.rs +58 -58
- data/vendor/kreuzberg/src/ocr/processor.rs +863 -863
- data/vendor/kreuzberg/src/ocr/table/mod.rs +4 -4
- data/vendor/kreuzberg/src/ocr/table/tsv_parser.rs +144 -144
- data/vendor/kreuzberg/src/ocr/tesseract_backend.rs +452 -452
- data/vendor/kreuzberg/src/ocr/types.rs +393 -393
- data/vendor/kreuzberg/src/ocr/utils.rs +47 -47
- data/vendor/kreuzberg/src/ocr/validation.rs +206 -206
- data/vendor/kreuzberg/src/panic_context.rs +154 -154
- data/vendor/kreuzberg/src/pdf/bindings.rs +44 -44
- data/vendor/kreuzberg/src/pdf/bundled.rs +346 -346
- data/vendor/kreuzberg/src/pdf/error.rs +130 -130
- data/vendor/kreuzberg/src/pdf/images.rs +139 -139
- data/vendor/kreuzberg/src/pdf/metadata.rs +489 -489
- data/vendor/kreuzberg/src/pdf/mod.rs +68 -68
- data/vendor/kreuzberg/src/pdf/rendering.rs +368 -368
- data/vendor/kreuzberg/src/pdf/table.rs +420 -420
- data/vendor/kreuzberg/src/pdf/text.rs +240 -240
- data/vendor/kreuzberg/src/plugins/extractor.rs +1044 -1044
- data/vendor/kreuzberg/src/plugins/mod.rs +212 -212
- data/vendor/kreuzberg/src/plugins/ocr.rs +639 -639
- data/vendor/kreuzberg/src/plugins/processor.rs +650 -650
- data/vendor/kreuzberg/src/plugins/registry.rs +1339 -1339
- data/vendor/kreuzberg/src/plugins/traits.rs +258 -258
- data/vendor/kreuzberg/src/plugins/validator.rs +967 -967
- data/vendor/kreuzberg/src/stopwords/mod.rs +1470 -1470
- data/vendor/kreuzberg/src/text/mod.rs +25 -25
- data/vendor/kreuzberg/src/text/quality.rs +697 -697
- data/vendor/kreuzberg/src/text/quality_processor.rs +219 -219
- data/vendor/kreuzberg/src/text/string_utils.rs +217 -217
- data/vendor/kreuzberg/src/text/token_reduction/cjk_utils.rs +164 -164
- data/vendor/kreuzberg/src/text/token_reduction/config.rs +100 -100
- data/vendor/kreuzberg/src/text/token_reduction/core.rs +796 -796
- data/vendor/kreuzberg/src/text/token_reduction/filters.rs +902 -902
- data/vendor/kreuzberg/src/text/token_reduction/mod.rs +160 -160
- data/vendor/kreuzberg/src/text/token_reduction/semantic.rs +619 -619
- data/vendor/kreuzberg/src/text/token_reduction/simd_text.rs +147 -147
- data/vendor/kreuzberg/src/types.rs +1055 -1055
- data/vendor/kreuzberg/src/utils/mod.rs +17 -17
- data/vendor/kreuzberg/src/utils/quality.rs +959 -959
- data/vendor/kreuzberg/src/utils/string_utils.rs +381 -381
- data/vendor/kreuzberg/stopwords/af_stopwords.json +53 -53
- data/vendor/kreuzberg/stopwords/ar_stopwords.json +482 -482
- data/vendor/kreuzberg/stopwords/bg_stopwords.json +261 -261
- data/vendor/kreuzberg/stopwords/bn_stopwords.json +400 -400
- data/vendor/kreuzberg/stopwords/br_stopwords.json +1205 -1205
- data/vendor/kreuzberg/stopwords/ca_stopwords.json +280 -280
- data/vendor/kreuzberg/stopwords/cs_stopwords.json +425 -425
- data/vendor/kreuzberg/stopwords/da_stopwords.json +172 -172
- data/vendor/kreuzberg/stopwords/de_stopwords.json +622 -622
- data/vendor/kreuzberg/stopwords/el_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/en_stopwords.json +1300 -1300
- data/vendor/kreuzberg/stopwords/eo_stopwords.json +175 -175
- data/vendor/kreuzberg/stopwords/es_stopwords.json +734 -734
- data/vendor/kreuzberg/stopwords/et_stopwords.json +37 -37
- data/vendor/kreuzberg/stopwords/eu_stopwords.json +100 -100
- data/vendor/kreuzberg/stopwords/fa_stopwords.json +801 -801
- data/vendor/kreuzberg/stopwords/fi_stopwords.json +849 -849
- data/vendor/kreuzberg/stopwords/fr_stopwords.json +693 -693
- data/vendor/kreuzberg/stopwords/ga_stopwords.json +111 -111
- data/vendor/kreuzberg/stopwords/gl_stopwords.json +162 -162
- data/vendor/kreuzberg/stopwords/gu_stopwords.json +226 -226
- data/vendor/kreuzberg/stopwords/ha_stopwords.json +41 -41
- data/vendor/kreuzberg/stopwords/he_stopwords.json +196 -196
- data/vendor/kreuzberg/stopwords/hi_stopwords.json +227 -227
- data/vendor/kreuzberg/stopwords/hr_stopwords.json +181 -181
- data/vendor/kreuzberg/stopwords/hu_stopwords.json +791 -791
- data/vendor/kreuzberg/stopwords/hy_stopwords.json +47 -47
- data/vendor/kreuzberg/stopwords/id_stopwords.json +760 -760
- data/vendor/kreuzberg/stopwords/it_stopwords.json +634 -634
- data/vendor/kreuzberg/stopwords/ja_stopwords.json +136 -136
- data/vendor/kreuzberg/stopwords/kn_stopwords.json +84 -84
- data/vendor/kreuzberg/stopwords/ko_stopwords.json +681 -681
- data/vendor/kreuzberg/stopwords/ku_stopwords.json +64 -64
- data/vendor/kreuzberg/stopwords/la_stopwords.json +51 -51
- data/vendor/kreuzberg/stopwords/lt_stopwords.json +476 -476
- data/vendor/kreuzberg/stopwords/lv_stopwords.json +163 -163
- data/vendor/kreuzberg/stopwords/ml_stopwords.json +1 -1
- data/vendor/kreuzberg/stopwords/mr_stopwords.json +101 -101
- data/vendor/kreuzberg/stopwords/ms_stopwords.json +477 -477
- data/vendor/kreuzberg/stopwords/ne_stopwords.json +490 -490
- data/vendor/kreuzberg/stopwords/nl_stopwords.json +415 -415
- data/vendor/kreuzberg/stopwords/no_stopwords.json +223 -223
- data/vendor/kreuzberg/stopwords/pl_stopwords.json +331 -331
- data/vendor/kreuzberg/stopwords/pt_stopwords.json +562 -562
- data/vendor/kreuzberg/stopwords/ro_stopwords.json +436 -436
- data/vendor/kreuzberg/stopwords/ru_stopwords.json +561 -561
- data/vendor/kreuzberg/stopwords/si_stopwords.json +193 -193
- data/vendor/kreuzberg/stopwords/sk_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sl_stopwords.json +448 -448
- data/vendor/kreuzberg/stopwords/so_stopwords.json +32 -32
- data/vendor/kreuzberg/stopwords/st_stopwords.json +33 -33
- data/vendor/kreuzberg/stopwords/sv_stopwords.json +420 -420
- data/vendor/kreuzberg/stopwords/sw_stopwords.json +76 -76
- data/vendor/kreuzberg/stopwords/ta_stopwords.json +129 -129
- data/vendor/kreuzberg/stopwords/te_stopwords.json +54 -54
- data/vendor/kreuzberg/stopwords/th_stopwords.json +118 -118
- data/vendor/kreuzberg/stopwords/tl_stopwords.json +149 -149
- data/vendor/kreuzberg/stopwords/tr_stopwords.json +506 -506
- data/vendor/kreuzberg/stopwords/uk_stopwords.json +75 -75
- data/vendor/kreuzberg/stopwords/ur_stopwords.json +519 -519
- data/vendor/kreuzberg/stopwords/vi_stopwords.json +647 -647
- data/vendor/kreuzberg/stopwords/yo_stopwords.json +62 -62
- data/vendor/kreuzberg/stopwords/zh_stopwords.json +796 -796
- data/vendor/kreuzberg/stopwords/zu_stopwords.json +31 -31
- data/vendor/kreuzberg/tests/api_extract_multipart.rs +52 -52
- data/vendor/kreuzberg/tests/api_tests.rs +966 -966
- data/vendor/kreuzberg/tests/archive_integration.rs +545 -545
- data/vendor/kreuzberg/tests/batch_orchestration.rs +556 -556
- data/vendor/kreuzberg/tests/batch_processing.rs +318 -318
- data/vendor/kreuzberg/tests/bibtex_parity_test.rs +421 -421
- data/vendor/kreuzberg/tests/concurrency_stress.rs +533 -533
- data/vendor/kreuzberg/tests/config_features.rs +612 -612
- data/vendor/kreuzberg/tests/config_loading_tests.rs +416 -416
- data/vendor/kreuzberg/tests/core_integration.rs +510 -510
- data/vendor/kreuzberg/tests/csv_integration.rs +414 -414
- data/vendor/kreuzberg/tests/docbook_extractor_tests.rs +500 -500
- data/vendor/kreuzberg/tests/docx_metadata_extraction_test.rs +122 -122
- data/vendor/kreuzberg/tests/docx_vs_pandoc_comparison.rs +370 -370
- data/vendor/kreuzberg/tests/email_integration.rs +327 -327
- data/vendor/kreuzberg/tests/epub_native_extractor_tests.rs +275 -275
- data/vendor/kreuzberg/tests/error_handling.rs +402 -402
- data/vendor/kreuzberg/tests/fictionbook_extractor_tests.rs +228 -228
- data/vendor/kreuzberg/tests/format_integration.rs +164 -164
- data/vendor/kreuzberg/tests/helpers/mod.rs +142 -142
- data/vendor/kreuzberg/tests/html_table_test.rs +551 -551
- data/vendor/kreuzberg/tests/image_integration.rs +255 -255
- data/vendor/kreuzberg/tests/instrumentation_test.rs +139 -139
- data/vendor/kreuzberg/tests/jats_extractor_tests.rs +639 -639
- data/vendor/kreuzberg/tests/jupyter_extractor_tests.rs +704 -704
- data/vendor/kreuzberg/tests/keywords_integration.rs +479 -479
- data/vendor/kreuzberg/tests/keywords_quality.rs +509 -509
- data/vendor/kreuzberg/tests/latex_extractor_tests.rs +496 -496
- data/vendor/kreuzberg/tests/markdown_extractor_tests.rs +490 -490
- data/vendor/kreuzberg/tests/mime_detection.rs +429 -429
- data/vendor/kreuzberg/tests/ocr_configuration.rs +514 -514
- data/vendor/kreuzberg/tests/ocr_errors.rs +698 -698
- data/vendor/kreuzberg/tests/ocr_quality.rs +629 -629
- data/vendor/kreuzberg/tests/ocr_stress.rs +469 -469
- data/vendor/kreuzberg/tests/odt_extractor_tests.rs +674 -674
- data/vendor/kreuzberg/tests/opml_extractor_tests.rs +616 -616
- data/vendor/kreuzberg/tests/orgmode_extractor_tests.rs +822 -822
- data/vendor/kreuzberg/tests/pdf_integration.rs +45 -45
- data/vendor/kreuzberg/tests/pdfium_linking.rs +374 -374
- data/vendor/kreuzberg/tests/pipeline_integration.rs +1436 -1436
- data/vendor/kreuzberg/tests/plugin_ocr_backend_test.rs +776 -776
- data/vendor/kreuzberg/tests/plugin_postprocessor_test.rs +560 -560
- data/vendor/kreuzberg/tests/plugin_system.rs +927 -927
- data/vendor/kreuzberg/tests/plugin_validator_test.rs +783 -783
- data/vendor/kreuzberg/tests/registry_integration_tests.rs +587 -587
- data/vendor/kreuzberg/tests/rst_extractor_tests.rs +694 -694
- data/vendor/kreuzberg/tests/rtf_extractor_tests.rs +775 -775
- data/vendor/kreuzberg/tests/security_validation.rs +416 -416
- data/vendor/kreuzberg/tests/stopwords_integration_test.rs +888 -888
- data/vendor/kreuzberg/tests/test_fastembed.rs +631 -631
- data/vendor/kreuzberg/tests/typst_behavioral_tests.rs +1260 -1260
- data/vendor/kreuzberg/tests/typst_extractor_tests.rs +648 -648
- data/vendor/kreuzberg/tests/xlsx_metadata_extraction_test.rs +87 -87
- data/vendor/kreuzberg-ffi/Cargo.toml +3 -3
- data/vendor/kreuzberg-ffi/README.md +851 -851
- data/vendor/kreuzberg-ffi/build.rs +176 -176
- data/vendor/kreuzberg-ffi/cbindgen.toml +27 -27
- data/vendor/kreuzberg-ffi/kreuzberg-ffi-install.pc +12 -12
- data/vendor/kreuzberg-ffi/kreuzberg-ffi.pc.in +12 -12
- data/vendor/kreuzberg-ffi/kreuzberg.h +1087 -1087
- data/vendor/kreuzberg-ffi/src/lib.rs +3616 -3616
- data/vendor/kreuzberg-ffi/src/panic_shield.rs +247 -247
- data/vendor/kreuzberg-ffi/tests.disabled/README.md +48 -48
- data/vendor/kreuzberg-ffi/tests.disabled/config_loading_tests.rs +299 -299
- data/vendor/kreuzberg-ffi/tests.disabled/config_tests.rs +346 -346
- data/vendor/kreuzberg-ffi/tests.disabled/extractor_tests.rs +232 -232
- data/vendor/kreuzberg-ffi/tests.disabled/plugin_registration_tests.rs +470 -470
- data/vendor/kreuzberg-tesseract/.commitlintrc.json +13 -13
- data/vendor/kreuzberg-tesseract/.crate-ignore +2 -2
- data/vendor/kreuzberg-tesseract/Cargo.lock +2933 -2933
- data/vendor/kreuzberg-tesseract/Cargo.toml +2 -2
- data/vendor/kreuzberg-tesseract/LICENSE +22 -22
- data/vendor/kreuzberg-tesseract/README.md +399 -399
- data/vendor/kreuzberg-tesseract/build.rs +1354 -1354
- data/vendor/kreuzberg-tesseract/patches/README.md +71 -71
- data/vendor/kreuzberg-tesseract/patches/tesseract.diff +199 -199
- data/vendor/kreuzberg-tesseract/src/api.rs +1371 -1371
- data/vendor/kreuzberg-tesseract/src/choice_iterator.rs +77 -77
- data/vendor/kreuzberg-tesseract/src/enums.rs +297 -297
- data/vendor/kreuzberg-tesseract/src/error.rs +81 -81
- data/vendor/kreuzberg-tesseract/src/lib.rs +145 -145
- data/vendor/kreuzberg-tesseract/src/monitor.rs +57 -57
- data/vendor/kreuzberg-tesseract/src/mutable_iterator.rs +197 -197
- data/vendor/kreuzberg-tesseract/src/page_iterator.rs +253 -253
- data/vendor/kreuzberg-tesseract/src/result_iterator.rs +286 -286
- data/vendor/kreuzberg-tesseract/src/result_renderer.rs +183 -183
- data/vendor/kreuzberg-tesseract/tests/integration_test.rs +211 -211
- data/vendor/rb-sys/.cargo_vcs_info.json +5 -5
- data/vendor/rb-sys/Cargo.lock +393 -393
- data/vendor/rb-sys/Cargo.toml +70 -70
- data/vendor/rb-sys/Cargo.toml.orig +57 -57
- data/vendor/rb-sys/LICENSE-APACHE +190 -190
- data/vendor/rb-sys/LICENSE-MIT +21 -21
- data/vendor/rb-sys/build/features.rs +111 -111
- data/vendor/rb-sys/build/main.rs +286 -286
- data/vendor/rb-sys/build/stable_api_config.rs +155 -155
- data/vendor/rb-sys/build/version.rs +50 -50
- data/vendor/rb-sys/readme.md +36 -36
- data/vendor/rb-sys/src/bindings.rs +21 -21
- data/vendor/rb-sys/src/hidden.rs +11 -11
- data/vendor/rb-sys/src/lib.rs +35 -35
- data/vendor/rb-sys/src/macros.rs +371 -371
- data/vendor/rb-sys/src/memory.rs +53 -53
- data/vendor/rb-sys/src/ruby_abi_version.rs +38 -38
- data/vendor/rb-sys/src/special_consts.rs +31 -31
- data/vendor/rb-sys/src/stable_api/compiled.c +179 -179
- data/vendor/rb-sys/src/stable_api/compiled.rs +257 -257
- data/vendor/rb-sys/src/stable_api/ruby_2_7.rs +324 -324
- data/vendor/rb-sys/src/stable_api/ruby_3_0.rs +332 -332
- data/vendor/rb-sys/src/stable_api/ruby_3_1.rs +325 -325
- data/vendor/rb-sys/src/stable_api/ruby_3_2.rs +323 -323
- data/vendor/rb-sys/src/stable_api/ruby_3_3.rs +339 -339
- data/vendor/rb-sys/src/stable_api/ruby_3_4.rs +339 -339
- data/vendor/rb-sys/src/stable_api.rs +260 -260
- data/vendor/rb-sys/src/symbol.rs +31 -31
- data/vendor/rb-sys/src/tracking_allocator.rs +330 -330
- data/vendor/rb-sys/src/utils.rs +89 -89
- data/vendor/rb-sys/src/value_type.rs +7 -7
- metadata +7 -80
data/vendor/kreuzberg/src/text/token_reduction/core.rs
@@ -1,796 +1,796 @@
-use crate::error::Result;
-use crate::text::token_reduction::{
-    cjk_utils::CjkTokenizer,
-    config::{ReductionLevel, TokenReductionConfig},
-    filters::FilterPipeline,
-    semantic::SemanticAnalyzer,
-    simd_text::{SimdTextProcessor, chunk_text_for_parallel},
-};
-use once_cell::sync::Lazy;
-use rayon::prelude::*;
-use regex::Regex;
-use std::sync::Arc;
-use unicode_normalization::UnicodeNormalization;
-
-static REPEATED_EXCLAMATION: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
-static REPEATED_QUESTION: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
-static REPEATED_COMMA: Lazy<Regex> =
-    Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
-
-/// Bonus added for sentences at the beginning or end of the document
-const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
-
-/// Bonus added for sentences with ideal word count (neither too short nor too long)
-const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
-
-/// Minimum word count for ideal sentence length
-const MIN_IDEAL_WORD_COUNT: usize = 3;
-
-/// Maximum word count for ideal sentence length
-const MAX_IDEAL_WORD_COUNT: usize = 25;
-
-/// Weight multiplier for numeric content density in sentences
-const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
-
-/// Weight multiplier for capitalized/acronym word density in sentences
-const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
-
-/// Weight multiplier for long word density in sentences
-const LONG_WORD_WEIGHT: f32 = 0.2;
-
-/// Minimum character length for a word to be considered "long"
-const LONG_WORD_THRESHOLD: usize = 8;
-
-/// Weight multiplier for punctuation density in sentences
-const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
-
-/// Weight multiplier for word diversity ratio (unique words / total words)
-const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
-
-/// Weight multiplier for character entropy (measure of text randomness/information)
-const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
-
-pub struct TokenReducer {
-    config: Arc<TokenReductionConfig>,
-    text_processor: SimdTextProcessor,
-    filter_pipeline: FilterPipeline,
-    semantic_analyzer: Option<SemanticAnalyzer>,
-    cjk_tokenizer: CjkTokenizer,
-    language: String,
-}
-
-impl TokenReducer {
-    pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
-        let config = Arc::new(config.clone());
-        let language = language_hint
-            .or(config.language_hint.as_deref())
-            .unwrap_or("en")
-            .to_string();
-
-        let text_processor = SimdTextProcessor::new();
-        let filter_pipeline = FilterPipeline::new(&config, &language)?;
-
-        let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
-            Some(SemanticAnalyzer::new(&language))
-        } else {
-            None
-        };
-
-        Ok(Self {
-            config,
-            text_processor,
-            filter_pipeline,
-            semantic_analyzer,
-            cjk_tokenizer: CjkTokenizer::new(),
-            language,
-        })
-    }
-
-    /// Get the language code being used for stopwords and semantic analysis.
-    pub fn language(&self) -> &str {
-        &self.language
-    }
-
-    pub fn reduce(&self, text: &str) -> String {
-        if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
-            return text.to_string();
-        }
-
-        let working_text = if text.is_ascii() {
-            text
-        } else {
-            &text.nfc().collect::<String>()
-        };
-
-        match self.config.level {
-            ReductionLevel::Off => working_text.to_string(),
-            ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
-            ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
-            ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
-            ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
-        }
-    }
-
-    pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
-        if !self.config.enable_parallel || texts.len() < 2 {
-            return texts.iter().map(|text| self.reduce(text)).collect();
-        }
-
-        texts.par_iter().map(|text| self.reduce(text)).collect()
-    }
-
-    fn apply_light_reduction_optimized(&self, text: &str) -> String {
-        let mut result = if self.config.use_simd {
-            self.text_processor.clean_punctuation(text)
-        } else {
-            self.clean_punctuation_optimized(text)
-        };
-
-        result = self.filter_pipeline.apply_light_filters(&result);
-        result.trim().to_string()
-    }
-
-    fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
-        let mut result = self.apply_light_reduction_optimized(text);
-
-        result = if self.config.enable_parallel && text.len() > 1000 {
-            self.apply_parallel_moderate_reduction(&result)
-        } else {
-            self.filter_pipeline.apply_moderate_filters(&result)
-        };
-
-        result
-    }
-
-    fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
-        let mut result = self.apply_moderate_reduction_optimized(text);
-
-        result = self.remove_additional_common_words(&result);
-        result = self.apply_sentence_selection(&result);
-
-        if let Some(ref analyzer) = self.semantic_analyzer {
-            result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
-        }
-
-        result
-    }
-
-    fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
-        let mut result = self.apply_aggressive_reduction_optimized(text);
-
-        if let Some(ref analyzer) = self.semantic_analyzer
-            && self.config.enable_semantic_clustering
-        {
-            result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
-        }
-
-        result
-    }
-
-    fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
-        let num_threads = rayon::current_num_threads();
-        let chunks = chunk_text_for_parallel(text, num_threads);
-
-        let processed_chunks: Vec<String> = chunks
-            .par_iter()
-            .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
-            .collect();
-
-        processed_chunks.join(" ")
-    }
-
-    fn clean_punctuation_optimized(&self, text: &str) -> String {
-        let mut result = text.to_string();
-
-        result = REPEATED_EXCLAMATION.replace_all(&result, "!").to_string();
-        result = REPEATED_QUESTION.replace_all(&result, "?").to_string();
-        result = REPEATED_COMMA.replace_all(&result, ",").to_string();
-
-        result
-    }
-
-    fn remove_additional_common_words(&self, text: &str) -> String {
-        let words = self.universal_tokenize(text);
-
-        if words.len() < 4 {
-            return text.to_string();
-        }
-
-        let mut word_freq = std::collections::HashMap::new();
-        let mut word_lengths = Vec::new();
-
-        for word in &words {
-            let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                word.to_lowercase()
-            } else {
-                word.chars()
-                    .filter(|c| c.is_alphabetic())
-                    .collect::<String>()
-                    .to_lowercase()
-            };
-
-            if !clean_word.is_empty() {
-                *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
-                word_lengths.push(clean_word.chars().count());
-            }
-        }
-
-        let avg_length = if !word_lengths.is_empty() {
-            word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
-        } else {
-            5.0
-        };
-
-        let original_count = words.len();
-
-        let filtered_words: Vec<String> = words
-            .iter()
-            .filter(|word| {
-                let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                    word.to_lowercase()
-                } else {
-                    word.chars()
-                        .filter(|c| c.is_alphabetic())
-                        .collect::<String>()
-                        .to_lowercase()
-                };
-
-                if clean_word.is_empty() {
-                    return true;
-                }
-
-                let freq = word_freq.get(&clean_word).unwrap_or(&0);
-                let word_len = clean_word.chars().count() as f32;
-
-                self.has_important_characteristics(word)
-                    || (*freq <= 2 && word_len >= avg_length * 0.8)
-                    || (word_len >= avg_length * 1.5)
-            })
-            .cloned()
-            .collect();
-
-        let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
-        let fallback_threshold = if has_cjk_content {
-            original_count / 5
-        } else {
-            original_count / 3
-        };
-
-        if filtered_words.len() < fallback_threshold {
-            let fallback_words: Vec<String> = words
-                .iter()
-                .filter(|word| {
-                    let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
-                        (*word).clone()
-                    } else {
-                        word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
-                    };
-
-                    clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
-                })
-                .cloned()
-                .collect();
-            self.smart_join(&fallback_words, has_cjk_content)
-        } else {
-            self.smart_join(&filtered_words, has_cjk_content)
-        }
-    }
-
-    fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
-        if has_cjk_content {
-            tokens.join("")
-        } else {
-            tokens.join(" ")
-        }
-    }
-
-    fn has_important_characteristics(&self, word: &str) -> bool {
-        if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
-            return true;
-        }
-
-        if word.chars().any(|c| c.is_numeric()) {
-            return true;
-        }
-
-        if word.len() > 10 {
-            return true;
-        }
-
-        let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
-        if uppercase_count > 1 && uppercase_count < word.len() {
-            return true;
-        }
-
-        if self.has_cjk_importance(word) {
-            return true;
-        }
-
-        false
-    }
-
-    fn has_cjk_importance(&self, word: &str) -> bool {
-        let chars: Vec<char> = word.chars().collect();
-
-        let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
-        if !has_cjk {
-            return false;
-        }
-
-        let important_radicals = [
-            '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
-            '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
-            '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
-            '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
-        ];
-
-        for &char in &chars {
-            if important_radicals.contains(&char) {
-                return true;
-            }
-        }
-
-        if chars.len() == 2 && has_cjk {
-            let has_technical = chars.iter().any(|&c| {
-                let code = c as u32;
-                (0x4E00..=0x4FFF).contains(&code)
-                    || (0x5000..=0x51FF).contains(&code)
-                    || (0x6700..=0x68FF).contains(&code)
-                    || (0x7500..=0x76FF).contains(&code)
-            });
-
-            if has_technical {
-                return true;
-            }
-        }
-
-        false
-    }
-
-    fn apply_sentence_selection(&self, text: &str) -> String {
-        let sentences: Vec<&str> = text
-            .split(['.', '!', '?'])
-            .map(|s| s.trim())
-            .filter(|s| !s.is_empty())
-            .collect();
-
-        if sentences.len() <= 2 {
-            return text.to_string();
-        }
-
-        let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
-            .iter()
-            .enumerate()
-            .map(|(i, sentence)| {
-                let score = self.score_sentence_importance(sentence, i, sentences.len());
-                (i, score, *sentence)
-            })
-            .collect();
-
-        scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));
-
-        let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
-        let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();
-
-        selected_indices.sort();
-
-        let selected_sentences: Vec<&str> = selected_indices
-            .iter()
-            .filter_map(|&i| sentences.get(i))
-            .copied()
-            .collect();
-
-        if selected_sentences.is_empty() {
-            text.to_string()
-        } else {
-            selected_sentences.join(". ")
-        }
-    }
-
-    fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
-        let mut score = 0.0;
-
-        if position == 0 || position == total_sentences - 1 {
-            score += SENTENCE_EDGE_POSITION_BONUS;
-        }
-
-        let words: Vec<&str> = sentence.split_whitespace().collect();
-        if words.is_empty() {
-            return score;
-        }
-
-        let word_count = words.len();
-        if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
-            score += IDEAL_WORD_COUNT_BONUS;
-        }
-
-        let mut numeric_count = 0;
-        let mut caps_count = 0;
-        let mut long_word_count = 0;
-        let mut punct_density = 0;
-
-        for word in &words {
-            if word.chars().any(|c| c.is_numeric()) {
-                numeric_count += 1;
-            }
-
-            if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
-                caps_count += 1;
-            }
-
-            if word.len() > LONG_WORD_THRESHOLD {
-                long_word_count += 1;
-            }
-
-            punct_density += word.chars().filter(|c| c.is_ascii_punctuation()).count();
-        }
-
-        score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
-        score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
-        score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
-        score += (punct_density as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;
-
-        let unique_words: std::collections::HashSet<_> = words
-            .iter()
-            .map(|w| {
-                w.chars()
-                    .filter(|c| c.is_alphabetic())
-                    .collect::<String>()
-                    .to_lowercase()
-            })
-            .collect();
-        let diversity_ratio = unique_words.len() as f32 / words.len() as f32;
-        score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;
-
-        let char_entropy = self.calculate_char_entropy(sentence);
-        score += char_entropy * CHAR_ENTROPY_WEIGHT;
-
-        score
-    }
-
-    fn universal_tokenize(&self, text: &str) -> Vec<String> {
-        self.cjk_tokenizer.tokenize_mixed_text(text)
-    }
-
-    fn calculate_char_entropy(&self, text: &str) -> f32 {
-        let chars: Vec<char> = text.chars().collect();
-        if chars.is_empty() {
-            return 0.0;
-        }
-
-        let mut char_freq = std::collections::HashMap::new();
-        for &ch in &chars {
-            let lowercase_ch = ch
-                .to_lowercase()
-                .next()
-                .expect("to_lowercase() must yield at least one character for valid Unicode");
-            *char_freq.entry(lowercase_ch).or_insert(0) += 1;
-        }
-
-        let total_chars = chars.len() as f32;
-        char_freq
-            .values()
-            .map(|&freq| {
-                let p = freq as f32 / total_chars;
-                if p > 0.0 { -p * p.log2() } else { 0.0 }
-            })
-            .sum::<f32>()
-            .min(5.0)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_light_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            use_simd: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Hello world!!! How are you???";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(!result.contains("  "));
-    }
-
-    #[test]
-    fn test_moderate_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            use_simd: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-        let input = "The quick brown fox is jumping over the lazy dog";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(result.contains("quick"));
-        assert!(result.contains("brown"));
-        assert!(result.contains("fox"));
-    }
-
-    #[test]
-    fn test_batch_processing() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            enable_parallel: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
-        let results = reducer.batch_reduce(&inputs);
-
-        assert_eq!(results.len(), inputs.len());
-        for result in &results {
-            assert!(!result.contains("  "));
-        }
-    }
-
-    #[test]
-    fn test_aggressive_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            use_simd: false,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(!result.is_empty());
-    }
-
-    #[test]
-    fn test_maximum_reduction() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Maximum,
-            use_simd: false,
-            enable_semantic_clustering: true,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
-        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(!result.is_empty());
-    }
-
-    #[test]
-    fn test_empty_text_handling() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        assert_eq!(reducer.reduce(""), "");
-        let result = reducer.reduce(" ");
-        assert!(result == " " || result.is_empty());
-    }
-
-    #[test]
-    fn test_off_mode_preserves_text() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Off,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Text with multiple spaces!!!";
-        assert_eq!(reducer.reduce(input), input);
-    }
-
-    #[test]
-    fn test_parallel_batch_processing() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            enable_parallel: true,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let inputs = vec![
-            "First text with spaces",
-            "Second text with spaces",
-            "Third text with spaces",
-        ];
-        let results = reducer.batch_reduce(&inputs);
-
-        assert_eq!(results.len(), inputs.len());
-        for result in &results {
-            assert!(!result.contains("  "));
-        }
-    }
-
-    #[test]
-    fn test_cjk_text_handling() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
-        let input = "这是中文文本测试";
-        let result = reducer.reduce(input);
-
-        assert!(!result.is_empty());
-    }
-
-    #[test]
-    fn test_mixed_language_text() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "This is English text 这是中文 and some more English";
-        let result = reducer.reduce(input);
-
-        assert!(!result.is_empty());
-        assert!(result.contains("English") || result.contains("中"));
-    }
-
-    #[test]
-    fn test_punctuation_normalization() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
-        let result = reducer.reduce(input);
-
-        assert!(!result.contains("!!!!!!"));
-        assert!(!result.contains("?????"));
-        assert!(!result.contains(",,,,,,"));
-    }
-
-    #[test]
-    fn test_sentence_selection() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
-        let result = reducer.reduce(input);
-
-        assert!(result.len() < input.len());
-        assert!(result.split(". ").count() < 4);
-    }
-
-    #[test]
-    fn test_unicode_normalization_ascii() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Pure ASCII text without special characters";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("ASCII"));
-    }
-
-    #[test]
-    fn test_unicode_normalization_non_ascii() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Light,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "Café naïve résumé";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("Café") || result.contains("Cafe"));
-    }
-
-    #[test]
-    fn test_single_text_vs_batch() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Moderate,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let text = "The quick brown fox jumps over the lazy dog";
-
-        let single_result = reducer.reduce(text);
-        let batch_results = reducer.batch_reduce(&[text]);
-
-        assert_eq!(single_result, batch_results[0]);
-    }
-
-    #[test]
-    fn test_important_word_preservation() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
-    }
-
-    #[test]
-    fn test_technical_terms_preservation() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-        let input = "The implementation uses PyTorch and TensorFlow frameworks";
-        let result = reducer.reduce(input);
-
-        assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
-    }
-
-    #[test]
-    fn test_calculate_char_entropy() {
-        let config = TokenReductionConfig::default();
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
-        assert!(low_entropy < 1.0);
-
-        let high_entropy = reducer.calculate_char_entropy("abcdefg123");
-        assert!(high_entropy > low_entropy);
-    }
-
-    #[test]
-    fn test_universal_tokenize_english() {
-        let config = TokenReductionConfig::default();
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let tokens = reducer.universal_tokenize("hello world test");
-        assert_eq!(tokens, vec!["hello", "world", "test"]);
-    }
-
-    #[test]
-    fn test_universal_tokenize_cjk() {
-        let config = TokenReductionConfig::default();
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let tokens = reducer.universal_tokenize("中文");
-        assert!(!tokens.is_empty());
-    }
-
-    #[test]
-    fn test_fallback_threshold() {
-        let config = TokenReductionConfig {
-            level: ReductionLevel::Aggressive,
-            ..Default::default()
-        };
-
-        let reducer = TokenReducer::new(&config, None).unwrap();
-
-        let input = "a the is of to in for on at by";
-        let result = reducer.reduce(input);
-
-        assert!(!result.is_empty());
-    }
-}
1
|
+
use crate::error::Result;
|
|
2
|
+
use crate::text::token_reduction::{
|
|
3
|
+
cjk_utils::CjkTokenizer,
|
|
4
|
+
config::{ReductionLevel, TokenReductionConfig},
|
|
5
|
+
filters::FilterPipeline,
|
|
6
|
+
semantic::SemanticAnalyzer,
|
|
7
|
+
simd_text::{SimdTextProcessor, chunk_text_for_parallel},
|
|
8
|
+
};
|
|
9
|
+
use once_cell::sync::Lazy;
|
|
10
|
+
use rayon::prelude::*;
|
|
11
|
+
use regex::Regex;
|
|
12
|
+
use std::sync::Arc;
|
|
13
|
+
use unicode_normalization::UnicodeNormalization;
|
|
14
|
+
|
|
15
|
+
static REPEATED_EXCLAMATION: Lazy<Regex> =
|
|
16
|
+
Lazy::new(|| Regex::new(r"[!]{2,}").expect("Repeated exclamation regex pattern is valid and should compile"));
|
|
17
|
+
static REPEATED_QUESTION: Lazy<Regex> =
|
|
18
|
+
Lazy::new(|| Regex::new(r"[?]{2,}").expect("Repeated question regex pattern is valid and should compile"));
|
|
19
|
+
static REPEATED_COMMA: Lazy<Regex> =
|
|
20
|
+
Lazy::new(|| Regex::new(r"[,]{2,}").expect("Repeated comma regex pattern is valid and should compile"));
|
|
21
|
+
|
|
22
|
+
/// Bonus added for sentences at the beginning or end of the document
|
|
23
|
+
const SENTENCE_EDGE_POSITION_BONUS: f32 = 0.3;
|
|
24
|
+
|
|
25
|
+
/// Bonus added for sentences with ideal word count (neither too short nor too long)
|
|
26
|
+
const IDEAL_WORD_COUNT_BONUS: f32 = 0.2;
|
|
27
|
+
|
|
28
|
+
/// Minimum word count for ideal sentence length
|
|
29
|
+
const MIN_IDEAL_WORD_COUNT: usize = 3;
|
|
30
|
+
|
|
31
|
+
/// Maximum word count for ideal sentence length
|
|
32
|
+
const MAX_IDEAL_WORD_COUNT: usize = 25;
|
|
33
|
+
|
|
34
|
+
/// Weight multiplier for numeric content density in sentences
|
|
35
|
+
const NUMERIC_CONTENT_WEIGHT: f32 = 0.3;
|
|
36
|
+
|
|
37
|
+
/// Weight multiplier for capitalized/acronym word density in sentences
|
|
38
|
+
const CAPS_ACRONYM_WEIGHT: f32 = 0.25;
|
|
39
|
+
|
|
40
|
+
/// Weight multiplier for long word density in sentences
|
|
41
|
+
const LONG_WORD_WEIGHT: f32 = 0.2;
|
|
42
|
+
|
|
43
|
+
/// Minimum character length for a word to be considered "long"
|
|
44
|
+
const LONG_WORD_THRESHOLD: usize = 8;
|
|
45
|
+
|
|
46
|
+
/// Weight multiplier for punctuation density in sentences
|
|
47
|
+
const PUNCTUATION_DENSITY_WEIGHT: f32 = 0.15;
|
|
48
|
+
|
|
49
|
+
/// Weight multiplier for word diversity ratio (unique words / total words)
|
|
50
|
+
const DIVERSITY_RATIO_WEIGHT: f32 = 0.15;
|
|
51
|
+
|
|
52
|
+
/// Weight multiplier for character entropy (measure of text randomness/information)
|
|
53
|
+
const CHAR_ENTROPY_WEIGHT: f32 = 0.1;
|
|
54
|
+
|
|
55
|
+
pub struct TokenReducer {
|
|
56
|
+
config: Arc<TokenReductionConfig>,
|
|
57
|
+
text_processor: SimdTextProcessor,
|
|
58
|
+
filter_pipeline: FilterPipeline,
|
|
59
|
+
semantic_analyzer: Option<SemanticAnalyzer>,
|
|
60
|
+
cjk_tokenizer: CjkTokenizer,
|
|
61
|
+
language: String,
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
impl TokenReducer {
|
|
65
|
+
pub fn new(config: &TokenReductionConfig, language_hint: Option<&str>) -> Result<Self> {
|
|
66
|
+
let config = Arc::new(config.clone());
|
|
67
|
+
let language = language_hint
|
|
68
|
+
.or(config.language_hint.as_deref())
|
|
69
|
+
.unwrap_or("en")
|
|
70
|
+
.to_string();
|
|
71
|
+
|
|
72
|
+
let text_processor = SimdTextProcessor::new();
|
|
73
|
+
let filter_pipeline = FilterPipeline::new(&config, &language)?;
|
|
74
|
+
|
|
75
|
+
let semantic_analyzer = if matches!(config.level, ReductionLevel::Aggressive | ReductionLevel::Maximum) {
|
|
76
|
+
Some(SemanticAnalyzer::new(&language))
|
|
77
|
+
} else {
|
|
78
|
+
None
|
|
79
|
+
};
|
|
80
|
+
|
|
81
|
+
Ok(Self {
|
|
82
|
+
config,
|
|
83
|
+
text_processor,
|
|
84
|
+
filter_pipeline,
|
|
85
|
+
semantic_analyzer,
|
|
86
|
+
cjk_tokenizer: CjkTokenizer::new(),
|
|
87
|
+
language,
|
|
88
|
+
})
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
/// Get the language code being used for stopwords and semantic analysis.
|
|
92
|
+
pub fn language(&self) -> &str {
|
|
93
|
+
&self.language
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
pub fn reduce(&self, text: &str) -> String {
|
|
97
|
+
if text.is_empty() || matches!(self.config.level, ReductionLevel::Off) {
|
|
98
|
+
return text.to_string();
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
let working_text = if text.is_ascii() {
|
|
102
|
+
text
|
|
103
|
+
} else {
|
|
104
|
+
&text.nfc().collect::<String>()
|
|
105
|
+
};
|
|
106
|
+
|
|
107
|
+
match self.config.level {
|
|
108
|
+
ReductionLevel::Off => working_text.to_string(),
|
|
109
|
+
ReductionLevel::Light => self.apply_light_reduction_optimized(working_text),
|
|
110
|
+
ReductionLevel::Moderate => self.apply_moderate_reduction_optimized(working_text),
|
|
111
|
+
ReductionLevel::Aggressive => self.apply_aggressive_reduction_optimized(working_text),
|
|
112
|
+
ReductionLevel::Maximum => self.apply_maximum_reduction_optimized(working_text),
|
|
113
|
+
}
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
pub fn batch_reduce(&self, texts: &[&str]) -> Vec<String> {
|
|
117
|
+
if !self.config.enable_parallel || texts.len() < 2 {
|
|
118
|
+
return texts.iter().map(|text| self.reduce(text)).collect();
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
texts.par_iter().map(|text| self.reduce(text)).collect()
|
|
122
|
+
}
|
|

    fn apply_light_reduction_optimized(&self, text: &str) -> String {
        let mut result = if self.config.use_simd {
            self.text_processor.clean_punctuation(text)
        } else {
            self.clean_punctuation_optimized(text)
        };

        result = self.filter_pipeline.apply_light_filters(&result);
        result.trim().to_string()
    }

    fn apply_moderate_reduction_optimized(&self, text: &str) -> String {
        let mut result = self.apply_light_reduction_optimized(text);

        result = if self.config.enable_parallel && text.len() > 1000 {
            self.apply_parallel_moderate_reduction(&result)
        } else {
            self.filter_pipeline.apply_moderate_filters(&result)
        };

        result
    }

    fn apply_aggressive_reduction_optimized(&self, text: &str) -> String {
        let mut result = self.apply_moderate_reduction_optimized(text);

        result = self.remove_additional_common_words(&result);
        result = self.apply_sentence_selection(&result);

        if let Some(ref analyzer) = self.semantic_analyzer {
            result = analyzer.apply_semantic_filtering(&result, self.config.semantic_threshold);
        }

        result
    }

    fn apply_maximum_reduction_optimized(&self, text: &str) -> String {
        let mut result = self.apply_aggressive_reduction_optimized(text);

        if let Some(ref analyzer) = self.semantic_analyzer
            && self.config.enable_semantic_clustering
        {
            result = analyzer.apply_hypernym_compression(&result, self.config.target_reduction);
        }

        result
    }
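
    // The levels form a cascade: Maximum builds on Aggressive, which builds on
    // Moderate, which builds on Light. Each stage consumes the previous
    // stage's output, so raising the level only adds further filtering passes.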

    fn apply_parallel_moderate_reduction(&self, text: &str) -> String {
        let num_threads = rayon::current_num_threads();
        let chunks = chunk_text_for_parallel(text, num_threads);

        let processed_chunks: Vec<String> = chunks
            .par_iter()
            .map(|chunk| self.filter_pipeline.apply_moderate_filters(chunk))
            .collect();

        processed_chunks.join(" ")
    }
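
    // `chunk_text_for_parallel` (brought into scope elsewhere in this file)
    // splits the input into roughly one chunk per rayon worker thread. Chunks
    // are filtered independently and rejoined with a single space, so no
    // filter decision ever spans a chunk boundary.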

    fn clean_punctuation_optimized(&self, text: &str) -> String {
        let mut result = text.to_string();

        result = REPEATED_EXCLAMATION.replace_all(&result, "!").to_string();
        result = REPEATED_QUESTION.replace_all(&result, "?").to_string();
        result = REPEATED_COMMA.replace_all(&result, ",").to_string();

        result
    }
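
    // REPEATED_EXCLAMATION, REPEATED_QUESTION, and REPEATED_COMMA are
    // precompiled patterns defined earlier in this file. A plausible shape
    // (an assumption, shown only for orientation) is a lazily initialized
    // regex such as:
    //
    //     static REPEATED_EXCLAMATION: LazyLock<Regex> =
    //         LazyLock::new(|| Regex::new(r"!{2,}").expect("valid regex"));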

    fn remove_additional_common_words(&self, text: &str) -> String {
        let words = self.universal_tokenize(text);

        if words.len() < 4 {
            return text.to_string();
        }

        let mut word_freq = std::collections::HashMap::new();
        let mut word_lengths = Vec::new();

        for word in &words {
            let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
                word.to_lowercase()
            } else {
                word.chars()
                    .filter(|c| c.is_alphabetic())
                    .collect::<String>()
                    .to_lowercase()
            };

            if !clean_word.is_empty() {
                *word_freq.entry(clean_word.clone()).or_insert(0) += 1;
                word_lengths.push(clean_word.chars().count());
            }
        }

        let avg_length = if !word_lengths.is_empty() {
            word_lengths.iter().sum::<usize>() as f32 / word_lengths.len() as f32
        } else {
            5.0
        };

        let original_count = words.len();

        let filtered_words: Vec<String> = words
            .iter()
            .filter(|word| {
                let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
                    word.to_lowercase()
                } else {
                    word.chars()
                        .filter(|c| c.is_alphabetic())
                        .collect::<String>()
                        .to_lowercase()
                };

                if clean_word.is_empty() {
                    return true;
                }

                let freq = word_freq.get(&clean_word).unwrap_or(&0);
                let word_len = clean_word.chars().count() as f32;

                self.has_important_characteristics(word)
                    || (*freq <= 2 && word_len >= avg_length * 0.8)
                    || (word_len >= avg_length * 1.5)
            })
            .cloned()
            .collect();

        let has_cjk_content = text.chars().any(|c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
        let fallback_threshold = if has_cjk_content {
            original_count / 5
        } else {
            original_count / 3
        };

        if filtered_words.len() < fallback_threshold {
            let fallback_words: Vec<String> = words
                .iter()
                .filter(|word| {
                    let clean_word = if word.chars().all(|c| c.is_alphabetic()) {
                        (*word).clone()
                    } else {
                        word.chars().filter(|c| c.is_alphabetic()).collect::<String>()
                    };

                    clean_word.is_empty() || clean_word.chars().count() >= 3 || self.has_important_characteristics(word)
                })
                .cloned()
                .collect();
            self.smart_join(&fallback_words, has_cjk_content)
        } else {
            self.smart_join(&filtered_words, has_cjk_content)
        }
    }
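
    // Worked example of the keep-rule above (illustrative): with cleaned words
    // averaging 4 characters, "quantum" (7 chars >= 1.5x the average) survives
    // on length alone, "NASA" survives via `has_important_characteristics`,
    // while a stopword-like token repeated three or more times ("the") fails
    // both the rarity test (freq <= 2) and the length tests and is dropped.
    // If fewer than a third of the words survive (a fifth for CJK-bearing
    // text), the gentler keep-words-of-3+-characters fallback filter is used
    // instead.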

    fn smart_join(&self, tokens: &[String], has_cjk_content: bool) -> String {
        if has_cjk_content {
            tokens.join("")
        } else {
            tokens.join(" ")
        }
    }

    fn has_important_characteristics(&self, word: &str) -> bool {
        if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
            return true;
        }

        if word.chars().any(|c| c.is_numeric()) {
            return true;
        }

        if word.len() > 10 {
            return true;
        }

        let uppercase_count = word.chars().filter(|c| c.is_uppercase()).count();
        if uppercase_count > 1 && uppercase_count < word.len() {
            return true;
        }

        if self.has_cjk_importance(word) {
            return true;
        }

        false
    }
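
    // Examples of words this keeps: "NASA" (all-caps, longer than one byte),
    // "COVID-19" (contains digits), "implementation" (more than 10 bytes), and
    // "PyTorch" (interior capitals). Note the length checks use `word.len()`,
    // i.e. bytes, which is exact for ASCII but counts multi-byte characters
    // more than once.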

    fn has_cjk_importance(&self, word: &str) -> bool {
        let chars: Vec<char> = word.chars().collect();

        let has_cjk = chars.iter().any(|&c| c as u32 >= 0x4E00 && (c as u32) <= 0x9FFF);
        if !has_cjk {
            return false;
        }

        let important_radicals = [
            '学', '智', '能', '技', '术', '法', '算', '理', '科', '研', '究', '发', '展', '系', '统', '模', '型', '方',
            '式', '过', '程', '结', '构', '功', '效', '应', '分', '析', '计', '算', '数', '据', '信', '息', '处', '理',
            '语', '言', '文', '生', '成', '产', '用', '作', '为', '成', '变', '化', '转', '换', '提', '高', '网', '络',
            '神', '经', '机', '器', '人', '工', '智', '能', '自', '然', '复',
        ];

        for &char in &chars {
            if important_radicals.contains(&char) {
                return true;
            }
        }

        if chars.len() == 2 && has_cjk {
            let has_technical = chars.iter().any(|&c| {
                let code = c as u32;
                (0x4E00..=0x4FFF).contains(&code)
                    || (0x5000..=0x51FF).contains(&code)
                    || (0x6700..=0x68FF).contains(&code)
                    || (0x7500..=0x76FF).contains(&code)
            });

            if has_technical {
                return true;
            }
        }

        false
    }
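
    // 0x4E00..=0x9FFF is the CJK Unified Ideographs block, so `has_cjk` fires
    // only for Han characters; kana-only or hangul-only words fall through.
    // The radical list and the two-character code-point ranges above are a
    // heuristic allowlist for technical vocabulary, not an exhaustive one.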

    fn apply_sentence_selection(&self, text: &str) -> String {
        let sentences: Vec<&str> = text
            .split(['.', '!', '?'])
            .map(|s| s.trim())
            .filter(|s| !s.is_empty())
            .collect();

        if sentences.len() <= 2 {
            return text.to_string();
        }

        let mut scored_sentences: Vec<(usize, f32, &str)> = sentences
            .iter()
            .enumerate()
            .map(|(i, sentence)| {
                let score = self.score_sentence_importance(sentence, i, sentences.len());
                (i, score, *sentence)
            })
            .collect();

        scored_sentences.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(std::cmp::Ordering::Equal));

        let keep_count = ((sentences.len() as f32 * 0.4).ceil() as usize).max(1);
        let mut selected_indices: Vec<usize> = scored_sentences[..keep_count].iter().map(|(i, _, _)| *i).collect();

        selected_indices.sort();

        let selected_sentences: Vec<&str> = selected_indices
            .iter()
            .filter_map(|&i| sentences.get(i))
            .copied()
            .collect();

        if selected_sentences.is_empty() {
            text.to_string()
        } else {
            selected_sentences.join(". ")
        }
    }
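
    // Selection keeps the top 40% of sentences by score (at least one), then
    // restores document order. For example, a 10-sentence passage keeps
    // ceil(10 * 0.4) = 4 sentences and a 3-sentence passage keeps
    // ceil(3 * 0.4) = 2; passages of one or two sentences are returned
    // untouched by the early guard above.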

    fn score_sentence_importance(&self, sentence: &str, position: usize, total_sentences: usize) -> f32 {
        let mut score = 0.0;

        if position == 0 || position == total_sentences - 1 {
            score += SENTENCE_EDGE_POSITION_BONUS;
        }

        let words: Vec<&str> = sentence.split_whitespace().collect();
        if words.is_empty() {
            return score;
        }

        let word_count = words.len();
        if (MIN_IDEAL_WORD_COUNT..=MAX_IDEAL_WORD_COUNT).contains(&word_count) {
            score += IDEAL_WORD_COUNT_BONUS;
        }

        let mut numeric_count = 0;
        let mut caps_count = 0;
        let mut long_word_count = 0;
        let mut punct_density = 0;

        for word in &words {
            if word.chars().any(|c| c.is_numeric()) {
                numeric_count += 1;
            }

            if word.len() > 1 && word.chars().all(|c| c.is_uppercase()) {
                caps_count += 1;
            }

            if word.len() > LONG_WORD_THRESHOLD {
                long_word_count += 1;
            }

            punct_density += word.chars().filter(|c| c.is_ascii_punctuation()).count();
        }

        score += (numeric_count as f32 / words.len() as f32) * NUMERIC_CONTENT_WEIGHT;
        score += (caps_count as f32 / words.len() as f32) * CAPS_ACRONYM_WEIGHT;
        score += (long_word_count as f32 / words.len() as f32) * LONG_WORD_WEIGHT;
        score += (punct_density as f32 / sentence.len() as f32) * PUNCTUATION_DENSITY_WEIGHT;

        let unique_words: std::collections::HashSet<_> = words
            .iter()
            .map(|w| {
                w.chars()
                    .filter(|c| c.is_alphabetic())
                    .collect::<String>()
                    .to_lowercase()
            })
            .collect();
        let diversity_ratio = unique_words.len() as f32 / words.len() as f32;
        score += diversity_ratio * DIVERSITY_RATIO_WEIGHT;

        let char_entropy = self.calculate_char_entropy(sentence);
        score += char_entropy * CHAR_ENTROPY_WEIGHT;

        score
    }
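
    // The *_BONUS, *_WEIGHT, and *_THRESHOLD names above are module-level
    // constants defined earlier in this file. The score is a weighted sum of
    // an edge-position bonus, an ideal-length bonus, and per-sentence ratios
    // for numeric tokens, all-caps tokens, long words, punctuation density,
    // lexical diversity, and character entropy.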

    fn universal_tokenize(&self, text: &str) -> Vec<String> {
        self.cjk_tokenizer.tokenize_mixed_text(text)
    }

    fn calculate_char_entropy(&self, text: &str) -> f32 {
        let chars: Vec<char> = text.chars().collect();
        if chars.is_empty() {
            return 0.0;
        }

        let mut char_freq = std::collections::HashMap::new();
        for &ch in &chars {
            let lowercase_ch = ch
                .to_lowercase()
                .next()
                .expect("to_lowercase() must yield at least one character for valid Unicode");
            *char_freq.entry(lowercase_ch).or_insert(0) += 1;
        }

        let total_chars = chars.len() as f32;
        char_freq
            .values()
            .map(|&freq| {
                let p = freq as f32 / total_chars;
                if p > 0.0 { -p * p.log2() } else { 0.0 }
            })
            .sum::<f32>()
            .min(5.0)
    }
}
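
// `calculate_char_entropy` computes the Shannon entropy of the lowercased
// character distribution, H = -sum(p_i * log2(p_i)), capped at 5.0 bits so one
// character-diverse sentence cannot dominate a score. "aaaaaaa" yields 0.0,
// and a string of n distinct characters yields log2(n).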

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_light_reduction() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Light,
            use_simd: false,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "Hello world!!! How are you???";
        let result = reducer.reduce(input);

        assert!(result.len() < input.len());
        assert!(!result.contains("  "));
    }

    #[test]
    fn test_moderate_reduction() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Moderate,
            use_simd: false,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
        let input = "The quick brown fox is jumping over the lazy dog";
        let result = reducer.reduce(input);

        assert!(result.len() < input.len());
        assert!(result.contains("quick"));
        assert!(result.contains("brown"));
        assert!(result.contains("fox"));
    }

    #[test]
    fn test_batch_processing() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Light,
            enable_parallel: false,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let inputs = vec!["Hello world!", "How are you?", "Fine, thanks!"];
        let results = reducer.batch_reduce(&inputs);

        assert_eq!(results.len(), inputs.len());
        for result in &results {
            assert!(!result.contains("  "));
        }
    }

    #[test]
    fn test_aggressive_reduction() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Aggressive,
            use_simd: false,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
        let result = reducer.reduce(input);

        assert!(result.len() < input.len());
        assert!(!result.is_empty());
    }

    #[test]
    fn test_maximum_reduction() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Maximum,
            use_simd: false,
            enable_semantic_clustering: true,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, Some("en")).unwrap();
        let input = "The quick brown fox is jumping over the lazy dog and running through the forest";
        let result = reducer.reduce(input);

        assert!(result.len() < input.len());
        assert!(!result.is_empty());
    }

    #[test]
    fn test_empty_text_handling() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Moderate,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        assert_eq!(reducer.reduce(""), "");
        let result = reducer.reduce(" ");
        assert!(result == " " || result.is_empty());
    }

    #[test]
    fn test_off_mode_preserves_text() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Off,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "Text  with  multiple  spaces!!!";
        assert_eq!(reducer.reduce(input), input);
    }

    #[test]
    fn test_parallel_batch_processing() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Light,
            enable_parallel: true,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let inputs = vec![
            "First  text  with  spaces",
            "Second  text  with  spaces",
            "Third  text  with  spaces",
        ];
        let results = reducer.batch_reduce(&inputs);

        assert_eq!(results.len(), inputs.len());
        for result in &results {
            assert!(!result.contains("  "));
        }
    }

    #[test]
    fn test_cjk_text_handling() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Moderate,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, Some("zh")).unwrap();
        let input = "这是中文文本测试";
        let result = reducer.reduce(input);

        assert!(!result.is_empty());
    }

    #[test]
    fn test_mixed_language_text() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Moderate,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "This is English text 这是中文 and some more English";
        let result = reducer.reduce(input);

        assert!(!result.is_empty());
        assert!(result.contains("English") || result.contains("中"));
    }

    #[test]
    fn test_punctuation_normalization() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Light,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "Text!!!!!! with????? excessive,,,,,, punctuation";
        let result = reducer.reduce(input);

        assert!(!result.contains("!!!!!!"));
        assert!(!result.contains("?????"));
        assert!(!result.contains(",,,,,,"));
    }

    #[test]
    fn test_sentence_selection() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Aggressive,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "First sentence here. Second sentence with more words. Third one. Fourth sentence is even longer than the others.";
        let result = reducer.reduce(input);

        assert!(result.len() < input.len());
        assert!(result.split(". ").count() < 4);
    }

    #[test]
    fn test_unicode_normalization_ascii() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Light,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "Pure ASCII text without special characters";
        let result = reducer.reduce(input);

        assert!(result.contains("ASCII"));
    }

    #[test]
    fn test_unicode_normalization_non_ascii() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Light,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "Café naïve résumé";
        let result = reducer.reduce(input);

        assert!(result.contains("Café") || result.contains("Cafe"));
    }

    #[test]
    fn test_single_text_vs_batch() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Moderate,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let text = "The quick brown fox jumps over the lazy dog";

        let single_result = reducer.reduce(text);
        let batch_results = reducer.batch_reduce(&[text]);

        assert_eq!(single_result, batch_results[0]);
    }

    #[test]
    fn test_important_word_preservation() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Aggressive,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "The IMPORTANT word COVID-19 and 12345 numbers should be preserved";
        let result = reducer.reduce(input);

        assert!(result.contains("IMPORTANT") || result.contains("COVID") || result.contains("12345"));
    }

    #[test]
    fn test_technical_terms_preservation() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Aggressive,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();
        let input = "The implementation uses PyTorch and TensorFlow frameworks";
        let result = reducer.reduce(input);

        assert!(result.contains("PyTorch") || result.contains("TensorFlow"));
    }

    #[test]
    fn test_calculate_char_entropy() {
        let config = TokenReductionConfig::default();
        let reducer = TokenReducer::new(&config, None).unwrap();

        let low_entropy = reducer.calculate_char_entropy("aaaaaaa");
        assert!(low_entropy < 1.0);

        let high_entropy = reducer.calculate_char_entropy("abcdefg123");
        assert!(high_entropy > low_entropy);
    }

    #[test]
    fn test_universal_tokenize_english() {
        let config = TokenReductionConfig::default();
        let reducer = TokenReducer::new(&config, None).unwrap();

        let tokens = reducer.universal_tokenize("hello world test");
        assert_eq!(tokens, vec!["hello", "world", "test"]);
    }

    #[test]
    fn test_universal_tokenize_cjk() {
        let config = TokenReductionConfig::default();
        let reducer = TokenReducer::new(&config, None).unwrap();

        let tokens = reducer.universal_tokenize("中文");
        assert!(!tokens.is_empty());
    }

    #[test]
    fn test_fallback_threshold() {
        let config = TokenReductionConfig {
            level: ReductionLevel::Aggressive,
            ..Default::default()
        };

        let reducer = TokenReducer::new(&config, None).unwrap();

        let input = "a the is of to in for on at by";
        let result = reducer.reduce(input);

        assert!(!result.is_empty());
    }
}